/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/CacheIRCompiler.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/FunctionTypeTraits.h"
#include "mozilla/MaybeOneOf.h"
#include "mozilla/ScopeExit.h"
#include <type_traits>
#include <utility>
#include "jslibmath.h"
#include "jsmath.h"
#include "builtin/DataViewObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/IonCacheIRCompiler.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "js/friend/XrayJitInfo.h" // js::jit::GetXrayJitInfo
#include "js/ScalarType.h" // js::Scalar::Type
#include "js/SweepingAPI.h"
#include "proxy/DOMProxy.h"
#include "proxy/Proxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BigIntType.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "vm/Uint8Clamped.h"
#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::Maybe;
using JS::ExpandoAndGeneration;
// Materialize operand |op| as a boxed Value in a register, allocating a
// ValueOperand if needed, and update the operand's tracked location so
// subsequent uses reuse the same register.
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
ValOperandId op) {
OperandLocation& loc = operandLocations_[op.id()];
switch (loc.kind()) {
case OperandLocation::ValueReg:
// Already boxed in a register; just mark it in use by this op.
currentOpRegs_.add(loc.valueReg());
return loc.valueReg();
case OperandLocation::ValueStack: {
// Boxed on the native stack: pop/load it into a fresh register.
// popValue also updates |loc| to ValueReg.
ValueOperand reg = allocateValueRegister(masm);
popValue(masm, &loc, reg);
return reg;
}
case OperandLocation::BaselineFrame: {
// Load the boxed Value from its Baseline frame slot.
ValueOperand reg = allocateValueRegister(masm);
Address addr = addressOf(masm, loc.baselineFrameSlot());
masm.loadValue(addr, reg);
loc.setValueReg(reg);
return reg;
}
case OperandLocation::Constant: {
ValueOperand reg = allocateValueRegister(masm);
masm.moveValue(loc.constant(), reg);
loc.setValueReg(reg);
return reg;
}
case OperandLocation::PayloadReg: {
// Temporarily add the payload register to currentOpRegs_ so
// allocateValueRegister will stay away from it.
currentOpRegs_.add(loc.payloadReg());
ValueOperand reg = allocateValueRegister(masm);
masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
// The payload register is no longer needed; release it back to the
// free set.
currentOpRegs_.take(loc.payloadReg());
availableRegs_.add(loc.payloadReg());
loc.setValueReg(reg);
return reg;
}
case OperandLocation::PayloadStack: {
// Pop/load the raw payload, then box it with its tracked type tag.
ValueOperand reg = allocateValueRegister(masm);
popPayload(masm, &loc, reg.scratchReg());
masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
loc.setValueReg(reg);
return reg;
}
case OperandLocation::DoubleReg: {
ValueOperand reg = allocateValueRegister(masm);
{
ScratchDoubleScope fpscratch(masm);
masm.boxDouble(loc.doubleReg(), reg, fpscratch);
}
loc.setValueReg(reg);
return reg;
}
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH();
}
// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val. Unlike useValueRegister, this does
// NOT update the operand's tracked location; |dest| is a plain destination.
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
NumberOperandId op,
FloatRegister dest)
const {
// If AutoScratchFloatRegister is active, we have to add sizeof(double) to
// any stack slot offsets below.
int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ?
sizeof(
double) : 0;
const OperandLocation& loc = operandLocations_[op.id()];
// Cases using masm.ensureDouble |break| to the shared failure check below;
// the other cases |return| directly because they cannot fail.
Label failure, done;
switch (loc.kind()) {
case OperandLocation::ValueReg: {
masm.ensureDouble(loc.valueReg(), dest, &failure);
break;
}
case OperandLocation::ValueStack: {
Address addr = valueAddress(masm, &loc);
addr.offset += stackOffset;
masm.ensureDouble(addr, dest, &failure);
break;
}
case OperandLocation::BaselineFrame: {
Address addr = addressOf(masm, loc.baselineFrameSlot());
addr.offset += stackOffset;
masm.ensureDouble(addr, dest, &failure);
break;
}
case OperandLocation::DoubleReg: {
masm.moveDouble(loc.doubleReg(), dest);
return;
}
case OperandLocation::Constant: {
MOZ_ASSERT(loc.constant().isNumber(),
"Caller must ensure the operand is a number value");
masm.loadConstantDouble(loc.constant().toNumber(), dest);
return;
}
case OperandLocation::PayloadReg: {
// Doubles can't be stored in payload registers, so this must be an int32.
MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
"Caller must ensure the operand is a number value");
masm.convertInt32ToDouble(loc.payloadReg(), dest);
return;
}
case OperandLocation::PayloadStack: {
// Doubles can't be stored in payload registers, so this must be an int32.
MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
"Caller must ensure the operand is a number value");
MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
Address addr = payloadAddress(masm, &loc);
addr.offset += stackOffset;
masm.convertInt32ToDouble(addr, dest);
return;
}
case OperandLocation::Uninitialized:
MOZ_CRASH(
"Unhandled operand type in ensureDoubleRegister");
return;
}
// The failure path is unreachable if the caller guarded isNumber, so it
// only emits an assertion in debug builds.
masm.jump(&done);
masm.bind(&failure);
masm.assumeUnreachable(
"Missing guard allowed non-number to hit ensureDoubleRegister");
masm.bind(&done);
}
// Copy (unboxing if necessary) the payload of typed operand |typedId| into
// |dest| without changing the operand's tracked location (const method).
void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
TypedOperandId typedId,
Register dest)
const {
// If AutoScratchFloatRegister is active, we have to add sizeof(double) to
// any stack slot offsets below.
int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ?
sizeof(
double) : 0;
const OperandLocation& loc = operandLocations_[typedId.id()];
switch (loc.kind()) {
case OperandLocation::ValueReg: {
masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
break;
}
case OperandLocation::ValueStack: {
Address addr = valueAddress(masm, &loc);
addr.offset += stackOffset;
masm.unboxNonDouble(addr, dest, typedId.type());
break;
}
case OperandLocation::BaselineFrame: {
Address addr = addressOf(masm, loc.baselineFrameSlot());
addr.offset += stackOffset;
masm.unboxNonDouble(addr, dest, typedId.type());
break;
}
case OperandLocation::PayloadReg: {
// Already unboxed: a plain register-to-register move suffices.
MOZ_ASSERT(loc.payloadType() == typedId.type());
masm.mov(loc.payloadReg(), dest);
return;
}
case OperandLocation::PayloadStack: {
// Already unboxed on the stack: load the raw word.
MOZ_ASSERT(loc.payloadType() == typedId.type());
MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
Address addr = payloadAddress(masm, &loc);
addr.offset += stackOffset;
masm.loadPtr(addr, dest);
return;
}
case OperandLocation::DoubleReg:
case OperandLocation::Constant:
case OperandLocation::Uninitialized:
MOZ_CRASH(
"Unhandled operand location");
}
}
// Copy operand |valId| as a boxed Value into |dest| without changing the
// operand's tracked location (const method). |dest| must be a scratch
// ValueOperand owned by the caller.
void CacheRegisterAllocator::copyToScratchValueRegister(
MacroAssembler& masm, ValOperandId valId, ValueOperand dest)
const {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
const OperandLocation& loc = operandLocations_[valId.id()];
switch (loc.kind()) {
case OperandLocation::ValueReg:
masm.moveValue(loc.valueReg(), dest);
break;
case OperandLocation::ValueStack: {
Address addr = valueAddress(masm, &loc);
masm.loadValue(addr, dest);
break;
}
case OperandLocation::BaselineFrame: {
Address addr = addressOf(masm, loc.baselineFrameSlot());
masm.loadValue(addr, dest);
break;
}
case OperandLocation::Constant:
masm.moveValue(loc.constant(), dest);
break;
case OperandLocation::PayloadReg:
// Box the raw payload with its tracked type tag.
masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
break;
case OperandLocation::PayloadStack: {
// Load the raw payload via dest's scratch register, then box it.
Address addr = payloadAddress(masm, &loc);
masm.loadPtr(addr, dest.scratchReg());
masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
break;
}
case OperandLocation::DoubleReg: {
ScratchDoubleScope fpscratch(masm);
masm.boxDouble(loc.doubleReg(), dest, fpscratch);
break;
}
case OperandLocation::Uninitialized:
MOZ_CRASH();
}
}
// Materialize typed operand |typedId| as an unboxed payload in a register,
// updating the operand's tracked location so later uses reuse the register.
Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
TypedOperandId typedId) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
OperandLocation& loc = operandLocations_[typedId.id()];
switch (loc.kind()) {
case OperandLocation::PayloadReg:
currentOpRegs_.add(loc.payloadReg());
return loc.payloadReg();
case OperandLocation::ValueReg: {
// It's possible the value is still boxed: as an optimization, we unbox
// the first time we use a value as object.
// Unbox in place: free the whole ValueOperand, then re-take just its
// scratch register to hold the payload.
ValueOperand val = loc.valueReg();
availableRegs_.add(val);
Register reg = val.scratchReg();
availableRegs_.take(reg);
masm.unboxNonDouble(val, reg, typedId.type());
loc.setPayloadReg(reg, typedId.type());
currentOpRegs_.add(reg);
return reg;
}
case OperandLocation::PayloadStack: {
// popPayload also updates |loc| to PayloadReg.
Register reg = allocateRegister(masm);
popPayload(masm, &loc, reg);
return reg;
}
case OperandLocation::ValueStack: {
// The value is on the stack, but boxed. If it's on top of the stack we
// unbox it and then remove it from the stack, else we just unbox.
Register reg = allocateRegister(masm);
if (loc.valueStack() == stackPushed_) {
masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
typedId.type());
masm.addToStackPtr(Imm32(
sizeof(js::Value)));
MOZ_ASSERT(stackPushed_ >=
sizeof(js::Value));
stackPushed_ -=
sizeof(js::Value);
}
else {
MOZ_ASSERT(loc.valueStack() < stackPushed_);
masm.unboxNonDouble(
Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
reg, typedId.type());
}
loc.setPayloadReg(reg, typedId.type());
return reg;
}
case OperandLocation::BaselineFrame: {
Register reg = allocateRegister(masm);
Address addr = addressOf(masm, loc.baselineFrameSlot());
masm.unboxNonDouble(addr, reg, typedId.type());
loc.setPayloadReg(reg, typedId.type());
return reg;
};
case OperandLocation::Constant: {
// Load the constant's payload into a register. Only GC-pointer and
// boolean constants are expected here.
Value v = loc.constant();
Register reg = allocateRegister(masm);
if (v.isString()) {
masm.movePtr(ImmGCPtr(v.toString()), reg);
}
else if (v.isSymbol()) {
masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
}
else if (v.isBigInt()) {
masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
}
else if (v.isBoolean()) {
masm.movePtr(ImmWord(v.toBoolean() ? 1 : 0), reg);
}
else {
MOZ_CRASH(
"Unexpected Value");
}
loc.setPayloadReg(reg, v.extractNonDoubleType());
return reg;
}
case OperandLocation::DoubleReg:
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH();
}
// Return operand |val| either as its constant Value (no code emitted) or as
// a typed/boxed register, materializing it via useRegister/useValueRegister.
ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
MacroAssembler& masm, ValOperandId val) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
OperandLocation& loc = operandLocations_[val.id()];
switch (loc.kind()) {
case OperandLocation::Constant:
return loc.constant();
case OperandLocation::PayloadReg:
case OperandLocation::PayloadStack: {
// Already unboxed (or spilled unboxed): hand out a typed register.
JSValueType payloadType = loc.payloadType();
Register reg = useRegister(masm, TypedOperandId(val, payloadType));
return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
AnyRegister(reg));
}
case OperandLocation::ValueReg:
case OperandLocation::ValueStack:
case OperandLocation::BaselineFrame:
return TypedOrValueRegister(useValueRegister(masm, val));
case OperandLocation::DoubleReg:
return TypedOrValueRegister(MIRType::
Double,
AnyRegister(loc.doubleReg()));
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH();
}
// Allocate a fresh payload register for a not-yet-defined typed operand and
// record it as the operand's location.
Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
                                                TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // We are defining this operand, so it must not have a location yet.
  OperandLocation& location = operandLocations_[typedId.id()];
  MOZ_ASSERT(location.kind() == OperandLocation::Uninitialized);

  Register allocated = allocateRegister(masm);
  location.setPayloadReg(allocated, typedId.type());
  return allocated;
}
// Allocate a fresh ValueOperand for a not-yet-defined value operand and
// record it as the operand's location.
ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
                                                         ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // We are defining this operand, so it must not have a location yet.
  OperandLocation& location = operandLocations_[val.id()];
  MOZ_ASSERT(location.kind() == OperandLocation::Uninitialized);

  ValueOperand allocated = allocateValueRegister(masm);
  location.setValueReg(allocated);
  return allocated;
}
// Release the registers/stack slots of operands that are dead at the current
// instruction, making them available for reallocation.
void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
// See if any operands are dead so we can reuse their registers. Note that
// we skip the input operands, as those are also used by failure paths, and
// we currently don't track those uses.
for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
i++) {
if (!writer_.operandIsDead(i, currentInstruction_)) {
continue;
}
OperandLocation& loc = operandLocations_[i];
switch (loc.kind()) {
case OperandLocation::PayloadReg:
availableRegs_.add(loc.payloadReg());
break;
case OperandLocation::ValueReg:
availableRegs_.add(loc.valueReg());
break;
case OperandLocation::PayloadStack:
// Stack slots are recycled via the free-slot lists; appending can
// OOM, which is propagated through the masm.
masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
break;
case OperandLocation::ValueStack:
masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
break;
case OperandLocation::Uninitialized:
case OperandLocation::BaselineFrame:
case OperandLocation::Constant:
case OperandLocation::DoubleReg:
// Nothing to free for these location kinds.
break;
}
loc.setUninitialized();
}
}
// Drop everything this allocator pushed on the native stack.
void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
  // May only be called once the operands are no longer needed: the stacked
  // data is discarded wholesale. Poison every operand location so any stale
  // use trips an assertion instead of reading garbage.
  for (OperandLocation& location : operandLocations_) {
    location.setUninitialized();
  }

  if (stackPushed_ > 0) {
    masm.addToStackPtr(Imm32(stackPushed_));
    stackPushed_ = 0;
  }

  freePayloadSlots_.clear();
  freeValueSlots_.clear();
}
// Allocate a general-purpose register, in order of preference: reuse a free
// register, free dead operands, spill an unused operand to the stack, or
// push a register from availableRegsAfterSpill_.
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
if (availableRegs_.empty()) {
freeDeadOperandLocations(masm);
}
if (availableRegs_.empty()) {
// Still no registers available, try to spill unused operands to
// the stack.
for (size_t i = 0; i < operandLocations_.length(); i++) {
OperandLocation& loc = operandLocations_[i];
if (loc.kind() == OperandLocation::PayloadReg) {
Register reg = loc.payloadReg();
if (currentOpRegs_.has(reg)) {
// In use by the current instruction; can't spill.
continue;
}
spillOperandToStack(masm, &loc);
availableRegs_.add(reg);
break;
// We got a register, so break out of the loop.
}
if (loc.kind() == OperandLocation::ValueReg) {
ValueOperand reg = loc.valueReg();
if (currentOpRegs_.aliases(reg)) {
continue;
}
spillOperandToStack(masm, &loc);
availableRegs_.add(reg);
break;
// Break out of the loop.
}
}
}
if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
// Last resort: push a non-operand register's contents so we can use it,
// remembering it in spilledRegs_ so restoreInputState can restore it.
Register reg = availableRegsAfterSpill_.takeAny();
masm.push(reg);
stackPushed_ +=
sizeof(uintptr_t);
masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
availableRegs_.add(reg);
}
// At this point, there must be a free register.
MOZ_RELEASE_ASSERT(!availableRegs_.empty());
Register reg = availableRegs_.takeAny();
currentOpRegs_.add(reg);
return reg;
}
// Claim the specific register |reg| for the current instruction, evicting
// whatever currently occupies it (free set, spillable set, or an operand).
void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
Register reg) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
// Fixed registers should be allocated first, to ensure they're
// still available.
MOZ_ASSERT(!currentOpRegs_.has(reg),
"Register is in use");
freeDeadOperandLocations(masm);
if (availableRegs_.has(reg)) {
availableRegs_.take(reg);
currentOpRegs_.add(reg);
return;
}
// Register may be available only after spilling contents.
if (availableRegsAfterSpill_.has(reg)) {
availableRegsAfterSpill_.take(reg);
masm.push(reg);
stackPushed_ +=
sizeof(uintptr_t);
// Remember the spill so restoreInputState can restore the register.
masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
currentOpRegs_.add(reg);
return;
}
// The register must be used by some operand. Spill it to the stack.
for (size_t i = 0; i < operandLocations_.length(); i++) {
OperandLocation& loc = operandLocations_[i];
if (loc.kind() == OperandLocation::PayloadReg) {
if (loc.payloadReg() != reg) {
continue;
}
spillOperandToStackOrRegister(masm, &loc);
currentOpRegs_.add(reg);
return;
}
if (loc.kind() == OperandLocation::ValueReg) {
if (!loc.valueReg().aliases(reg)) {
continue;
}
// Moving the Value frees both of its registers (on nunbox platforms);
// re-take only the one we actually want.
ValueOperand valueReg = loc.valueReg();
spillOperandToStackOrRegister(masm, &loc);
availableRegs_.add(valueReg);
availableRegs_.take(reg);
currentOpRegs_.add(reg);
return;
}
}
MOZ_CRASH(
"Invalid register");
}
// Claim a specific ValueOperand for the current instruction.
void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  // A Value occupies two GPRs here; claim each half separately.
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  // A single register holds the whole boxed Value.
  allocateFixedRegister(masm, reg.valueReg());
#endif
}
#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
// Allocate enough general-purpose registers to hold one boxed Value.
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  // Two registers are needed on nunbox platforms.
  Register first = allocateRegister(masm);
  Register second = allocateRegister(masm);
  return ValueOperand(first, second);
#else
  // One register suffices on punbox platforms.
  return ValueOperand(allocateRegister(masm));
#endif
}
// Reserve tracking slots for the IC inputs and for every operand id the
// writer created. Returns false on OOM.
bool CacheRegisterAllocator::init() {
  return origInputLocations_.resize(writer_.numInputOperands()) &&
         operandLocations_.resize(writer_.numOperandIds());
}
// Compute the set of registers that can be freed up by spilling: everything
// that is neither already free nor holding an IC input operand.
void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
  // Registers not in availableRegs_ and not used by input operands are
  // available after being spilled.
  availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
      GeneralRegisterSet::Not(availableRegs_.set()),
      GeneralRegisterSet::Not(inputRegisterSet()));
}
// Ensure no two IC input operands share register storage, spilling one of
// each aliasing pair to the stack.
void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
// If IC inputs alias each other, make sure they are stored in different
// locations so we don't have to deal with this complexity in the rest of
// the allocator.
//
// Note that this can happen in IonMonkey with something like |o.foo = o|
// or |o[i] = i|.
size_t numInputs = writer_.numInputOperands();
MOZ_ASSERT(origInputLocations_.length() == numInputs);
// Compare each input against all earlier inputs.
for (size_t i = 1; i < numInputs; i++) {
OperandLocation& loc1 = operandLocations_[i];
if (!loc1.isInRegister()) {
continue;
}
for (size_t j = 0; j < i; j++) {
OperandLocation& loc2 = operandLocations_[j];
if (!loc1.aliasesReg(loc2)) {
continue;
}
// loc1 and loc2 alias so we spill one of them. If one is a
// ValueReg and the other is a PayloadReg, we have to spill the
// PayloadReg: spilling the ValueReg instead would leave its type
// register unallocated on 32-bit platforms.
if (loc1.kind() == OperandLocation::ValueReg) {
spillOperandToStack(masm, &loc2);
}
else {
MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
spillOperandToStack(masm, &loc1);
break;
// Spilled loc1, so nothing else will alias it.
}
}
}
#ifdef DEBUG
assertValidState();
#endif
}
// Collect every general-purpose register currently holding an IC input
// operand.
GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet registers;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        registers.addUnchecked(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        registers.addUnchecked(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        // Not held in a general-purpose register.
        break;
      case OperandLocation::Uninitialized:
        MOZ_CRASH("Invalid kind");
    }
  }
  return registers.set();
}
// Report the statically-known JSValueType of |val|, or JSVAL_TYPE_UNKNOWN if
// the operand is still a boxed Value whose type hasn't been guarded.
JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
  const OperandLocation& loc = operandLocations_[val.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      // Still boxed; the type is not tracked.
      return JSVAL_TYPE_UNKNOWN;

    case OperandLocation::PayloadStack:
    case OperandLocation::PayloadReg:
      return loc.payloadType();

    case OperandLocation::Constant:
      return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
                                       : loc.constant().extractNonDoubleType();

    case OperandLocation::DoubleReg:
      return JSVAL_TYPE_DOUBLE;

    case OperandLocation::Uninitialized:
      break;
  }
  MOZ_CRASH("Invalid kind");
}
// Record the starting location of input |i| from a TypedOrValueRegister,
// dispatching on whether it is a boxed Value, a float register (always a
// Double here), or a typed GPR payload.
void CacheRegisterAllocator::initInputLocation(
    size_t i, const TypedOrValueRegister& reg) {
  if (reg.hasValue()) {
    initInputLocation(i, reg.valueReg());
    return;
  }
  if (reg.typedReg().isFloat()) {
    MOZ_ASSERT(reg.type() == MIRType::Double);
    initInputLocation(i, reg.typedReg().fpu());
    return;
  }
  initInputLocation(i, reg.typedReg().gpr(), ValueTypeFromMIRType(reg.type()));
}
// Record the starting location of input |i| from a ConstantOrRegister by
// forwarding to the constant-Value or register overload.
void CacheRegisterAllocator::initInputLocation(
    size_t i, const ConstantOrRegister& value) {
  value.constant() ? initInputLocation(i, value.value())
                   : initInputLocation(i, value.reg());
}
// Move a register-resident operand (ValueReg or PayloadReg) onto the native
// stack, reusing a recycled slot when possible, else pushing a new one.
void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
OperandLocation* loc) {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
if (loc->kind() == OperandLocation::ValueReg) {
if (!freeValueSlots_.empty()) {
// Reuse a previously-freed Value slot instead of growing the stack.
uint32_t stackPos = freeValueSlots_.popCopy();
MOZ_ASSERT(stackPos <= stackPushed_);
masm.storeValue(loc->valueReg(),
Address(masm.getStackPointer(), stackPushed_ - stackPos));
loc->setValueStack(stackPos);
return;
}
// No free slot: push a new Value on top of the stack.
stackPushed_ +=
sizeof(js::Value);
masm.pushValue(loc->valueReg());
loc->setValueStack(stackPushed_);
return;
}
MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
if (!freePayloadSlots_.empty()) {
// Reuse a previously-freed payload slot.
uint32_t stackPos = freePayloadSlots_.popCopy();
MOZ_ASSERT(stackPos <= stackPushed_);
masm.storePtr(loc->payloadReg(),
Address(masm.getStackPointer(), stackPushed_ - stackPos));
loc->setPayloadStack(stackPos, loc->payloadType());
return;
}
// No free slot: push a new word on top of the stack.
stackPushed_ +=
sizeof(uintptr_t);
masm.push(loc->payloadReg());
loc->setPayloadStack(stackPushed_, loc->payloadType());
}
// Move a register-resident operand elsewhere: into other free registers when
// enough are available, otherwise onto the native stack.
void CacheRegisterAllocator::spillOperandToStackOrRegister(
MacroAssembler& masm, OperandLocation* loc) {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
// If enough registers are available, use them.
if (loc->kind() == OperandLocation::ValueReg) {
// A boxed Value needs one register on punbox and two on nunbox.
static const size_t BoxPieces =
sizeof(Value) /
sizeof(uintptr_t);
if (availableRegs_.set().size() >= BoxPieces) {
ValueOperand reg = availableRegs_.takeAnyValue();
masm.moveValue(loc->valueReg(), reg);
loc->setValueReg(reg);
return;
}
}
else {
MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
if (!availableRegs_.empty()) {
Register reg = availableRegs_.takeAny();
masm.movePtr(loc->payloadReg(), reg);
loc->setPayloadReg(reg, loc->payloadType());
return;
}
}
// Not enough registers available, spill to the stack.
spillOperandToStack(masm, loc);
}
// Move a stack-resident payload into |dest|, popping it when it is the top
// of stack and recycling its slot otherwise. Updates |loc| to PayloadReg.
void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
OperandLocation* loc,
Register dest) {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
MOZ_ASSERT(stackPushed_ >=
sizeof(uintptr_t));
// The payload is on the stack. If it's on top of the stack we can just
// pop it, else we emit a load.
if (loc->payloadStack() == stackPushed_) {
masm.pop(dest);
stackPushed_ -=
sizeof(uintptr_t);
}
else {
MOZ_ASSERT(loc->payloadStack() < stackPushed_);
masm.loadPtr(payloadAddress(masm, loc), dest);
// The slot can be reused by later spills.
masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
}
loc->setPayloadReg(dest, loc->payloadType());
}
// Compute the stack address of a spilled boxed Value. Slots are recorded as
// the stackPushed_ value at push time, so the current offset from the stack
// pointer is the difference.
Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
                                             const OperandLocation* loc)
    const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
}
// Compute the stack address of a spilled payload word; see valueAddress for
// how slot positions are encoded.
Address CacheRegisterAllocator::payloadAddress(MacroAssembler& masm,
                                               const OperandLocation* loc)
    const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
}
// Move a stack-resident boxed Value into |dest|, popping it when it is the
// top of stack and recycling its slot otherwise. Updates |loc| to ValueReg.
void CacheRegisterAllocator::popValue(MacroAssembler& masm,
OperandLocation* loc, ValueOperand dest) {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
MOZ_ASSERT(stackPushed_ >=
sizeof(js::Value));
// The Value is on the stack. If it's on top of the stack we can just
// pop it, else we emit a load.
if (loc->valueStack() == stackPushed_) {
masm.popValue(dest);
stackPushed_ -=
sizeof(js::Value);
}
else {
MOZ_ASSERT(loc->valueStack() < stackPushed_);
masm.loadValue(
Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
dest);
// The slot can be reused by later spills.
masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
}
loc->setValueReg(dest);
}
#ifdef DEBUG
// Debug-only invariant check: no two live operands may share register
// storage. The spilling code depends on this.
void CacheRegisterAllocator::assertValidState() const {
  if (!JitOptions.fullDebugChecks) {
    return;
  }

  // Check every pair of initialized locations for aliasing.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    const auto& first = operandLocations_[i];
    if (first.isUninitialized()) {
      continue;
    }
    for (size_t j = 0; j < i; j++) {
      const auto& second = operandLocations_[j];
      if (!second.isUninitialized()) {
        MOZ_ASSERT(!first.aliasesReg(second));
      }
    }
  }
}
#endif
// Does |other|'s storage overlap any register this location uses? Only
// register-backed locations can alias; stack slots, frame slots, constants
// and FP registers never share a GPR with us.
bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  if (other.kind_ == PayloadReg) {
    return aliasesReg(other.payloadReg());
  }
  if (other.kind_ == ValueReg) {
    return aliasesReg(other.valueReg());
  }
  if (other.kind_ == Uninitialized) {
    MOZ_CRASH("Invalid kind");
  }
  return false;
}
// Move every IC input operand back to its original location (and restore any
// registers pushed by allocateRegister/allocateFixedRegister), so the IC's
// failure/exit paths see the state the stub was entered with.
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
bool shouldDiscardStack) {
size_t numInputOperands = origInputLocations_.length();
MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);
for (size_t j = 0; j < numInputOperands; j++) {
const OperandLocation& dest = origInputLocations_[j];
OperandLocation& cur = operandLocations_[j];
if (dest == cur) {
continue;
}
// Whatever path we take below, record that this operand is back at its
// original location.
auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });
// We have a cycle if a destination register will be used later
// as source register. If that happens, just push the current value
// on the stack and later get it from there.
for (size_t k = j + 1; k < numInputOperands; k++) {
OperandLocation& laterSource = operandLocations_[k];
if (dest.aliasesReg(laterSource)) {
spillOperandToStack(masm, &laterSource);
}
}
if (dest.kind() == OperandLocation::ValueReg) {
// We have to restore a Value register.
switch (cur.kind()) {
case OperandLocation::ValueReg:
masm.moveValue(cur.valueReg(), dest.valueReg());
continue;
case OperandLocation::PayloadReg:
masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
continue;
case OperandLocation::PayloadStack: {
Register scratch = dest.valueReg().scratchReg();
popPayload(masm, &cur, scratch);
masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
continue;
}
case OperandLocation::ValueStack:
popValue(masm, &cur, dest.valueReg());
continue;
case OperandLocation::DoubleReg:
masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
continue;
case OperandLocation::Constant:
case OperandLocation::BaselineFrame:
case OperandLocation::Uninitialized:
// Invalid source for a ValueReg destination; falls through to the
// crash below.
break;
}
}
else if (dest.kind() == OperandLocation::PayloadReg) {
// We have to restore a payload register.
switch (cur.kind()) {
case OperandLocation::ValueReg:
MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
dest.payloadType());
continue;
case OperandLocation::PayloadReg:
MOZ_ASSERT(cur.payloadType() == dest.payloadType());
masm.mov(cur.payloadReg(), dest.payloadReg());
continue;
case OperandLocation::PayloadStack: {
MOZ_ASSERT(cur.payloadType() == dest.payloadType());
popPayload(masm, &cur, dest.payloadReg());
continue;
}
case OperandLocation::ValueStack:
MOZ_ASSERT(stackPushed_ >=
sizeof(js::Value));
MOZ_ASSERT(cur.valueStack() <= stackPushed_);
MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
masm.unboxNonDouble(
Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
dest.payloadReg(), dest.payloadType());
continue;
case OperandLocation::Constant:
case OperandLocation::BaselineFrame:
case OperandLocation::DoubleReg:
case OperandLocation::Uninitialized:
// Invalid source for a PayloadReg destination.
break;
}
}
else if (dest.kind() == OperandLocation::Constant ||
dest.kind() == OperandLocation::BaselineFrame ||
dest.kind() == OperandLocation::DoubleReg) {
// Nothing to do.
continue;
}
MOZ_CRASH(
"Invalid kind");
}
// Restore any registers that allocateRegister pushed to free them up.
for (
const SpilledRegister& spill : spilledRegs_) {
MOZ_ASSERT(stackPushed_ >=
sizeof(uintptr_t));
if (spill.stackPushed == stackPushed_) {
masm.pop(spill.reg);
stackPushed_ -=
sizeof(uintptr_t);
}
else {
MOZ_ASSERT(spill.stackPushed < stackPushed_);
masm.loadPtr(
Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
spill.reg);
}
}
if (shouldDiscardStack) {
discardStack(masm);
}
}
// Total size in bytes of this stub's data area: the sum of all field sizes
// up to the Limit sentinel.
size_t CacheIRStubInfo::stubDataSize() const {
  size_t bytes = 0;
  for (size_t field = 0;; field++) {
    StubField::Type type = fieldType(field);
    if (type == StubField::Type::Limit) {
      return bytes;
    }
    bytes += StubField::sizeInBytes(type);
  }
}
// View a raw stub-data slot as a GCPtr<T> so it can be initialized with the
// proper GC barriers.
template <typename T>
static GCPtr<T>* AsGCPtr(void* ptr) {
  return static_cast<GCPtr<T>*>(ptr);
}
// Overwrite a raw word-sized stub field at |offset|, asserting that the slot
// is word-aligned and still holds the value we expect to replace.
void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
                                         uintptr_t oldWord,
                                         uintptr_t newWord) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
  uintptr_t* slot = reinterpret_cast<uintptr_t*>(stubData + offset);
  MOZ_ASSERT(*slot == oldWord);
  *slot = newWord;
}
// Overwrite a raw 64-bit stub field at |offset|, asserting that the slot is
// 8-byte aligned and still holds the bits we expect to replace.
void CacheIRStubInfo::replaceStubRawValueBits(uint8_t* stubData,
                                              uint32_t offset, uint64_t oldBits,
                                              uint64_t newBits) const {
  MOZ_ASSERT(uint64_t(stubData + offset) % sizeof(uint64_t) == 0);
  uint64_t* slot = reinterpret_cast<uint64_t*>(stubData + offset);
  MOZ_ASSERT(*slot == oldBits);
  *slot = newBits;
}
// Return a reference to the wrapped stub field of the given |type| at
// |offset| within |stub|'s data area (which starts at stubDataOffset_).
template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
    Stub* stub, uint32_t offset) const {
  uint8_t* stubData = reinterpret_cast<uint8_t*>(stub) + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  return *reinterpret_cast<WrappedType*>(stubData + offset);
}
// Explicit instantiations of getStubField for ICCacheIRStub.
//
// NOTE: the |const| qualifier and trailing |;| must be part of the macro
// body (hence the line continuations). Without the continuation before
// |const;| the macro expansion ends at the closing parenthesis, dropping the
// const qualifier from every instantiation and leaving a stray |const;|
// statement that does not compile.
#define INSTANTIATE_GET_STUB_FIELD(Type)                                   \
  template typename MapStubFieldToType<Type>::WrappedType&                 \
  CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
                                                     uint32_t offset) const;
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakGetterSetter)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::String)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id)
#undef INSTANTIATE_GET_STUB_FIELD
template <
class Stub,
class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset)
const {
uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
MOZ_ASSERT(uintptr_t(stubData + offset) %
sizeof(uintptr_t) == 0);
return *
reinterpret_cast<T**>(stubData + offset);
}
template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
uint32_t offset)
const;
// Placement-new a wrapped (barriered) stub field at |ptr| from the raw bits
// in |val|.
template <StubField::Type type, typename V>
static void InitWrappedPtr(void* ptr, V val) {
  using RawType = typename MapStubFieldToType<type>::RawType;
  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  auto* wrapped = static_cast<WrappedType*>(ptr);
  new (wrapped) WrappedType(mozilla::BitwiseCast<RawType>(val));
}
// Initialize a word-sized stub field of the given |type| at |dest| from the
// raw bits in |value|, using the proper wrapper type so GC-thing fields get
// their barriers set up.
static void InitWordStubField(StubField::Type type, void* dest,
                              uintptr_t value) {
  MOZ_ASSERT(StubField::sizeIsWord(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uintptr_t)) == 0,
             "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
      // Plain bits: store directly.
      *static_cast<uintptr_t*>(dest) = value;
      break;
    case StubField::Type::Shape:
      InitWrappedPtr<StubField::Type::Shape>(dest, value);
      break;
    case StubField::Type::WeakShape:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakShape>(dest, value);
      break;
    case StubField::Type::WeakGetterSetter:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakGetterSetter>(dest, value);
      break;
    case StubField::Type::JSObject:
      InitWrappedPtr<StubField::Type::JSObject>(dest, value);
      break;
    case StubField::Type::WeakObject:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakObject>(dest, value);
      break;
    case StubField::Type::Symbol:
      InitWrappedPtr<StubField::Type::Symbol>(dest, value);
      break;
    case StubField::Type::String:
      InitWrappedPtr<StubField::Type::String>(dest, value);
      break;
    case StubField::Type::WeakBaseScript:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakBaseScript>(dest, value);
      break;
    case StubField::Type::JitCode:
      InitWrappedPtr<StubField::Type::JitCode>(dest, value);
      break;
    case StubField::Type::Id:
      AsGCPtr<jsid>(dest)->init(jsid::fromRawBits(value));
      break;
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
    case StubField::Type::Value:
    case StubField::Type::Limit:
      // These are not word-sized.
      MOZ_CRASH("Invalid type");
  }
}
// Initialize a 64-bit stub field at |dest| from its raw bits. Value fields
// are initialized through their GC wrapper; raw fields are stored directly.
// |dest| must be 8-byte aligned.
static void InitInt64StubField(StubField::Type type,
void* dest,
uint64_t value) {
MOZ_ASSERT(StubField::sizeIsInt64(type));
MOZ_ASSERT((uintptr_t(dest) %
sizeof(uint64_t)) == 0,
"Unaligned stub field");
switch (type) {
case StubField::Type::RawInt64:
case StubField::Type::
Double:
// Plain bits: store the 64-bit word directly.
*
static_cast<uint64_t*>(dest) = value;
break;
case StubField::Type::Value:
// Initialize the GC wrapper in place from the boxed Value's raw bits.
AsGCPtr<Value>(dest)->init(Value::fromRawBits(value));
break;
case StubField::Type::RawInt32:
case StubField::Type::RawPointer:
case StubField::Type::AllocSite:
case StubField::Type::Shape:
case StubField::Type::WeakShape:
case StubField::Type::WeakGetterSetter:
case StubField::Type::JSObject:
case StubField::Type::WeakObject:
case StubField::Type::Symbol:
case StubField::Type::String:
case StubField::Type::WeakBaseScript:
case StubField::Type::JitCode:
case StubField::Type::Id:
case StubField::Type::Limit:
// Word-sized fields belong in InitWordStubField; Limit is a sentinel.
MOZ_CRASH(
"Invalid type");
}
}
// Write the collected stub fields into freshly allocated stub data at
// |dest|. Fields are laid out back to back in collection order; word-sized
// and 64-bit fields advance the cursor by their respective sizes.
void CacheIRWriter::copyStubData(uint8_t* dest) const {
  MOZ_ASSERT(!failed());
  uint8_t* cursor = dest;
  for (const StubField& field : stubFields_) {
    if (!field.sizeIsWord()) {
      InitInt64StubField(field.type(), cursor, field.asInt64());
      cursor += sizeof(uint64_t);
    } else {
      InitWordStubField(field.type(), cursor, field.asWord());
      cursor += sizeof(uintptr_t);
    }
  }
}
// Clone this baseline IC stub into |newSpace|. The stub data is copied
// field by field so that GC wrapper fields are re-initialized properly
// rather than bitwise-copied.
ICCacheIRStub* ICCacheIRStub::clone(JSRuntime* rt, ICStubSpace& newSpace) {
const CacheIRStubInfo* info = stubInfo();
MOZ_ASSERT(info->makesGCCalls());
size_t bytesNeeded = info->stubDataOffset() + info->stubDataSize();
// Cloning is treated as infallible: crash rather than return null on OOM.
AutoEnterOOMUnsafeRegion oomUnsafe;
void* newStubMem = newSpace.alloc(bytesNeeded);
if (!newStubMem) {
oomUnsafe.crash(
"ICCacheIRStub::clone");
}
// Copy the stub header via the copy constructor; stub data is fixed up
// below.
ICCacheIRStub* newStub =
new (newStubMem) ICCacheIRStub(*
this);
const uint8_t* src = this->stubDataStart();
uint8_t* dest = newStub->stubDataStart();
// Because this can be called during sweeping when discarding JIT code, we
// have to lock the store buffer.
gc::AutoLockStoreBuffer lock(rt);
// Walk the field types up to the Limit sentinel, copying each field and
// advancing both cursors by that field's size.
uint32_t field = 0;
while (
true) {
StubField::Type type = info->fieldType(field);
if (type == StubField::Type::Limit) {
break;
// Done.
}
if (StubField::sizeIsWord(type)) {
const uintptr_t* srcField =
reinterpret_cast<
const uintptr_t*>(src);
InitWordStubField(type, dest, *srcField);
src +=
sizeof(uintptr_t);
dest +=
sizeof(uintptr_t);
}
else {
const uint64_t* srcField =
reinterpret_cast<
const uint64_t*>(src);
InitInt64StubField(type, dest, *srcField);
src +=
sizeof(uint64_t);
dest +=
sizeof(uint64_t);
}
field++;
}
return newStub;
}
// Decide whether the weak stub fields of a stub of type T should be traced
// for this tracer. Baseline stubs only trace them when the tracer requests
// weak edges; Ion IC stubs always trace them (strongly).
template <typename T>
static inline bool ShouldTraceWeakEdgeInStub(JSTracer* trc) {
  if constexpr (std::is_same_v<T, ICCacheIRStub>) {
    return trc->traceWeakEdges();
  } else {
    // 'Weak' edges are traced strongly in IonICs.
    static_assert(std::is_same_v<T, IonICStub>);
    return true;
  }
}
// Trace all GC-thing stub fields of |stub|, walking the field-type list up
// to the Limit sentinel. Weak fields are only traced when
// ShouldTraceWeakEdgeInStub says so (always, for Ion ICs).
template <
typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
const CacheIRStubInfo* stubInfo) {
using Type = StubField::Type;
uint32_t field = 0;
size_t offset = 0;
while (
true) {
Type fieldType = stubInfo->fieldType(field);
switch (fieldType) {
case Type::RawInt32:
case Type::RawPointer:
case Type::RawInt64:
case Type::
Double:
// Non-GC data: nothing to trace.
break;
case Type::Shape: {
// For CCW IC stubs, we can store same-zone but cross-compartment
// shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
// GC. Note: CacheIRWriter::writeShapeField asserts we never store
// cross-zone shapes.
GCPtr<Shape*>& shapeField =
stubInfo->getStubField<T, Type::Shape>(stub, offset);
TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
"cacheir-shape");
break;
}
case Type::WeakShape:
if (ShouldTraceWeakEdgeInStub<T>(trc)) {
WeakHeapPtr<Shape*>& shapeField =
stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
if (shapeField) {
TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
"cacheir-weak-shape");
}
}
break;
case Type::WeakGetterSetter:
if (ShouldTraceWeakEdgeInStub<T>(trc)) {
TraceNullableEdge(
trc,
&stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset),
"cacheir-weak-getter-setter");
}
break;
case Type::JSObject: {
TraceEdge(trc, &stubInfo->getStubField<T, Type::JSObject>(stub, offset),
"cacheir-object");
break;
}
case Type::WeakObject:
if (ShouldTraceWeakEdgeInStub<T>(trc)) {
TraceNullableEdge(
trc, &stubInfo->getStubField<T, Type::WeakObject>(stub, offset),
"cacheir-weak-object");
}
break;
case Type::Symbol:
TraceEdge(trc, &stubInfo->getStubField<T, Type::Symbol>(stub, offset),
"cacheir-symbol");
break;
case Type::String:
TraceEdge(trc, &stubInfo->getStubField<T, Type::String>(stub, offset),
"cacheir-string");
break;
case Type::WeakBaseScript:
if (ShouldTraceWeakEdgeInStub<T>(trc)) {
TraceNullableEdge(
trc,
&stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset),
"cacheir-weak-script");
}
break;
case Type::JitCode:
TraceEdge(trc, &stubInfo->getStubField<T, Type::JitCode>(stub, offset),
"cacheir-jitcode");
break;
case Type::Id:
TraceEdge(trc, &stubInfo->getStubField<T, Type::Id>(stub, offset),
"cacheir-id");
break;
case Type::Value:
TraceEdge(trc, &stubInfo->getStubField<T, Type::Value>(stub, offset),
"cacheir-value");
break;
case Type::AllocSite: {
// AllocSite is stored as a raw pointer; it traces itself.
gc::AllocSite* site =
stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
site->trace(trc);
break;
}
case Type::Limit:
return;
// Done.
}
field++;
offset += StubField::sizeInBytes(fieldType);
}
}
template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
const CacheIRStubInfo* stubInfo);
template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
const CacheIRStubInfo* stubInfo);
// Sweep the weak stub fields of |stub|. Returns false if any weak edge has
// died (meaning the stub should be discarded), true otherwise.
template <
typename T>
bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
const CacheIRStubInfo* stubInfo) {
using Type = StubField::Type;
// Trace all fields before returning because this stub can be traced again
// later through TraceBaselineStubFrame.
bool isDead =
false;
uint32_t field = 0;
size_t offset = 0;
while (
true) {
Type fieldType = stubInfo->fieldType(field);
switch (fieldType) {
case Type::WeakShape: {
WeakHeapPtr<Shape*>& shapeField =
stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
auto r = TraceWeakEdge(trc, &shapeField,
"cacheir-weak-shape");
if (r.isDead()) {
isDead =
true;
}
break;
}
case Type::WeakObject: {
WeakHeapPtr<JSObject*>& objectField =
stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
auto r = TraceWeakEdge(trc, &objectField,
"cacheir-weak-object");
if (r.isDead()) {
isDead =
true;
}
break;
}
case Type::WeakBaseScript: {
WeakHeapPtr<BaseScript*>& scriptField =
stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
auto r = TraceWeakEdge(trc, &scriptField,
"cacheir-weak-script");
if (r.isDead()) {
isDead =
true;
}
break;
}
case Type::WeakGetterSetter: {
WeakHeapPtr<GetterSetter*>& getterSetterField =
stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset);
auto r = TraceWeakEdge(trc, &getterSetterField,
"cacheir-weak-getter-setter");
if (r.isDead()) {
isDead =
true;
}
break;
}
case Type::Limit:
// Done.
return !isDead;
case Type::RawInt32:
case Type::RawPointer:
case Type::Shape:
case Type::JSObject:
case Type::Symbol:
case Type::String:
case Type::JitCode:
case Type::Id:
case Type::AllocSite:
case Type::RawInt64:
case Type::Value:
case Type::
Double:
break;
// Skip non-weak fields.
}
field++;
offset += StubField::sizeInBytes(fieldType);
}
}
template bool jit::TraceWeakCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
const CacheIRStubInfo* stubInfo);
template bool jit::TraceWeakCacheIRStub(JSTracer* trc, IonICStub* stub,
const CacheIRStubInfo* stubInfo);
// Compare the writer's collected stub fields against an existing stub's
// data, in layout order. Used to detect duplicate stubs.
bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
  MOZ_ASSERT(!failed());
  const uintptr_t* words = reinterpret_cast<const uintptr_t*>(stubData);
  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      if (*words != field.asWord()) {
        return false;
      }
      words += 1;
      continue;
    }
    // 64-bit field: compare the full 8 bytes, then skip that many words.
    if (*reinterpret_cast<const uint64_t*>(words) != field.asInt64()) {
      return false;
    }
    words += sizeof(uint64_t) / sizeof(uintptr_t);
  }
  return true;
}
// Like stubDataEquals, but skip the comparison for the single field that
// starts at byte |ignoreOffset| within the stub data.
bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
                                           uint32_t ignoreOffset) const {
  MOZ_ASSERT(!failed());
  uint32_t fieldOffset = 0;
  for (const StubField& field : stubFields_) {
    if (fieldOffset != ignoreOffset) {
      const uint8_t* fieldAddr = stubData + fieldOffset;
      bool same =
          field.sizeIsWord()
              ? field.asWord() ==
                    *reinterpret_cast<const uintptr_t*>(fieldAddr)
              : field.asInt64() ==
                    *reinterpret_cast<const uint64_t*>(fieldAddr);
      if (!same) {
        return false;
      }
    }
    fieldOffset += StubField::sizeInBytes(field.type());
  }
  return true;
}
// Hash a stub-info lookup key: the CacheIR bytecode bytes mixed with the
// cache kind and IC engine.
HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
  HashNumber h = mozilla::HashBytes(l.code, l.length);
  h = mozilla::AddToHash(h, uint32_t(l.kind));
  return mozilla::AddToHash(h, uint32_t(l.engine));
}
// A table entry matches a lookup key when the kind, engine, and CacheIR
// bytecode (length and bytes) all agree.
bool CacheIRStubKey::match(const CacheIRStubKey& entry,
                           const CacheIRStubKey::Lookup& l) {
  return entry.stubInfo->kind() == l.kind &&
         entry.stubInfo->engine() == l.engine &&
         entry.stubInfo->codeLength() == l.length &&
         mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length);
}
// Construct a reader over the CacheIR bytecode stored in |stubInfo|,
// spanning [code, code + codeLength).
CacheIRReader::CacheIRReader(
const CacheIRStubInfo* stubInfo)
: CacheIRReader(stubInfo->code(),
stubInfo->code() + stubInfo->codeLength()) {}
// Allocate and initialize a CacheIRStubInfo in a single malloc'd buffer laid
// out as: [CacheIRStubInfo][CacheIR bytecode][field types + Limit byte].
// Returns nullptr on OOM.
CacheIRStubInfo* CacheIRStubInfo::
New(CacheKind kind, ICStubEngine engine,
bool makesGCCalls,
uint32_t stubDataOffset,
const CacheIRWriter& writer) {
size_t numStubFields = writer.numStubFields();
// One byte per field type (see static_assert below), plus the terminator.
size_t bytesNeeded =
sizeof(CacheIRStubInfo) + writer.codeLength() +
(numStubFields + 1);
// +1 for the GCType::Limit terminator.
uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
if (!p) {
return nullptr;
}
// Copy the CacheIR code.
uint8_t* codeStart = p +
sizeof(CacheIRStubInfo);
mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());
static_assert(
sizeof(StubField::Type) ==
sizeof(uint8_t),
"StubField::Type must fit in uint8_t");
// Copy the stub field types.
uint8_t* fieldTypes = codeStart + writer.codeLength();
for (size_t i = 0; i < numStubFields; i++) {
fieldTypes[i] = uint8_t(writer.stubFieldType(i));
}
fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);
// Placement-construct the header in front of the trailing data.
return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
writer.codeLength());
}
// Two operand locations are equal when they have the same kind and the
// kind-specific payload (register, stack slot, frame slot, or constant)
// also matches.
bool OperandLocation::
operator==(
const OperandLocation& other)
const {
if (kind_ != other.kind_) {
return false;
}
switch (kind()) {
case Uninitialized:
return true;
case PayloadReg:
return payloadReg() == other.payloadReg() &&
payloadType() == other.payloadType();
case ValueReg:
return valueReg() == other.valueReg();
case PayloadStack:
return payloadStack() == other.payloadStack() &&
payloadType() == other.payloadType();
case ValueStack:
return valueStack() == other.valueStack();
case BaselineFrame:
return baselineFrameSlot() == other.baselineFrameSlot();
case Constant:
return constant() == other.constant();
case DoubleReg:
return doubleReg() == other.doubleReg();
}
MOZ_CRASH(
"Invalid OperandLocation kind");
}
// Reserve the IC's output register(s) with the allocator for the duration
// of this RAII scope so they are not handed out as temporaries. Float
// outputs need no reservation.
AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
  if (output_.hasValue()) {
    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
    return;
  }
  if (!output_.typedReg().isFloat()) {
    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
  }
}
// Release the output register(s) reserved by the constructor.
AutoOutputRegister::~AutoOutputRegister() {
  if (output_.hasValue()) {
    alloc_.releaseValueRegister(output_.valueReg());
    return;
  }
  if (!output_.typedReg().isFloat()) {
    alloc_.releaseRegister(output_.typedReg().gpr());
  }
}
bool FailurePath::canShareFailurePath(
const FailurePath& other)
const {
if (stackPushed_ != other.stackPushed_) {
return false;
}
if (spilledRegs_.length() != other.spilledRegs_.length()) {
return false;
}
for (size_t i = 0; i < spilledRegs_.length(); i++) {
if (spilledRegs_[i] != other.spilledRegs_[i]) {
return false;
}
}
MOZ_ASSERT(inputs_.length() == other.inputs_.length());
for (size_t i = 0; i < inputs_.length(); i++) {
if (inputs_[i] != other.inputs_[i]) {
return false;
}
}
return true;
}
// Snapshot the allocator's current state (input operand locations, spilled
// registers, stack depth) into a FailurePath, reusing the most recent one
// when identical. Returns false on OOM.
bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
  allocator.setAddedFailurePath();
#endif
  MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());

  FailurePath path;
  size_t numInputs = writer_.numInputOperands();
  for (size_t i = 0; i < numInputs; i++) {
    if (!path.appendInput(allocator.operandLocation(i))) {
      return false;
    }
  }
  if (!path.setSpilledRegs(allocator.spilledRegs())) {
    return false;
  }
  path.setStackPushed(allocator.stackPushed());

  // Reuse the previous failure path if the current one is the same, to
  // avoid emitting duplicate code.
  if (failurePaths.length() != 0 &&
      failurePaths.back().canShareFailurePath(path)) {
    *failure = &failurePaths.back();
    return true;
  }

  if (!failurePaths.append(std::move(path))) {
    return false;
  }
  *failure = &failurePaths.back();
  return true;
}
// Restore the allocator to the state captured by failure path |index|,
// bind its label, and emit code that restores the IC's input registers.
bool CacheIRCompiler::emitFailurePath(size_t index) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  FailurePath& path = failurePaths[index];

  allocator.setStackPushed(path.stackPushed());
  size_t numInputs = writer_.numInputOperands();
  for (size_t i = 0; i < numInputs; i++) {
    allocator.setOperandLocation(i, path.input(i));
  }
  if (!allocator.setSpilledRegs(path.spilledRegs())) {
    return false;
  }

  masm.bind(path.label());
  allocator.restoreInputState(masm);
  return true;
}
// Guard the operand holds a Number (int32 or double), failing otherwise.
bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Doubles and ints are numbers! Skip the dynamic test when the type is
  // statically known.
  JSValueType staticType = allocator.knownType(inputId);
  if (staticType == JSVAL_TYPE_INT32 || staticType == JSVAL_TYPE_DOUBLE) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestNumber(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand is an object, failing otherwise.
bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the dynamic test when the type is statically known.
  if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestObject(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand is null or undefined, failing for any other value.
bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  JSValueType staticType = allocator.knownType(inputId);
  if (staticType == JSVAL_TYPE_NULL || staticType == JSVAL_TYPE_UNDEFINED) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }

  // Accept null outright; anything else must be undefined.
  Label isNull;
  masm.branchTestNull(Assembler::Equal, val, &isNull);
  masm.branchTestUndefined(Assembler::NotEqual, val, onFail->label());
  masm.bind(&isNull);
  return true;
}
// Guard the operand is null, failing otherwise.
bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the dynamic test when the type is statically known.
  if (allocator.knownType(inputId) == JSVAL_TYPE_NULL) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestNull(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand is undefined, failing otherwise.
bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the dynamic test when the type is statically known.
  if (allocator.knownType(inputId) == JSVAL_TYPE_UNDEFINED) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestUndefined(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand is not the TDZ marker (JS_UNINITIALIZED_LEXICAL).
bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand value = allocator.useValueRegister(masm, valId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  // Fail when the value is the uninitialized-lexical magic value.
  masm.branchTestMagicValue(Assembler::Equal, value, JS_UNINITIALIZED_LEXICAL,
                            onFail->label());
  return true;
}
// Unbox a boolean operand into an int32 (0 or 1) in |resultId|, guarding
// on the type when it isn't statically known.
bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  // Statically known boolean: it's already unboxed, just copy it.
  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    Register unboxed =
        allocator.useRegister(masm, BooleanOperandId(inputId.id()));
    masm.move32(unboxed, output);
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.fallibleUnboxBoolean(val, output, onFail->label());
  return true;
}
// Guard the operand is a string, failing otherwise.
bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the dynamic test when the type is statically known.
  if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestString(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand is a symbol, failing otherwise.
bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the dynamic test when the type is statically known.
  if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestSymbol(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand is a BigInt, failing otherwise.
bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the dynamic test when the type is statically known.
  if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestBigInt(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand is a boolean, failing otherwise.
bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the dynamic test when the type is statically known.
  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestBoolean(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand is an int32, failing otherwise.
bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the dynamic test when the type is statically known.
  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  masm.branchTestInt32(Assembler::NotEqual, val, onFail->label());
  return true;
}
// Guard the operand does not hold a GC thing, failing when it does.
bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand val = allocator.useValueRegister(masm, inputId);
  FailurePath* onFail;
  if (!addFailurePath(&onFail)) {
    return false;
  }
  // Note the inverted sense: we fail when the value IS a GC thing.
  masm.branchTestGCThing(Assembler::Equal, val, onFail->label());
  return true;
}
// Infallible |emitDouble| emitters can use this implementation to avoid
// generating extra clean-up instructions to restore the scratch float register.
// To select this function simply omit the |Label* fail| parameter for the
// emitter lambda function.
// Infallible overload: selected (via SFINAE on the lambda's arity) when
// |emitDouble| takes only the FloatRegister and cannot fail, so no failure
// label is threaded through and no clean-up path is emitted.
template <
typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
ValueOperand input, FailurePath* failure,
EmitDouble emitDouble) {
AutoScratchFloatRegister floatReg(compiler);
masm.unboxDouble(input, floatReg);
emitDouble(floatReg.get());
}
// Fallible overload: selected when |emitDouble| also takes a Label* to jump
// to on failure. The scratch register's failure() label restores state
// before branching to the IC failure path.
template <
typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
ValueOperand input, FailurePath* failure,
EmitDouble emitDouble) {
AutoScratchFloatRegister floatReg(compiler, failure);
masm.unboxDouble(input, floatReg);
emitDouble(floatReg.get(), floatReg.failure());
}
// Emit a type dispatch on |input|: if it is an int32, unbox it into
// |output| and run |emitInt32|; if it is a double, unbox it and run
// |emitDouble|; any other type jumps to |failure|. The tag scope is
// released around the unboxing because unboxing may clobber the tag
// scratch register.
template <
typename EmitInt32,
typename EmitDouble>
static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
MacroAssembler& masm, ValueOperand input,
Register output, FailurePath* failure,
EmitInt32 emitInt32, EmitDouble emitDouble) {
Label done;
{
ScratchTagScope tag(masm, input);
masm.splitTagForTest(input, tag);
Label notInt32;
masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
{
ScratchTagScopeRelease _(&tag);
masm.unboxInt32(input, output);
emitInt32();
masm.jump(&done);
}
masm.bind(&notInt32);
// Not an int32 and not a double: this IC cannot handle the value.
masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
{
ScratchTagScopeRelease _(&tag);
EmitGuardDouble(compiler, masm, input, failure, emitDouble);
}
}
masm.bind(&done);
}
// Convert an int32 or int32-valued-double operand into an int32 index in
// |resultId|, failing for any other type or for doubles that are not exact
// int32s.
bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen,
"%s", __FUNCTION__);
Register output = allocator.defineRegister(masm, resultId);
// Statically known int32: already unboxed, just copy it.
if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
masm.move32(input, output);
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitGuardInt32OrDouble(
this, masm, input, output, failure,
[]() {
// No-op if the value is already an int32.
},
[&](FloatRegister floatReg, Label* fail) {
// ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
masm.convertDoubleToInt32(floatReg, output, fail,
false);
});
return true;
}
// Sign-extend an int32 operand to pointer width into |resultId|.
bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
                                        IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register in = allocator.useRegister(masm, inputId);
  Register out = allocator.defineRegister(masm, resultId);
  masm.move32SignExtendToPtr(in, out);
  return true;
}
// Convert a number operand into an IntPtr index in |resultId|. When
// |supportOOB| is true a non-int value yields the out-of-bounds index -1
// instead of taking a failure path (so no FailurePath is allocated);
// otherwise non-int values jump to the failure path.
bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
bool supportOOB,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen,
"%s", __FUNCTION__);
Register output = allocator.defineRegister(masm, resultId);
// |failure| stays null in the supportOOB case; the scratch register then
// has no failure label to restore through.
FailurePath* failure = nullptr;
if (!supportOOB) {
if (!addFailurePath(&failure)) {
return false;
}
}
AutoScratchFloatRegister floatReg(
this, failure);
allocator.ensureDoubleRegister(masm, inputId, floatReg);
// ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
if (supportOOB) {
Label done, fail;
masm.convertDoubleToPtr(floatReg, output, &fail,
false);
masm.jump(&done);
// Substitute the invalid index with an arbitrary out-of-bounds index.
masm.bind(&fail);
masm.movePtr(ImmWord(-1), output);
masm.bind(&done);
}
else {
masm.convertDoubleToPtr(floatReg, output, floatReg.failure(),
false);
}
return true;
}
static void TruncateDoubleModUint32(MacroAssembler& masm,
FloatRegister floatReg,
Register result,
const LiveRegisterSet& liveVolatileRegs) {
Label truncateABICall;
masm.branchTruncateDoubleMaybeModUint32(floatReg, result, &truncateABICall);
if (truncateABICall.used()) {
Label done;
masm.jump(&done);
--> --------------------
--> maximum size reached
--> --------------------