/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* Everything needed to build actual MIR instructions: the actual opcodes and
* instructions, the instruction interface, and use chains.
*/
#ifndef jit_MIR_h
#define jit_MIR_h
#include "mozilla/Array.h"
#include "mozilla/HashFunctions.h"
#ifdef JS_JITSPEW
#  include "mozilla/Attributes.h"  // MOZ_STACK_CLASS
#endif
#include "mozilla/MacroForEach.h"
#ifdef JS_JITSPEW
#  include "mozilla/Sprintf.h"
#  include "mozilla/Vector.h"
#endif

#include <algorithm>
#include <initializer_list>

#include "NamespaceImports.h"

#include "jit/AtomicOp.h"
#include "jit/FixedList.h"
#include "jit/InlineList.h"
#include "jit/JitAllocPolicy.h"
#include "jit/MacroAssembler.h"
#include "jit/MIROpsGenerated.h"
#include "jit/ShuffleAnalysis.h"
#include "jit/TypeData.h"
#include "jit/TypePolicy.h"
#include "js/experimental/JitInfo.h"  // JSJit{Getter,Setter}Op, JSJitInfo
#include "js/HeapAPI.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "js/Value.h"
#include "js/Vector.h"
#include "util/DifferentialTesting.h"
#include "vm/BigIntType.h"
#include "vm/EnvironmentObject.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/JSContext.h"
#include "vm/RegExpObject.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmJS.h"  // for WasmInstanceObject
namespace JS {
struct ExpandoAndGeneration;
}
namespace js {
namespace wasm {
class FuncExport;
extern uint32_t MIRTypeToABIResultSize(jit::MIRType);
}
// namespace wasm
class JS_PUBLIC_API GenericPrinter;
class NativeIteratorListHead;
class StringObject;
enum class UnaryMathFunction : uint8_t;
bool CurrentThreadIsIonCompiling();
namespace jit {
class CallInfo;
#ifdef JS_JITSPEW
// Helper for debug printing. Avoids creating a MIR.h <--> MIRGraph.h cycle.
// Implementation of this needs to see inside `MBasicBlock`; that is possible
// in MIR.cpp since it also includes MIRGraph.h, whereas this file does not.
class MBasicBlock;
uint32_t GetMBasicBlockId(
const MBasicBlock* block);
// Helper class for debug printing. This class allows `::getExtras` methods
// to add strings to be printed, on a per-MIR-node basis. The strings are
// copied into storage owned by this class when `::add` is called, so the
// `::getExtras` methods do not need to be concerned about storage management.
class MOZ_STACK_CLASS ExtrasCollector {
  mozilla::Vector<UniqueChars, 4> strings_;

 public:
  // Add `str` to the collection. A copy, owned by this object, is made. In
  // case of OOM the call has no effect.
  void add(const char* str) {
    if (UniqueChars dup = DuplicateString(str)) {
      (void)strings_.append(std::move(dup));
    }
  }

  // Number of strings collected so far.
  size_t count() const { return strings_.length(); }

  // Hand out (and give up ownership of) the string at index `ix`.
  UniqueChars get(size_t ix) { return std::move(strings_[ix]); }
};
#endif
// Forward declarations of MIR types: one `class M<Op>;` per opcode.
// NOTE: the macro body must sit on the same logical line as the #define;
// a preprocessor directive ends at the first unescaped newline.
#define FORWARD_DECLARE(op) class M##op;
MIR_OPCODE_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE
// MDefinition visitor which ignores non-overloaded visit functions.
// Each opcode gets an empty `visit<Op>(M<Op>*)` overload; subclasses override
// only the ones they care about.
class MDefinitionVisitorDefaultNoop {
 public:
#define VISIT_INS(op) \
  void visit##op(M##op*) {}
  MIR_OPCODE_LIST(VISIT_INS)
#undef VISIT_INS
};
class BytecodeSite;
class CompactBufferWriter;
class Range;
// Per-MDefinition bit flags. Expanded by MDefinition into an enum
// (DEFINE_FLAG) and into is/set/setNot accessors (FLAG_ACCESSOR).
// Every line of the macro body must end with a `\` continuation so the
// directive spans all of them.
#define MIR_FLAG_LIST(_)                                                      \
  _(InWorklist)                                                               \
  _(EmittedAtUses)                                                            \
  _(Commutative)                                                              \
  _(Movable) /* Allow passes like LICM to move this instruction */            \
  _(Lowered) /* (Debug only) has a virtual register */                        \
  _(Guard)   /* Not removable if uses == 0 */                                 \
                                                                              \
  /* Flag an instruction to be considered as a Guard if the instructions     \
   * bails out on some inputs.                                               \
   *                                                                         \
   * Some optimizations can replace an instruction, and leave its operands   \
   * unused. When the type information of the operand got used as a          \
   * predicate of the transformation, then we have to flag the operands as   \
   * GuardRangeBailouts.                                                     \
   *                                                                         \
   * This flag prevents further optimization of instructions, which          \
   * might remove the run-time checks (bailout conditions) used as a         \
   * predicate of the previous transformation.                               \
   */                                                                        \
  _(GuardRangeBailouts)                                                      \
                                                                             \
  /* Some instructions have uses that aren't directly represented in the     \
   * graph, and need to be handled specially. As an example, this is used to \
   * keep the flagged instruction in resume points, not substituting with an \
   * UndefinedValue. This can be used by call inlining when a function       \
   * argument is not used by the inlined instructions. It is also used       \
   * to annotate instructions which were used in removed branches.           \
   */                                                                        \
  _(ImplicitlyUsed)                                                          \
                                                                             \
  /* The instruction has been marked dead for lazy removal from resume       \
   * points.                                                                 \
   */                                                                        \
  _(Unused)                                                                  \
                                                                             \
  /* Marks if the current instruction should go to the bailout paths instead \
   * of producing code as part of the control flow. This flag can only be set \
   * on instructions which are only used by ResumePoint or by other flagged  \
   * instructions.                                                           \
   */                                                                        \
  _(RecoveredOnBailout)                                                      \
                                                                             \
  /* Some instructions might represent an object, but the memory of these    \
   * objects might be incomplete if we have not recovered all the stores which \
   * were supposed to happen before. This flag is used to annotate           \
   * instructions which might return a pointer to a memory area which is not \
   * yet fully initialized. This flag is used to ensure that stores are      \
   * executed before returning the value.                                    \
   */                                                                        \
  _(IncompleteObject)                                                        \
                                                                             \
  /* For WebAssembly, there are functions with multiple results. Instead of  \
   * having the results defined by one call instruction, they are instead    \
   * captured in subsequent result capture instructions, because modelling   \
   * multi-value results in Ion is too complicated. However since they       \
   * capture ambient live registers, it would be an error to move an unrelated \
   * instruction between the call and the result capture. This flag is used  \
   * to prevent code motion from moving instructions in invalid ways.        \
   */                                                                        \
  _(CallResultCapture)                                                       \
                                                                             \
  /* The current instruction got discarded from the MIR Graph. This is useful \
   * when we want to iterate over resume points and instructions, while      \
   * handling instructions which are discarded without reporting to the      \
   * iterator.                                                               \
   */                                                                        \
  _(Discarded)
class MDefinition;
class MInstruction;
class MBasicBlock;
class MNode;
class MUse;
class MPhi;
class MIRGraph;
class MResumePoint;
class MControlInstruction;
// Represents one edge in a use chain: `consumer_` is the node reading the
// value produced by `producer_`.
class MUse : public TempObject, public InlineListNode<MUse> {
  // Grant access to setProducerUnchecked.
  friend class MDefinition;
  friend class MPhi;

  MDefinition* producer_;  // MDefinition that is being used.
  MNode* consumer_;        // The node that is using this operand.

  // Low-level unchecked edit method for replaceAllUsesWith and
  // MPhi::removeOperand. This doesn't update use lists!
  // replaceAllUsesWith and MPhi::removeOperand do that manually.
  void setProducerUnchecked(MDefinition* producer) {
    MOZ_ASSERT(consumer_);
    MOZ_ASSERT(producer_);
    MOZ_ASSERT(producer);
    producer_ = producer;
  }

 public:
  // Default constructor for use in vectors.
  MUse() : producer_(nullptr), consumer_(nullptr) {}

  // Move constructor for use in vectors. When an MUse is moved, it stays
  // in its containing use list.
  MUse(MUse&& other)
      : InlineListNode<MUse>(std::move(other)),
        producer_(other.producer_),
        consumer_(other.consumer_) {}

  // Construct an MUse initialized with |producer| and |consumer|.
  MUse(MDefinition* producer, MNode* consumer) {
    initUnchecked(producer, consumer);
  }

  // Set this use, which was previously clear.
  inline void init(MDefinition* producer, MNode* consumer);
  // Like init, but works even when the use contains uninitialized data.
  inline void initUnchecked(MDefinition* producer, MNode* consumer);
  // Like initUnchecked, but set the producer to nullptr.
  inline void initUncheckedWithoutProducer(MNode* consumer);
  // Set this use, which was not previously clear.
  inline void replaceProducer(MDefinition* producer);
  // Clear this use.
  inline void releaseProducer();

  MDefinition* producer() const {
    MOZ_ASSERT(producer_ != nullptr);
    return producer_;
  }
  bool hasProducer() const { return producer_ != nullptr; }
  MNode* consumer() const {
    MOZ_ASSERT(consumer_ != nullptr);
    return consumer_;
  }

#ifdef DEBUG
  // Return the operand index of this MUse in its consumer. This is DEBUG-only
  // as normal code should instead call indexOf on the cast consumer directly,
  // to allow it to be devirtualized and inlined.
  size_t index() const;
#endif
};

using MUseIterator = InlineList<MUse>::iterator;
// A node is an entry in the MIR graph. It has two kinds:
//   MInstruction: an instruction which appears in the IR stream.
//   MResumePoint: a list of instructions that correspond to the state of the
//                 interpreter/Baseline stack.
//
// Nodes can hold references to MDefinitions. Each MDefinition has a list of
// nodes holding such a reference (its use chain).
//
// The owning basic block and the node kind are packed into a single word:
// the low bit is the Kind, the rest is the MBasicBlock pointer.
class MNode : public TempObject {
 protected:
  enum class Kind { Definition = 0, ResumePoint };

 private:
  static const uintptr_t KindMask = 0x1;
  uintptr_t blockAndKind_;

  Kind kind() const { return Kind(blockAndKind_ & KindMask); }

 protected:
  explicit MNode(const MNode& other) : blockAndKind_(other.blockAndKind_) {}

  MNode(MBasicBlock* block, Kind kind) { setBlockAndKind(block, kind); }

  void setBlockAndKind(MBasicBlock* block, Kind kind) {
    blockAndKind_ = uintptr_t(block) | uintptr_t(kind);
    MOZ_ASSERT(this->block() == block);
  }

  MBasicBlock* definitionBlock() const {
    MOZ_ASSERT(isDefinition());
    static_assert(unsigned(Kind::Definition) == 0,
                  "Code below relies on low bit being 0");
    return reinterpret_cast<MBasicBlock*>(blockAndKind_);
  }
  MBasicBlock* resumePointBlock() const {
    MOZ_ASSERT(isResumePoint());
    static_assert(unsigned(Kind::ResumePoint) == 1,
                  "Code below relies on low bit being 1");
    // Use a subtraction: if the caller does block()->foo, the compiler
    // will be able to fold it with the load.
    return reinterpret_cast<MBasicBlock*>(blockAndKind_ - 1);
  }

 public:
  // Returns the definition at a given operand.
  virtual MDefinition* getOperand(size_t index) const = 0;
  virtual size_t numOperands() const = 0;
  virtual size_t indexOf(const MUse* u) const = 0;

  bool isDefinition() const { return kind() == Kind::Definition; }
  bool isResumePoint() const { return kind() == Kind::ResumePoint; }
  MBasicBlock* block() const {
    return reinterpret_cast<MBasicBlock*>(blockAndKind_ & ~KindMask);
  }
  MBasicBlock* caller() const;

  // Sets an already set operand, updating use information. If you're looking
  // for setOperand, this is probably what you want.
  virtual void replaceOperand(size_t index, MDefinition* operand) = 0;

  // Resets the operand to an uninitialized state, breaking the link
  // with the previous operand's producer.
  void releaseOperand(size_t index) { getUseFor(index)->releaseProducer(); }
  bool hasOperand(size_t index) const {
    return getUseFor(index)->hasProducer();
  }

  inline MDefinition* toDefinition();
  inline MResumePoint* toResumePoint();

  [[nodiscard]] virtual bool writeRecoverData(
      CompactBufferWriter& writer) const;

#ifdef JS_JITSPEW
  virtual void dump(GenericPrinter& out) const = 0;
  virtual void dump() const = 0;
#endif

 protected:
  // Need visibility on getUseFor to avoid O(n^2) complexity.
  friend void AssertBasicGraphCoherency(MIRGraph& graph, bool force);

  // Gets the MUse corresponding to given operand.
  virtual MUse* getUseFor(size_t index) = 0;
  virtual const MUse* getUseFor(size_t index) const = 0;
};
// A small bit-set describing which categories of memory an instruction may
// read (load) or write (store). The top bit (Store_) distinguishes stores
// from loads; the remaining bits name the memory categories touched.
class AliasSet {
 private:
  uint32_t flags_;

 public:
  enum Flag {
    None_ = 0,
    ObjectFields = 1 << 0,    // shape, class, slots, length etc.
    Element = 1 << 1,         // A Value member of obj->elements or
                              // a typed object.
    UnboxedElement = 1 << 2,  // An unboxed scalar or reference member of
                              // typed object.
    DynamicSlot = 1 << 3,     // A Value member of obj->slots.
    FixedSlot = 1 << 4,       // A Value member of obj->fixedSlots().
    DOMProperty = 1 << 5,     // A DOM property
    WasmInstanceData = 1 << 6,  // An asm.js/wasm private global var
    WasmHeap = 1 << 7,          // An asm.js/wasm heap load
    WasmHeapMeta = 1 << 8,      // The asm.js/wasm heap base pointer and
                                // bounds check limit, in Instance.
    ArrayBufferViewLengthOrOffset =
        1 << 9,                  // An array buffer view's length or byteOffset
    WasmGlobalCell = 1 << 10,    // A wasm global cell
    WasmTableElement = 1 << 11,  // An element of a wasm table
    WasmTableMeta = 1 << 12,     // A wasm table elements pointer and
                                 // length field, in instance data.
    WasmStackResult = 1 << 13,   // A stack result from the current function

    // JSContext's exception state. This is used on instructions like MThrow
    // or MNewArrayDynamicLength that throw exceptions (other than OOM) but
    // have no other side effect, to ensure that they get their own up-to-date
    // resume point. (This resume point will be used when constructing the
    // Baseline frame during exception bailouts.)
    ExceptionState = 1 << 14,

    // Used for instructions that load the privateSlot of DOM proxies and
    // the ExpandoAndGeneration.
    DOMProxyExpando = 1 << 15,

    // Hash table of a Map or Set object.
    MapOrSetHashTable = 1 << 16,

    // Internal state of the random number generator
    RNG = 1 << 17,

    // The pendingException slot on the wasm instance object.
    WasmPendingException = 1 << 18,

    // The fuzzilliHash slot
    FuzzilliHash = 1 << 19,

    // The WasmStructObject::inlineData_[..] storage area
    WasmStructInlineDataArea = 1 << 20,
    // The WasmStructObject::outlineData_ pointer only
    WasmStructOutlineDataPointer = 1 << 21,
    // The malloc'd block that WasmStructObject::outlineData_ points at
    WasmStructOutlineDataArea = 1 << 22,
    // The WasmArrayObject::numElements_ field
    WasmArrayNumElements = 1 << 23,
    // The WasmArrayObject::data_ pointer only
    WasmArrayDataPointer = 1 << 24,
    // The malloc'd block that WasmArrayObject::data_ points at
    WasmArrayDataArea = 1 << 25,
    // The generation counter associated with the global object
    GlobalGenerationCounter = 1 << 26,
    // The SharedArrayRawBuffer::length field.
    SharedArrayRawBufferLength = 1 << 27,

    Last = SharedArrayRawBufferLength,

    Any = Last | (Last - 1),
    NumCategories = 28,

    // Indicates load or store.
    Store_ = 1 << 31
  };

  static_assert((1 << NumCategories) - 1 == Any,
                "NumCategories must include all flags present in Any");

  explicit AliasSet(uint32_t flags) : flags_(flags) {}

 public:
  inline bool isNone() const { return flags_ == None_; }
  uint32_t flags() const { return flags_ & Any; }
  inline bool isStore() const { return !!(flags_ & Store_); }
  inline bool isLoad() const { return !isStore() && !isNone(); }
  inline AliasSet operator|(const AliasSet& other) const {
    return AliasSet(flags_ | other.flags_);
  }
  inline AliasSet operator&(const AliasSet& other) const {
    return AliasSet(flags_ & other.flags_);
  }
  inline AliasSet operator~() const { return AliasSet(~flags_); }
  static AliasSet None() { return AliasSet(None_); }
  static AliasSet Load(uint32_t flags) {
    MOZ_ASSERT(flags && !(flags & Store_));
    return AliasSet(flags);
  }
  static AliasSet Store(uint32_t flags) {
    MOZ_ASSERT(flags && !(flags & Store_));
    return AliasSet(flags | Store_);
  }
};

using MDefinitionVector = Vector<MDefinition*, 6, JitAllocPolicy>;
using MInstructionVector = Vector<MInstruction*, 6, JitAllocPolicy>;
// When a floating-point value is used by nodes which would prefer to
// receive integer inputs, we may be able to help by computing our result
// into an integer directly.
//
// A value can be truncated in 4 differents ways:
// 1. Ignore Infinities (x / 0 --> 0).
// 2. Ignore overflow (INT_MIN / -1 == (INT_MAX + 1) --> INT_MIN)
// 3. Ignore negative zeros. (-0 --> 0)
// 4. Ignore remainder. (3 / 4 --> 0)
//
// Indirect truncation is used to represent that we are interested in the
// truncated result, but only if it can safely flow into operations which
// are computed modulo 2^32, such as (2) and (3). Infinities are not safe,
// as they would have absorbed other math operations. Remainders are not
// safe, as fractions can be scaled up by multiplication.
//
// Division is a particularly interesting node here because it covers all 4
// cases even when its own operands are integers.
//
// Note that these enum values are ordered from least value-modifying to
// most value-modifying, and code relies on this ordering.
// Kinds of truncation, ordered from least to most value-modifying (see the
// big comment above); code relies on this ordering.
enum class TruncateKind {
  // No correction.
  NoTruncate = 0,
  // An integer is desired, but we can't skip bailout checks.
  TruncateAfterBailouts = 1,
  // The value will be truncated after some arithmetic (see above).
  IndirectTruncate = 2,
  // Direct and infallible truncation to int32.
  Truncate = 3
};
// An MDefinition is an SSA name.
class MDefinition :
public MNode {
friend class MBasicBlock;
public:
enum class Opcode : uint16_t {
#define DEFINE_OPCODES(op) op,
MIR_OPCODE_LIST(DEFINE_OPCODES)
#undef DEFINE_OPCODES
};
private:
InlineList<MUse> uses_;
// Use chain.
uint32_t id_;
// Instruction ID, which after block re-ordering
// is sorted within a basic block.
Opcode op_;
// Opcode.
uint16_t flags_;
// Bit flags.
Range* range_;
// Any computed range for this def.
union {
MDefinition*
loadDependency_;
// Implicit dependency (store, call, etc.) of this
// instruction. Used by alias analysis, GVN and LICM.
uint32_t virtualRegister_;
// Used by lowering to map definitions to
// virtual registers.
};
// Track bailouts by storing the current pc in MIR instruction. Also used
// for profiling and keeping track of what the last known pc was.
const BytecodeSite* trackedSite_;
// If we generate a bailout path for this instruction, this is the
// bailout kind that will be encoded in the snapshot. When we bail out,
// FinishBailoutToBaseline may take action based on the bailout kind to
// prevent bailout loops. (For example, if an instruction bails out after
// being hoisted by LICM, we will disable LICM when recompiling the script.)
BailoutKind bailoutKind_;
MIRType resultType_;
// Representation of result type.
private:
enum Flag {
None = 0,
#define DEFINE_FLAG(flag) flag,
MIR_FLAG_LIST(DEFINE_FLAG)
#undef DEFINE_FLAG
Total
};
bool hasFlags(uint32_t flags)
const {
return (flags_ & flags) == flags; }
void removeFlags(uint32_t flags) { flags_ &= ~flags; }
void setFlags(uint32_t flags) { flags_ |= flags; }
// Calling isDefinition or isResumePoint on MDefinition is unnecessary.
bool isDefinition()
const =
delete;
bool isResumePoint()
const =
delete;
protected:
void setInstructionBlock(MBasicBlock* block,
const BytecodeSite* site) {
MOZ_ASSERT(isInstruction());
setBlockAndKind(block, Kind::Definition);
setTrackedSite(site);
}
void setPhiBlock(MBasicBlock* block) {
MOZ_ASSERT(isPhi());
setBlockAndKind(block, Kind::Definition);
}
static HashNumber addU32ToHash(HashNumber hash, uint32_t data) {
return data + (hash << 6) + (hash << 16) - hash;
}
static HashNumber addU64ToHash(HashNumber hash, uint64_t data) {
hash = addU32ToHash(hash, uint32_t(data));
hash = addU32ToHash(hash, uint32_t(data >> 32));
return hash;
}
public:
explicit MDefinition(Opcode op)
: MNode(nullptr, Kind::Definition),
id_(0),
op_(op),
flags_(0),
range_(nullptr),
loadDependency_(nullptr),
trackedSite_(nullptr),
bailoutKind_(BailoutKind::Unknown),
resultType_(MIRType::None) {}
// Copying a definition leaves the list of uses empty.
explicit MDefinition(
const MDefinition& other)
: MNode(other),
id_(0),
op_(other.op_),
flags_(other.flags_),
range_(other.range_),
loadDependency_(other.loadDependency_),
trackedSite_(other.trackedSite_),
bailoutKind_(other.bailoutKind_),
resultType_(other.resultType_) {}
Opcode op()
const {
return op_; }
#ifdef JS_JITSPEW
const char* opName()
const;
void printName(GenericPrinter& out)
const;
static void PrintOpcodeName(GenericPrinter& out, Opcode op);
virtual void printOpcode(GenericPrinter& out)
const;
void dump(GenericPrinter& out)
const override;
void dump()
const override;
void dumpLocation(GenericPrinter& out)
const;
void dumpLocation()
const;
// Dump any other stuff the node wants to have printed in `extras`. The
// added strings are copied, with the `ExtrasCollector` taking ownership of
// the copies.
virtual void getExtras(ExtrasCollector* extras)
const {}
#endif
// Also for LICM. Test whether this definition is likely to be a call, which
// would clobber all or many of the floating-point registers, such that
// hoisting floating-point constants out of containing loops isn't likely to
// be worthwhile.
virtual bool possiblyCalls()
const {
return false; }
MBasicBlock* block()
const {
return definitionBlock(); }
private:
void setTrackedSite(
const BytecodeSite* site) {
MOZ_ASSERT(site);
trackedSite_ = site;
}
public:
const BytecodeSite* trackedSite()
const {
MOZ_ASSERT(trackedSite_,
"missing tracked bytecode site; node not assigned to a block?");
return trackedSite_;
}
BailoutKind bailoutKind()
const {
return bailoutKind_; }
void setBailoutKind(BailoutKind kind) { bailoutKind_ = kind; }
// Return the range of this value, *before* any bailout checks. Contrast
// this with the type() method, and the Range constructor which takes an
// MDefinition*, which describe the value *after* any bailout checks.
//
// Warning: Range analysis is removing the bit-operations such as '| 0' at
// the end of the transformations. Using this function to analyse any
// operands after the truncate phase of the range analysis will lead to
// errors. Instead, one should define the collectRangeInfoPreTrunc() to set
// the right set of flags which are dependent on the range of the inputs.
Range* range()
const {
MOZ_ASSERT(type() != MIRType::None);
return range_;
}
void setRange(Range* range) {
MOZ_ASSERT(type() != MIRType::None);
range_ = range;
}
virtual HashNumber valueHash()
const;
virtual bool congruentTo(
const MDefinition* ins)
const {
return false; }
const MDefinition* skipObjectGuards()
const;
// Note that, for a call `congruentIfOperandsEqual(ins)` inside some class
// MFoo, if `true` is returned then we are ensured that `ins` is also an
// MFoo, so it is safe to do `ins->toMFoo()` without first checking whether
// `ins->isMFoo()`.
bool congruentIfOperandsEqual(
const MDefinition* ins)
const;
virtual MDefinition* foldsTo(TempAllocator& alloc);
virtual void analyzeEdgeCasesForward();
virtual void analyzeEdgeCasesBackward();
// |canTruncate| reports if this instruction supports truncation. If
// |canTruncate| function returns true, then the |truncate| function is
// called on the same instruction to mutate the instruction, such as updating
// the return type, the range and the specialization of the instruction.
virtual bool canTruncate()
const;
virtual void truncate(TruncateKind kind);
// Determine what kind of truncate this node prefers for the operand at the
// given index.
virtual TruncateKind operandTruncateKind(size_t index)
const;
// Compute an absolute or symbolic range for the value of this node.
virtual void computeRange(TempAllocator& alloc) {}
// Collect information from the pre-truncated ranges.
virtual void collectRangeInfoPreTrunc() {}
uint32_t id()
const {
MOZ_ASSERT(block());
return id_;
}
void setId(uint32_t id) { id_ = id; }
#define FLAG_ACCESSOR(flag) \
bool is
##flag()
const { \
static_assert(Flag::Total <=
sizeof(flags_) * 8, \
"Flags should fit in flags_ field"); \
return hasFlags(1 << flag); \
} \
void set
##flag() { \
MOZ_ASSERT(!hasFlags(1 << flag)); \
setFlags(1 << flag); \
} \
void setNot
##flag() { \
MOZ_ASSERT(hasFlags(1 << flag)); \
removeFlags(1 << flag); \
} \
void set
##flag
##Unchecked() { setFlags(1 << flag); } \
void setNot
##flag
##Unchecked() { removeFlags(1 << flag); }
MIR_FLAG_LIST(FLAG_ACCESSOR)
#undef FLAG_ACCESSOR
// Return the type of this value. This may be speculative, and enforced
// dynamically with the use of bailout checks. If all the bailout checks
// pass, the value will have this type.
//
// Unless this is an MUrsh that has bailouts disabled, which, as a special
// case, may return a value in (INT32_MAX,UINT32_MAX] even when its type()
// is MIRType::Int32.
MIRType type()
const {
return resultType_; }
bool mightBeType(MIRType type)
const {
MOZ_ASSERT(type != MIRType::Value);
if (type == this->type()) {
return true;
}
if (this->type() == MIRType::Value) {
return true;
}
return false;
}
bool mightBeMagicType()
const;
// Return true if the result-set types are a subset of the given types.
bool definitelyType(std::initializer_list<MIRType> types)
const;
// Float32 specialization operations (see big comment in IonAnalysis before
// the Float32 specialization algorithm).
virtual bool isFloat32Commutative()
const {
return false; }
virtual bool canProduceFloat32()
const {
return false; }
virtual bool canConsumeFloat32(MUse* use)
const {
return false; }
virtual void trySpecializeFloat32(TempAllocator& alloc) {}
#ifdef DEBUG
// Used during the pass that checks that Float32 flow into valid MDefinitions
virtual bool isConsistentFloat32Use(MUse* use)
const {
return type() == MIRType::Float32 || canConsumeFloat32(use);
}
#endif
// Returns the beginning of this definition's use chain.
MUseIterator usesBegin()
const {
return uses_.begin(); }
// Returns the end of this definition's use chain.
MUseIterator usesEnd()
const {
return uses_.end(); }
bool canEmitAtUses()
const {
return !isEmittedAtUses(); }
// Removes a use at the given position
void removeUse(MUse* use) { uses_.remove(use); }
#if defined(DEBUG) ||
defined(JS_JITSPEW)
// Number of uses of this instruction. This function is only available
// in DEBUG mode since it requires traversing the list. Most users should
// use hasUses() or hasOneUse() instead.
size_t useCount()
const;
// Number of uses of this instruction (only counting MDefinitions, ignoring
// MResumePoints). This function is only available in DEBUG mode since it
// requires traversing the list. Most users should use hasUses() or
// hasOneUse() instead.
size_t defUseCount()
const;
#endif
// Test whether this MDefinition has exactly one use.
bool hasOneUse()
const;
// Test whether this MDefinition has exactly one use.
// (only counting MDefinitions, ignoring MResumePoints)
bool hasOneDefUse()
const;
// Test whether this MDefinition has exactly one live use. (only counting
// MDefinitions which are not recovered on bailout and ignoring MResumePoints)
bool hasOneLiveDefUse()
const;
// Test whether this MDefinition has at least one use.
// (only counting MDefinitions, ignoring MResumePoints)
bool hasDefUses()
const;
// Test whether this MDefinition has at least one non-recovered use.
// (only counting MDefinitions, ignoring MResumePoints)
bool hasLiveDefUses()
const;
bool hasUses()
const {
return !uses_.empty(); }
// If this MDefinition has a single use (ignoring MResumePoints), returns that
// use's definition. Else returns nullptr.
MDefinition* maybeSingleDefUse()
const;
// Returns the most recently added use (ignoring MResumePoints) for this
// MDefinition. Returns nullptr if there are no uses. Note that this relies on
// addUse adding new uses to the front of the list, and should only be called
// during MIR building (before optimization passes make changes to the uses).
MDefinition* maybeMostRecentlyAddedDefUse()
const;
void addUse(MUse* use) {
MOZ_ASSERT(use->producer() ==
this);
uses_.pushFront(use);
}
void addUseUnchecked(MUse* use) {
MOZ_ASSERT(use->producer() ==
this);
uses_.pushFrontUnchecked(use);
}
void replaceUse(MUse* old, MUse* now) {
MOZ_ASSERT(now->producer() ==
this);
uses_.replace(old, now);
}
// Replace the current instruction by a dominating instruction |dom| in all
// uses of the current instruction.
void replaceAllUsesWith(MDefinition* dom);
// Like replaceAllUsesWith, but doesn't set ImplicitlyUsed on |this|'s
// operands.
void justReplaceAllUsesWith(MDefinition* dom);
// Replace the current instruction by an optimized-out constant in all uses
// of the current instruction. Note, that optimized-out constant should not
// be observed, and thus they should not flow in any computation.
[[nodiscard]]
bool optimizeOutAllUses(TempAllocator& alloc);
// Replace the current instruction by a dominating instruction |dom| in all
// instruction, but keep the current instruction for resume point and
// instruction which are recovered on bailouts.
void replaceAllLiveUsesWith(MDefinition* dom);
void setVirtualRegister(uint32_t vreg) {
virtualRegister_ = vreg;
setLoweredUnchecked();
}
uint32_t virtualRegister()
const {
MOZ_ASSERT(isLowered());
return virtualRegister_;
}
public:
// Opcode testing and casts.
template <
typename MIRType>
bool is()
const {
return op() == MIRType::classOpcode;
}
template <
typename MIRType>
MIRType* to() {
MOZ_ASSERT(this->is<MIRType>());
return static_cast<MIRType*>(
this);
}
template <
typename MIRType>
const MIRType* to()
const {
MOZ_ASSERT(this->is<MIRType>());
return static_cast<
const MIRType*>(
this);
}
#define OPCODE_CASTS(opcode) \
bool is
##opcode()
const {
return this->is<M
##opcode>(); } \
M
##opcode* to
##opcode() {
return this->to<M
##opcode>(); } \
const M
##opcode* to
##opcode()
const {
return this->to<M
##opcode>(); }
MIR_OPCODE_LIST(OPCODE_CASTS)
#undef OPCODE_CASTS
inline MConstant* maybeConstantValue();
inline MInstruction* toInstruction();
inline const MInstruction* toInstruction()
const;
bool isInstruction()
const {
return !isPhi(); }
virtual bool isControlInstruction()
const {
return false; }
inline MControlInstruction* toControlInstruction();
void setResultType(MIRType type) { resultType_ = type; }
virtual AliasSet getAliasSet()
const {
// Instructions are effectful by default.
return AliasSet::Store(AliasSet::Any);
}
#ifdef DEBUG
bool hasDefaultAliasSet()
const {
AliasSet set = getAliasSet();
return set.isStore() && set.flags() == AliasSet::Flag::Any;
}
#endif
MDefinition* dependency()
const {
if (getAliasSet().isStore()) {
return nullptr;
}
return loadDependency_;
}
void setDependency(MDefinition* dependency) {
MOZ_ASSERT(!getAliasSet().isStore());
loadDependency_ = dependency;
}
bool isEffectful()
const {
return getAliasSet().isStore(); }
#ifdef DEBUG
bool needsResumePoint()
const {
// Return whether this instruction should have its own resume point.
return isEffectful();
}
#endif
enum class AliasType : uint32_t { NoAlias = 0, MayAlias = 1, MustAlias = 2 };
virtual AliasType mightAlias(
const MDefinition* store)
const {
// Return whether this load may depend on the specified store, given
// that the alias sets intersect. This may be refined to exclude
// possible aliasing in cases where alias set flags are too imprecise.
if (!(getAliasSet().flags() & store->getAliasSet().flags())) {
return AliasType::NoAlias;
}
MOZ_ASSERT(!isEffectful() && store->isEffectful());
return AliasType::MayAlias;
}
virtual bool canRecoverOnBailout()
const {
return false; }
};
// An MUseDefIterator walks over uses in a definition, skipping any use that is
// not a definition. Items from the use list must not be deleted during
// iteration.
class MUseDefIterator {
  const MDefinition* def_;
  MUseIterator current_;

  // Advance from `start` to the first use whose consumer is a definition,
  // or to usesEnd() if there is none.
  MUseIterator search(MUseIterator start) {
    for (MUseIterator it(start); it != def_->usesEnd(); it++) {
      if (it->consumer()->isDefinition()) {
        return it;
      }
    }
    return def_->usesEnd();
  }

 public:
  explicit MUseDefIterator(const MDefinition* def)
      : def_(def), current_(search(def->usesBegin())) {}

  // True while there is a remaining definition-use.
  explicit operator bool() const { return current_ != def_->usesEnd(); }

  // Pre-increment: step to the next definition-use.
  MUseDefIterator operator++() {
    MOZ_ASSERT(current_ != def_->usesEnd());
    ++current_;
    current_ = search(current_);
    return *this;
  }

  // Post-increment.
  MUseDefIterator operator++(int) {
    MUseDefIterator old(*this);
    operator++();
    return old;
  }

  MUse* use() const { return *current_; }
  MDefinition* def() const { return current_->consumer()->toDefinition(); }
};
// Helper class to check that GC pointers embedded in MIR instructions are not
// in the nursery. Off-thread compilation and nursery GCs can happen in
// parallel. Nursery pointers are handled with MNurseryObject and the
// nurseryObjects lists in WarpSnapshot and IonScript.
//
// These GC things are rooted through the WarpSnapshot. Compacting GCs cancel
// off-thread compilations.
template <typename T>
class CompilerGCPointer {
  js::gc::Cell* ptr_;

 public:
  explicit CompilerGCPointer(T ptr) : ptr_(ptr) {
    // Nursery things must not be embedded here; see class comment.
    MOZ_ASSERT_IF(ptr, !IsInsideNursery(ptr));
    // On the main thread this is only safe while GC is suppressed.
    MOZ_ASSERT_IF(!CurrentThreadIsIonCompiling(), TlsContext.get()->suppressGC);
  }

  // Implicit conversion back to the stored pointer type.
  operator T() const { return static_cast<T>(ptr_); }
  T operator->() const { return static_cast<T>(ptr_); }

 private:
  // Not default-constructible, copyable, or assignable.
  CompilerGCPointer() = delete;
  CompilerGCPointer(const CompilerGCPointer<T>&) = delete;
  CompilerGCPointer<T>& operator=(const CompilerGCPointer<T>&) = delete;
};
// Convenience aliases for the GC pointer kinds commonly embedded in MIR.
using CompilerObject = CompilerGCPointer<JSObject*>;
using CompilerNativeObject = CompilerGCPointer<NativeObject*>;
using CompilerFunction = CompilerGCPointer<JSFunction*>;
using CompilerBaseScript = CompilerGCPointer<BaseScript*>;
using CompilerPropertyName = CompilerGCPointer<PropertyName*>;
using CompilerShape = CompilerGCPointer<Shape*>;
using CompilerGetterSetter = CompilerGCPointer<GetterSetter*>;
// An instruction is an SSA name that is inserted into a basic block's IR
// stream.
class MInstruction : public MDefinition, public InlineListNode<MInstruction> {
  // Attached resume point, or nullptr if none has been set.
  MResumePoint* resumePoint_;

 protected:
  // All MInstructions are using the "MFoo::New(alloc)" notation instead of
  // the TempObject new operator. This code redefines the new operator as
  // protected, and delegates to the TempObject new operator. Thus, the
  // following code prevents calls to "new(alloc) MFoo" outside the MFoo
  // members.
  inline void* operator new(size_t nbytes,
                            TempAllocator::Fallible view) noexcept(true) {
    return TempObject::operator new(nbytes, view);
  }
  inline void* operator new(size_t nbytes, TempAllocator& alloc) {
    return TempObject::operator new(nbytes, alloc);
  }
  template <class T>
  inline void* operator new(size_t nbytes, T* pos) {
    return TempObject::operator new(nbytes, pos);
  }

 public:
  explicit MInstruction(Opcode op) : MDefinition(op), resumePoint_(nullptr) {}

  // Copying an instruction leaves the resume point as empty.
  explicit MInstruction(const MInstruction& other)
      : MDefinition(other), resumePoint_(nullptr) {}

  // Convenient function used for replacing a load by the value of the store
  // if the types are match, and boxing the value if they do not match.
  MDefinition* foldsToStore(TempAllocator& alloc);

  // Resume point management; see MResumePoint for details.
  void setResumePoint(MResumePoint* resumePoint);
  void stealResumePoint(MInstruction* other);
  void moveResumePointAsEntry();
  void clearResumePoint();
  MResumePoint* resumePoint() const { return resumePoint_; }

  // For instructions which can be cloned with new inputs, with all other
  // information being the same. clone() implementations do not need to worry
  // about cloning generic MInstruction/MDefinition state like flags and
  // resume points.
  virtual bool canClone() const { return false; }
  virtual MInstruction* clone(TempAllocator& alloc,
                              const MDefinitionVector& inputs) const {
    MOZ_CRASH();
  }

  // Instructions needing to hook into type analysis should return a
  // TypePolicy.
  virtual const TypePolicy* typePolicy() = 0;
  virtual MIRType typePolicySpecialization() = 0;
};
// Note: GenerateOpcodeFiles.py generates MOpcodesGenerated.h based on the
// INSTRUCTION_HEADER* macros.

// Declares the class opcode constant and a MThisOpcode alias used by the
// other instruction macros below.
#define INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(opcode) \
  static const Opcode classOpcode = Opcode::opcode;   \
  using MThisOpcode = M##opcode;

#define INSTRUCTION_HEADER(opcode)                 \
  INSTRUCTION_HEADER_WITHOUT_TYPEPOLICY(opcode)    \
  virtual const TypePolicy* typePolicy() override; \
  virtual MIRType typePolicySpecialization() override;

// Declares a canClone/clone pair that copy-constructs the instruction and
// then substitutes the supplied operands.
#define ALLOW_CLONE(typename)                          \
  bool canClone() const override { return true; }      \
  MInstruction* clone(TempAllocator& alloc,            \
                      const MDefinitionVector& inputs) \
      const override {                                 \
    MInstruction* res = new (alloc) typename(*this);   \
    for (size_t i = 0; i < numOperands(); i++)         \
      res->replaceOperand(i, inputs[i]);               \
    return res;                                        \
  }

// Adds MFoo::New functions which are mirroring the arguments of the
// constructors. Opcodes which are using this macro can be called with a
// TempAllocator, or the fallible version of the TempAllocator.
#define TRIVIAL_NEW_WRAPPERS                                      \
  template <typename... Args>                                     \
  static MThisOpcode* New(TempAllocator& alloc, Args&&... args) { \
    return new (alloc) MThisOpcode(std::forward<Args>(args)...);  \
  }                                                               \
  template <typename... Args>                                     \
  static MThisOpcode* New(TempAllocator::Fallible alloc,          \
                          Args&&... args) {                       \
    return new (alloc) MThisOpcode(std::forward<Args>(args)...);  \
  }

// These macros are used as a syntactic sugar for writting getOperand
// accessors. They are meant to be used in the body of MIR Instructions as
// follows:
//
//   public:
//     INSTRUCTION_HEADER(Foo)
//     NAMED_OPERANDS((0, lhs), (1, rhs))
//
// The above example defines 2 accessors, one named "lhs" accessing the first
// operand, and a one named "rhs" accessing the second operand.
#define NAMED_OPERAND_ACCESSOR(Index, Name) \
  MDefinition* Name() const { return getOperand(Index); }
#define NAMED_OPERAND_ACCESSOR_APPLY(Args) NAMED_OPERAND_ACCESSOR Args
#define NAMED_OPERANDS(...) \
  MOZ_FOR_EACH(NAMED_OPERAND_ACCESSOR_APPLY, (), (__VA_ARGS__))
// An instruction with a compile-time fixed number of operands, stored inline.
template <size_t Arity>
class MAryInstruction : public MInstruction {
  mozilla::Array<MUse, Arity> operands_;

 protected:
  MUse* getUseFor(size_t index) final { return &operands_[index]; }
  const MUse* getUseFor(size_t index) const final { return &operands_[index]; }
  void initOperand(size_t index, MDefinition* operand) {
    operands_[index].init(operand, this);
  }

 public:
  MDefinition* getOperand(size_t index) const final {
    return operands_[index].producer();
  }
  size_t numOperands() const final { return Arity; }
#ifdef DEBUG
  static const size_t staticNumOperands = Arity;
#endif
  // Recover the operand index from a use's address within operands_.
  size_t indexOf(const MUse* u) const final {
    MOZ_ASSERT(u >= &operands_[0]);
    MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
    return u - &operands_[0];
  }
  void replaceOperand(size_t index, MDefinition* operand) final {
    operands_[index].replaceProducer(operand);
  }

  explicit MAryInstruction(Opcode op) : MInstruction(op) {}

  // Copy constructor: re-links each use to this instruction.
  explicit MAryInstruction(const MAryInstruction<Arity>& other)
      : MInstruction(other) {
    // N.B. use |int| to avoid warnings when Arity == 0
    for (int i = 0; i < (int)Arity; i++) {
      operands_[i].init(other.operands_[i].producer(), this);
    }
  }
};
// An instruction with no operands and no type policy.
class MNullaryInstruction : public MAryInstruction<0>,
                            public NoTypePolicy::Data {
 protected:
  explicit MNullaryInstruction(Opcode op) : MAryInstruction(op) {}

  HashNumber valueHash() const override;
};
// An instruction with exactly one operand, accessible as input().
class MUnaryInstruction : public MAryInstruction<1> {
 protected:
  MUnaryInstruction(Opcode op, MDefinition* ins) : MAryInstruction(op) {
    initOperand(0, ins);
  }

  HashNumber valueHash() const override;

 public:
  NAMED_OPERANDS((0, input))
};
// An instruction with exactly two operands, accessible as lhs() and rhs().
class MBinaryInstruction : public MAryInstruction<2> {
 protected:
  MBinaryInstruction(Opcode op, MDefinition* left, MDefinition* right)
      : MAryInstruction(op) {
    initOperand(0, left);
    initOperand(1, right);
  }

 public:
  NAMED_OPERANDS((0, lhs), (1, rhs))

 protected:
  HashNumber valueHash() const override;

  // Returns whether |ins| has the same opcode, result type and operands as
  // this instruction. For commutative operations the operand pairs are
  // compared order-insensitively. Effectful instructions never compare
  // congruent.
  bool binaryCongruentTo(const MDefinition* ins) const {
    if (op() != ins->op()) {
      return false;
    }
    if (type() != ins->type()) {
      return false;
    }
    if (isEffectful() || ins->isEffectful()) {
      return false;
    }

    // Normalize commutative operand order by definition id so that e.g.
    // a+b and b+a compare equal.
    const MDefinition* left = getOperand(0);
    const MDefinition* right = getOperand(1);
    if (isCommutative() && left->id() > right->id()) {
      std::swap(left, right);
    }

    const MBinaryInstruction* bi = static_cast<const MBinaryInstruction*>(ins);
    const MDefinition* insLeft = bi->getOperand(0);
    const MDefinition* insRight = bi->getOperand(1);
    if (bi->isCommutative() && insLeft->id() > insRight->id()) {
      std::swap(insLeft, insRight);
    }
    return left == insLeft && right == insRight;
  }

 public:
  // Return if the operands to this instruction are both unsigned.
  static bool unsignedOperands(MDefinition* left, MDefinition* right);
  bool unsignedOperands();

  // Replace any wrapping operands with the underlying int32 operands
  // in case of unsigned operands.
  void replaceWithUnsignedOperands();
};
// An instruction with exactly three operands.
class MTernaryInstruction : public MAryInstruction<3> {
 protected:
  MTernaryInstruction(Opcode op, MDefinition* first, MDefinition* second,
                      MDefinition* third)
      : MAryInstruction(op) {
    initOperand(0, first);
    initOperand(1, second);
    initOperand(2, third);
  }

  HashNumber valueHash() const override;
};
// An instruction with exactly four operands.
class MQuaternaryInstruction : public MAryInstruction<4> {
 protected:
  MQuaternaryInstruction(Opcode op, MDefinition* first, MDefinition* second,
                         MDefinition* third, MDefinition* fourth)
      : MAryInstruction(op) {
    initOperand(0, first);
    initOperand(1, second);
    initOperand(2, third);
    initOperand(3, fourth);
  }

  HashNumber valueHash() const override;
};
// Mixin adding a dynamically-sized operand list to an instruction base class
// (see MVariadicInstruction and MVariadicControlInstruction below).
template <class T>
class MVariadicT : public T {
  // Operand storage, allocated once by init() and never resized.
  FixedList<MUse> operands_;

 protected:
  explicit MVariadicT(typename T::Opcode op) : T(op) {}

  // Allocates space for |length| operands; fallible, must be OOM-checked.
  [[nodiscard]] bool init(TempAllocator& alloc, size_t length) {
    return operands_.init(alloc, length);
  }
  void initOperand(size_t index, MDefinition* operand) {
    // FixedList doesn't initialize its elements, so do an unchecked init.
    operands_[index].initUnchecked(operand, this);
  }

  MUse* getUseFor(size_t index) final { return &operands_[index]; }
  const MUse* getUseFor(size_t index) const final { return &operands_[index]; }

  // The MWasmCallBase mixin performs initialization for it's subclasses.
  friend class MWasmCallBase;

 public:
  // Will assert if called before initialization.
  MDefinition* getOperand(size_t index) const final {
    return operands_[index].producer();
  }
  size_t numOperands() const final { return operands_.length(); }
  // Recover the operand index from a use's address within operands_.
  size_t indexOf(const MUse* u) const final {
    MOZ_ASSERT(u >= &operands_[0]);
    MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
    return u - &operands_[0];
  }
  void replaceOperand(size_t index, MDefinition* operand) final {
    operands_[index].replaceProducer(operand);
  }
};
// An instruction with a variable number of operands. Note that the
// MFoo::New constructor for variadic instructions fallibly
// initializes the operands_ array and must be checked for OOM.
using MVariadicInstruction = MVariadicT<MInstruction>;
// All barriered operations:
// - MCompareExchangeTypedArrayElement
// - MExchangeTypedArrayElement
// - MAtomicTypedArrayElementBinop
// - MGrowableSharedArrayBufferByteLength
//
// And operations which are optionally barriered:
// - MLoadUnboxedScalar
// - MStoreUnboxedScalar
// - MResizableTypedArrayLength
// - MResizableDataViewByteLength
//
// Must have the following attributes:
//
// - Not movable
// - Not removable
// - Not congruent with any other instruction
// - Effectful (they alias every TypedArray store)
//
// The intended effect of those constraints is to prevent all loads and stores
// preceding the barriered operation from being moved to after the barriered
// operation, and vice versa, and to prevent the barriered operation from being
// removed or hoisted.
enum class MemoryBarrierRequirement : bool {
  NotRequired,
  Required,
};

// Expands to the generated MIR opcode class declarations (MIROpsGenerated.h).
MIR_OPCODE_CLASS_GENERATED
// Truncation barrier. This is intended for protecting its input against
// follow-up truncation optimizations.
class MLimitedTruncate : public MUnaryInstruction,
                         public ConvertToInt32Policy<0>::Data {
  // Current truncation kind; starts at NoTruncate, updated by
  // setTruncateKind/truncate.
  TruncateKind truncate_;
  // Limit on the truncation that may be propagated (set at construction).
  TruncateKind truncateLimit_;

  MLimitedTruncate(MDefinition* input, TruncateKind limit)
      : MUnaryInstruction(classOpcode, input),
        truncate_(TruncateKind::NoTruncate),
        truncateLimit_(limit) {
    setResultType(MIRType::Int32);
    setMovable();
  }

 public:
  INSTRUCTION_HEADER(LimitedTruncate)
  TRIVIAL_NEW_WRAPPERS

  AliasSet getAliasSet() const override { return AliasSet::None(); }

  void computeRange(TempAllocator& alloc) override;
  bool canTruncate() const override;
  void truncate(TruncateKind kind) override;
  TruncateKind operandTruncateKind(size_t index) const override;
  TruncateKind truncateKind() const { return truncate_; }
  void setTruncateKind(TruncateKind kind) { truncate_ = kind; }
};
// Truncation barrier. This is intended for protecting its input against
// follow-up truncation optimizations. IntPtr variant.
class MIntPtrLimitedTruncate : public MUnaryInstruction,
                               public NoTypePolicy::Data {
  explicit MIntPtrLimitedTruncate(MDefinition* input)
      : MUnaryInstruction(classOpcode, input) {
    MOZ_ASSERT(input->type() == MIRType::IntPtr);
    setResultType(MIRType::IntPtr);
    setMovable();
  }

 public:
  INSTRUCTION_HEADER(IntPtrLimitedTruncate)
  TRIVIAL_NEW_WRAPPERS

  AliasSet getAliasSet() const override { return AliasSet::None(); }
};
// Truncation barrier. This is intended for protecting its input against
// follow-up truncation optimizations. Int64 variant.
class MInt64LimitedTruncate : public MUnaryInstruction,
                              public NoTypePolicy::Data {
  explicit MInt64LimitedTruncate(MDefinition* input)
      : MUnaryInstruction(classOpcode, input) {
    MOZ_ASSERT(input->type() == MIRType::Int64);
    setResultType(MIRType::Int64);
    setMovable();
  }

 public:
  INSTRUCTION_HEADER(Int64LimitedTruncate)
  TRIVIAL_NEW_WRAPPERS

  AliasSet getAliasSet() const override { return AliasSet::None(); }
};
// A constant js::Value.
class MConstant : public MNullaryInstruction {
  // Untyped storage for the constant; which union member is active is
  // determined by this instruction's MIRType. asBits aliases the full 64
  // bits for hashing and equality.
  struct Payload {
    union {
      bool b;
      int32_t i32;
      int64_t i64;
      intptr_t iptr;
      float f;
      double d;
      JSString* str;
      JS::Symbol* sym;
      BigInt* bi;
      JSObject* obj;
      Shape* shape;
      uint64_t asBits;
    };
    Payload() : asBits(0) {}
  };

  Payload payload_;

  static_assert(sizeof(Payload) == sizeof(uint64_t),
                "asBits must be big enough for all payload bits");

#ifdef DEBUG
  void assertInitializedPayload() const;
#else
  void assertInitializedPayload() const {}
#endif

  // Constructors are private; use the New* factories below.
  MConstant(TempAllocator& alloc, const Value& v);
  explicit MConstant(JSObject* obj);
  explicit MConstant(Shape* shape);
  explicit MConstant(float f);
  explicit MConstant(MIRType type, int64_t i);

 public:
  INSTRUCTION_HEADER(Constant)
  static MConstant* New(TempAllocator& alloc, const Value& v);
  static MConstant* New(TempAllocator::Fallible alloc, const Value& v);
  static MConstant* New(TempAllocator& alloc, const Value& v, MIRType type);
  static MConstant* NewFloat32(TempAllocator& alloc, double d);
  static MConstant* NewInt64(TempAllocator& alloc, int64_t i);
  static MConstant* NewIntPtr(TempAllocator& alloc, intptr_t i);
  static MConstant* NewObject(TempAllocator& alloc, JSObject* v);
  static MConstant* NewShape(TempAllocator& alloc, Shape* s);
  static MConstant* Copy(TempAllocator& alloc, MConstant* src) {
    return new (alloc) MConstant(*src);
  }

  // Try to convert this constant to boolean, similar to js::ToBoolean.
  // Returns false if the type is MIRType::Magic* or MIRType::Object.
  [[nodiscard]] bool valueToBoolean(bool* res) const;

#ifdef JS_JITSPEW
  void printOpcode(GenericPrinter& out) const override;
#endif

  HashNumber valueHash() const override;
  bool congruentTo(const MDefinition* ins) const override;

  AliasSet getAliasSet() const override { return AliasSet::None(); }

  void computeRange(TempAllocator& alloc) override;
  bool canTruncate() const override;
  void truncate(TruncateKind kind) override;

  bool canProduceFloat32() const override;

  ALLOW_CLONE(MConstant)

  // Two constants are equal iff their types and raw payload bits match.
  bool equals(const MConstant* other) const {
    assertInitializedPayload();
    return type() == other->type() && payload_.asBits == other->payload_.asBits;
  }

  // Typed accessors. Each asserts that this constant has the matching
  // MIRType before reading the corresponding union member.
  bool toBoolean() const {
    MOZ_ASSERT(type() == MIRType::Boolean);
    return payload_.b;
  }
  int32_t toInt32() const {
    MOZ_ASSERT(type() == MIRType::Int32);
    return payload_.i32;
  }
  int64_t toInt64() const {
    MOZ_ASSERT(type() == MIRType::Int64);
    return payload_.i64;
  }
  intptr_t toIntPtr() const {
    MOZ_ASSERT(type() == MIRType::IntPtr);
    return payload_.iptr;
  }
  bool isInt32(int32_t i) const {
    return type() == MIRType::Int32 && payload_.i32 == i;
  }
  bool isInt64(int64_t i) const {
    return type() == MIRType::Int64 && payload_.i64 == i;
  }
  const double& toDouble() const {
    MOZ_ASSERT(type() == MIRType::Double);
    return payload_.d;
  }
  const float& toFloat32() const {
    MOZ_ASSERT(type() == MIRType::Float32);
    return payload_.f;
  }
  JSString* toString() const {
    MOZ_ASSERT(type() == MIRType::String);
    return payload_.str;
  }
  JS::Symbol* toSymbol() const {
    MOZ_ASSERT(type() == MIRType::Symbol);
    return payload_.sym;
  }
  BigInt* toBigInt() const {
    MOZ_ASSERT(type() == MIRType::BigInt);
    return payload_.bi;
  }
  JSObject& toObject() const {
    MOZ_ASSERT(type() == MIRType::Object);
    return *payload_.obj;
  }
  JSObject* toObjectOrNull() const {
    if (type() == MIRType::Object) {
      return payload_.obj;
    }
    MOZ_ASSERT(type() == MIRType::Null);
    return nullptr;
  }
  Shape* toShape() const {
    MOZ_ASSERT(type() == MIRType::Shape);
    return payload_.shape;
  }

  bool isTypeRepresentableAsDouble() const {
    return IsTypeRepresentableAsDouble(type());
  }
  // Numeric value of an Int32/Double/Float32 constant, widened to double.
  double numberToDouble() const {
    MOZ_ASSERT(isTypeRepresentableAsDouble());
    if (type() == MIRType::Int32) {
      return toInt32();
    }
    if (type() == MIRType::Double) {
      return toDouble();
    }
    return toFloat32();
  }

  // Convert this constant to a js::Value. Float32 constants will be stored
  // as DoubleValue and NaNs are canonicalized. Callers must be careful: not
  // all constants can be represented by js::Value (wasm supports int64).
  Value toJSValue() const;
};
// Hash a constant's (type, payload) pair into a HashNumber.
inline HashNumber ConstantValueHash(MIRType type, uint64_t payload) {
  // Build a 64-bit value holding both the payload and the type.
  static const size_t TypeBits = 8;
  static const size_t TypeShift = 64 - TypeBits;
  MOZ_ASSERT(uintptr_t(type) <= (1 << TypeBits) - 1);
  uint64_t combined = (uint64_t(type) << TypeShift) ^ payload;

  // Fold all 64 bits into the 32-bit result. It's tempting to just discard
  // half of the bits, as this is just a hash, however there are many common
  // patterns of values where only the low or the high bits vary, so
  // discarding either side would lead to excessive hash collisions.
  HashNumber lo = static_cast<HashNumber>(combined);
  HashNumber hi = static_cast<HashNumber>(combined >> 32);
  return lo ^ hi;
}
// Reads a formal argument; index() selects the slot, with THIS_SLOT (-1)
// standing for the |this| value. Always produces a boxed Value.
class MParameter : public MNullaryInstruction {
  int32_t index_;

  explicit MParameter(int32_t index)
      : MNullaryInstruction(classOpcode), index_(index) {
    setResultType(MIRType::Value);
  }

 public:
  INSTRUCTION_HEADER(Parameter)
  TRIVIAL_NEW_WRAPPERS

  static const int32_t THIS_SLOT = -1;
  int32_t index() const { return index_; }

#ifdef JS_JITSPEW
  void printOpcode(GenericPrinter& out) const override;
#endif

  HashNumber valueHash() const override;
  bool congruentTo(const MDefinition* ins) const override;
};
// Base class for instructions that transfer control to one of a set of
// successor basic blocks.
class MControlInstruction : public MInstruction {
 protected:
  explicit MControlInstruction(Opcode op) : MInstruction(op) {}

 public:
  // Successor access; concrete subclasses define the storage.
  virtual size_t numSuccessors() const = 0;
  virtual MBasicBlock* getSuccessor(size_t i) const = 0;
  virtual void replaceSuccessor(size_t i, MBasicBlock* successor) = 0;

  // Like replaceSuccessor, but asserts the slot was previously unset.
  void initSuccessor(size_t i, MBasicBlock* successor) {
    MOZ_ASSERT(!getSuccessor(i));
    replaceSuccessor(i, successor);
  }

  bool isControlInstruction() const override { return true; }

#ifdef JS_JITSPEW
  void printOpcode(GenericPrinter& out) const override;
#endif
};
// Multi-way branch on an integer operand in the range [low, high].
class MTableSwitch final : public MControlInstruction,
                           public NoFloatPolicy<0>::Data {
  // The successors of the tableswitch
  // - First successor = the default case
  // - Successors 2 and higher = the cases
  Vector<MBasicBlock*, 0, JitAllocPolicy> successors_;
  // Index into successors_ sorted on case index
  Vector<size_t, 0, JitAllocPolicy> cases_;

  // The single operand: the value being switched on.
  MUse operand_;
  int32_t low_;
  int32_t high_;

  void initOperand(size_t index, MDefinition* operand) {
    MOZ_ASSERT(index == 0);
    operand_.init(operand, this);
  }

  MTableSwitch(TempAllocator& alloc, MDefinition* ins, int32_t low,
               int32_t high)
      : MControlInstruction(classOpcode),
        successors_(alloc),
        cases_(alloc),
        low_(low),
        high_(high) {
    initOperand(0, ins);
  }

 protected:
  MUse* getUseFor(size_t index) override {
    MOZ_ASSERT(index == 0);
    return &operand_;
  }
  const MUse* getUseFor(size_t index) const override {
    MOZ_ASSERT(index == 0);
    return &operand_;
  }

 public:
  INSTRUCTION_HEADER(TableSwitch)

  static MTableSwitch* New(TempAllocator& alloc, MDefinition* ins, int32_t low,
                           int32_t high) {
    return new (alloc) MTableSwitch(alloc, ins, low, high);
  }

  size_t numSuccessors() const override { return successors_.length(); }

  // Appends a case successor; addDefault must have been called first.
  [[nodiscard]] bool addSuccessor(MBasicBlock* successor, size_t* index) {
    MOZ_ASSERT(successors_.length() < (size_t)(high_ - low_ + 2));
    MOZ_ASSERT(!successors_.empty());
    *index = successors_.length();
    return successors_.append(successor);
  }

  MBasicBlock* getSuccessor(size_t i) const override {
    MOZ_ASSERT(i < numSuccessors());
    return successors_[i];
  }

  void replaceSuccessor(size_t i, MBasicBlock* successor) override {
    MOZ_ASSERT(i < numSuccessors());
    successors_[i] = successor;
  }

  // Inclusive bounds of the case value range.
  int32_t low() const { return low_; }
  int32_t high() const { return high_; }

  MBasicBlock* getDefault() const { return getSuccessor(0); }
  MBasicBlock* getCase(size_t i) const { return getSuccessor(cases_[i]); }

  // Sets the default successor; must be called before any addSuccessor.
  [[nodiscard]] bool addDefault(MBasicBlock* block, size_t* index = nullptr) {
    MOZ_ASSERT(successors_.empty());
    if (index) {
      *index = 0;
    }
    return successors_.append(block);
  }

  // Maps the next case value to an existing successor index.
  [[nodiscard]] bool addCase(size_t successorIndex) {
    return cases_.append(successorIndex);
  }

  size_t numCases() const { return high() - low() + 1; }

  MDefinition* getOperand(size_t index) const override {
    MOZ_ASSERT(index == 0);
    return operand_.producer();
  }

  size_t numOperands() const override { return 1; }

  size_t indexOf(const MUse* u) const final {
    MOZ_ASSERT(u == getUseFor(0));
    return 0;
  }

  void replaceOperand(size_t index, MDefinition* operand) final {
    MOZ_ASSERT(index == 0);
    operand_.replaceProducer(operand);
  }

  MDefinition* foldsTo(TempAllocator& alloc) override;

  // It does read memory in that it must read an entry from the jump table,
  // but that's effectively data that is private to this MIR. And it should
  // certainly never be modified by any other MIR. Hence it is effect-free
  // from an alias-analysis standpoint.
  AliasSet getAliasSet() const override { return AliasSet::None(); }
};
// Control instruction with a fixed number of operands and successors, both
// stored inline.
template <size_t Arity, size_t Successors>
class MAryControlInstruction : public MControlInstruction {
  mozilla::Array<MUse, Arity> operands_;
  mozilla::Array<MBasicBlock*, Successors> successors_;

 protected:
  explicit MAryControlInstruction(Opcode op) : MControlInstruction(op) {}
  void setSuccessor(size_t index, MBasicBlock* successor) {
    successors_[index] = successor;
  }

  MUse* getUseFor(size_t index) final { return &operands_[index]; }
  const MUse* getUseFor(size_t index) const final { return &operands_[index]; }
  void initOperand(size_t index, MDefinition* operand) {
    operands_[index].init(operand, this);
  }

 public:
  MDefinition* getOperand(size_t index) const final {
    return operands_[index].producer();
  }
  size_t numOperands() const final { return Arity; }
  // Recover the operand index from a use's address within operands_.
  size_t indexOf(const MUse* u) const final {
    MOZ_ASSERT(u >= &operands_[0]);
    MOZ_ASSERT(u <= &operands_[numOperands() - 1]);
    return u - &operands_[0];
  }
  void replaceOperand(size_t index, MDefinition* operand) final {
    operands_[index].replaceProducer(operand);
  }

  size_t numSuccessors() const final { return Successors; }
  MBasicBlock* getSuccessor(size_t i) const final { return successors_[i]; }
  void replaceSuccessor(size_t i, MBasicBlock* succ) final {
    successors_[i] = succ;
  }
};
// Control instruction with a variable number of operands and a fixed number
// of successors.
template <size_t Successors>
class MVariadicControlInstruction : public MVariadicT<MControlInstruction> {
  mozilla::Array<MBasicBlock*, Successors> successors_;

 protected:
  explicit MVariadicControlInstruction(Opcode op)
      : MVariadicT<MControlInstruction>(op) {}
  void setSuccessor(size_t index, MBasicBlock* successor) {
    successors_[index] = successor;
  }

 public:
  size_t numSuccessors() const final { return Successors; }
  MBasicBlock* getSuccessor(size_t i) const final { return successors_[i]; }
  void replaceSuccessor(size_t i, MBasicBlock* succ) final {
    successors_[i] = succ;
  }
};
// Jump to the start of another basic block.
class MGoto : public MAryControlInstruction<0, 1>, public NoTypePolicy::Data {
  explicit MGoto(MBasicBlock* target) : MAryControlInstruction(classOpcode) {
    setSuccessor(TargetIndex, target);
  }

 public:
  INSTRUCTION_HEADER(Goto)
  static MGoto* New(TempAllocator& alloc, MBasicBlock* target);
  static MGoto* New(TempAllocator::Fallible alloc, MBasicBlock* target);

  // Variant that may patch the target later.
  static MGoto* New(TempAllocator& alloc);

  static constexpr size_t TargetIndex = 0;

  MBasicBlock* target() const { return getSuccessor(TargetIndex); }

  AliasSet getAliasSet() const override { return AliasSet::None(); }

#ifdef JS_JITSPEW
  // Adds "BlockN" for the jump target to the spew output.
  void getExtras(ExtrasCollector* extras) const override {
    char buf[64];
    SprintfLiteral(buf, "Block%u", GetMBasicBlockId(target()));
    extras->add(buf);
  }
#endif
};
// Tests if the input instruction evaluates to true or false, and jumps to the
// start of a corresponding basic block.
class MTest : public MAryControlInstruction<1, 2>, public TestPolicy::Data {
  // It is allowable to specify `trueBranch` or `falseBranch` as nullptr and
  // patch it in later.
  MTest(MDefinition* ins, MBasicBlock* trueBranch, MBasicBlock* falseBranch)
      : MAryControlInstruction(classOpcode) {
    initOperand(0, ins);
    setSuccessor(TrueBranchIndex, trueBranch);
    setSuccessor(FalseBranchIndex, falseBranch);
  }

  // Type data recorded for the tested value (see setObservedTypes).
  TypeDataList observedTypes_;

 public:
  INSTRUCTION_HEADER(Test)
  TRIVIAL_NEW_WRAPPERS
  NAMED_OPERANDS((0, input))

  const TypeDataList& observedTypes() const { return observedTypes_; }
  void setObservedTypes(const TypeDataList& observed) {
    observedTypes_ = observed;
  }

  static constexpr size_t TrueBranchIndex = 0;
  static constexpr size_t FalseBranchIndex = 1;

  MBasicBlock* ifTrue() const { return getSuccessor(TrueBranchIndex); }
  MBasicBlock* ifFalse() const { return getSuccessor(FalseBranchIndex); }
  MBasicBlock* branchSuccessor(BranchDirection dir) const {
    return (dir == TRUE_BRANCH) ? ifTrue() : ifFalse();
  }

  AliasSet getAliasSet() const override { return AliasSet::None(); }

  // Folding helpers used by foldsTo to simplify or eliminate this test.
  MDefinition* foldsDoubleNegation(TempAllocator& alloc);
  MDefinition* foldsConstant(TempAllocator& alloc);
  MDefinition* foldsTypes(TempAllocator& alloc);
  MDefinition* foldsNeedlessControlFlow(TempAllocator& alloc);
  MDefinition* foldsRedundantTest(TempAllocator& alloc);
  MDefinition* foldsTo(TempAllocator& alloc) override;

#ifdef DEBUG
  bool isConsistentFloat32Use(MUse* use) const override { return true; }
#endif

#ifdef JS_JITSPEW
  // Adds "true->BlockN false->BlockM" to the spew output.
  void getExtras(ExtrasCollector* extras) const override {
    char buf[64];
    SprintfLiteral(buf, "true->Block%u false->Block%u",
                   GetMBasicBlockId(ifTrue()), GetMBasicBlockId(ifFalse()));
    extras->add(buf);
  }
#endif
};
--> --------------------
--> maximum size reached
--> --------------------