/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- * vim: set ts=8 sts=2 et sw=2 tw=80: * * Copyright 2015 Mozilla Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/
// Return the size of the stack slot the multi-value result ABI allocates for
// a result of the given wasm value type. See also ABIResult::size() and
// MIRTypeToABIResultSize below, which must agree with this mapping.
static uint32_t ResultStackSize(ValType type) {
  switch (type.kind()) {
    case ValType::I32:
      return ABIResult::StackSizeOfInt32;
    case ValType::I64:
      return ABIResult::StackSizeOfInt64;
    case ValType::F32:
      return ABIResult::StackSizeOfFloat;
    case ValType::F64:
      return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
    case ValType::V128:
      return ABIResult::StackSizeOfV128;
#endif
    case ValType::Ref:
      return ABIResult::StackSizeOfPtr;
    default:
      MOZ_CRASH("Unexpected result type");
  }
}
// Compute the size of the stack slot that the wasm ABI requires be allocated
// for a particular MIRType. Note that this sometimes differs from the
// MIRType's natural size. See also ResultStackSize above and ABIResult::size()
// and ABIResultIter below.
uint32_t js::wasm::MIRTypeToABIResultSize(jit::MIRType type) {
  switch (type) {
    case MIRType::Int32:
      return ABIResult::StackSizeOfInt32;
    case MIRType::Int64:
      return ABIResult::StackSizeOfInt64;
    case MIRType::Float32:
      return ABIResult::StackSizeOfFloat;
    case MIRType::Double:
      return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      return ABIResult::StackSizeOfV128;
#endif
    // Pointer-sized values (including boxed anyref) all take one pointer slot.
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
      return ABIResult::StackSizeOfPtr;
    default:
      MOZ_CRASH("MIRTypeToABIResultSize - unhandled case");
  }
}
if (taken) {
regs.take(taken.value());
} Register temp = regs.takeAnyGeneral();
{
MOZ_ASSERT(MaybeGetJitContext(), "codegen debug checks require a jit context"); # ifdef JS_CODEGEN_ARM64 if (IsCompilingWasm()) {
masm.setupWasmABICall();
} else { // JS ARM64 has an extra stack pointer which is not managed in WASM.
masm.setupUnalignedABICall(temp);
} # else
masm.setupUnalignedABICall(temp); # endif
passArgAndCall(IsCompilingWasm(), temp);
}
GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) { // If we've gone this far, it means we're actually using the debugging // strings. In this case, we leak them! This is only for debugging, and // doing the right thing is cumbersome (in Ion, it'd mean add a vec of // strings to the IonScript; in wasm, it'd mean add it to the current // Module and serialize it properly). constchar* text = str.release();
staticbool FinishOffsets(MacroAssembler& masm, Offsets* offsets) { // On old ARM hardware, constant pools could be inserted and they need to // be flushed before considering the size of the masm.
masm.flushBuffer();
offsets->end = masm.size(); return !masm.oom();
}
staticvoid SetupABIArguments(MacroAssembler& masm, const FuncExport& fe, const FuncType& funcType, Register argv, Register scratch) { // Copy parameters out of argv and into the registers/stack-slots specified by // the wasm ABI. // // SetupABIArguments are only used for C++ -> wasm calls through callExport(), // and V128 and Ref types (other than externref) are not currently allowed.
ArgTypeVector args(funcType); for (WasmABIArgIter iter(args); !iter.done(); iter++) { unsigned argOffset = iter.index() * sizeof(ExportArg);
Address src(argv, argOffset);
MIRType type = iter.mirType(); switch (iter->kind()) { case ABIArg::GPR: if (type == MIRType::Int32) {
masm.load32(src, iter->gpr());
} elseif (type == MIRType::Int64) {
masm.load64(src, iter->gpr64());
} elseif (type == MIRType::WasmAnyRef) {
masm.loadPtr(src, iter->gpr());
} elseif (type == MIRType::StackResults) {
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
masm.loadPtr(src, iter->gpr());
} else {
MOZ_CRASH("unknown GPR type");
} break; #ifdef JS_CODEGEN_REGISTER_PAIR case ABIArg::GPR_PAIR: if (type == MIRType::Int64) {
masm.load64(src, iter->gpr64());
} else {
MOZ_CRASH("wasm uses hardfp for function calls.");
} break; #endif case ABIArg::FPU: {
static_assert(sizeof(ExportArg) >= jit::Simd128DataSize, "ExportArg must be big enough to store SIMD values"); switch (type) { case MIRType::Double:
masm.loadDouble(src, iter->fpu()); break; case MIRType::Float32:
masm.loadFloat32(src, iter->fpu()); break; case MIRType::Simd128: #ifdef ENABLE_WASM_SIMD // This is only used by the testing invoke path, // wasmLosslessInvoke, and is guarded against in normal JS-API // call paths.
masm.loadUnalignedSimd128(src, iter->fpu()); break; #else
MOZ_CRASH("V128 not supported in SetupABIArguments"); #endif default:
MOZ_CRASH("unexpected FPU type"); break;
} break;
} case ABIArg::Stack: switch (type) { case MIRType::Int32:
masm.load32(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase())); break; case MIRType::Int64: {
RegisterOrSP sp = masm.getStackPointer();
masm.copy64(src, Address(sp, iter->offsetFromArgBase()), scratch); break;
} case MIRType::WasmAnyRef:
masm.loadPtr(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase())); break; case MIRType::Double: {
ScratchDoubleScope fpscratch(masm);
masm.loadDouble(src, fpscratch);
masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase())); break;
} case MIRType::Float32: {
ScratchFloat32Scope fpscratch(masm);
masm.loadFloat32(src, fpscratch);
masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase())); break;
} case MIRType::Simd128: { #ifdef ENABLE_WASM_SIMD // This is only used by the testing invoke path, // wasmLosslessInvoke, and is guarded against in normal JS-API // call paths.
ScratchSimd128Scope fpscratch(masm);
masm.loadUnalignedSimd128(src, fpscratch);
masm.storeUnalignedSimd128(
fpscratch,
Address(masm.getStackPointer(), iter->offsetFromArgBase())); break; #else
MOZ_CRASH("V128 not supported in SetupABIArguments"); #endif
} case MIRType::StackResults: {
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
masm.loadPtr(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase())); break;
} default:
MOZ_CRASH("unexpected stack arg type");
} break; case ABIArg::Uninitialized:
MOZ_CRASH("Uninitialized ABIArg kind");
}
}
}
staticvoid StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe, const FuncType& funcType, Register loc) {
ResultType results = ResultType::Vector(funcType.results());
DebugOnly<bool> sawRegisterResult = false; for (ABIResultIter iter(results); !iter.done(); iter.next()) { const ABIResult& result = iter.cur(); if (result.inRegister()) {
MOZ_ASSERT(!sawRegisterResult);
sawRegisterResult = true; switch (result.type().kind()) { case ValType::I32:
masm.store32(result.gpr(), Address(loc, 0)); break; case ValType::I64:
masm.store64(result.gpr64(), Address(loc, 0)); break; case ValType::V128: #ifdef ENABLE_WASM_SIMD
masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0)); break; #else
MOZ_CRASH("V128 not supported in StoreABIReturn"); #endif case ValType::F32:
masm.storeFloat32(result.fpr(), Address(loc, 0)); break; case ValType::F64:
masm.storeDouble(result.fpr(), Address(loc, 0)); break; case ValType::Ref:
masm.storePtr(result.gpr(), Address(loc, 0)); break;
}
}
}
MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
}
#ifdefined(JS_CODEGEN_ARM) // The ARM system ABI also includes d15 & s31 in the non volatile float // registers. Also exclude lr (a.k.a. r14) as we preserve it manually. staticconst LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
GeneralRegisterSet(Registers::NonVolatileMask &
~(Registers::SetType(1) << Registers::lr)),
FloatRegisterSet(FloatRegisters::NonVolatileMask |
(FloatRegisters::SetType(1) << FloatRegisters::d15) |
(FloatRegisters::SetType(1) << FloatRegisters::s31))); #elifdefined(JS_CODEGEN_ARM64) // Exclude the Link Register (x30) because it is preserved manually. // // Include x16 (scratch) to make a 16-byte aligned amount of integer registers. // Include d31 (scratch) to make a 16-byte aligned amount of floating registers. staticconst LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
GeneralRegisterSet((Registers::NonVolatileMask &
~(Registers::SetType(1) << Registers::lr)) |
(Registers::SetType(1) << Registers::x16)),
FloatRegisterSet(FloatRegisters::NonVolatileMask |
FloatRegisters::NonAllocatableMask)); #else staticconst LiveRegisterSet NonVolatileRegs =
LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
FloatRegisterSet(FloatRegisters::NonVolatileMask)); #endif
staticvoid AssertExpectedSP(MacroAssembler& masm) { #ifdef JS_CODEGEN_ARM64
MOZ_ASSERT(sp.Is(masm.GetStackPointer64())); # ifdef DEBUG // Since we're asserting that SP is the currently active stack pointer, // let's also in effect assert that PSP is dead -- by setting it to 1, so as // to cause to cause any attempts to use it to segfault in an easily // identifiable way.
masm.asVIXL().Mov(PseudoStackPointer64, 1); # endif #endif
}
template <class Operand> staticvoid WasmPush(MacroAssembler& masm, const Operand& op) { #ifdef JS_CODEGEN_ARM64 // Allocate a pad word so that SP can remain properly aligned. |op| will be // written at the lower-addressed of the two words pushed here.
masm.reserveStack(WasmPushSize);
masm.storePtr(op, Address(masm.getStackPointer(), 0)); #else
masm.Push(op); #endif
}
staticvoid WasmPop(MacroAssembler& masm, Register r) { #ifdef JS_CODEGEN_ARM64 // Also pop the pad word allocated by WasmPush.
masm.loadPtr(Address(masm.getStackPointer(), 0), r);
masm.freeStack(WasmPushSize); #else
masm.Pop(r); #endif
}
// Generate a stub that enters wasm from a C++ caller via the native ABI. The // signature of the entry point is Module::ExportFuncPtr. The exported wasm // function has an ABI derived from its specific signature, so this function // must map from the ABI of ExportFuncPtr to the export's signature's ABI. staticbool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe, const FuncType& funcType, const Maybe<ImmPtr>& funcPtr,
Offsets* offsets) {
AutoCreatedBy acb(masm, "GenerateInterpEntry");
AssertExpectedSP(masm);
// UBSAN expects that the word before a C++ function pointer is readable for // some sort of generated assertion. // // These interp entry points can sometimes be output at the beginning of a // code page allocation, which will cause access violations when called with // UBSAN enabled. // // Insert some padding in this case by inserting a breakpoint before we align // our code. This breakpoint will misalign the code buffer (which was aligned // due to being at the beginning of the buffer), which will then be aligned // and have at least one word of padding before this entry point. if (masm.currentOffset() == 0) {
masm.breakpoint();
}
masm.haltingAlign(CodeAlignment);
// Double check that the first word is available for UBSAN; see above.
static_assert(CodeAlignment >= sizeof(uintptr_t));
MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() >= sizeof(uintptr_t));
offsets->begin = masm.currentOffset();
// Save the return address if it wasn't already saved by the call insn. #ifdef JS_USE_LINK_REGISTER # ifdefined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
masm.pushReturnAddress(); # elif defined(JS_CODEGEN_ARM64) // WasmPush updates framePushed() unlike pushReturnAddress(), but that's // cancelled by the setFramePushed() below.
WasmPush(masm, lr); # else
MOZ_CRASH("Implement this"); # endif #endif
// Save all caller non-volatile registers before we clobber them here and in // the wasm callee (which does not preserve non-volatile registers).
masm.setFramePushed(0);
masm.PushRegsInMask(NonVolatileRegs);
// Put the 'argv' argument into a non-argument/return/instance register so // that we can use 'argv' while we fill in the arguments for the wasm callee. // Use a second non-argument/return register as temporary scratch. Register argv = ABINonArgReturnReg0; Register scratch = ABINonArgReturnReg1;
// scratch := SP
masm.moveStackPtrTo(scratch);
// Dynamically align the stack since ABIStackAlignment is not necessarily // WasmStackAlignment. Preserve SP so it can be restored after the call. #ifdef JS_CODEGEN_ARM64
static_assert(WasmStackAlignment == 16, "ARM64 SP alignment"); #else
masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1))); #endif
masm.assertStackAlignment(WasmStackAlignment);
// Create a fake frame: just previous RA and an FP. const size_t FakeFrameSize = 2 * sizeof(void*); #ifdef JS_CODEGEN_ARM64
masm.Ldr(ARMRegister(ABINonArgReturnReg0, 64),
MemOperand(ARMRegister(scratch, 64), nonVolatileRegsPushSize)); #else
masm.Push(Address(scratch, nonVolatileRegsPushSize)); #endif // Store fake wasm register state. Ensure the frame pointer passed by the C++ // caller doesn't have the ExitFPTag bit set to not confuse frame iterators. // This bit shouldn't be set if C++ code is using frame pointers, so this has // no effect on native stack unwinders.
masm.andPtr(Imm32(int32_t(~ExitFPTag)), FramePointer); #ifdef JS_CODEGEN_ARM64
masm.asVIXL().Push(ARMRegister(ABINonArgReturnReg0, 64),
ARMRegister(FramePointer, 64));
masm.moveStackPtrTo(FramePointer); #else
masm.Push(FramePointer); #endif
// Read the arguments of wasm::ExportFuncPtr according to the native ABI. // The entry stub's frame is 1 word. constunsigned argBase = sizeof(void*) + nonVolatileRegsPushSize;
ABIArgGenerator abi;
ABIArg arg;
// Align (missing) results area to WasmStackAlignment boudary. Return calls // expect arguments to not overlap with results or other slots. unsigned aligned =
AlignBytes(masm.framePushed() + FakeFrameSize, WasmStackAlignment);
masm.reserveStack(aligned - masm.framePushed() + FakeFrameSize);
// Reserve stack space for the wasm call. unsigned argDecrement = StackDecrementForCall(
WasmStackAlignment, aligned, StackArgBytesForWasmABI(funcType));
masm.reserveStack(argDecrement);
// Copy parameters out of argv and into the wasm ABI registers/stack-slots.
SetupABIArguments(masm, fe, funcType, argv, scratch);
// Call into the real function. Note that, due to the throw stub, fp, instance // and pinned registers may be clobbered.
masm.assertStackAlignment(WasmStackAlignment);
CallFuncExport(masm, fe, funcPtr);
masm.assertStackAlignment(WasmStackAlignment);
// Set the return value based on whether InstanceReg is the // InterpFailInstanceReg magic value (set by the exception handler).
Label success, join;
masm.branchPtr(Assembler::NotEqual, InstanceReg, Imm32(InterpFailInstanceReg),
&success);
masm.move32(Imm32(false), scratch);
masm.jump(&join);
masm.bind(&success);
masm.move32(Imm32(true), scratch);
masm.bind(&join);
// Pop the arguments pushed after the dynamic alignment.
masm.setFramePushed(frameSizeBeforeCall);
masm.freeStackTo(frameSizeBeforeCall);
// Recover the 'argv' pointer which was saved before aligning the stack.
WasmPop(masm, argv);
WasmPop(masm, InstanceReg);
// Pop the stack pointer to its value right before dynamic alignment. #ifdef JS_CODEGEN_ARM64
static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
masm.setFramePushed(FakeFrameSize);
masm.freeStack(FakeFrameSize); #else
masm.PopStackPtr(); #endif
// Store the register result, if any, in argv[0]. // No widening is required, as the value leaves ReturnReg.
StoreRegisterResult(masm, fe, funcType, argv);
masm.move32(scratch, ReturnReg);
// Restore clobbered non-volatile registers of the caller.
masm.setFramePushed(nonVolatileRegsPushSize);
masm.PopRegsInMask(NonVolatileRegs);
MOZ_ASSERT(masm.framePushed() == 0);
// Creates a JS fake exit frame for wasm, so the frame iterators just use // JSJit frame iteration. // // Note: the caller must ensure InstanceReg is valid. staticvoid GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
AssertExpectedSP(masm);
// Helper function for allocating a BigInt and initializing it from an I64 in // GenerateJitEntry. The return result is written to scratch. // // Note that this will create a new frame and must not - in its current form - // be called from a context where there is already another stub frame on the // stack, as that confuses unwinding during profiling. This was a problem for // its use from GenerateImportJitExit, see bug 1754258. Therefore, // FuncType::canHaveJitExit prevents the present function from being called for // exits. staticvoid GenerateBigIntInitialization(MacroAssembler& masm, unsigned bytesPushedByPrologue,
Register64 input, Register scratch, const FuncExport& fe, Label* fail) { #if JS_BITS_PER_WORD == 32
MOZ_ASSERT(input.low != scratch);
MOZ_ASSERT(input.high != scratch); #else
MOZ_ASSERT(input.reg != scratch); #endif
// We need to avoid clobbering other argument registers and the input.
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
masm.PushRegsInMask(save);
// Generate a stub that enters wasm from a jit code caller via the jit ABI. // // ARM64 note: This does not save the PseudoStackPointer so we must be sure to // recompute it on every return path, be it normal return or exception return. // The JIT code we return to assumes it is correct.
// The jit caller has set up the following stack layout (sp grows to the // left): // <-- retAddr | descriptor | callee | argc | this | arg1..N // // GenerateJitEntryPrologue has additionally pushed the caller's frame // pointer. The stack pointer is now JitStackAlignment-aligned. // // We initialize an ExitFooterFrame (with ExitFrameType::WasmGenericJitEntry) // immediately below the frame pointer to ensure FP is a valid JS JIT exit // frame.
MOZ_ASSERT(masm.framePushed() == 0);
// Avoid overlapping aligned stack arguments area with ExitFooterFrame. constunsigned AlignedExitFooterFrameSize =
AlignBytes(ExitFooterFrame::Size(), WasmStackAlignment); unsigned normalBytesNeeded =
AlignedExitFooterFrameSize + StackArgBytesForWasmABI(funcType);
// We do two loops: // - one loop up-front will make sure that all the Value tags fit the // expected signature argument types. If at least one inline conversion // fails, we just jump to the OOL path which will call into C++. Inline // conversions are ordered in the way we expect them to happen the most. // - the second loop will unbox the arguments into the right registers.
Label oolCall; for (size_t i = 0; i < funcType.args().length(); i++) {
Address jitArgAddr(FramePointer, JitFrameLayout::offsetOfActualArg(i));
masm.loadValue(jitArgAddr, scratchV);
// Other types (symbol, object, strings) go to the C++ call.
masm.jump(&oolCall);
}
Label storeBack;
// For double inputs, unbox, truncate and store back.
masm.bind(&isDouble);
{
masm.unboxDouble(scratchV, scratchF);
masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
masm.jump(&storeBack);
}
// For null or undefined, store 0.
masm.bind(&isUndefinedOrNull);
{
masm.storeValue(Int32Value(0), jitArgAddr);
masm.jump(&next);
}
// For booleans, store the number value back.
masm.bind(&isBoolean);
masm.unboxBoolean(scratchV, scratchG); // fallthrough:
masm.bind(&storeBack);
masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr); break;
} case ValType::I64: { // For BigInt inputs, just skip. Otherwise go to C++ for other // types that require creating a new BigInt or erroring.
masm.branchTestBigInt(Assembler::NotEqual, scratchV, &oolCall); break;
} case ValType::F32: case ValType::F64: { // Note we can reuse the same code for f32/f64 here, since for the // case of f32, the conversion of f64 to f32 will happen in the // second loop.
// Other types (symbol, object, strings) go to the C++ call.
masm.jump(&oolCall);
}
// For int32 and boolean inputs, convert and rebox.
masm.bind(&isInt32OrBoolean);
{
masm.convertInt32ToDouble(scratchV.payloadOrValueReg(), scratchF);
masm.boxDouble(scratchF, jitArgAddr);
masm.jump(&next);
}
// For undefined (missing argument), store NaN.
masm.bind(&isUndefined);
{
masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
masm.jump(&next);
}
// +null is 0.
masm.bind(&isNull);
{
masm.storeValue(DoubleValue(0.), jitArgAddr);
} break;
} case ValType::Ref: { // Guarded against by temporarilyUnsupportedReftypeForEntry()
MOZ_RELEASE_ASSERT(funcType.args()[i].refType().isExtern());
masm.branchValueConvertsToWasmAnyRefInline(scratchV, scratchG, scratchF,
&next);
masm.jump(&oolCall); break;
} case ValType::V128: { // Guarded against by hasUnexposableArgOrRet()
MOZ_CRASH("unexpected argument type when calling from the jit");
} default: {
MOZ_CRASH("unexpected argument type when calling from the jit");
}
}
masm.nopAlign(CodeAlignment);
masm.bind(&next);
}
// Call into the real function.
masm.assertStackAlignment(WasmStackAlignment);
CallFuncExport(masm, fe, funcPtr);
masm.assertStackAlignment(WasmStackAlignment);
// Generate an OOL call to the C++ conversion path. bool hasFallThroughForException = false; if (oolCall.used()) {
masm.bind(&oolCall);
masm.setFramePushed(frameSize);
// Baseline and Ion call C++ runtime via BuiltinThunk with wasm abi, so to // unify the BuiltinThunk's interface we call it here with wasm abi.
jit::WasmABIArgIter<MIRTypeVector> argsIter(coerceArgTypes);
// argument 0: function index. if (argsIter->kind() == ABIArg::GPR) {
masm.movePtr(ImmWord(fe.funcIndex()), argsIter->gpr());
} else {
masm.storePtr(ImmWord(fe.funcIndex()),
Address(sp, argsIter->offsetFromArgBase()));
}
argsIter++;
// Note, if code here pushes a reference value into the frame for its own // purposes (and not just as an argument to the callee) then the frame must be // traced in TraceJitExitFrame, see the case there for DirectWasmJitCall. The // callee will trace values that are pushed as arguments, however.
// Push a special frame descriptor that indicates the frame size so we can // directly iterate from the current JIT frame without an extra call. // Note: buildFakeExitFrame pushes an ExitFrameLayout containing the current // frame pointer. We also use this to restore the frame pointer after the // call.
*callOffset = masm.buildFakeExitFrame(scratch); // FP := ExitFrameLayout*
masm.moveStackPtrTo(FramePointer);
size_t framePushedAtFakeFrame = masm.framePushed();
masm.setFramePushed(0);
masm.loadJSContext(scratch);
masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
const JitCallStackArg& stackArg = stackArgs[iter.index()]; switch (stackArg.tag()) { case JitCallStackArg::Tag::Imm32:
GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
masm.storePtr(ImmWord(stackArg.imm32()), dst); break; case JitCallStackArg::Tag::GPR:
MOZ_ASSERT(stackArg.gpr() != scratch);
MOZ_ASSERT(stackArg.gpr() != FramePointer);
GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
masm.storePtr(stackArg.gpr(), dst); break; case JitCallStackArg::Tag::FPU: switch (iter.mirType()) { case MIRType::Double:
GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
masm.storeDouble(stackArg.fpu(), dst); break; case MIRType::Float32:
GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
masm.storeFloat32(stackArg.fpu(), dst); break; default:
MOZ_CRASH( "unexpected MIR type for a float register in wasm fast call");
} break; case JitCallStackArg::Tag::Address: { // The address offsets were valid *before* we pushed our frame.
Address src = stackArg.addr();
MOZ_ASSERT(src.base == masm.getStackPointer());
src.offset += int32_t(framePushedAtFakeFrame + fakeFramePushed -
framePushedAtStart); switch (iter.mirType()) { case MIRType::Double: {
ScratchDoubleScope fpscratch(masm);
GenPrintF64(DebugChannel::Function, masm, fpscratch);
masm.loadDouble(src, fpscratch);
masm.storeDouble(fpscratch, dst); break;
} case MIRType::Float32: {
ScratchFloat32Scope fpscratch(masm);
masm.loadFloat32(src, fpscratch);
GenPrintF32(DebugChannel::Function, masm, fpscratch);
masm.storeFloat32(fpscratch, dst); break;
} case MIRType::Int32: {
masm.loadPtr(src, scratch);
GenPrintIsize(DebugChannel::Function, masm, scratch);
masm.storePtr(scratch, dst); break;
} case MIRType::WasmAnyRef: {
masm.loadPtr(src, scratch);
GenPrintPtr(DebugChannel::Function, masm, scratch);
masm.storePtr(scratch, dst); break;
} case MIRType::StackResults: {
MOZ_CRASH("multi-value in ion to wasm fast path unimplemented");
} default: {
MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
}
} break;
} case JitCallStackArg::Tag::Undefined: {
MOZ_CRASH("can't happen because of arg.kind() check");
}
}
}
GenPrintf(DebugChannel::Function, masm, "\n");
// Load instance; from now on, InstanceReg is live.
masm.movePtr(ImmPtr(&inst), InstanceReg);
masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
WasmCalleeInstanceOffsetBeforeCall));
masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());
masm.assertStackAlignment(WasmStackAlignment);
MoveSPForJitABI(masm);
masm.callJit(ImmPtr(callee)); #ifdef JS_CODEGEN_ARM64 // WASM does not always keep PSP in sync with SP. So reinitialize it as it // might be clobbered either by WASM or by any C++ calls within.
masm.initPseudoStackPtr(); #endif
masm.freeStackTo(fakeFramePushed);
masm.assertStackAlignment(WasmStackAlignment);
// Store the return value in the appropriate place.
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
fe.funcIndex()); const ValTypeVector& results = funcType.results(); if (results.length() == 0) {
masm.moveValue(UndefinedValue(), JSReturnOperand);
GenPrintf(DebugChannel::Function, masm, "void");
} else {
MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented"); switch (results[0].kind()) { case wasm::ValType::I32: // The return value is in ReturnReg, which is what Ion expects.
GenPrintIsize(DebugChannel::Function, masm, ReturnReg); #ifdef JS_64BIT
masm.widenInt32(ReturnReg); #endif break; case wasm::ValType::I64: // The return value is in ReturnReg64, which is what Ion expects.
GenPrintI64(DebugChannel::Function, masm, ReturnReg64); break; case wasm::ValType::F32:
masm.canonicalizeFloat(ReturnFloat32Reg);
GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg); break; case wasm::ValType::F64:
masm.canonicalizeDouble(ReturnDoubleReg);
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg); break; case wasm::ValType::Ref:
GenPrintPtr(DebugChannel::Import, masm, ReturnReg); // The call to wasm above preserves the InstanceReg, we don't // need to reload it here.
masm.convertWasmAnyRefToValue(InstanceReg, ReturnReg, JSReturnOperand,
WasmJitEntryReturnScratch); break; case wasm::ValType::V128:
MOZ_CRASH("unexpected return type when calling from ion to wasm");
}
}
staticvoid FillArgumentArrayForInterpExit(MacroAssembler& masm, unsigned funcImportIndex, const FuncType& funcType, unsigned argOffset, Register scratch) { // This is `sizeof(FrameWithInstances) - ShadowStackSpace` because the latter // is accounted for by the ABIArgIter. constunsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithInstances) - jit::ShadowStackSpace;
MIRType type = i.mirType();
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
(type == MIRType::StackResults)); switch (i->kind()) { case ABIArg::GPR: if (type == MIRType::Int32) {
GenPrintIsize(DebugChannel::Import, masm, i->gpr());
masm.store32(i->gpr(), dst);
} elseif (type == MIRType::Int64) {
GenPrintI64(DebugChannel::Import, masm, i->gpr64());
masm.store64(i->gpr64(), dst);
} elseif (type == MIRType::WasmAnyRef) {
GenPrintPtr(DebugChannel::Import, masm, i->gpr());
masm.storePtr(i->gpr(), dst);
} elseif (type == MIRType::StackResults) {
GenPrintPtr(DebugChannel::Import, masm, i->gpr());
masm.storePtr(i->gpr(), dst);
} else {
MOZ_CRASH( "FillArgumentArrayForInterpExit, ABIArg::GPR: unexpected type");
} break; #ifdef JS_CODEGEN_REGISTER_PAIR case ABIArg::GPR_PAIR: if (type == MIRType::Int64) {
GenPrintI64(DebugChannel::Import, masm, i->gpr64());
masm.store64(i->gpr64(), dst);
} else {
MOZ_CRASH("wasm uses hardfp for function calls.");
} break; #endif case ABIArg::FPU: {
FloatRegister srcReg = i->fpu(); if (type == MIRType::Double) {
GenPrintF64(DebugChannel::Import, masm, srcReg);
masm.storeDouble(srcReg, dst);
} elseif (type == MIRType::Float32) { // Preserve the NaN pattern in the input.
GenPrintF32(DebugChannel::Import, masm, srcReg);
masm.storeFloat32(srcReg, dst);
} elseif (type == MIRType::Simd128) { // The value should never escape; the call will be stopped later as // the import is being called. But we should generate something sane // here for the boxed case since a debugger or the stack walker may // observe something.
ScratchDoubleScope dscratch(masm);
masm.loadConstantDouble(0, dscratch);
GenPrintF64(DebugChannel::Import, masm, dscratch);
masm.storeDouble(dscratch, dst);
} else {
MOZ_CRASH("Unknown MIRType in wasm exit stub");
} break;
} case ABIArg::Stack: {
Address src(FramePointer,
offsetFromFPToCallerStackArgs + i->offsetFromArgBase()); if (type == MIRType::Simd128) { // As above. StackCopy does not know this trick.
ScratchDoubleScope dscratch(masm);
masm.loadConstantDouble(0, dscratch);
GenPrintF64(DebugChannel::Import, masm, dscratch);
masm.storeDouble(dscratch, dst);
} else {
StackCopy(masm, type, scratch, src, dst);
} break;
} case ABIArg::Uninitialized:
MOZ_CRASH("Uninitialized ABIArg kind");
}
}
GenPrintf(DebugChannel::Import, masm, "\n");
}
// Note, this may destroy the values in incoming argument registers as a result // of Spectre mitigation. staticvoid FillArgumentArrayForJitExit(MacroAssembler& masm, Register instance, unsigned funcImportIndex, const FuncType& funcType, unsigned argOffset, Register scratch, Register scratch2, Label* throwLabel) {
MOZ_ASSERT(scratch != scratch2);
// This is `sizeof(FrameWithInstances) - ShadowStackSpace` because the latter // is accounted for by the ABIArgIter. constunsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithInstances) - jit::ShadowStackSpace;
// This loop does not root the values that are being constructed in // for the arguments. Allocations that are generated by code either // in the loop or called from it should be NoGC allocations.
GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
funcImportIndex);
MIRType type = i.mirType();
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
(type == MIRType::StackResults)); switch (i->kind()) { case ABIArg::GPR: if (type == MIRType::Int32) {
GenPrintIsize(DebugChannel::Import, masm, i->gpr());
masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
} elseif (type == MIRType::Int64) { // FuncType::canHaveJitExit should prevent this. Also see comments // at GenerateBigIntInitialization.
MOZ_CRASH("Should not happen");
} elseif (type == MIRType::WasmAnyRef) { // This works also for FuncRef because it is distinguishable from // a boxed AnyRef.
masm.movePtr(i->gpr(), scratch2);
GenPrintPtr(DebugChannel::Import, masm, scratch2);
masm.convertWasmAnyRefToValue(instance, scratch2, dst, scratch);
} elseif (type == MIRType::StackResults) {
MOZ_CRASH("Multi-result exit to JIT unimplemented");
} else {
MOZ_CRASH( "FillArgumentArrayForJitExit, ABIArg::GPR: unexpected type");
} break; #ifdef JS_CODEGEN_REGISTER_PAIR case ABIArg::GPR_PAIR: if (type == MIRType::Int64) { // FuncType::canHaveJitExit should prevent this. Also see comments // at GenerateBigIntInitialization.
MOZ_CRASH("Should not happen");
} else {
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.26 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.