/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2016 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmInstance-inl.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include <algorithm>
#include <utility>
#include "jsmath.h"
#include "builtin/String.h"
#include "gc/Barrier.h"
#include "gc/Marking.h"
#include "jit/AtomicOperations.h"
#include "jit/Disassemble.h"
#include "jit/JitCommon.h"
#include "jit/JitRuntime.h"
#include "jit/Registers.h"
#include "js/ForOfIterator.h"
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "js/Stack.h" // JS::NativeStackLimitMin
#include "util/StringBuilder.h"
#include "util/Text.h"
#include "util/Unicode.h"
#include "vm/ArrayBufferObject.h"
#include "vm/BigIntType.h"
#include "vm/Compartment.h"
#include "vm/ErrorObject.h"
#include "vm/Interpreter.h"
#include "vm/Iteration.h"
#include "vm/JitActivation.h"
#include "vm/JSFunction.h"
#include "vm/PlainObject.h" // js::PlainObject
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmDebug.h"
#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmFeatures.h"
#include "wasm/WasmHeuristics.h"
#include "wasm/WasmInitExpr.h"
#include "wasm/WasmJS.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmModule.h"
#include "wasm/WasmModuleTypes.h"
#include "wasm/WasmPI.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValType.h"
#include "wasm/WasmValue.h"
#include "gc/Marking-inl.h"
#include "gc/StoreBuffer-inl.h"
#include "vm/ArrayBufferObject-inl.h"
#include "vm/JSObject-inl.h"
#include "wasm/WasmGcObject-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::CheckedUint32;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
// Compile-time layout invariants for Instance. These lock down properties the
// JIT relies on when emitting loads/stores against the instance pointer.

// Instance must be aligned at least as much as any of the integer, float,
// or SIMD values that we'd like to store in it.
static_assert(alignof(Instance) >=
              std::max(sizeof(Registers::RegisterContent),
                       sizeof(FloatRegisters::RegisterContent)));

// The globalArea must be aligned at least as much as an instance. This is
// guaranteed to be sufficient for all data types we care about, including
// SIMD values. See the above assertion.
static_assert(Instance::offsetOfData() % alignof(Instance) == 0);

// We want the memory base to be the first field, and accessible with no
// offset. This incidentally is also an assertion that there is no superclass
// with fields.
static_assert(Instance::offsetOfMemory0Base() == 0);

// We want instance fields that are commonly accessed by the JIT to have
// compact encodings. A limit of less than 128 bytes is chosen to fit within
// the signed 8-bit mod r/m x86 encoding.
static_assert(Instance::offsetOfLastCommonJitField() < 128);
//////////////////////////////////////////////////////////////////////////////
//
// Functions and invocation.
// Returns the per-function instance data for a locally-defined function.
// `funcIndex` is module-wide, so the count of imported functions is
// subtracted before indexing the defined-function table.
FuncDefInstanceData* Instance::funcDefInstanceData(uint32_t funcIndex) const {
  MOZ_ASSERT(funcIndex >= codeMeta().numFuncImports);
  uint32_t defIndex = funcIndex - codeMeta().numFuncImports;
  auto* entries =
      (FuncDefInstanceData*)(data() + codeMeta().funcDefsOffsetStart);
  return entries + defIndex;
}
// Returns the per-type instance data for type `typeIndex`.
TypeDefInstanceData* Instance::typeDefInstanceData(uint32_t typeIndex) const {
  auto* entries =
      (TypeDefInstanceData*)(data() + codeMeta().typeDefsOffsetStart);
  return entries + typeIndex;
}
// Returns the address of the storage cell for `global` inside this
// instance's global data area.
const void* Instance::addressOfGlobalCell(const GlobalDesc& global) const {
  const void* cell = data() + global.offset();
  if (!global.isIndirect()) {
    return cell;
  }
  // Indirect globals store a pointer to their cell in the instance global
  // data; chase that pointer to find the real cell.
  return *(const void**)cell;
}
// Returns the per-import instance data for imported function `funcIndex`.
FuncImportInstanceData& Instance::funcImportInstanceData(uint32_t funcIndex) {
  MOZ_ASSERT(funcIndex < codeMeta().numFuncImports);
  auto* entries =
      (FuncImportInstanceData*)(data() + codeMeta().funcImportsOffsetStart);
  return entries[funcIndex];
}
// Returns the per-export instance data for export `funcExportIndex`.
FuncExportInstanceData& Instance::funcExportInstanceData(
    uint32_t funcExportIndex) {
  auto* entries =
      (FuncExportInstanceData*)(data() + codeMeta().funcExportsOffsetStart);
  return entries[funcExportIndex];
}
// Returns the per-memory instance data for memory `memoryIndex`.
MemoryInstanceData& Instance::memoryInstanceData(uint32_t memoryIndex) const {
  auto* entries =
      (MemoryInstanceData*)(data() + codeMeta().memoriesOffsetStart);
  return entries[memoryIndex];
}
// Returns the per-table instance data for table `tableIndex`.
TableInstanceData& Instance::tableInstanceData(uint32_t tableIndex) const {
  auto* entries = (TableInstanceData*)(data() + codeMeta().tablesOffsetStart);
  return entries[tableIndex];
}
// Returns the per-tag instance data for exception tag `tagIndex`.
TagInstanceData& Instance::tagInstanceData(uint32_t tagIndex) const {
  auto* entries = (TagInstanceData*)(data() + codeMeta().tagsOffsetStart);
  return entries[tagIndex];
}
// Convert the JS value `rval` produced by an imported function call back into
// wasm results: register results are written to `argv[0]` and stack results
// into `stackResultsArea`. With no stack-results area there is at most one
// result; otherwise `rval` must be an iterable whose length matches
// `resultTypes`. Returns false with a pending exception on `cx` on failure.
static bool UnpackResults(JSContext* cx, const ValTypeVector& resultTypes,
                          const Maybe<char*> stackResultsArea, uint64_t* argv,
                          MutableHandleValue rval) {
  if (!stackResultsArea) {
    MOZ_ASSERT(resultTypes.length() <= 1);
    // Result is either one scalar value to unpack to a wasm value, or
    // an ignored value for a zero-valued function.
    if (resultTypes.length() == 1) {
      return ToWebAssemblyValue(cx, rval, resultTypes[0], argv, true);
    }
    return true;
  }

  MOZ_ASSERT(stackResultsArea.isSome());
  Rooted<ArrayObject*> array(cx);
  if (!IterableToArray(cx, rval, &array)) {
    return false;
  }

  // A result-count mismatch between the function type and the iterable is a
  // TypeError, reported with both counts.
  if (resultTypes.length() != array->length()) {
    UniqueChars expected(JS_smprintf("%zu", resultTypes.length()));
    UniqueChars got(JS_smprintf("%u", array->length()));
    if (!expected || !got) {
      ReportOutOfMemory(cx);
      return false;
    }
    JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                             JSMSG_WASM_WRONG_NUMBER_OF_VALUES, expected.get(),
                             got.get());
    return false;
  }

  // previousOffset tracks (in DEBUG builds only) that stack results are
  // visited at strictly descending, densely packed offsets.
  DebugOnly<uint64_t> previousOffset = ~(uint64_t)0;

  ABIResultIter iter(ResultType::Vector(resultTypes));
  // The values are converted in the order they are pushed on the
  // abstract WebAssembly stack; switch to iterate in push order.
  while (!iter.done()) {
    iter.next();
  }
  DebugOnly<bool> seenRegisterResult = false;
  for (iter.switchToPrev(); !iter.done(); iter.prev()) {
    const ABIResult& result = iter.cur();
    MOZ_ASSERT(!seenRegisterResult);
    // Use rval as a scratch area to hold the extracted result.
    rval.set(array->getDenseElement(iter.index()));
    if (result.inRegister()) {
      // Currently, if a function type has results, there can be only
      // one register result. If there is only one result, it is
      // returned as a scalar and not an iterable, so we don't get here.
      // If there are multiple results, we extract the register result
      // and set `argv[0]` set to the extracted result, to be returned by
      // register in the stub. The register result follows any stack
      // results, so this preserves conversion order.
      if (!ToWebAssemblyValue(cx, rval, result.type(), argv, true)) {
        return false;
      }
      seenRegisterResult = true;
      continue;
    }
    uint32_t result_size = result.size();
    MOZ_ASSERT(result_size == 4 || result_size == 8);
#ifdef DEBUG
    if (previousOffset == ~(uint64_t)0) {
      previousOffset = (uint64_t)result.stackOffset();
    } else {
      MOZ_ASSERT(previousOffset - (uint64_t)result_size ==
                 (uint64_t)result.stackOffset());
      previousOffset -= (uint64_t)result_size;
    }
#endif
    char* loc = stackResultsArea.value() + result.stackOffset();
    if (!ToWebAssemblyValue(cx, rval, result.type(), loc, result_size == 8)) {
      return false;
    }
  }

  return true;
}
// Call imported function `funcImportIndex` with wasm arguments in `argv`,
// writing results back through `argv` (and any stack-results area it
// references). Arguments are converted to JS values in two passes: a first
// pass under a no-GC scope handles every type whose conversion cannot GC,
// and a second pass handles the rest (which may allocate). After a
// successful call, the import may be promoted to the faster JIT exit stub.
// Returns false with a pending exception on failure.
bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex,
                          unsigned argc, uint64_t* argv) {
  AssertRealmUnchanged aru(cx);

  const FuncImport& fi = code().funcImport(funcImportIndex);
  const FuncType& funcType = codeMeta().getFuncType(funcImportIndex);

  ArgTypeVector argTypes(funcType);
  InvokeArgs args(cx);
  if (!args.init(cx, argTypes.lengthWithoutStackResults())) {
    return false;
  }

  // Signatures containing a type that cannot be exposed to JS fail fast.
  if (funcType.hasUnexposableArgOrRet()) {
    JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                             JSMSG_WASM_BAD_VAL_TYPE);
    return false;
  }

  MOZ_ASSERT(argTypes.lengthWithStackResults() == argc);
  Maybe<char*> stackResultPointer;
  size_t lastBoxIndexPlusOne = 0;
  {
    JS::AutoAssertNoGC nogc;
    for (size_t i = 0; i < argc; i++) {
      const void* rawArgLoc = &argv[i];
      // The synthetic stack-result pointer argument is captured here, not
      // converted to a JS value.
      if (argTypes.isSyntheticStackResultPointerArg(i)) {
        stackResultPointer = Some(*(char**)rawArgLoc);
        continue;
      }
      size_t naturalIndex = argTypes.naturalIndex(i);
      ValType type = funcType.args()[naturalIndex];
      // Avoid boxes creation not to trigger GC.
      if (ToJSValueMayGC(type)) {
        lastBoxIndexPlusOne = i + 1;
        continue;
      }
      MutableHandleValue argValue = args[naturalIndex];
      if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
        return false;
      }
    }
  }

  // Visit arguments that need to perform allocation in a second loop
  // after the rest of arguments are converted.
  for (size_t i = 0; i < lastBoxIndexPlusOne; i++) {
    if (argTypes.isSyntheticStackResultPointerArg(i)) {
      continue;
    }
    const void* rawArgLoc = &argv[i];
    size_t naturalIndex = argTypes.naturalIndex(i);
    ValType type = funcType.args()[naturalIndex];
    if (!ToJSValueMayGC(type)) {
      continue;
    }
    MOZ_ASSERT(!type.isRefRepr());
    // The conversions are safe here because source values are not references
    // and will not be moved.
    MutableHandleValue argValue = args[naturalIndex];
    if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
      return false;
    }
  }

  FuncImportInstanceData& import = funcImportInstanceData(funcImportIndex);
  Rooted<JSObject*> importCallable(cx, import.callable);
  MOZ_ASSERT(cx->realm() == importCallable->nonCCWRealm());

  RootedValue fval(cx, ObjectValue(*importCallable));
  RootedValue thisv(cx, UndefinedValue());
  RootedValue rval(cx);
  if (!Call(cx, fval, thisv, args, &rval)) {
    return false;
  }

  if (!UnpackResults(cx, funcType.results(), stackResultPointer, argv, &rval)) {
    return false;
  }

  if (!JitOptions.enableWasmJitExit) {
    return true;
  }

  // The remainder decides whether to upgrade this import to the JIT exit
  // stub; every early `return true` below simply leaves it unoptimized.

  // The import may already have become optimized.
  void* jitExitCode =
      code().sharedStubs().segment->base() + fi.jitExitCodeOffset();
  if (import.code == jitExitCode) {
    return true;
  }

  if (!importCallable->is<JSFunction>()) {
    return true;
  }

  // Test if the function is JIT compiled.
  if (!importCallable->as<JSFunction>().hasBytecode()) {
    return true;
  }

  JSScript* script = importCallable->as<JSFunction>().nonLazyScript();
  if (!script->hasJitScript()) {
    return true;
  }

  // Skip if the function does not have a signature that allows for a JIT exit.
  if (!funcType.canHaveJitExit()) {
    return true;
  }

  // Let's optimize it!
  import.code = jitExitCode;
  return true;
}
// Builtin call target for all import calls from wasm code; dispatches to
// Instance::callImport.
/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_general(Instance* instance, int32_t funcImportIndex,
                             int32_t argc, uint64_t* argv) {
  JSContext* cx = instance->cx();
#ifdef ENABLE_WASM_JSPI
  // When executing on a suspendable stack (JS Promise Integration), the
  // import call must run on the main stack: bundle the arguments into a
  // small struct and trampoline over via CallOnMainStack.
  if (IsSuspendableStackActive(cx)) {
    struct ImportCallData {
      Instance* instance;
      int32_t funcImportIndex;
      int32_t argc;
      uint64_t* argv;
      // Static thunk with the signature CallOnMainStack expects.
      static bool Call(ImportCallData* data) {
        Instance* instance = data->instance;
        JSContext* cx = instance->cx();
        return instance->callImport(cx, data->funcImportIndex, data->argc,
                                    data->argv);
      }
    } data = {instance, funcImportIndex, argc, argv};
    return CallOnMainStack(
        cx, reinterpret_cast<CallOnMainStackFn>(ImportCallData::Call), &data);
  }
#endif
  return instance->callImport(cx, funcImportIndex, argc, argv);
}
//////////////////////////////////////////////////////////////////////////////
//
// Atomic operations and shared memory.
// Shared implementation of the wasm atomic wait operations, parameterized on
// value width (ValT: int32_t/int64_t) and address width (PtrT).
//
// Returns 0 ("ok"), 1 ("not-equal"), or 2 ("timed-out"), or -1 after
// reporting a trap/error on the context. A negative `timeout_ns` means wait
// without a timeout.
template <typename ValT, typename PtrT>
static int32_t PerformWait(Instance* instance, uint32_t memoryIndex,
                           PtrT byteOffset, ValT value, int64_t timeout_ns) {
  JSContext* cx = instance->cx();

  // Waiting is only permitted on shared memories.
  if (!instance->memory(memoryIndex)->isShared()) {
    ReportTrapError(cx, JSMSG_WASM_NONSHARED_WAIT);
    return -1;
  }

  // The wait address must be naturally aligned for the value size.
  if (byteOffset & (sizeof(ValT) - 1)) {
    ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
    return -1;
  }

  // Bounds check against the current (volatile) memory length.
  if (byteOffset + sizeof(ValT) >
      instance->memory(memoryIndex)->volatileMemoryLength()) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  mozilla::Maybe<mozilla::TimeDuration> timeout;
  if (timeout_ns >= 0) {
    timeout = mozilla::Some(
        mozilla::TimeDuration::FromMicroseconds(double(timeout_ns) / 1000));
  }

  MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
  switch (atomics_wait_impl(cx, instance->sharedMemoryBuffer(memoryIndex),
                            size_t(byteOffset), value, timeout)) {
    case FutexThread::WaitResult::OK:
      return 0;
    case FutexThread::WaitResult::NotEqual:
      return 1;
    case FutexThread::WaitResult::TimedOut:
      return 2;
    case FutexThread::WaitResult::Error:
      return -1;
    default:
      MOZ_CRASH();
  }
}
// The four wait entry points below dispatch to PerformWait with the
// (value width x address width) combination fixed by the signature. Each
// asserts its builtin signature reports failure as a negative i32.

/* static */ int32_t Instance::wait_i32_m32(Instance* instance,
                                            uint32_t byteOffset, int32_t value,
                                            int64_t timeout_ns,
                                            uint32_t memoryIndex) {
  MOZ_ASSERT(SASigWaitI32M32.failureMode == FailureMode::FailOnNegI32);
  return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}

/* static */ int32_t Instance::wait_i32_m64(Instance* instance,
                                            uint64_t byteOffset, int32_t value,
                                            int64_t timeout_ns,
                                            uint32_t memoryIndex) {
  MOZ_ASSERT(SASigWaitI32M64.failureMode == FailureMode::FailOnNegI32);
  return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}

/* static */ int32_t Instance::wait_i64_m32(Instance* instance,
                                            uint32_t byteOffset, int64_t value,
                                            int64_t timeout_ns,
                                            uint32_t memoryIndex) {
  MOZ_ASSERT(SASigWaitI64M32.failureMode == FailureMode::FailOnNegI32);
  return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}

/* static */ int32_t Instance::wait_i64_m64(Instance* instance,
                                            uint64_t byteOffset, int64_t value,
                                            int64_t timeout_ns,
                                            uint32_t memoryIndex) {
  MOZ_ASSERT(SASigWaitI64M64.failureMode == FailureMode::FailOnNegI32);
  return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
// Shared implementation of the wasm atomic notify operation, parameterized on
// address width (PtrT). Returns the number of woken waiters, or -1 after
// reporting a trap.
template <typename PtrT>
static int32_t PerformWake(Instance* instance, PtrT byteOffset, int32_t count,
                           uint32_t memoryIndex) {
  JSContext* cx = instance->cx();

  // The alignment guard is not in the wasm spec as of 2017-11-02, but is
  // considered likely to appear, as 4-byte alignment is required for WAKE by
  // the spec's validation algorithm.
  if (byteOffset & 3) {
    ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
    return -1;
  }

  if (byteOffset >= instance->memory(memoryIndex)->volatileMemoryLength()) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  // Notifying an unshared memory wakes nobody: nothing can be waiting on it.
  if (!instance->memory(memoryIndex)->isShared()) {
    return 0;
  }

  MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
  int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(memoryIndex),
                                      size_t(byteOffset), int64_t(count));

  // The wasm-visible result is an i32; guard the conversion.
  if (woken > INT32_MAX) {
    ReportTrapError(cx, JSMSG_WASM_WAKE_OVERFLOW);
    return -1;
  }

  return int32_t(woken);
}
// Notify entry point for 32-bit memories; traps report as negative i32.
/* static */ int32_t Instance::wake_m32(Instance* instance, uint32_t byteOffset,
                                        int32_t count, uint32_t memoryIndex) {
  MOZ_ASSERT(SASigWakeM32.failureMode == FailureMode::FailOnNegI32);
  return PerformWake(instance, byteOffset, count, memoryIndex);
}
// Notify entry point for 64-bit memories; traps report as negative i32.
/* static */ int32_t Instance::wake_m64(Instance* instance, uint64_t byteOffset,
                                        int32_t count, uint32_t memoryIndex) {
  // Fix: assert the m64 builtin signature here. The previous code checked
  // SASigWakeM32 (a copy-paste slip); every other m32/m64 wrapper in this
  // file asserts its own signature's failure mode.
  MOZ_ASSERT(SASigWakeM64.failureMode == FailureMode::FailOnNegI32);
  return PerformWake(instance, byteOffset, count, memoryIndex);
}
//////////////////////////////////////////////////////////////////////////////
//
// Bulk memory operations.
// memory.grow for a 32-bit memory: delegates to WasmMemoryObject::grow and
// re-checks that a moving grow updated this instance's cached memory base.
/* static */ uint32_t Instance::memoryGrow_m32(Instance* instance,
                                               uint32_t delta,
                                               uint32_t memoryIndex) {
  MOZ_ASSERT(SASigMemoryGrowM32.failureMode == FailureMode::Infallible);
  MOZ_ASSERT(!instance->isAsmJS());

  JSContext* cx = instance->cx();
  Rooted<WasmMemoryObject*> memory(cx, instance->memory(memoryIndex));

  // It is safe to cast to uint32_t, as all limits have been checked inside
  // grow() and will not have been exceeded for a 32-bit memory.
  uint32_t ret = uint32_t(WasmMemoryObject::grow(memory, uint64_t(delta), cx));

  // If there has been a moving grow, this Instance should have been notified.
  MOZ_RELEASE_ASSERT(
      instance->memoryBase(memoryIndex) ==
      instance->memory(memoryIndex)->buffer().dataPointerEither());

  return ret;
}

// memory.grow for a 64-bit memory; as above but without the 32-bit narrowing.
/* static */ uint64_t Instance::memoryGrow_m64(Instance* instance,
                                               uint64_t delta,
                                               uint32_t memoryIndex) {
  MOZ_ASSERT(SASigMemoryGrowM64.failureMode == FailureMode::Infallible);
  MOZ_ASSERT(!instance->isAsmJS());

  JSContext* cx = instance->cx();
  Rooted<WasmMemoryObject*> memory(cx, instance->memory(memoryIndex));

  uint64_t ret = WasmMemoryObject::grow(memory, delta, cx);

  // If there has been a moving grow, this Instance should have been notified.
  MOZ_RELEASE_ASSERT(
      instance->memoryBase(memoryIndex) ==
      instance->memory(memoryIndex)->buffer().dataPointerEither());

  return ret;
}
// memory.size for a 32-bit memory: returns the current page count.
/* static */ uint32_t Instance::memorySize_m32(Instance* instance,
                                               uint32_t memoryIndex) {
  MOZ_ASSERT(SASigMemorySizeM32.failureMode == FailureMode::Infallible);

  // This invariant must hold when running Wasm code. Assert it here so we can
  // write tests for cross-realm calls.
  DebugOnly<JSContext*> cx = instance->cx();
  MOZ_ASSERT(cx->realm() == instance->realm());

  Pages pages = instance->memory(memoryIndex)->volatilePages();
#ifdef JS_64BIT
  // Ensure that the memory size is no more than 4GiB.
  MOZ_ASSERT(pages <= Pages(MaxMemory32PagesValidation));
#endif
  return uint32_t(pages.value());
}

// memory.size for a 64-bit memory: returns the current page count.
/* static */ uint64_t Instance::memorySize_m64(Instance* instance,
                                               uint32_t memoryIndex) {
  MOZ_ASSERT(SASigMemorySizeM64.failureMode == FailureMode::Infallible);

  // This invariant must hold when running Wasm code. Assert it here so we can
  // write tests for cross-realm calls.
  DebugOnly<JSContext*> cx = instance->cx();
  MOZ_ASSERT(cx->realm() == instance->realm());

  Pages pages = instance->memory(memoryIndex)->volatilePages();
#ifdef JS_64BIT
  MOZ_ASSERT(pages <= Pages(MaxMemory64PagesValidation));
#endif
  return pages.value();
}
// Shared memory.copy kernel: bounds-check both ranges against their memory
// lengths, then move `len` bytes with the supplied move primitive (plain
// memmove or the racy-safe variant for shared memories). Returns 0 on
// success, -1 after reporting an out-of-bounds trap.
template <typename PointerT, typename CopyFuncT, typename IndexT>
inline int32_t WasmMemoryCopy(JSContext* cx, PointerT dstMemBase,
                              PointerT srcMemBase, size_t dstMemLen,
                              size_t srcMemLen, IndexT dstByteOffset,
                              IndexT srcByteOffset, IndexT len,
                              CopyFuncT memMove) {
  bool inBounds = MemoryBoundsCheck(dstByteOffset, len, dstMemLen) &&
                  MemoryBoundsCheck(srcByteOffset, len, srcMemLen);
  if (!inBounds) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  memMove(dstMemBase + uintptr_t(dstByteOffset),
          srcMemBase + uintptr_t(srcByteOffset), size_t(len));
  return 0;
}
// memory.copy within one unshared memory: recover the buffer length from the
// raw-buffer object behind the data pointer, then delegate to WasmMemoryCopy
// with plain memmove.
template <typename I>
inline int32_t MemoryCopy(JSContext* cx, I dstByteOffset, I srcByteOffset,
                          I len, uint8_t* memBase) {
  const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
  size_t memLen = rawBuf->byteLength();
  return WasmMemoryCopy(cx, memBase, memBase, memLen, memLen, dstByteOffset,
                        srcByteOffset, len, memmove);
}

// memory.copy within one shared memory: use the volatile length (the memory
// may be grown concurrently) and the racy-safe memmove.
template <typename I>
inline int32_t MemoryCopyShared(JSContext* cx, I dstByteOffset, I srcByteOffset,
                                I len, uint8_t* memBase) {
  using RacyMemMove =
      void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
  const WasmSharedArrayRawBuffer* rawBuf =
      WasmSharedArrayRawBuffer::fromDataPtr(memBase);
  size_t memLen = rawBuf->volatileByteLength();
  SharedMem<uint8_t*> sharedMemBase = SharedMem<uint8_t*>::shared(memBase);
  return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
      cx, sharedMemBase, sharedMemBase, memLen, memLen, dstByteOffset,
      srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
}
// The four memory.copy entry points below dispatch to MemoryCopy or
// MemoryCopyShared with the address width fixed by the signature. Each
// reports failure as a negative i32.

/* static */ int32_t Instance::memCopy_m32(Instance* instance,
                                           uint32_t dstByteOffset,
                                           uint32_t srcByteOffset, uint32_t len,
                                           uint8_t* memBase) {
  MOZ_ASSERT(SASigMemCopyM32.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
}

/* static */ int32_t Instance::memCopyShared_m32(Instance* instance,
                                                 uint32_t dstByteOffset,
                                                 uint32_t srcByteOffset,
                                                 uint32_t len,
                                                 uint8_t* memBase) {
  MOZ_ASSERT(SASigMemCopySharedM32.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
}

/* static */ int32_t Instance::memCopy_m64(Instance* instance,
                                           uint64_t dstByteOffset,
                                           uint64_t srcByteOffset, uint64_t len,
                                           uint8_t* memBase) {
  MOZ_ASSERT(SASigMemCopyM64.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
}

/* static */ int32_t Instance::memCopyShared_m64(Instance* instance,
                                                 uint64_t dstByteOffset,
                                                 uint64_t srcByteOffset,
                                                 uint64_t len,
                                                 uint8_t* memBase) {
  MOZ_ASSERT(SASigMemCopySharedM64.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
}
// Dynamic dispatch to get the length of a memory given just the base and
// whether it is shared or not. This is only used for memCopy_any, where being
// slower is okay.
// Dynamic dispatch to get the length of a memory given just the base and
// whether it is shared or not. This is only used for memCopy_any, where being
// slower is okay.
static inline size_t GetVolatileByteLength(uint8_t* memBase, bool isShared) {
  return isShared ? WasmSharedArrayRawBuffer::fromDataPtr(memBase)
                        ->volatileByteLength()
                  : WasmArrayRawBuffer::fromDataPtr(memBase)->byteLength();
}
// memory.copy between two memories (possibly the same index, possibly
// shared). Since either side may be shared, the racy-safe memmove is always
// used, and each memory's length is looked up dynamically.
/* static */ int32_t Instance::memCopy_any(Instance* instance,
                                           uint64_t dstByteOffset,
                                           uint64_t srcByteOffset, uint64_t len,
                                           uint32_t dstMemIndex,
                                           uint32_t srcMemIndex) {
  MOZ_ASSERT(SASigMemCopyAny.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();

  using RacyMemMove =
      void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);

  const MemoryInstanceData& dstMemory =
      instance->memoryInstanceData(dstMemIndex);
  const MemoryInstanceData& srcMemory =
      instance->memoryInstanceData(srcMemIndex);

  uint8_t* dstMemBase = dstMemory.base;
  uint8_t* srcMemBase = srcMemory.base;

  size_t dstMemLen = GetVolatileByteLength(dstMemBase, dstMemory.isShared);
  size_t srcMemLen = GetVolatileByteLength(srcMemBase, srcMemory.isShared);

  return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
      cx, SharedMem<uint8_t*>::shared(dstMemBase),
      SharedMem<uint8_t*>::shared(srcMemBase), dstMemLen, srcMemLen,
      dstByteOffset, srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
}
// Shared memory.fill kernel: bounds-check the target range, then fill `len`
// bytes with `value` using the supplied set primitive (plain memset or the
// racy-safe variant). Returns 0 on success, -1 after reporting a trap.
template <typename T, typename F, typename I>
inline int32_t WasmMemoryFill(JSContext* cx, T memBase, size_t memLen,
                              I byteOffset, uint32_t value, I len, F memSet) {
  if (MemoryBoundsCheck(byteOffset, len, memLen)) {
    // The required write direction is upward, but that is not currently
    // observable as there are no fences nor any read/write protect operation.
    memSet(memBase + uintptr_t(byteOffset), int(value), size_t(len));
    return 0;
  }
  ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
  return -1;
}
// memory.fill within one unshared memory: recover the buffer length from the
// raw-buffer object behind the data pointer and delegate with plain memset.
template <typename I>
inline int32_t MemoryFill(JSContext* cx, I byteOffset, uint32_t value, I len,
                          uint8_t* memBase) {
  const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
  size_t memLen = rawBuf->byteLength();
  return WasmMemoryFill(cx, memBase, memLen, byteOffset, value, len, memset);
}

// memory.fill within one shared memory: use the volatile length and the
// racy-safe memset.
template <typename I>
inline int32_t MemoryFillShared(JSContext* cx, I byteOffset, uint32_t value,
                                I len, uint8_t* memBase) {
  const WasmSharedArrayRawBuffer* rawBuf =
      WasmSharedArrayRawBuffer::fromDataPtr(memBase);
  size_t memLen = rawBuf->volatileByteLength();
  return WasmMemoryFill(cx, SharedMem<uint8_t*>::shared(memBase), memLen,
                        byteOffset, value, len,
                        AtomicOperations::memsetSafeWhenRacy);
}
// The four memory.fill entry points below dispatch to MemoryFill or
// MemoryFillShared with the address width fixed by the signature. Each
// reports failure as a negative i32.

/* static */ int32_t Instance::memFill_m32(Instance* instance,
                                           uint32_t byteOffset, uint32_t value,
                                           uint32_t len, uint8_t* memBase) {
  MOZ_ASSERT(SASigMemFillM32.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  return MemoryFill(cx, byteOffset, value, len, memBase);
}

/* static */ int32_t Instance::memFillShared_m32(Instance* instance,
                                                 uint32_t byteOffset,
                                                 uint32_t value, uint32_t len,
                                                 uint8_t* memBase) {
  MOZ_ASSERT(SASigMemFillSharedM32.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  return MemoryFillShared(cx, byteOffset, value, len, memBase);
}

/* static */ int32_t Instance::memFill_m64(Instance* instance,
                                           uint64_t byteOffset, uint32_t value,
                                           uint64_t len, uint8_t* memBase) {
  MOZ_ASSERT(SASigMemFillM64.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  return MemoryFill(cx, byteOffset, value, len, memBase);
}

/* static */ int32_t Instance::memFillShared_m64(Instance* instance,
                                                 uint64_t byteOffset,
                                                 uint32_t value, uint64_t len,
                                                 uint8_t* memBase) {
  MOZ_ASSERT(SASigMemFillSharedM64.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  return MemoryFillShared(cx, byteOffset, value, len, memBase);
}
// Returns true iff copying `len` bytes from segment offset `srcOffset` into a
// 32-bit memory at `dstOffset` would run past the end of the memory (memLen)
// or the data segment (segLen). The sums are formed in 64 bits, so they
// cannot overflow for 32-bit inputs.
static bool BoundsCheckInit(uint32_t dstOffset, uint32_t srcOffset,
                            uint32_t len, size_t memLen, uint32_t segLen) {
  uint64_t dstEnd = uint64_t(dstOffset) + uint64_t(len);
  uint64_t srcEnd = uint64_t(srcOffset) + uint64_t(len);
  return dstEnd > memLen || srcEnd > segLen;
}
// 64-bit-memory overload: returns true iff the init is out of bounds. Here
// `dstOffset + len` can wrap in 64 bits, so an explicit wraparound check is
// required before comparing against memLen.
static bool BoundsCheckInit(uint64_t dstOffset, uint32_t srcOffset,
                            uint32_t len, size_t memLen, uint32_t segLen) {
  uint64_t dstEnd = dstOffset + uint64_t(len);
  uint64_t srcEnd = uint64_t(srcOffset) + uint64_t(len);
  if (dstEnd < dstOffset) {
    return true;  // destination range wrapped around
  }
  return dstEnd > memLen || srcEnd > segLen;
}
// memory.init: copy `len` bytes at `srcOffset` of passive data segment
// `maybeSeg` into memory `memoryIndex` at `dstOffset`. `maybeSeg` is null if
// the segment has been dropped, in which case only the trivial
// (len == 0, srcOffset == 0) init succeeds. Returns 0 on success, -1 after
// reporting a trap.
template <typename I>
static int32_t MemoryInit(JSContext* cx, Instance* instance,
                          uint32_t memoryIndex, I dstOffset, uint32_t srcOffset,
                          uint32_t len, const DataSegment* maybeSeg) {
  if (!maybeSeg) {
    // A dropped segment behaves as a zero-length segment.
    if (len == 0 && srcOffset == 0) {
      return 0;
    }

    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  const DataSegment& seg = *maybeSeg;
  MOZ_RELEASE_ASSERT(!seg.active());

  const uint32_t segLen = seg.bytes.length();
  WasmMemoryObject* mem = instance->memory(memoryIndex);
  const size_t memLen = mem->volatileMemoryLength();

  // We are proposing to copy
  //
  //   seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
  // to
  //   memoryBase[ dstOffset .. dstOffset + len - 1 ]

  if (BoundsCheckInit(dstOffset, srcOffset, len, memLen, segLen)) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  // The required read/write direction is upward, but that is not currently
  // observable as there are no fences nor any read/write protect operation.
  SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
  if (mem->isShared()) {
    AtomicOperations::memcpySafeWhenRacy(
        dataPtr + uintptr_t(dstOffset), (uint8_t*)seg.bytes.begin() + srcOffset,
        len);
  } else {
    uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
    memcpy(rawBuf + uintptr_t(dstOffset),
           (const char*)seg.bytes.begin() + srcOffset, len);
  }
  return 0;
}
// memory.init entry points for 32-bit and 64-bit memories. The segment index
// is validated at compile time, hence the release assert.

/* static */ int32_t Instance::memInit_m32(Instance* instance,
                                           uint32_t dstOffset,
                                           uint32_t srcOffset, uint32_t len,
                                           uint32_t segIndex,
                                           uint32_t memIndex) {
  MOZ_ASSERT(SASigMemInitM32.failureMode == FailureMode::FailOnNegI32);
  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
                     "ensured by validation");
  JSContext* cx = instance->cx();
  return MemoryInit(cx, instance, memIndex, dstOffset, srcOffset, len,
                    instance->passiveDataSegments_[segIndex]);
}

/* static */ int32_t Instance::memInit_m64(Instance* instance,
                                           uint64_t dstOffset,
                                           uint32_t srcOffset, uint32_t len,
                                           uint32_t segIndex,
                                           uint32_t memIndex) {
  MOZ_ASSERT(SASigMemInitM64.failureMode == FailureMode::FailOnNegI32);
  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
                     "ensured by validation");
  JSContext* cx = instance->cx();
  return MemoryInit(cx, instance, memIndex, dstOffset, srcOffset, len,
                    instance->passiveDataSegments_[segIndex]);
}
//////////////////////////////////////////////////////////////////////////////
//
// Bulk table operations.
// table.copy: copy `len` elements from srcTable[srcOffset..] to
// dstTable[dstOffset..]. When both ranges are in the same table and
// dstOffset > srcOffset, the copy runs backwards so overlapping elements are
// read before they are overwritten. Returns 0 on success, -1 on trap or if
// an element copy failed (pending exception on cx).
/* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
                                         uint32_t srcOffset, uint32_t len,
                                         uint32_t dstTableIndex,
                                         uint32_t srcTableIndex) {
  MOZ_ASSERT(SASigTableCopy.failureMode == FailureMode::FailOnNegI32);

  JSContext* cx = instance->cx();
  const SharedTable& srcTable = instance->tables()[srcTableIndex];
  uint32_t srcTableLen = srcTable->length();

  const SharedTable& dstTable = instance->tables()[dstTableIndex];
  uint32_t dstTableLen = dstTable->length();

  // Bounds check and deal with arithmetic overflow.
  uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
  uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;
  if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  bool isOOM = false;
  if (&srcTable == &dstTable && dstOffset > srcOffset) {
    // Overlapping copy with destination above source: iterate from the
    // highest index down so source elements are not clobbered first.
    for (uint32_t i = len; i > 0; i--) {
      if (!dstTable->copy(cx, *srcTable, dstOffset + (i - 1),
                          srcOffset + (i - 1))) {
        isOOM = true;
        break;
      }
    }
  } else if (&srcTable == &dstTable && dstOffset == srcOffset) {
    // No-op
  } else {
    // Disjoint tables, or destination below source: a forward copy is safe.
    for (uint32_t i = 0; i < len; i++) {
      if (!dstTable->copy(cx, *srcTable, dstOffset + i, srcOffset + i)) {
        isOOM = true;
        break;
      }
    }
  }

  if (isOOM) {
    return -1;
  }
  return 0;
}
#ifdef DEBUG
// Debug-only helper: true iff no segment in `vec` is active. Used to assert
// that a module with no memories carries only passive data segments.
static bool AllSegmentsArePassive(const DataSegmentVector& vec) {
  return std::all_of(vec.begin(), vec.end(), [](const DataSegment* seg) {
    return !seg->active();
  });
}
#endif
// Apply all active element and data segments to this instance's tables and
// memories at instantiation time: evaluate each segment's offset
// init-expression, bounds check the target range, then write the contents.
// Returns false with a pending exception (OOM or out-of-bounds) on failure.
bool Instance::initSegments(JSContext* cx,
                            const DataSegmentVector& dataSegments,
                            const ModuleElemSegmentVector& elemSegments) {
  MOZ_ASSERT_IF(codeMeta().memories.length() == 0,
                AllSegmentsArePassive(dataSegments));

  Rooted<WasmInstanceObject*> instanceObj(cx, object());

  // Write data/elem segments into memories/tables.

  for (const ModuleElemSegment& seg : elemSegments) {
    if (seg.active()) {
      RootedVal offsetVal(cx);
      if (!seg.offset().evaluate(cx, instanceObj, &offsetVal)) {
        return false;  // OOM
      }
      const wasm::Table* table = tables()[seg.tableIndex];
      // The offset's width follows the table's address type.
      uint64_t offset = table->addressType() == AddressType::I32
                            ? offsetVal.get().i32()
                            : offsetVal.get().i64();
      uint64_t tableLength = table->length();
      // Check offset + numElements <= tableLength without overflowing.
      if (offset > tableLength || tableLength - offset < seg.numElements()) {
        JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                                 JSMSG_WASM_OUT_OF_BOUNDS);
        return false;
      }

      if (!initElems(cx, seg.tableIndex, seg, offset)) {
        return false;  // OOM
      }
    }
  }

  for (const DataSegment* seg : dataSegments) {
    if (!seg->active()) {
      continue;
    }

    Rooted<const WasmMemoryObject*> memoryObj(cx, memory(seg->memoryIndex));
    size_t memoryLength = memoryObj->volatileMemoryLength();
    uint8_t* memoryBase =
        memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
    RootedVal offsetVal(cx);
    if (!seg->offset().evaluate(cx, instanceObj, &offsetVal)) {
      return false;  // OOM
    }
    // The offset's width follows the memory's address type.
    uint64_t offset = memoryObj->addressType() == AddressType::I32
                          ? offsetVal.get().i32()
                          : offsetVal.get().i64();
    uint32_t count = seg->bytes.length();
    // Check offset + count <= memoryLength without overflowing.
    if (offset > memoryLength || memoryLength - offset < count) {
      JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                               JSMSG_WASM_OUT_OF_BOUNDS);
      return false;
    }
    memcpy(memoryBase + uintptr_t(offset), seg->bytes.begin(), count);
  }

  return true;
}
// Write the elements of segment `seg` into table `tableIndex` starting at
// `dstOffset`. The caller must already have bounds checked (asserted below).
// Function tables with index-encoded segments are filled without
// materializing JSFunction wrappers; all other cases go through AnyRef
// conversion.
bool Instance::initElems(JSContext* cx, uint32_t tableIndex,
                         const ModuleElemSegment& seg, uint32_t dstOffset) {
  Table& table = *tables_[tableIndex];
  MOZ_ASSERT(dstOffset <= table.length());
  MOZ_ASSERT(seg.numElements() <= table.length() - dstOffset);

  if (seg.numElements() == 0) {
    return true;
  }

  if (table.isFunction() &&
      seg.encoding == ModuleElemSegment::Encoding::Indices) {
    // Initialize this table of functions without creating any intermediate
    // JSFunctions.
    bool ok = iterElemsFunctions(
        seg, [&](uint32_t i, void* code, Instance* instance) -> bool {
          table.setFuncRef(dstOffset + i, code, instance);
          return true;
        });
    if (!ok) {
      return false;
    }
  } else {
    bool ok = iterElemsAnyrefs(cx, seg, [&](uint32_t i, AnyRef ref) -> bool {
      table.setRef(dstOffset + i, ref);
      return true;
    });
    if (!ok) {
      return false;
    }
  }

  return true;
}
// Invoke `onFunc(i, codePointer, instance)` for each element of the
// index-encoded segment `seg`, resolving each function index to its checked
// call entry without creating JSFunction wrappers. Stops and returns false
// if any callback returns false.
template <typename F>
bool Instance::iterElemsFunctions(const ModuleElemSegment& seg,
                                  const F& onFunc) {
  // In the future, we could theoretically get function data (instance + code
  // pointer) from segments with the expression encoding without creating
  // JSFunctions. But that is not how it works today. We can only bypass the
  // creation of JSFunctions for the index encoding.
  MOZ_ASSERT(seg.encoding == ModuleElemSegment::Encoding::Indices);

  if (seg.numElements() == 0) {
    return true;
  }

  const FuncImportVector& funcImports = code().funcImports();

  for (uint32_t i = 0; i < seg.numElements(); i++) {
    uint32_t elemFuncIndex = seg.elemIndices[i];
    if (elemFuncIndex < funcImports.length()) {
      FuncImportInstanceData& import = funcImportInstanceData(elemFuncIndex);
      MOZ_ASSERT(import.callable->isCallable());
      if (import.callable->is<JSFunction>()) {
        JSFunction* fun = &import.callable->as<JSFunction>();
        if (!codeMeta().funcImportsAreJS && fun->isWasm()) {
          // This element is a wasm function imported from another
          // instance. To preserve the === function identity required by
          // the JS embedding spec, we must get the imported function's
          // underlying CodeRange.funcCheckedCallEntry and Instance so that
          // future Table.get()s produce the same function object as was
          // imported.
          if (!onFunc(i, fun->wasmCheckedCallEntry(), &fun->wasmInstance())) {
            return false;
          }
          continue;
        }
      }
    }

    // Otherwise the element refers to code in this instance.
    const CodeRange* codeRange;
    uint8_t* codeBase;
    code().funcCodeRange(elemFuncIndex, &codeRange, &codeBase);
    if (!onFunc(i, codeBase + codeRange->funcCheckedCallEntry(), this)) {
      return false;
    }
  }

  return true;
}
// Iterates over an element segment of any encoding, presenting each element
// to `onAnyRef` as an AnyRef: `onAnyRef(i, ref)` for element i. Returns false
// if materializing an element fails (an exception will be pending on `cx`) or
// if a callback returns false.
template <typename F>
bool Instance::iterElemsAnyrefs(JSContext* cx, const ModuleElemSegment& seg,
                                const F& onAnyRef) {
  if (seg.numElements() == 0) {
    return true;
  }

  switch (seg.encoding) {
    case ModuleElemSegment::Encoding::Indices: {
      // The only types of indices that exist right now are function indices,
      // so this code is specialized to functions.
      RootedFunction fun(cx);
      for (uint32_t i = 0; i < seg.numElements(); i++) {
        uint32_t funcIndex = seg.elemIndices[i];
        // getExportedFunction fails only on OOM.
        if (!getExportedFunction(cx, funcIndex, &fun) ||
            !onAnyRef(i, AnyRef::fromJSObject(*fun.get()))) {
          return false;
        }
      }
      break;
    }
    case ModuleElemSegment::Encoding::Expressions: {
      Rooted<WasmInstanceObject*> instanceObj(cx, object());
      const ModuleElemSegment::Expressions& exprs = seg.elemExpressions;

      UniqueChars error;
      // The offset is a dummy because the expression has already been
      // validated.
      Decoder d(exprs.exprBytes.begin(), exprs.exprBytes.end(), 0, &error);
      for (uint32_t i = 0; i < seg.numElements(); i++) {
        RootedVal result(cx);
        if (!InitExpr::decodeAndEvaluate(cx, instanceObj, d, seg.elemType,
                                         &result)) {
          MOZ_ASSERT(!error);
          // The only possible failure should be OOM.
          return false;
        }
        // We would need to root this AnyRef if we were doing anything other
        // than storing it.
        AnyRef ref = result.get().ref();
        if (!onAnyRef(i, ref)) {
          return false;
        }
      }
      break;
    }
    default:
      MOZ_CRASH("unknown encoding type for element segment");
  }
  return true;
}
// Instance call-out implementing `table.init`: copies `len` elements from
// passive element segment `segIndex`, starting at `srcOffset`, into table
// `tableIndex` starting at `dstOffset`. Returns 0 on success, or -1 after
// reporting a trap if either range is out of bounds.
/* static */ int32_t Instance::tableInit(Instance* instance, uint32_t dstOffset,
                                         uint32_t srcOffset, uint32_t len,
                                         uint32_t segIndex,
                                         uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableInit.failureMode == FailureMode::FailOnNegI32);
  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
                     "ensured by validation");

  JSContext* cx = instance->cx();
  const InstanceElemSegment& seg = instance->passiveElemSegments_[segIndex];
  Table& table = *instance->tables()[tableIndex];

  // We are proposing to copy
  //
  //   seg[ srcOffset .. srcOffset + len - 1 ]
  // to
  //   tableBase[ dstOffset .. dstOffset + len - 1 ]
  //
  // Bounds check in 64-bit arithmetic so offset+len cannot overflow.
  uint64_t srcEnd = uint64_t(srcOffset) + uint64_t(len);
  uint64_t dstEnd = uint64_t(dstOffset) + uint64_t(len);
  if (dstEnd > table.length() || srcEnd > seg.length()) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  for (size_t i = 0; i < len; i++) {
    table.setRef(dstOffset + i, seg[srcOffset + i]);
  }
  return 0;
}
// Instance call-out implementing `table.fill`: stores `value` into `len`
// consecutive slots of table `tableIndex` beginning at `start`. Returns 0 on
// success, or -1 after reporting a trap if the range is out of bounds.
/* static */ int32_t Instance::tableFill(Instance* instance, uint32_t start,
                                         void* value, uint32_t len,
                                         uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableFill.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  Table& table = *instance->tables()[tableIndex];

  // Bounds check in 64-bit arithmetic so start+len cannot overflow.
  uint64_t end = uint64_t(start) + uint64_t(len);
  if (end > table.length()) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  // The fill routine depends on the table's representation.
  switch (table.repr()) {
    case TableRepr::Ref:
      table.fillAnyRef(start, len, AnyRef::fromCompiledCode(value));
      break;
    case TableRepr::Func:
      MOZ_RELEASE_ASSERT(!table.isAsmJS());
      table.fillFuncRef(start, len, FuncRef::fromCompiledCode(value), cx);
      break;
  }
  return 0;
}
// Shared validation for memory.discard: the byte range must be page-aligned
// and lie within the memory's current length. Reports a trap and returns
// false on failure; returns true if the discard may proceed.
template <typename I>
static bool WasmDiscardCheck(Instance* instance, I byteOffset, I byteLen,
                             size_t memLen, bool shared) {
  JSContext* cx = instance->cx();

  const bool pageAligned =
      byteOffset % wasm::PageSize == 0 && byteLen % wasm::PageSize == 0;
  if (!pageAligned) {
    ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
    return false;
  }

  if (!MemoryBoundsCheck(byteOffset, byteLen, memLen)) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return false;
  }

  return true;
}
// memory.discard on a non-shared memory: after alignment/bounds validation,
// releases the backing pages of [byteOffset, byteOffset+byteLen).
template <typename I>
static int32_t MemDiscardNotShared(Instance* instance, I byteOffset, I byteLen,
                                   uint8_t* memBase) {
  WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
  size_t memLen = rawBuf->byteLength();

  if (!WasmDiscardCheck(instance, byteOffset, byteLen, memLen,
                        /*shared=*/false)) {
    // WasmDiscardCheck has already reported the trap.
    return -1;
  }

  rawBuf->discard(byteOffset, byteLen);
  return 0;
}
// memory.discard on a shared memory: like MemDiscardNotShared, but reads the
// length through the racy (volatile) accessor since other threads may be
// growing the memory concurrently.
template <typename I>
static int32_t MemDiscardShared(Instance* instance, I byteOffset, I byteLen,
                                uint8_t* memBase) {
  WasmSharedArrayRawBuffer* rawBuf =
      WasmSharedArrayRawBuffer::fromDataPtr(memBase);
  size_t memLen = rawBuf->volatileByteLength();

  if (!WasmDiscardCheck(instance, byteOffset, byteLen, memLen,
                        /*shared=*/true)) {
    // WasmDiscardCheck has already reported the trap.
    return -1;
  }

  rawBuf->discard(byteOffset, byteLen);
  return 0;
}
// memory.discard call-out for a 32-bit-addressed, non-shared memory.
// Forwards to MemDiscardNotShared, which validates and performs the discard.
/* static */ int32_t Instance::memDiscard_m32(Instance* instance,
                                              uint32_t byteOffset,
                                              uint32_t byteLen,
                                              uint8_t* memBase) {
  return MemDiscardNotShared(instance, byteOffset, byteLen, memBase);
}
// memory.discard call-out for a 64-bit-addressed, non-shared memory.
// Forwards to MemDiscardNotShared, which validates and performs the discard.
/* static */ int32_t Instance::memDiscard_m64(Instance* instance,
                                              uint64_t byteOffset,
                                              uint64_t byteLen,
                                              uint8_t* memBase) {
  return MemDiscardNotShared(instance, byteOffset, byteLen, memBase);
}
// memory.discard call-out for a 32-bit-addressed, shared memory.
// Forwards to MemDiscardShared, which validates and performs the discard.
/* static */ int32_t Instance::memDiscardShared_m32(Instance* instance,
                                                    uint32_t byteOffset,
                                                    uint32_t byteLen,
                                                    uint8_t* memBase) {
  return MemDiscardShared(instance, byteOffset, byteLen, memBase);
}
// memory.discard call-out for a 64-bit-addressed, shared memory.
// Forwards to MemDiscardShared, which validates and performs the discard.
/* static */ int32_t Instance::memDiscardShared_m64(Instance* instance,
                                                    uint64_t byteOffset,
                                                    uint64_t byteLen,
                                                    uint8_t* memBase) {
  return MemDiscardShared(instance, byteOffset, byteLen, memBase);
}
// Instance call-out implementing `table.get`: returns the element at
// `address` in table `tableIndex` as a compiled-code pointer, or the invalid
// AnyRef sentinel after reporting a trap / on failure.
/* static */ void* Instance::tableGet(Instance* instance, uint32_t address,
                                      uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableGet.failureMode == FailureMode::FailOnInvalidRef);
  JSContext* cx = instance->cx();
  const Table& table = *instance->tables()[tableIndex];

  if (address >= table.length()) {
    ReportTrapError(cx, JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
    return AnyRef::invalid().forCompiledCode();
  }

  switch (table.repr()) {
    case TableRepr::Ref:
      return table.getAnyRef(address).forCompiledCode();
    case TableRepr::Func: {
      MOZ_RELEASE_ASSERT(!table.isAsmJS());
      // Obtain a JSFunction for the slot; failure is propagated to the
      // caller as an invalid ref.
      RootedFunction fun(cx);
      if (!table.getFuncRef(cx, address, &fun)) {
        return AnyRef::invalid().forCompiledCode();
      }
      return FuncRef::fromJSFunction(fun).forCompiledCode();
    }
  }

  MOZ_CRASH("Should not happen");
}
// Instance call-out implementing `table.grow`: grows table `tableIndex` by
// `delta` slots, filling the new slots with `initValue`. Returns the
// previous table length, or uint32_t(-1) if the grow failed.
/* static */ uint32_t Instance::tableGrow(Instance* instance, void* initValue,
                                          uint32_t delta, uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableGrow.failureMode == FailureMode::Infallible);
  JSContext* cx = instance->cx();

  // Root the init value before any operation that can GC.
  RootedAnyRef ref(cx, AnyRef::fromCompiledCode(initValue));
  Table& table = *instance->tables()[tableIndex];

  uint32_t oldSize = table.grow(delta);

  // Fill the new slots only if the grow succeeded and a non-null init value
  // was supplied.
  if (oldSize != uint32_t(-1) && initValue != nullptr) {
    table.fillUninitialized(oldSize, delta, ref, cx);
  }

#ifdef DEBUG
  // A table of non-nullable elements must never expose null slots.
  if (!table.elemType().isNullable()) {
    table.assertRangeNotNull(oldSize, delta);
  }
#endif  // DEBUG

  return oldSize;
}
// Instance call-out implementing `table.set`: stores `value` into slot
// `address` of table `tableIndex`. Returns 0 on success, or -1 after
// reporting a trap if the address is out of bounds.
/* static */ int32_t Instance::tableSet(Instance* instance, uint32_t address,
                                        void* value, uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableSet.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();
  Table& table = *instance->tables()[tableIndex];

  if (address >= table.length()) {
    ReportTrapError(cx, JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
    return -1;
  }

  switch (table.repr()) {
    case TableRepr::Ref:
      table.setAnyRef(address, AnyRef::fromCompiledCode(value));
      break;
    case TableRepr::Func:
      MOZ_RELEASE_ASSERT(!table.isAsmJS());
      // A single-slot store expressed as a length-1 fill.
      table.fillFuncRef(address, 1, FuncRef::fromCompiledCode(value), cx);
      break;
  }
  return 0;
}
// Instance call-out implementing `table.size`: reports the current length of
// table `tableIndex`. Infallible.
/* static */ uint32_t Instance::tableSize(Instance* instance,
                                          uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableSize.failureMode == FailureMode::Infallible);
  return instance->tables()[tableIndex]->length();
}
// Instance call-out implementing `ref.func`: returns the exported-function
// object for `funcIndex` as a compiled-code pointer, or the invalid AnyRef
// sentinel on OOM.
/* static */ void* Instance::refFunc(Instance* instance, uint32_t funcIndex) {
  MOZ_ASSERT(SASigRefFunc.failureMode == FailureMode::FailOnInvalidRef);
  JSContext* cx = instance->cx();

  RootedFunction exportedFunc(cx);
  if (!instance->getExportedFunction(cx, funcIndex, &exportedFunc)) {
    // The only way getExportedFunction can fail here is OOM.
    MOZ_ASSERT(cx->isThrowingOutOfMemory());
    return AnyRef::invalid().forCompiledCode();
  }

  return FuncRef::fromJSFunction(exportedFunc.get()).forCompiledCode();
}
//////////////////////////////////////////////////////////////////////////////
//
// Segment management.
// Instance call-out implementing `elem.drop`: releases the contents of
// passive element segment `segIndex`. Always succeeds.
/* static */ int32_t Instance::elemDrop(Instance* instance, uint32_t segIndex) {
  MOZ_ASSERT(SASigElemDrop.failureMode == FailureMode::FailOnNegI32);
  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
                     "ensured by validation");

  InstanceElemSegment& seg = instance->passiveElemSegments_[segIndex];
  seg.clearAndFree();
  return 0;
}
// Instance call-out implementing `data.drop`: drops this instance's
// reference to passive data segment `segIndex` so the segment's memory can
// be released. Dropping an already-dropped segment is a no-op.
/* static */ int32_t Instance::dataDrop(Instance* instance, uint32_t segIndex) {
  MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);
  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
                     "ensured by validation");

  SharedDataSegment& segRefPtr = instance->passiveDataSegments_[segIndex];
  if (!segRefPtr) {
    // Already dropped.
    return 0;
  }

  MOZ_RELEASE_ASSERT(!segRefPtr->active());

  // Drop this instance's reference to the DataSegment so it can be released.
  segRefPtr = nullptr;
  return 0;
}
//////////////////////////////////////////////////////////////////////////////
//
// AnyRef support.
// GC post-barrier for a wasm AnyRef cell at `location`: records the cell in
// the store buffer so the nursery collector will trace it.
/* static */ void Instance::postBarrier(Instance* instance, void** location) {
  MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
  MOZ_ASSERT(location);

  auto* cell = reinterpret_cast<wasm::AnyRef*>(location);
  instance->storeBuffer_->putWasmAnyRef(cell);
}
// Precise GC post-barrier for the AnyRef cell at `location`, where `prev` is
// the value the cell held before the store. Delegates to the offset variant
// with a zero offset.
/* static */ void Instance::postBarrierPrecise(Instance* instance,
                                               void** location, void* prev) {
  MOZ_ASSERT(SASigPostBarrierPrecise.failureMode == FailureMode::Infallible);
  postBarrierPreciseWithOffset(instance, location, /*offset=*/0, prev);
}
// Precise GC post-barrier for the AnyRef cell at `base + offset` bytes. The
// cell already holds the new value; `prev` is the value it held before the
// store.
/* static */ void Instance::postBarrierPreciseWithOffset(Instance* instance,
                                                         void** base,
                                                         uint32_t offset,
                                                         void* prev) {
  MOZ_ASSERT(SASigPostBarrierPreciseWithOffset.failureMode ==
             FailureMode::Infallible);
  MOZ_ASSERT(base);

  auto* location =
      reinterpret_cast<wasm::AnyRef*>(uintptr_t(base) + size_t(offset));
  wasm::AnyRef next = *location;
  InternalBarrierMethods<AnyRef>::postBarrier(
      location, wasm::AnyRef::fromCompiledCode(prev), next);
}
//////////////////////////////////////////////////////////////////////////////
//
// GC and exception handling support.
// Instance call-out allocating a new inline-layout wasm struct of the type
// described by `typeDefData`. Returns nullptr on OOM (an exception will be
// pending). ZeroFields selects whether fields are zero-initialized.
/* static */
template <bool ZeroFields>
void* Instance::structNewIL(Instance* instance,
                            TypeDefInstanceData* typeDefData) {
  MOZ_ASSERT((ZeroFields ? SASigStructNewIL_true : SASigStructNewIL_false)
                 .failureMode == FailureMode::FailOnNullPtr);
  JSContext* cx = instance->cx();

  // Allocate in the initial heap chosen by the pretenuring logic as set up
  // in `Instance::init`.
  auto initialHeap = typeDefData->allocSite.initialHeap();
  return WasmStructObject::createStructIL<ZeroFields>(cx, typeDefData,
                                                      initialHeap);
}

template void* Instance::structNewIL<true>(Instance* instance,
                                           TypeDefInstanceData* typeDefData);
template void* Instance::structNewIL<false>(Instance* instance,
                                            TypeDefInstanceData* typeDefData);
// Instance call-out allocating a new out-of-line-layout wasm struct of the
// type described by `typeDefData`. Returns nullptr on OOM (an exception will
// be pending). ZeroFields selects whether fields are zero-initialized.
/* static */
template <bool ZeroFields>
void* Instance::structNewOOL(Instance* instance,
                             TypeDefInstanceData* typeDefData) {
  MOZ_ASSERT((ZeroFields ? SASigStructNewOOL_true : SASigStructNewOOL_false)
                 .failureMode == FailureMode::FailOnNullPtr);
  JSContext* cx = instance->cx();

  // Allocate in the initial heap chosen by the pretenuring logic as set up
  // in `Instance::init`.
  auto initialHeap = typeDefData->allocSite.initialHeap();
  return WasmStructObject::createStructOOL<ZeroFields>(cx, typeDefData,
                                                       initialHeap);
}

template void* Instance::structNewOOL<true>(Instance* instance,
                                            TypeDefInstanceData* typeDefData);
template void* Instance::structNewOOL<false>(Instance* instance,
                                             TypeDefInstanceData* typeDefData);
// Instance call-out allocating a new wasm GC array with `numElements`
// elements of the type described by `typeDefData`. Returns nullptr on
// failure (an exception will be pending). ZeroFields selects whether the
// elements are zero-initialized.
/* static */
template <bool ZeroFields>
void* Instance::arrayNew(Instance* instance, uint32_t numElements,
                         TypeDefInstanceData* typeDefData) {
  MOZ_ASSERT(
      (ZeroFields ? SASigArrayNew_true : SASigArrayNew_false).failureMode ==
      FailureMode::FailOnNullPtr);
  JSContext* cx = instance->cx();

  // Allocate in the initial heap chosen by the pretenuring logic as set up
  // in `Instance::init`.
  auto initialHeap = typeDefData->allocSite.initialHeap();
  return WasmArrayObject::createArray<ZeroFields>(cx, typeDefData, initialHeap,
                                                  numElements);
}

template void* Instance::arrayNew<true>(Instance* instance,
                                        uint32_t numElements,
                                        TypeDefInstanceData* typeDefData);
template void* Instance::arrayNew<false>(Instance* instance,
                                         uint32_t numElements,
                                         TypeDefInstanceData* typeDefData);
// Copies from a data segment into a wasm GC array. Performs the necessary
// bounds checks, accounting for the array's element size. If this function
// returns false, it has already reported a trap error. Null arrays should
// be handled in the caller.
// Copies `numElements` array elements' worth of bytes from `seg`, starting
// at `segByteOffset`, into `arrayObj` starting at element `arrayIndex`.
// All range checks are done in checked/64-bit arithmetic so no intermediate
// computation can overflow. Returns false after reporting a trap on any
// out-of-bounds condition.
static bool ArrayCopyFromData(JSContext* cx, Handle<WasmArrayObject*> arrayObj,
                              uint32_t arrayIndex, const DataSegment* seg,
                              uint32_t segByteOffset, uint32_t numElements) {
  uint32_t elemSize = arrayObj->typeDef().arrayType().elementType().size();

  // Compute the number of bytes to copy, ensuring it's below 2^32.
  CheckedUint32 numBytesToCopy =
      CheckedUint32(numElements) * CheckedUint32(elemSize);
  if (!numBytesToCopy.isValid()) {
    // Because the request implies that 2^32 or more bytes are to be copied.
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return false;
  }

  // Range-check the copy. The obvious thing to do is to compute the offset
  // of the last byte to copy, but that would cause underflow in the
  // zero-length-and-zero-offset case. Instead, compute that value plus one;
  // in other words the offset of the first byte *not* to copy.
  CheckedUint32 lastByteOffsetPlus1 =
      CheckedUint32(segByteOffset) + numBytesToCopy;

  CheckedUint32 numBytesAvailable(seg->bytes.length());
  if (!lastByteOffsetPlus1.isValid() || !numBytesAvailable.isValid() ||
      lastByteOffsetPlus1.value() > numBytesAvailable.value()) {
    // Because the last byte to copy doesn't exist inside `seg->bytes`.
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return false;
  }

  // Range check the destination array.
  uint64_t dstNumElements = uint64_t(arrayObj->numElements_);
  if (uint64_t(arrayIndex) + uint64_t(numElements) > dstNumElements) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return false;
  }

  // This value is safe due to the previous range check on number of elements.
  // (We know the full result fits in the array, and we can't overflow uint64_t
  // since elemSize caps out at 16.)
  uint64_t dstByteOffset = uint64_t(arrayIndex) * uint64_t(elemSize);

  // Because `numBytesToCopy` is an in-range `CheckedUint32`, the cast to
  // `size_t` is safe even on a 32-bit target.
  if (numElements != 0) {
    memcpy(&arrayObj->data_[dstByteOffset], &seg->bytes[segByteOffset],
           size_t(numBytesToCopy.value()));
  }

  return true;
}
// Copies from an element segment into a wasm GC array. Performs the necessary
// bounds checks, accounting for the array's element size. If this function
// returns false, it has already reported a trap error.
// Copies `numElements` refs from element segment `seg`, starting at
// `segOffset`, into `arrayObj` starting at element `arrayIndex`. The array's
// elements are written as GCPtr<AnyRef>; the caller is responsible for
// ensuring the array's element type is AnyRef-sized. Returns false after
// reporting a trap on any out-of-bounds condition.
static bool ArrayCopyFromElem(JSContext* cx, Handle<WasmArrayObject*> arrayObj,
                              uint32_t arrayIndex,
                              const InstanceElemSegment& seg,
                              uint32_t segOffset, uint32_t numElements) {
  // Range-check the copy. As in ArrayCopyFromData, compute the index of the
  // last element to copy, plus one.
  CheckedUint32 lastIndexPlus1 =
      CheckedUint32(segOffset) + CheckedUint32(numElements);

  CheckedUint32 numElemsAvailable(seg.length());
  if (!lastIndexPlus1.isValid() || !numElemsAvailable.isValid() ||
      lastIndexPlus1.value() > numElemsAvailable.value()) {
    // Because the last element to copy doesn't exist inside the segment.
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return false;
  }

  // Range check the destination array.
  uint64_t dstNumElements = uint64_t(arrayObj->numElements_);
  if (uint64_t(arrayIndex) + uint64_t(numElements) > dstNumElements) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return false;
  }

  // Store each ref through the write-barriered GCPtr view of the array data.
  GCPtr<AnyRef>* dst = reinterpret_cast<GCPtr<AnyRef>*>(arrayObj->data_);
  for (uint32_t i = 0; i < numElements; i++) {
    dst[arrayIndex + i] = seg[segOffset + i];
  }

  return true;
}
// Creates an array (WasmArrayObject) containing `numElements` of type
// described by `typeDef`. Initialises it with data copied from the data
// segment whose index is `segIndex`, starting at byte offset `segByteOffset`
// in the segment. Traps if the segment doesn't hold enough bytes to fill the
// array.
// Instance call-out implementing `array.new_data`: creates a wasm GC array
// of `numElements` elements of the type described by `typeDefData`,
// initialized from data segment `segIndex` starting at `segByteOffset`.
// Returns the new array, or nullptr on trap/OOM (an exception will be
// pending).
/* static */ void* Instance::arrayNewData(Instance* instance,
                                          uint32_t segByteOffset,
                                          uint32_t numElements,
                                          TypeDefInstanceData* typeDefData,
                                          uint32_t segIndex) {
  MOZ_ASSERT(SASigArrayNewData.failureMode == FailureMode::FailOnNullPtr);
  JSContext* cx = instance->cx();

  // Check that the data segment is valid for use.
  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
                     "ensured by validation");
  const DataSegment* seg = instance->passiveDataSegments_[segIndex];

  // `seg` will be nullptr if the segment has already been 'data.drop'ed
  // (either implicitly in the case of 'active' segments during instantiation,
  // or explicitly by the data.drop instruction.)  In that case we can
  // continue only if there's no need to copy any data out of it.
  if (!seg && (numElements != 0 || segByteOffset != 0)) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return nullptr;
  }
  // At this point, if `seg` is null then `numElements` and `segByteOffset`
  // are both zero.

  // Allocate the array zero-initialized (ZeroFields=true), then overwrite
  // its contents from the segment.
  Rooted<WasmArrayObject*> arrayObj(
      cx,
      WasmArrayObject::createArray<true>(
          cx, typeDefData, typeDefData->allocSite.initialHeap(), numElements));
  if (!arrayObj) {
    // WasmArrayObject::createArray will have reported OOM.
    return nullptr;
  }
  MOZ_RELEASE_ASSERT(arrayObj->is<WasmArrayObject>());

  if (!seg) {
    // A zero-length array was requested and has been created, so we're done.
    return arrayObj;
  }

  if (!ArrayCopyFromData(cx, arrayObj, 0, seg, segByteOffset, numElements)) {
    // Trap errors will be reported by ArrayCopyFromData.
    return nullptr;
  }

  return arrayObj;
}
// This is almost identical to ::arrayNewData, apart from the final part that
// actually copies the data. It creates an array (WasmArrayObject)
// containing `numElements` of type described by `typeDef`. Initialises it
// with data copied from the element segment whose index is `segIndex`,
// starting at element number `srcOffset` in the segment. Traps if the
// segment doesn't hold enough elements to fill the array.
// Instance call-out implementing `array.new_elem`: creates a wasm GC array
// of `numElements` elements of the type described by `typeDefData`,
// initialized from element segment `segIndex` starting at element
// `srcOffset`. Returns the new array, or nullptr on trap/OOM (an exception
// will be pending).
/* static */ void* Instance::arrayNewElem(Instance* instance,
                                          uint32_t srcOffset,
                                          uint32_t numElements,
                                          TypeDefInstanceData* typeDefData,
                                          uint32_t segIndex) {
  MOZ_ASSERT(SASigArrayNewElem.failureMode == FailureMode::FailOnNullPtr);
  JSContext* cx = instance->cx();

  // Check that the element segment is valid for use.
  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
                     "ensured by validation");
  const InstanceElemSegment& seg = instance->passiveElemSegments_[segIndex];

  const TypeDef* typeDef = typeDefData->typeDef;

  // Any data coming from an element segment will be an AnyRef. Writes into
  // array memory are done with raw pointers, so we must ensure here that the
  // destination size is correct.
  MOZ_RELEASE_ASSERT(typeDef->arrayType().elementType().size() ==
                     sizeof(AnyRef));

  // Allocate the array zero-initialized (ZeroFields=true), then overwrite
  // its contents from the segment.
  Rooted<WasmArrayObject*> arrayObj(
      cx,
      WasmArrayObject::createArray<true>(
          cx, typeDefData, typeDefData->allocSite.initialHeap(), numElements));
  if (!arrayObj) {
    // WasmArrayObject::createArray will have reported OOM.
    return nullptr;
  }
  MOZ_RELEASE_ASSERT(arrayObj->is<WasmArrayObject>());

  if (!ArrayCopyFromElem(cx, arrayObj, 0, seg, srcOffset, numElements)) {
    // Trap errors will be reported by ArrayCopyFromElem.
    return nullptr;
  }

  return arrayObj;
}
// Copies a range of the data segment `segIndex` into an array
// (WasmArrayObject), starting at offset `segByteOffset` in the data segment and
// index `index` in the array. `numElements` is the length of the copy in array
// elements, NOT bytes - the number of bytes will be computed based on the type
// of the array.
//
// Traps if accesses are out of bounds for either the data segment or the array,
// or if the array object is null.
// Instance call-out implementing `array.init_data`: copies `numElements`
// elements' worth of bytes from data segment `segIndex` (starting at
// `segByteOffset`) into the array `array` starting at element `index`.
// Returns 0 on success, or -1 after reporting a trap (out of bounds, or
// null array).
/* static */ int32_t Instance::arrayInitData(Instance* instance, void* array,
                                             uint32_t index,
                                             uint32_t segByteOffset,
                                             uint32_t numElements,
                                             uint32_t segIndex) {
  MOZ_ASSERT(SASigArrayInitData.failureMode == FailureMode::FailOnNegI32);
  JSContext* cx = instance->cx();

  // Check that the data segment is valid for use.
  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
                     "ensured by validation");
  const DataSegment* seg = instance->passiveDataSegments_[segIndex];

  // `seg` will be nullptr if the segment has already been 'data.drop'ed
  // (either implicitly in the case of 'active' segments during instantiation,
  // or explicitly by the data.drop instruction.)  In that case we can
  // continue only if there's no need to copy any data out of it.
  if (!seg && (numElements != 0 || segByteOffset != 0)) {
    ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }
  // At this point, if `seg` is null then `numElements` and `segByteOffset`
  // are both zero.

  // Trap if the array is null.
  if (!array) {
    ReportTrapError(cx, JSMSG_WASM_DEREF_NULL);
    return -1;
  }

  if (!seg) {
    // The segment was dropped, therefore a zero-length init was requested, so
    // we're done.
    return 0;
  }

  // Get hold of the array.
  Rooted<WasmArrayObject*> arrayObj(cx, static_cast<WasmArrayObject*>(array));
  MOZ_RELEASE_ASSERT(arrayObj->is<WasmArrayObject>());

  if (!ArrayCopyFromData(cx, arrayObj, index, seg, segByteOffset,
                         numElements)) {
    // Trap errors will be reported by ArrayCopyFromData.
    return -1;
  }

  return 0;
}
// Copies a range of the element segment `segIndex` into an array
// (WasmArrayObject), starting at offset `segOffset` in the elem segment and
// index `index` in the array. `numElements` is the length of the copy.
//
// Traps if accesses are out of bounds for either the elem segment or the array,
// or if the array object is null.
/* static */ int32_t Instance::arrayInitElem(Instance* instance, void* array,
uint32_t index, uint32_t segOffset,
uint32_t numElements,
TypeDefInstanceData* typeDefData,
uint32_t segIndex) {
MOZ_ASSERT(SASigArrayInitElem.failureMode == FailureMode::FailOnNegI32);
--> --------------------
--> maximum size reached
--> --------------------