
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef wasm_types_h
#define wasm_types_h
#include "mozilla/Alignment.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/Atomics.h"
#include "mozilla/BinarySearch.h"
#include "mozilla/EnumeratedArray.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/Maybe.h"
#include "mozilla/RefPtr.h"
#include "mozilla/Unused.h"
#include <type_traits>
#include "NamespaceImports.h"
#include "ds/LifoAlloc.h"
#include "jit/IonTypes.h"
#include "js/RefCounted.h"
#include "js/UniquePtr.h"
#include "js/Utility.h"
#include "js/Vector.h"
#include "vm/JSFunction.h"
#include "vm/MallocProvider.h"
#include "vm/NativeObject.h"
#include "wasm/WasmConstants.h"
#include "wasm/WasmUtility.h"
namespace js {
namespace jit {
class JitScript;
enum class RoundingMode;
template <class VecT>
class ABIArgIter;
} // namespace jit
// This is a widespread header, so let's keep out the core wasm impl types.
typedef GCVector<JSFunction*, 0, SystemAllocPolicy> JSFunctionVector;
class WasmMemoryObject;
using GCPtrWasmMemoryObject = GCPtr<WasmMemoryObject*>;
using RootedWasmMemoryObject = Rooted<WasmMemoryObject*>;
using HandleWasmMemoryObject = Handle<WasmMemoryObject*>;
using MutableHandleWasmMemoryObject = MutableHandle<WasmMemoryObject*>;
class WasmModuleObject;
using RootedWasmModuleObject = Rooted<WasmModuleObject*>;
using HandleWasmModuleObject = Handle<WasmModuleObject*>;
using MutableHandleWasmModuleObject = MutableHandle<WasmModuleObject*>;
class WasmInstanceObject;
using WasmInstanceObjectVector = GCVector<WasmInstanceObject*>;
using RootedWasmInstanceObject = Rooted<WasmInstanceObject*>;
using HandleWasmInstanceObject = Handle<WasmInstanceObject*>;
using MutableHandleWasmInstanceObject = MutableHandle<WasmInstanceObject*>;
class WasmTableObject;
typedef GCVector<WasmTableObject*, 0, SystemAllocPolicy> WasmTableObjectVector;
using RootedWasmTableObject = Rooted<WasmTableObject*>;
using HandleWasmTableObject = Handle<WasmTableObject*>;
using MutableHandleWasmTableObject = MutableHandle<WasmTableObject*>;
class WasmGlobalObject;
typedef GCVector<WasmGlobalObject*, 0, SystemAllocPolicy>
WasmGlobalObjectVector;
using RootedWasmGlobalObject = Rooted<WasmGlobalObject*>;
class StructTypeDescr;
typedef GCVector<HeapPtr<StructTypeDescr*>, 0, SystemAllocPolicy>
StructTypeDescrVector;
namespace wasm {
using mozilla::ArrayEqual;
using mozilla::Atomic;
using mozilla::DebugOnly;
using mozilla::EnumeratedArray;
using mozilla::MallocSizeOf;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::PodCopy;
using mozilla::PodZero;
using mozilla::Some;
using mozilla::Unused;
class Code;
class DebugState;
class GeneratedSourceMap;
class Memory;
class Module;
class Instance;
class Table;
// Uint32Vector has initial size 8 on the basis that the dominant use cases
// (line numbers and control stacks) tend to have a small but nonzero number
// of elements.
typedef Vector<uint32_t, 8, SystemAllocPolicy> Uint32Vector;
typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytes;
using UniqueBytes = UniquePtr<Bytes>;
using UniqueConstBytes = UniquePtr<const Bytes>;
typedef Vector<char, 0, SystemAllocPolicy> UTF8Bytes;
typedef Vector<Instance*, 0, SystemAllocPolicy> InstanceVector;
typedef Vector<UniqueChars, 0, SystemAllocPolicy> UniqueCharsVector;
// Bit set as the lowest bit of a frame pointer, used in two different mutually
// exclusive situations:
// - either it's a low bit tag in a FramePointer value read from the
// Frame::callerFP of an inner wasm frame. This indicates the previous call
// frame has been set up by a JIT caller that directly called into a wasm
// function's body. This is only stored in Frame::callerFP for a wasm frame
// called from JIT code, and thus it can not appear in a JitActivation's
// exitFP.
// - or it's the low bit tag set when exiting wasm code in JitActivation's
// exitFP.
constexpr uintptr_t ExitOrJitEntryFPTag = 0x1;
// To call Vector::shrinkStorageToFit , a type must specialize mozilla::IsPod
// which is pretty verbose to do within js::wasm, so factor that process out
// into a macro.
// The macro body temporarily closes the js::wasm namespaces (the two leading
// '}'), declares the mozilla::IsPod specialization at the correct scope, and
// then reopens js::wasm before declaring the Vector typedef. No comments may
// appear inside the macro because of the backslash line splices.
#define WASM_DECLARE_POD_VECTOR(Type, VectorName) \
  }                                               \
  }                                               \
  namespace mozilla {                             \
  template <>                                     \
  struct IsPod<js::wasm::Type> : std::true_type {}; \
  }                                               \
  namespace js {                                  \
  namespace wasm {                                \
  typedef Vector<Type, 0, SystemAllocPolicy> VectorName;
// A wasm Module and everything it contains must support serialization and
// deserialization. Some data can be simply copied as raw bytes and,
// as a convention, is stored in an inline CacheablePod struct. Everything else
// should implement the below methods which are called recursively by the
// containing Module.
// Expands, inside a class body, to the four member declarations every
// serializable wasm type must provide; the definitions live in the type's
// .cpp file.
#define WASM_DECLARE_SERIALIZABLE(Type)              \
  size_t serializedSize() const;                     \
  uint8_t* serialize(uint8_t* cursor) const;         \
  const uint8_t* deserialize(const uint8_t* cursor); \
  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
// A RefPtr<T> that also declares the WASM_DECLARE_SERIALIZABLE methods, so a
// ref-counted resource can take part in Module (de)serialization.
template <class T>
struct SerializableRefPtr : RefPtr<T> {
  using RefPtr<T>::operator=;

  SerializableRefPtr() = default;

  // Implicitly convertible from anything a RefPtr<T> can be constructed from.
  template <class U>
  MOZ_IMPLICIT SerializableRefPtr(U&& u) : RefPtr<T>(std::forward<U>(u)) {}

  WASM_DECLARE_SERIALIZABLE(SerializableRefPtr)
};
// This reusable base class factors out the logic for a resource that is shared
// by multiple instances/modules but should only be counted once when computing
// about:memory stats.
template <class T>
struct ShareableBase : AtomicRefCounted<T> {
  using SeenSet = HashSet<const T*, DefaultHasher<const T*>, SystemAllocPolicy>;

  // Returns this object's size (including the object itself) the first time
  // it is encountered, and 0 when it is already in `seen`, so that a resource
  // shared by several instances/modules is only counted once.
  size_t sizeOfIncludingThisIfNotSeen(MallocSizeOf mallocSizeOf,
                                      SeenSet* seen) const {
    const T* self = static_cast<const T*>(this);
    typename SeenSet::AddPtr p = seen->lookupForAdd(self);
    if (p) {
      return 0;
    }
    bool ok = seen->add(p, self);
    (void)ok;  // on OOM this object may be counted again later; that's benign
    return mallocSizeOf(self) + self->sizeOfExcludingThis(mallocSizeOf);
  }
};
// ShareableBytes is a reference-counted Vector of bytes.
struct ShareableBytes : ShareableBase<ShareableBytes> {
  // The underlying byte Vector. Vector is 'final', so it is held as a member
  // rather than inherited, with forwarding accessors below.
  Bytes bytes;

  ShareableBytes() = default;
  explicit ShareableBytes(Bytes&& bytes) : bytes(std::move(bytes)) {}

  // Forwarded Vector accessors.
  const uint8_t* begin() const { return bytes.begin(); }
  const uint8_t* end() const { return bytes.end(); }
  size_t length() const { return bytes.length(); }
  bool append(const uint8_t* start, uint32_t len) {
    return bytes.append(start, len);
  }

  size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
    return bytes.sizeOfExcludingThis(mallocSizeOf);
  }
};
using MutableBytes = RefPtr<ShareableBytes>;
using SharedBytes = RefPtr<const ShareableBytes>;
// The Opcode compactly and safely represents the primary opcode plus any
// extension, with convenient predicates and accessors.
class Opcode {
uint32_t bits_;
public:
MOZ_IMPLICIT Opcode(Op op) : bits_(uint32_t(op)) {
static_assert(size_t(Op::Limit) == 256, "fits");
MOZ_ASSERT(size_t(op) < size_t(Op::Limit));
}
MOZ_IMPLICIT Opcode(MiscOp op)
: bits_((uint32_t(op) << 8) | uint32_t(Op::MiscPrefix)) {
static_assert(size_t(MiscOp::Limit) <= 0xFFFFFF, "fits");
MOZ_ASSERT(size_t(op) < size_t(MiscOp::Limit));
}
MOZ_IMPLICIT Opcode(ThreadOp op)
: bits_((uint32_t(op) << 8) | uint32_t(Op::ThreadPrefix)) {
static_assert(size_t(ThreadOp::Limit) <= 0xFFFFFF, "fits");
MOZ_ASSERT(size_t(op) < size_t(ThreadOp::Limit));
}
MOZ_IMPLICIT Opcode(MozOp op)
: bits_((uint32_t(op) << 8) | uint32_t(Op::MozPrefix)) {
static_assert(size_t(MozOp::Limit) <= 0xFFFFFF, "fits");
MOZ_ASSERT(size_t(op) < size_t(MozOp::Limit));
}
MOZ_IMPLICIT Opcode(SimdOp op)
: bits_((uint32_t(op) << 8) | uint32_t(Op::SimdPrefix)) {
static_assert(size_t(SimdOp::Limit) <= 0xFFFFFF, "fits");
MOZ_ASSERT(size_t(op) < size_t(SimdOp::Limit));
}
bool isOp() const { return bits_ < uint32_t(Op::FirstPrefix); }
bool isMisc() const { return (bits_ & 255) == uint32_t(Op::MiscPrefix); }
bool isThread() const { return (bits_ & 255) == uint32_t(Op::ThreadPrefix); }
bool isMoz() const { return (bits_ & 255) == uint32_t(Op::MozPrefix); }
bool isSimd() const { return (bits_ & 255) == uint32_t(Op::SimdPrefix); }
Op asOp() const {
MOZ_ASSERT(isOp());
return Op(bits_);
}
MiscOp asMisc() const {
MOZ_ASSERT(isMisc());
return MiscOp(bits_ >> 8);
}
ThreadOp asThread() const {
MOZ_ASSERT(isThread());
return ThreadOp(bits_ >> 8);
}
MozOp asMoz() const {
MOZ_ASSERT(isMoz());
return MozOp(bits_ >> 8);
}
SimdOp asSimd() const {
MOZ_ASSERT(isSimd());
return SimdOp(bits_ >> 8);
}
uint32_t bits() const { return bits_; }
bool operator==(const Opcode& that) const { return bits_ == that.bits_; }
bool operator!=(const Opcode& that) const { return bits_ != that.bits_; }
};
// A PackedTypeCode represents a TypeCode paired with a refTypeIndex (valid only
// for TypeCode::OptRef). PackedTypeCode is guaranteed to be POD. The TypeCode
// spans the full range of type codes including the specialized AnyRef, and
// FuncRef.
//
// PackedTypeCode is an enum class, as opposed to the more natural
// struct-with-bitfields, because bitfields would make it non-POD.
//
// DO NOT use PackedTypeCode as a cast. ALWAYS go via PackTypeCode().
enum class PackedTypeCode : uint32_t {};

static_assert(std::is_pod_v<PackedTypeCode>,
              "must be POD to be simply serialized/deserialized");

// Sentinel field values inside a PackedTypeCode: an all-ones type-code byte
// meaning "no type", and an all-ones 22-bit index meaning "no ref type index".
const uint32_t NoTypeCode = 0xFF;          // Only use these
const uint32_t NoRefTypeIndex = 0x3FFFFF;  // with PackedTypeCode
// Pack a TypeCode and ref-type index into one word: the type code in the low
// 8 bits and the index above it. Only TypeCode::OptRef carries a meaningful
// index; every other code must pass NoRefTypeIndex.
static inline PackedTypeCode PackTypeCode(TypeCode tc, uint32_t refTypeIndex) {
  MOZ_ASSERT(uint32_t(tc) <= 0xFF);
  MOZ_ASSERT_IF(tc != TypeCode::OptRef, refTypeIndex == NoRefTypeIndex);
  MOZ_ASSERT_IF(tc == TypeCode::OptRef, refTypeIndex <= MaxTypes);
  // A PackedTypeCode should be representable in a single word, so in the
  // smallest case, 32 bits. However sometimes 2 bits of the word may be taken
  // by a pointer tag; for that reason, limit to 30 bits; and then there's the
  // 8-bit typecode, so 22 bits left for the type index.
  static_assert(MaxTypes < (1 << (30 - 8)), "enough bits");
  return PackedTypeCode((refTypeIndex << 8) | uint32_t(tc));
}

// Pack a type code that carries no index (anything but TypeCode::OptRef).
static inline PackedTypeCode PackTypeCode(TypeCode tc) {
  return PackTypeCode(tc, NoRefTypeIndex);
}
// The canonical invalid value: the type-code byte is NoTypeCode.
static inline PackedTypeCode InvalidPackedTypeCode() {
  return PackedTypeCode(NoTypeCode);
}

// Reconstitute a PackedTypeCode from its raw bit pattern (e.g. after
// deserialization), re-checking the packing invariants in debug builds.
static inline PackedTypeCode PackedTypeCodeFromBits(uint32_t bits) {
  return PackTypeCode(TypeCode(bits & 255), bits >> 8);
}

// A PackedTypeCode is valid unless its type-code byte is NoTypeCode.
static inline bool IsValid(PackedTypeCode ptc) {
  return (uint32_t(ptc) & 255) != NoTypeCode;
}

// Raw bit pattern, for serialization.
static inline uint32_t PackedTypeCodeToBits(PackedTypeCode ptc) {
  return uint32_t(ptc);
}

// The TypeCode stored in the low byte.
static inline TypeCode UnpackTypeCodeType(PackedTypeCode ptc) {
  MOZ_ASSERT(IsValid(ptc));
  return TypeCode(uint32_t(ptc) & 255);
}

// The ref-type index; only meaningful for TypeCode::OptRef.
static inline uint32_t UnpackTypeCodeIndex(PackedTypeCode ptc) {
  MOZ_ASSERT(UnpackTypeCodeType(ptc) == TypeCode::OptRef);
  return uint32_t(ptc) >> 8;
}

// As above but without the OptRef requirement (used by debug validity checks).
static inline uint32_t UnpackTypeCodeIndexUnchecked(PackedTypeCode ptc) {
  return uint32_t(ptc) >> 8;
}
// Return the TypeCode, but return TypeCode::OptRef for any reference type.
//
// This function is very, very hot, hence what would normally be a switch on the
// value `c` to map the reference types to TypeCode::OptRef has been distilled
// into a simple comparison; this is fastest. Should type codes become too
// complicated for this to work then a lookup table also has better performance
// than a switch.
//
// An alternative is for the PackedTypeCode to represent something closer to
// what ValType needs, so that this decoding step is not necessary, but that
// moves complexity elsewhere, and the perf gain here would be only about 1% for
// baseline compilation throughput.
static inline TypeCode UnpackTypeCodeTypeAbstracted(PackedTypeCode ptc) {
  TypeCode c = UnpackTypeCodeType(ptc);
  // All reference-type codes sort below LowestPrimitiveTypeCode, so a single
  // compare maps every reference type onto TypeCode::OptRef.
  return c < LowestPrimitiveTypeCode ? TypeCode::OptRef : c;
}

// True iff the packed code denotes any reference type (AnyRef, FuncRef, or a
// type-indexed OptRef).
static inline bool IsReferenceType(PackedTypeCode ptc) {
  return UnpackTypeCodeTypeAbstracted(ptc) == TypeCode::OptRef;
}
// The RefType carries more information about types t for which t.isReference()
// is true.
class RefType {
 public:
  // Kind values coincide with the corresponding TypeCode values.
  enum Kind {
    Any = uint8_t(TypeCode::AnyRef),
    Func = uint8_t(TypeCode::FuncRef),
    TypeIndex = uint8_t(TypeCode::OptRef)
  };

 private:
  PackedTypeCode ptc_;

#ifdef DEBUG
  // Valid iff AnyRef/FuncRef carry no type index and OptRef carries one.
  bool isValid() const {
    switch (UnpackTypeCodeType(ptc_)) {
      case TypeCode::FuncRef:
      case TypeCode::AnyRef:
        MOZ_ASSERT(UnpackTypeCodeIndexUnchecked(ptc_) == NoRefTypeIndex);
        return true;
      case TypeCode::OptRef:
        MOZ_ASSERT(UnpackTypeCodeIndexUnchecked(ptc_) != NoRefTypeIndex);
        return true;
      default:
        return false;
    }
  }
#endif

  explicit RefType(Kind kind) : ptc_(PackTypeCode(TypeCode(kind))) {
    MOZ_ASSERT(isValid());
  }

  // We keep this private since all sorts of values coerce to uint32_t.
  explicit RefType(uint32_t refTypeIndex)
      : ptc_(PackTypeCode(TypeCode::OptRef, refTypeIndex)) {
    MOZ_ASSERT(isValid());
  }

 public:
  // Default-constructed RefTypes are invalid until assigned.
  RefType() : ptc_(InvalidPackedTypeCode()) {}

  explicit RefType(PackedTypeCode ptc) : ptc_(ptc) { MOZ_ASSERT(isValid()); }

  static RefType fromTypeCode(TypeCode tc) {
    MOZ_ASSERT(tc != TypeCode::OptRef);
    return RefType(Kind(tc));
  }

  static RefType fromTypeIndex(uint32_t refTypeIndex) {
    return RefType(refTypeIndex);
  }

  Kind kind() const { return Kind(UnpackTypeCodeType(ptc_)); }

  // Only valid for TypeIndex kinds (asserted inside UnpackTypeCodeIndex).
  uint32_t typeIndex() const { return UnpackTypeCodeIndex(ptc_); }

  PackedTypeCode packed() const { return ptc_; }

  static RefType any() { return RefType(Any); }
  static RefType func() { return RefType(Func); }

  bool operator==(const RefType& that) const { return ptc_ == that.ptc_; }
  bool operator!=(const RefType& that) const { return ptc_ != that.ptc_; }
};
// The ValType represents the storage type of a WebAssembly location, whether
// parameter, local, or global.
class ValType {
  PackedTypeCode tc_;

#ifdef DEBUG
  // Debug-only: the packed code must be one of the storage types a ValType
  // may hold (numeric, V128, or one of the reference types).
  bool isValidTypeCode() {
    MOZ_ASSERT(isValid());
    switch (UnpackTypeCodeType(tc_)) {
      case TypeCode::I32:
      case TypeCode::I64:
      case TypeCode::F32:
      case TypeCode::F64:
      case TypeCode::V128:
      case TypeCode::AnyRef:
      case TypeCode::FuncRef:
      case TypeCode::OptRef:
        return true;
      default:
        return false;
    }
  }
#endif

 public:
  // Kind mirrors the TypeCode values, with all reference types collapsed
  // onto Ref (== TypeCode::OptRef); use refTypeKind() to distinguish them.
  enum Kind {
    I32 = uint8_t(TypeCode::I32),
    I64 = uint8_t(TypeCode::I64),
    F32 = uint8_t(TypeCode::F32),
    F64 = uint8_t(TypeCode::F64),
    V128 = uint8_t(TypeCode::V128),
    Ref = uint8_t(TypeCode::OptRef),
  };

 private:
  explicit ValType(TypeCode c) : tc_(PackTypeCode(c)) {
    // OptRef needs a type index; it must come in via a RefType.
    MOZ_ASSERT(c != TypeCode::OptRef);
    MOZ_ASSERT(isValid());
  }

  TypeCode typeCode() const {
    MOZ_ASSERT(isValid());
    return UnpackTypeCodeType(tc_);
  }

 public:
  // Default-constructed ValTypes are invalid until assigned.
  ValType() : tc_(InvalidPackedTypeCode()) {}

  MOZ_IMPLICIT ValType(Kind c) : tc_(PackTypeCode(TypeCode(c))) {
    // Ref kinds carry a type index and must be constructed from a RefType.
    MOZ_ASSERT(c != Ref);
    MOZ_ASSERT(isValidTypeCode());
  }

  MOZ_IMPLICIT ValType(RefType rt) : tc_(rt.packed()) {
    MOZ_ASSERT(isValidTypeCode());
  }

  explicit ValType(PackedTypeCode ptc) : tc_(ptc) {
    MOZ_ASSERT(isValidTypeCode());
  }

  // Map a MIR type to the wasm value type it represents. Only numeric and
  // V128 MIR types are supported; anything else crashes.
  explicit ValType(jit::MIRType mty) {
    switch (mty) {
      case jit::MIRType::Int32:
        tc_ = PackTypeCode(TypeCode::I32);
        break;
      case jit::MIRType::Int64:
        tc_ = PackTypeCode(TypeCode::I64);
        break;
      case jit::MIRType::Float32:
        tc_ = PackTypeCode(TypeCode::F32);
        break;
      case jit::MIRType::Double:
        tc_ = PackTypeCode(TypeCode::F64);
        break;
      case jit::MIRType::Simd128:
        tc_ = PackTypeCode(TypeCode::V128);
        break;
      default:
        MOZ_CRASH("ValType(MIRType): unexpected type");
    }
  }

  // Construct from a numeric/V128 type code; crashes on any other code.
  static ValType fromNonRefTypeCode(TypeCode tc) {
#ifdef DEBUG
    switch (tc) {
      case TypeCode::I32:
      case TypeCode::I64:
      case TypeCode::F32:
      case TypeCode::F64:
      case TypeCode::V128:
        break;
      default:
        MOZ_CRASH("Bad type code");
    }
#endif
    return ValType(tc);
  }

  // Reconstitute from raw bits, e.g. after serialization; "unsafe" because
  // only debug builds validate the bits.
  static ValType fromBitsUnsafe(uint32_t bits) {
    return ValType(PackedTypeCodeFromBits(bits));
  }

  bool isValid() const { return IsValid(tc_); }

  PackedTypeCode packed() const {
    MOZ_ASSERT(isValid());
    return tc_;
  }

  // Raw bits for serialization; pair with fromBitsUnsafe().
  uint32_t bitsUnsafe() const {
    MOZ_ASSERT(isValid());
    return PackedTypeCodeToBits(tc_);
  }

  bool isAnyRef() const { return UnpackTypeCodeType(tc_) == TypeCode::AnyRef; }

  bool isFuncRef() const {
    return UnpackTypeCodeType(tc_) == TypeCode::FuncRef;
  }

  // All reference types are currently nullable.
  bool isNullable() const {
    MOZ_ASSERT(isReference());
    return true;
  }

  bool isTypeIndex() const {
    MOZ_ASSERT(isValid());
    return UnpackTypeCodeType(tc_) == TypeCode::OptRef;
  }

  bool isReference() const {
    MOZ_ASSERT(isValid());
    return IsReferenceType(tc_);
  }

  // The Kind, with every reference type reported as Kind::Ref.
  Kind kind() const {
    MOZ_ASSERT(isValid());
    return Kind(UnpackTypeCodeTypeAbstracted(tc_));
  }

  RefType refType() const {
    MOZ_ASSERT(isReference());
    return RefType(tc_);
  }

  RefType::Kind refTypeKind() const {
    MOZ_ASSERT(isReference());
    return RefType(tc_).kind();
  }

  // Some types are encoded as JS::Value when they escape from Wasm (when passed
  // as parameters to imports or returned from exports). For AnyRef the Value
  // encoding is pretty much a requirement. For other types it's a choice that
  // may (temporarily) simplify some code.
  bool isEncodedAsJSValueOnEscape() const {
    switch (typeCode()) {
      case TypeCode::AnyRef:
      case TypeCode::FuncRef:
        return true;
      default:
        return false;
    }
  }

  bool operator==(const ValType& that) const {
    MOZ_ASSERT(isValid() && that.isValid());
    return tc_ == that.tc_;
  }

  bool operator!=(const ValType& that) const {
    MOZ_ASSERT(isValid() && that.isValid());
    return tc_ != that.tc_;
  }

  // Comparison against a non-Ref Kind (Ref kinds carry extra state and must
  // be compared as ValType or RefType).
  bool operator==(Kind that) const {
    MOZ_ASSERT(isValid());
    MOZ_ASSERT(that != Kind::Ref);
    return Kind(typeCode()) == that;
  }

  bool operator!=(Kind that) const { return !(*this == that); }
};
struct V128 {
  uint8_t bytes[16];  // Little-endian

  // All lanes start out zero.
  V128() { memset(bytes, 0, sizeof(bytes)); }

  // Read lane `lane`, viewing the vector as 16/sizeof(T) lanes of T.
  template <typename T>
  T extractLane(unsigned lane) const {
    MOZ_ASSERT(lane < 16 / sizeof(T));
    T out;
    memcpy(&out, bytes + sizeof(T) * lane, sizeof(T));
    return out;
  }

  // Overwrite lane `lane` with `value`.
  template <typename T>
  void insertLane(unsigned lane, T value) {
    MOZ_ASSERT(lane < 16 / sizeof(T));
    memcpy(bytes + sizeof(T) * lane, &value, sizeof(T));
  }
};

static_assert(sizeof(V128) == 16, "Invariant");
// The dominant use of this data type is for locals and args, and profiling
// with ZenGarden and Tanks suggests an initial size of 16 minimises heap
// allocation, both in terms of blocks and bytes.
typedef Vector<ValType, 16, SystemAllocPolicy> ValTypeVector;
// ValType utilities
// Size in bytes of a stored value of this type; references are pointer-sized.
static inline unsigned SizeOf(ValType vt) {
  switch (vt.kind()) {
    case ValType::I32:
    case ValType::F32:
      return 4;
    case ValType::I64:
    case ValType::F64:
      return 8;
    case ValType::V128:
      return 16;
    case ValType::Ref:
      return sizeof(intptr_t);
  }
  MOZ_CRASH("Invalid ValType");
}
// Note, ToMIRType is only correct within Wasm, where an AnyRef is represented
// as a pointer. At the JS/wasm boundary, an AnyRef can be represented as a
// JS::Value, and the type translation may have to be handled specially and on a
// case-by-case basis.
static inline jit::MIRType ToMIRType(ValType vt) {
  switch (vt.kind()) {
    case ValType::I32:
      return jit::MIRType::Int32;
    case ValType::I64:
      return jit::MIRType::Int64;
    case ValType::F32:
      return jit::MIRType::Float32;
    case ValType::F64:
      return jit::MIRType::Double;
    case ValType::V128:
      return jit::MIRType::Simd128;
    case ValType::Ref:
      // All reference types map to a single pointer MIR type (see comment
      // above about the JS/wasm boundary).
      return jit::MIRType::RefOrNull;
  }
  MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
}

// "Number" here means any non-reference type, which includes V128.
static inline bool IsNumberType(ValType vt) { return !vt.isReference(); }

// Absent types (e.g. an empty result list) map to MIRType::None.
static inline jit::MIRType ToMIRType(const Maybe<ValType>& t) {
  return t ? ToMIRType(ValType(t.ref())) : jit::MIRType::None;
}
extern UniqueChars ToString(ValType type);
// Static (non-allocating) name of a value type, mostly for diagnostics.
static inline const char* ToCString(ValType type) {
  switch (type.kind()) {
    case ValType::I32:
      return "i32";
    case ValType::I64:
      return "i64";
    case ValType::V128:
      return "v128";
    case ValType::F32:
      return "f32";
    case ValType::F64:
      return "f64";
    case ValType::Ref:
      switch (type.refTypeKind()) {
        case RefType::Any:
          // AnyRef is surfaced under the text-format name "externref".
          return "externref";
        case RefType::Func:
          return "funcref";
        case RefType::TypeIndex:
          return "optref";
      }
  }
  MOZ_CRASH("bad value type");
}

// As above; an absent type prints as "void".
static inline const char* ToCString(const Maybe<ValType>& type) {
  return type ? ToCString(type.ref()) : "void";
}
// An AnyRef is a boxed value that can represent any wasm reference type and any
// host type that the host system allows to flow into and out of wasm
// transparently. It is a pointer-sized datum that has the same representation
// as all its subtypes (funcref, eqref, (ref T), et al) due to the non-coercive
// subtyping of the wasm type system. Its current representation is a plain
// JSObject*, and the private JSObject subtype WasmValueBox is used to box
// non-object non-null JS values.
//
// The C++/wasm boundary always uses a 'void*' type to express AnyRef values, to
// emphasize the pointer-ness of the value. The C++ code must transform the
// void* into an AnyRef by calling AnyRef::fromCompiledCode(), and transform an
// AnyRef into a void* by calling AnyRef::toCompiledCode(). Once in C++, we use
// AnyRef everywhere. A JS Value is transformed into an AnyRef by calling
// AnyRef::box(), and the AnyRef is transformed into a JS Value by calling
// AnyRef::unbox().
//
// NOTE that AnyRef values may point to GC'd storage and as such need to be
// rooted if they are kept live in boxed form across code that may cause GC!
// Use RootedAnyRef / HandleAnyRef / MutableHandleAnyRef where necessary.
//
// The lowest bits of the pointer value are used for tagging, to allow for some
// representation optimizations and to distinguish various types.
// For version 0, we simply equate AnyRef and JSObject* (this means that there
// are technically no tags at all yet). We use a simple boxing scheme that
// wraps a JS value that is not already JSObject in a distinguishable JSObject
// that holds the value, see WasmTypes.cpp for details. Knowledge of this
// mapping is embedded in CodeGenerator.cpp (in WasmBoxValue and
// WasmAnyRefFromJSObject) and in WasmStubs.cpp (in functions Box* and Unbox*).
class AnyRef {
  JSObject* value_;

  // The invalid sentinel is an all-ones pointer, which cannot be a real
  // (aligned) JSObject*.
  explicit AnyRef() : value_((JSObject*)-1) {}
  explicit AnyRef(JSObject* p) : value_(p) {
    // The low two bits are reserved for tagging and must be clear.
    MOZ_ASSERT(((uintptr_t)p & 0x03) == 0);
  }

 public:
  // An invalid AnyRef cannot arise naturally from wasm and so can be used as
  // a sentinel value to indicate failure from an AnyRef-returning function.
  static AnyRef invalid() { return AnyRef(); }

  // Given a void* that comes from compiled wasm code, turn it into AnyRef.
  static AnyRef fromCompiledCode(void* p) { return AnyRef((JSObject*)p); }

  // Given a JSObject* that comes from JS, turn it into AnyRef.
  static AnyRef fromJSObject(JSObject* p) { return AnyRef(p); }

  // Generate an AnyRef null pointer.
  static AnyRef null() { return AnyRef(nullptr); }

  bool isNull() { return value_ == nullptr; }

  // The raw representation handed to/kept by compiled wasm code.
  void* forCompiledCode() const { return value_; }

  JSObject* asJSObject() { return value_; }
  JSObject** asJSObjectAddress() { return &value_; }

  void trace(JSTracer* trc);

  // Tags (to be developed further)
  static constexpr uintptr_t AnyRefTagMask = 1;
  static constexpr uintptr_t AnyRefObjTag = 0;
};
using RootedAnyRef = Rooted<AnyRef>;
using HandleAnyRef = Handle<AnyRef>;
using MutableHandleAnyRef = MutableHandle<AnyRef>;
// TODO/AnyRef-boxing: With boxed immediates and strings, these will be defined
// as MOZ_CRASH or similar so that we can find all locations that need to be
// fixed.
// Runtime form: currently expands to a no-op.
#define ASSERT_ANYREF_IS_JSOBJECT (void)(0)
// Compile-time form: currently a trivially-true static_assert.
#define STATIC_ASSERT_ANYREF_IS_JSOBJECT static_assert(1, "AnyRef is JSObject")
// Given any JS value, box it as an AnyRef and store it in *result. Returns
// false on OOM.
bool BoxAnyRef(JSContext* cx, HandleValue val, MutableHandleAnyRef result);
// Given a JS value that requires an object box, box it as an AnyRef and return
// it, returning nullptr on OOM.
//
// Currently the values requiring a box are those other than JSObject* or
// nullptr, but in the future more values will be represented without an
// allocation.
JSObject* BoxBoxableValue(JSContext* cx, HandleValue val);
// Given any AnyRef, unbox it as a JS Value. If it is a reference to a wasm
// object it will be reflected as a JSObject* representing some TypedObject
// instance.
Value UnboxAnyRef(AnyRef val);
// A NativeObject with a single reserved slot, used to box non-object
// non-null JS values so they can be represented as an AnyRef (JSObject*).
class WasmValueBox : public NativeObject {
  static const unsigned VALUE_SLOT = 0;

 public:
  static const unsigned RESERVED_SLOTS = 1;
  static const JSClass class_;

  static WasmValueBox* create(JSContext* cx, HandleValue val);

  // The boxed JS value.
  Value value() const { return getFixedSlot(VALUE_SLOT); }

  // Offset of the value slot, for direct access from jitted code.
  static size_t offsetOfValue() {
    return NativeObject::getFixedSlotOffset(VALUE_SLOT);
  }
};
// A FuncRef is a JSFunction* and is hence also an AnyRef, and the remarks above
// about AnyRef apply also to FuncRef. When 'funcref' is used as a value type
// in wasm code, the value that is held is "the canonical function value", which
// is a function for which IsWasmExportedFunction() is true, and which has the
// correct identity wrt reference equality of functions. Notably, if a function
// is imported then its ref.func value compares === in JS to the function that
// was passed as an import when the instance was created.
//
// These rules ensure that casts from funcref to anyref are non-converting
// (generate no code), and that no wrapping or unwrapping needs to happen when a
// funcref or anyref flows across the JS/wasm boundary, and that functions have
// the necessary identity when observed from JS, and in the future, from wasm.
//
// Functions stored in tables, whether wasm tables or internal tables, can be
// stored in a form that optimizes for eg call speed, however.
//
// Reading a funcref from a funcref table, writing a funcref to a funcref table,
// and generating the value for a ref.func instruction are therefore nontrivial
// operations that require mapping between the canonical JSFunction and the
// optimized table representation. Once we get an instruction to call a
// ref.func directly it too will require such a mapping.
// In many cases, a FuncRef is exactly the same as AnyRef and we can use AnyRef
// functionality on funcref values. The FuncRef class exists mostly to add more
// checks and to make it clear, when we need to, that we're manipulating funcref
// values. FuncRef does not currently subclass AnyRef because there's been no
// need to, but it probably could.
class FuncRef {
  JSFunction* value_;

  // All-ones sentinel, mirroring AnyRef::invalid().
  explicit FuncRef() : value_((JSFunction*)-1) {}
  explicit FuncRef(JSFunction* p) : value_(p) {
    // The low two bits are reserved for tagging and must be clear.
    MOZ_ASSERT(((uintptr_t)p & 0x03) == 0);
  }

 public:
  // Given a void* that comes from compiled wasm code, turn it into FuncRef.
  static FuncRef fromCompiledCode(void* p) { return FuncRef((JSFunction*)p); }

  // Given a JSFunction* that comes from JS, turn it into FuncRef.
  static FuncRef fromJSFunction(JSFunction* p) { return FuncRef(p); }

  // Given an AnyRef that represents a possibly-null funcref, turn it into a
  // FuncRef. In DEBUG builds this unboxes and verifies the value is null or
  // a JSFunction; release builds cast without checking.
  static FuncRef fromAnyRefUnchecked(AnyRef p) {
#ifdef DEBUG
    Value v = UnboxAnyRef(p);
    if (v.isNull()) {
      return FuncRef(nullptr);
    }
    if (v.toObject().is<JSFunction>()) {
      return FuncRef(&v.toObject().as<JSFunction>());
    }
    MOZ_CRASH("Bad value");
#else
    return FuncRef(&p.asJSObject()->as<JSFunction>());
#endif
  }

  // Every FuncRef is also an AnyRef (see the comment block above).
  AnyRef asAnyRef() { return AnyRef::fromJSObject((JSObject*)value_); }

  // The raw representation handed to/kept by compiled wasm code.
  void* forCompiledCode() const { return value_; }

  JSFunction* asJSFunction() { return value_; }

  bool isNull() { return value_ == nullptr; }
};
using RootedFuncRef = Rooted<FuncRef>;
using HandleFuncRef = Handle<FuncRef>;
using MutableHandleFuncRef = MutableHandle<FuncRef>;
// Given any FuncRef, unbox it as a JS Value -- always a JSFunction*.
Value UnboxFuncRef(FuncRef val);
// Code can be compiled either with the Baseline compiler or the Ion compiler,
// and tier-variant data are tagged with the Tier value.
//
// A tier value is used to request tier-variant aspects of code, metadata, or
// linkdata. The tiers are normally explicit (Baseline and Ion); implicit tiers
// can be obtained through accessors on Code objects (eg, stableTier).
enum class Tier {
  Baseline,
  Debug = Baseline,  // alias: debug-enabled code uses the baseline tier
  Optimized,
  Serialized = Optimized  // alias: only optimized-tier code is (de)serialized
};
// Which backend to use in the case of the optimized tier.
enum class OptimizedBackend {
  Ion,
  Cranelift,
};

// The CompileMode controls how compilation of a module is performed (notably,
// how many times we compile it). Once: a single compilation; Tier1/Tier2:
// the first and second rounds of tiered compilation.
enum class CompileMode { Once, Tier1, Tier2 };

// Typed enum for whether debugging is enabled.
enum class DebugEnabled { False, True };
// A wasm module can either use no memory, an unshared memory (ArrayBuffer) or
// shared memory (SharedArrayBuffer).
// Note: None was previously spelled `false`; use plain integer enumerator
// values consistently.
enum class MemoryUsage { None = 0, Unshared = 1, Shared = 2 };
// Iterator over tiers present in a tiered data structure.
class Tiers {
  Tier t_[2];   // the tiers, in iteration order
  uint32_t n_;  // how many of t_ are populated (0..2)

 public:
  // An empty set of tiers.
  explicit Tiers() : n_(0) {}

  // A single tier.
  explicit Tiers(Tier t) : n_(1) { t_[0] = t; }

  // Two distinct tiers.
  explicit Tiers(Tier t, Tier u) : n_(2) {
    MOZ_ASSERT(t != u);
    t_[0] = t;
    t_[1] = u;
  }

  Tier* begin() { return t_; }
  Tier* end() { return t_ + n_; }
};
// A Module can either be asm.js or wasm.
enum ModuleKind { Wasm, AsmJS };

// Typed boolean for whether sharing is permitted.
enum class Shareable { False, True };
// The LitVal class represents a single WebAssembly value of a given value
// type, mostly for the purpose of numeric literals and initializers. A LitVal
// does not directly map to a JS value since there is not (currently) a precise
// representation of i64 values. A LitVal may contain non-canonical NaNs since,
// within WebAssembly, floats are not canonicalized. Canonicalization must
// happen at the JS boundary.
class LitVal {
 protected:
  ValType type_;

  // Untagged union of payloads; `type_` says which member is active. The
  // union is never traced: a LitVal may only hold a null reference (see the
  // AnyRef constructor below); use Val for non-null GC pointers.
  union U {
    U() : i32_(0) {}
    uint32_t i32_;
    uint64_t i64_;
    float f32_;
    double f64_;
    AnyRef ref_;
    V128 v128_;
  } u;

 public:
  // An invalid-typed, zeroed value.
  LitVal() : type_(), u{} {}

  // The zero/null value of the given type.
  explicit LitVal(ValType type) : type_(type) {
    switch (type.kind()) {
      case ValType::Kind::I32: {
        u.i32_ = 0;
        break;
      }
      case ValType::Kind::I64: {
        u.i64_ = 0;
        break;
      }
      case ValType::Kind::F32: {
        u.f32_ = 0;
        break;
      }
      case ValType::Kind::F64: {
        u.f64_ = 0;
        break;
      }
      case ValType::Kind::V128: {
        new (&u.v128_) V128();
        break;
      }
      case ValType::Kind::Ref: {
        u.ref_ = AnyRef::null();
        break;
      }
    }
  }

  explicit LitVal(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
  explicit LitVal(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }
  explicit LitVal(float f32) : type_(ValType::F32) { u.f32_ = f32; }
  explicit LitVal(double f64) : type_(ValType::F64) { u.f64_ = f64; }
  explicit LitVal(V128 v128) : type_(ValType::V128) { u.v128_ = v128; }

  // Only a null reference may be stored in a LitVal; non-null references
  // must go through Val so they can be traced.
  explicit LitVal(ValType type, AnyRef any) : type_(type) {
    MOZ_ASSERT(type.isReference());
    MOZ_ASSERT(any.isNull(),
               "use Val for non-nullptr ref types to get tracing");
    u.ref_ = any;
  }

  ValType type() const { return type_; }
  static constexpr size_t sizeofLargestValue() { return sizeof(u); }

  // Typed accessors; each asserts that the matching payload is active.
  uint32_t i32() const {
    MOZ_ASSERT(type_ == ValType::I32);
    return u.i32_;
  }
  uint64_t i64() const {
    MOZ_ASSERT(type_ == ValType::I64);
    return u.i64_;
  }
  const float& f32() const {
    MOZ_ASSERT(type_ == ValType::F32);
    return u.f32_;
  }
  const double& f64() const {
    MOZ_ASSERT(type_ == ValType::F64);
    return u.f64_;
  }
  AnyRef ref() const {
    MOZ_ASSERT(type_.isReference());
    return u.ref_;
  }
  const V128& v128() const {
    MOZ_ASSERT(type_ == ValType::V128);
    return u.v128_;
  }
};
// A Val is a LitVal that can contain (non-null) pointers to GC things. All Vals
// must be stored in Rooteds so that their trace() methods are called during
// stack marking. Vals do not implement barriers and thus may not be stored on
// the heap.
class MOZ_NON_PARAM Val : public LitVal {
 public:
  Val() : LitVal() {}
  explicit Val(ValType type) : LitVal(type) {}
  // Copy from a LitVal; defined out of line.
  explicit Val(const LitVal& val);
  explicit Val(uint32_t i32) : LitVal(i32) {}
  explicit Val(uint64_t i64) : LitVal(i64) {}
  explicit Val(float f32) : LitVal(f32) {}
  explicit Val(double f64) : LitVal(f64) {}
  explicit Val(V128 v128) : LitVal(v128) {}

  // Unlike LitVal, a Val may hold a non-null reference. The LitVal base is
  // constructed with a null ref (its constructor asserts nullness) and the
  // real reference is stored afterwards.
  explicit Val(ValType type, AnyRef val) : LitVal(type, AnyRef::null()) {
    MOZ_ASSERT(type.isReference());
    u.ref_ = val;
  }
  explicit Val(ValType type, FuncRef val) : LitVal(type, AnyRef::null()) {
    MOZ_ASSERT(type.isFuncRef());
    u.ref_ = val.asAnyRef();
  }

  // Trace the (possibly non-null) reference payload; see class comment about
  // rooting requirements.
  void trace(JSTracer* trc);
};
// Rooted/Handle aliases for single Vals and vectors of Vals. The vector alias
// uses `using` (not `typedef`) for consistency with the other aliases in this
// file.
using RootedVal = Rooted<Val>;
using HandleVal = Handle<Val>;
using MutableHandleVal = MutableHandle<Val>;

using ValVector = GCVector<Val, 0, SystemAllocPolicy>;
using RootedValVector = Rooted<ValVector>;
using HandleValVector = Handle<ValVector>;
using MutableHandleValVector = MutableHandle<ValVector>;
// The FuncType class represents a WebAssembly function signature which takes a
// list of value types and returns an expression type. The engine uses two
// in-memory representations of the argument Vector's memory (when elements do
// not fit inline): normal malloc allocation (via SystemAllocPolicy) and
// allocation in a LifoAlloc (via LifoAllocPolicy). The former FuncType objects
// can have any lifetime since they own the memory. The latter FuncType objects
// must not outlive the associated LifoAlloc mark/release interval (which is
// currently the duration of module validation+compilation). Thus, long-lived
// objects like WasmModule must use malloced allocation.
class FuncType {
  ValTypeVector args_;
  ValTypeVector results_;

 public:
  FuncType() : args_(), results_() {}
  FuncType(ValTypeVector&& args, ValTypeVector&& results)
      : args_(std::move(args)), results_(std::move(results)) {}

  // Deep-copies |rhs|'s argument and result types into this (empty) FuncType.
  // Returns false on OOM.
  MOZ_MUST_USE bool clone(const FuncType& rhs) {
    MOZ_ASSERT(args_.empty());
    MOZ_ASSERT(results_.empty());
    return args_.appendAll(rhs.args_) && results_.appendAll(rhs.results_);
  }

  ValType arg(unsigned i) const { return args_[i]; }
  const ValTypeVector& args() const { return args_; }
  ValType result(unsigned i) const { return results_[i]; }
  const ValTypeVector& results() const { return results_; }

  // Hash over the packed representation of all argument and result types;
  // pairs with operator== for FuncTypeHashPolicy.
  HashNumber hash() const {
    HashNumber hn = 0;
    for (const ValType& vt : args_) {
      hn = mozilla::AddToHash(hn, HashNumber(vt.packed()));
    }
    for (const ValType& vt : results_) {
      hn = mozilla::AddToHash(hn, HashNumber(vt.packed()));
    }
    return hn;
  }

  bool operator==(const FuncType& rhs) const {
    return EqualContainers(args(), rhs.args()) &&
           EqualContainers(results(), rhs.results());
  }
  bool operator!=(const FuncType& rhs) const { return !(*this == rhs); }

  // Entry from JS to wasm via the JIT is currently unimplemented for
  // functions that return multiple values.
  bool temporarilyUnsupportedResultCountForJitEntry() const {
    return results().length() > MaxResultsForJitEntry;
  }
  // Calls out from wasm to JS that return multiple values is currently
  // unsupported.
  bool temporarilyUnsupportedResultCountForJitExit() const {
    return results().length() > MaxResultsForJitExit;
  }

#ifdef ENABLE_WASM_SIMD
  // True if any argument or result is a v128; such signatures are guarded
  // against separately at the JS boundary.
  bool hasV128ArgOrRet() const {
    for (ValType arg : args()) {
      if (arg == ValType::V128) {
        return true;
      }
    }
    for (ValType result : results()) {
      if (result == ValType::V128) {
        return true;
      }
    }
    return false;
  }
#endif

  // For JS->wasm jit entries, AnyRef parameters and returns are allowed, as are
  // all reference types apart from TypeIndex. V128 types are excluded per spec
  // but are guarded against separately.
  bool temporarilyUnsupportedReftypeForEntry() const {
    for (ValType arg : args()) {
      if (arg.isReference() && !arg.isAnyRef()) {
        return true;
      }
    }
    for (ValType result : results()) {
      if (result.isTypeIndex()) {
        return true;
      }
    }
    return false;
  }
  // For inlined JS->wasm jit entries, the restrictions are identical to those
  // for regular jit entries above, so delegate instead of duplicating the
  // loops.
  bool temporarilyUnsupportedReftypeForInlineEntry() const {
    return temporarilyUnsupportedReftypeForEntry();
  }
  // For wasm->JS jit exits, AnyRef parameters and returns are allowed, as are
  // reference type parameters of all types except TypeIndex. V128 types are
  // excluded per spec but are guarded against separately.
  bool temporarilyUnsupportedReftypeForExit() const {
    for (ValType arg : args()) {
      if (arg.isTypeIndex()) {
        return true;
      }
    }
    for (ValType result : results()) {
      if (result.isReference() && !result.isAnyRef()) {
        return true;
      }
    }
    return false;
  }

  // True if some argument is encoded as a JS value when it escapes to JS
  // (see ValType::isEncodedAsJSValueOnEscape), requiring a check at the jit
  // exit.
  bool jitExitRequiresArgCheck() const {
    for (ValType arg : args()) {
      if (arg.isEncodedAsJSValueOnEscape()) {
        return true;
      }
    }
    return false;
  }

#ifdef WASM_PRIVATE_REFTYPES
  // True if the signature mentions a (module-private) type index in either
  // its arguments or its results.
  bool exposesTypeIndex() const {
    for (const ValType& arg : args()) {
      if (arg.isTypeIndex()) {
        return true;
      }
    }
    for (const ValType& result : results()) {
      if (result.isTypeIndex()) {
        return true;
      }
    }
    return false;
  }
#endif

  WASM_DECLARE_SERIALIZABLE(FuncType)
};
// Hash policy for sets/maps keyed on full FuncType signatures; hashing and
// equality consider both argument and result types.
struct FuncTypeHashPolicy {
  using Lookup = const FuncType&;
  static HashNumber hash(Lookup funcType) { return funcType.hash(); }
  static bool match(const FuncType* existing, Lookup candidate) {
    return *existing == candidate;
  }
};
// ArgTypeVector type.
//
// Functions usually receive one ABI argument per WebAssembly argument. However
// if a function has multiple results and some of those results go to the stack,
// then it additionally receives a synthetic ABI argument holding a pointer to
// the stack result area.
//
// Given the presence of synthetic arguments, sometimes we need a name for
// non-synthetic arguments. We call those "natural" arguments.
enum class StackResults { HasStackResults, NoStackResults };

class ArgTypeVector {
  const ValTypeVector& args_;
  bool hasStackResults_;

  // To allow ABIArgIter<ArgTypeVector>, we define a private length()
  // method. To prevent accidental errors, other users need to be
  // explicit and call lengthWithStackResults() or
  // lengthWithoutStackResults().
  size_t length() const {
    size_t n = args_.length();
    if (hasStackResults_) {
      n++;
    }
    return n;
  }
  friend jit::ABIArgIter<ArgTypeVector>;
  friend jit::ABIArgIter<const ArgTypeVector>;

 public:
  ArgTypeVector(const ValTypeVector& args, StackResults stackResults)
      : args_(args),
        hasStackResults_(stackResults == StackResults::HasStackResults) {}
  explicit ArgTypeVector(const FuncType& funcType);

  // True iff a synthetic pointer to the stack result area is passed as a
  // trailing argument.
  bool hasSyntheticStackResultPointerArg() const { return hasStackResults_; }
  StackResults stackResults() const {
    if (hasSyntheticStackResultPointerArg()) {
      return StackResults::HasStackResults;
    }
    return StackResults::NoStackResults;
  }
  size_t lengthWithoutStackResults() const { return args_.length(); }

  bool isSyntheticStackResultPointerArg(size_t idx) const {
    // The pointer to the stack results area, if present, is a synthetic
    // argument tacked on at the end, so only the last index can match.
    MOZ_ASSERT(idx < lengthWithStackResults());
    return idx == args_.length();
  }
  bool isNaturalArg(size_t idx) const {
    return !isSyntheticStackResultPointerArg(idx);
  }
  size_t naturalIndex(size_t idx) const {
    MOZ_ASSERT(isNaturalArg(idx));
    // Because the synthetic argument, if present, is tacked on the end, an
    // argument index that isn't synthetic is natural.
    return idx;
  }
  size_t lengthWithStackResults() const { return length(); }

  jit::MIRType operator[](size_t i) const {
    MOZ_ASSERT(i < lengthWithStackResults());
    return isSyntheticStackResultPointerArg(i)
               ? jit::MIRType::StackResults
               : ToMIRType(args_[naturalIndex(i)]);
  }
};
// A TaggedValue packs either a small immediate or a suitably-aligned pointer,
// together with a 2-bit kind, into a single word. Bit 1 of the kind
// distinguishes pointer kinds from immediate kinds, giving two kinds of each.
template <typename PointerType>
class TaggedValue {
 public:
  enum Kind {
    ImmediateKind1 = 0,
    ImmediateKind2 = 1,
    PointerKind1 = 2,
    PointerKind2 = 3
  };

 private:
  uintptr_t bits_;

  // The low two bits hold the kind. Immediates are shifted above them;
  // pointers must have their low two bits clear (i.e. be at least 4-byte
  // aligned), which PackPointer asserts.
  static constexpr uintptr_t PayloadShift = 2;
  static constexpr uintptr_t KindMask = 0x3;
  static constexpr uintptr_t PointerKindBit = 0x2;

  constexpr static bool IsPointerKind(Kind kind) {
    return uintptr_t(kind) & PointerKindBit;
  }
  constexpr static bool IsImmediateKind(Kind kind) {
    return !IsPointerKind(kind);
  }

  static_assert(IsImmediateKind(ImmediateKind1), "immediate kind 1");
  static_assert(IsImmediateKind(ImmediateKind2), "immediate kind 2");
  static_assert(IsPointerKind(PointerKind1), "pointer kind 1");
  static_assert(IsPointerKind(PointerKind2), "pointer kind 2");

  static uintptr_t PackImmediate(Kind kind, uint32_t imm) {
    MOZ_ASSERT(IsImmediateKind(kind));
    MOZ_ASSERT((uintptr_t(kind) & KindMask) == kind);
    // The top PayloadShift bits of the 32-bit immediate must be clear so the
    // shifted payload still fits in a word on 32-bit targets.
    MOZ_ASSERT((imm & (uint32_t(KindMask) << (32 - PayloadShift))) == 0);
    return uintptr_t(kind) | (uintptr_t(imm) << PayloadShift);
  }

  static uintptr_t PackPointer(Kind kind, PointerType* ptr) {
    uintptr_t ptrBits = reinterpret_cast<uintptr_t>(ptr);
    MOZ_ASSERT(IsPointerKind(kind));
    MOZ_ASSERT((uintptr_t(kind) & KindMask) == kind);
    MOZ_ASSERT((ptrBits & KindMask) == 0);
    return uintptr_t(kind) | ptrBits;
  }

 public:
  TaggedValue(Kind kind, uint32_t imm) : bits_(PackImmediate(kind, imm)) {}
  TaggedValue(Kind kind, PointerType* ptr) : bits_(PackPointer(kind, ptr)) {}

  uintptr_t bits() const { return bits_; }
  Kind kind() const { return Kind(bits() & KindMask); }
  // Recover the immediate payload; asserts this holds an immediate kind.
  uint32_t immediate() const {
    MOZ_ASSERT(IsImmediateKind(kind()));
    return mozilla::AssertedCast<uint32_t>(bits() >> PayloadShift);
  }
  // Recover the pointer payload by masking off the kind bits; asserts this
  // holds a pointer kind.
  PointerType* pointer() const {
    MOZ_ASSERT(IsPointerKind(kind()));
    return reinterpret_cast<PointerType*>(bits() & ~KindMask);
  }
};
// ResultType represents the WebAssembly spec's `resulttype`. Semantically, a
// result type is just a vec(valtype). For efficiency, though, the ResultType
// value is packed into a word, with separate encodings for these 3 cases:
//   []
//   [valtype]
//   pointer to ValTypeVector
//
// Additionally there is an encoding indicating uninitialized ResultType
// values.
//
// Generally in the latter case the ValTypeVector is the args() or results() of
// a FuncType in the compilation unit, so as long as the lifetime of the
// ResultType value is less than the OpIter, we can just borrow the pointer
// without ownership or copying.
class ResultType {
  using Tagged = TaggedValue<const ValTypeVector>;
  Tagged tagged_;

  enum Kind {
    EmptyKind = Tagged::ImmediateKind1,
    SingleKind = Tagged::ImmediateKind2,
#ifdef ENABLE_WASM_MULTI_VALUE
    VectorKind = Tagged::PointerKind1,
#endif
    InvalidKind = Tagged::PointerKind2,
  };

  ResultType(Kind kind, uint32_t imm) : tagged_(Tagged::Kind(kind), imm) {}
#ifdef ENABLE_WASM_MULTI_VALUE
  // Borrows |ptr|; the vector must outlive this ResultType.
  explicit ResultType(const ValTypeVector* ptr)
      : tagged_(Tagged::Kind(VectorKind), ptr) {}
#endif

  Kind kind() const { return Kind(tagged_.kind()); }

  // Unpack the single value type stored as an immediate.
  ValType singleValType() const {
    MOZ_ASSERT(kind() == SingleKind);
    return ValType(PackedTypeCodeFromBits(tagged_.immediate()));
  }

#ifdef ENABLE_WASM_MULTI_VALUE
  // The borrowed vector; only valid while its owner (e.g. a FuncType) lives.
  const ValTypeVector& values() const {
    MOZ_ASSERT(kind() == VectorKind);
    return *tagged_.pointer();
  }
#endif

 public:
  // Default-constructed values are invalid (uninitialized): length() and
  // operator[] crash on them, and only operator== tolerates them.
  ResultType() : tagged_(Tagged::Kind(InvalidKind), nullptr) {}

  static ResultType Empty() { return ResultType(EmptyKind, uint32_t(0)); }
  static ResultType Single(ValType vt) {
    return ResultType(SingleKind, vt.bitsUnsafe());
  }
  // 0- and 1-element vectors use the compact immediate encodings; longer
  // vectors are borrowed, so the caller must keep |vals| alive.
  static ResultType Vector(const ValTypeVector& vals) {
    switch (vals.length()) {
      case 0:
        return Empty();
      case 1:
        return Single(vals[0]);
      default:
#ifdef ENABLE_WASM_MULTI_VALUE
        return ResultType(&vals);
#else
        MOZ_CRASH("multi-value returns not supported");
#endif
    }
  }

  bool empty() const { return kind() == EmptyKind; }

  size_t length() const {
    switch (kind()) {
      case EmptyKind:
        return 0;
      case SingleKind:
        return 1;
#ifdef ENABLE_WASM_MULTI_VALUE
      case VectorKind:
        return values().length();
#endif
      default:
        MOZ_CRASH("bad resulttype");
    }
  }

  ValType operator[](size_t i) const {
    switch (kind()) {
      case SingleKind:
        MOZ_ASSERT(i == 0);
        return singleValType();
#ifdef ENABLE_WASM_MULTI_VALUE
      case VectorKind:
        return values()[i];
#endif
      default:
        MOZ_CRASH("bad resulttype");
    }
  }

  bool operator==(ResultType rhs) const {
    switch (kind()) {
      // Immediate (and invalid) encodings compare by raw bits; a vector on
      // the rhs has different kind bits and thus compares unequal.
      case EmptyKind:
      case SingleKind:
      case InvalidKind:
        return tagged_.bits() == rhs.tagged_.bits();
#ifdef ENABLE_WASM_MULTI_VALUE
      case VectorKind: {
        if (rhs.kind() != VectorKind) {
          return false;
        }
        // Element-wise comparison, not pointer identity.
        return EqualContainers(values(), rhs.values());
      }
#endif
      default:
        MOZ_CRASH("bad resulttype");
    }
  }
  bool operator!=(ResultType rhs) const { return !(*this == rhs); }
};
// BlockType represents the WebAssembly spec's `blocktype`. Semantically, a
// block type is just a (vec(valtype) -> vec(valtype)) with four special
// encodings which are represented explicitly in BlockType:
//   [] -> []
//   [] -> [valtype]
//   [params] -> [results] via pointer to FuncType
//   [] -> [results] via pointer to FuncType (ignoring [params])
class BlockType {
  using Tagged = TaggedValue<const FuncType>;
  Tagged tagged_;

  enum Kind {
    VoidToVoidKind = Tagged::ImmediateKind1,
    VoidToSingleKind = Tagged::ImmediateKind2,
#ifdef ENABLE_WASM_MULTI_VALUE
    FuncKind = Tagged::PointerKind1,
    FuncResultsKind = Tagged::PointerKind2
#endif
  };

  BlockType(Kind kind, uint32_t imm) : tagged_(Tagged::Kind(kind), imm) {}
#ifdef ENABLE_WASM_MULTI_VALUE
  // Borrows |type|; the FuncType must outlive this BlockType.
  BlockType(Kind kind, const FuncType& type)
      : tagged_(Tagged::Kind(kind), &type) {}
#endif

  Kind kind() const { return Kind(tagged_.kind()); }

  // Unpack the single result type stored as an immediate.
  ValType singleValType() const {
    MOZ_ASSERT(kind() == VoidToSingleKind);
    return ValType(PackedTypeCodeFromBits(tagged_.immediate()));
  }

#ifdef ENABLE_WASM_MULTI_VALUE
  const FuncType& funcType() const { return *tagged_.pointer(); }
#endif

 public:
  // Default-constructed BlockTypes carry an invalid packed type code and are
  // meant to be overwritten before use.
  BlockType()
      : tagged_(Tagged::Kind(VoidToVoidKind),
                uint32_t(InvalidPackedTypeCode())) {}

  static BlockType VoidToVoid() {
    return BlockType(VoidToVoidKind, uint32_t(0));
  }
  static BlockType VoidToSingle(ValType vt) {
    return BlockType(VoidToSingleKind, vt.bitsUnsafe());
  }
  // Block type for a full signature. With no parameters this degenerates to
  // FuncResults, which may in turn pick a compact immediate encoding.
  static BlockType Func(const FuncType& type) {
#ifdef ENABLE_WASM_MULTI_VALUE
    if (type.args().length() == 0) {
      return FuncResults(type);
    }
    return BlockType(FuncKind, type);
#else
    MOZ_ASSERT(type.args().length() == 0);
    return FuncResults(type);
#endif
  }
  // Block type using only |type|'s results; 0- and 1-result signatures use
  // the immediate encodings and do not retain a pointer to |type|.
  static BlockType FuncResults(const FuncType& type) {
    switch (type.results().length()) {
      case 0:
        return VoidToVoid();
      case 1:
        return VoidToSingle(type.results()[0]);
      default:
#ifdef ENABLE_WASM_MULTI_VALUE
        return BlockType(FuncResultsKind, type);
#else
        MOZ_CRASH("multi-value returns not supported");
#endif
    }
  }

  // Parameter types of the block; empty for all kinds except FuncKind.
  ResultType params() const {
    switch (kind()) {
      case VoidToVoidKind:
      case VoidToSingleKind:
#ifdef ENABLE_WASM_MULTI_VALUE
      case FuncResultsKind:
#endif
        return ResultType::Empty();
#ifdef ENABLE_WASM_MULTI_VALUE
      case FuncKind:
        return ResultType::Vector(funcType().args());
#endif
      default:
        MOZ_CRASH("unexpected kind");
    }
  }

  // Result types of the block.
  ResultType results() const {
    switch (kind()) {
      case VoidToVoidKind:
        return ResultType::Empty();
      case VoidToSingleKind:
        return ResultType::Single(singleValType());
#ifdef ENABLE_WASM_MULTI_VALUE
      case FuncKind:
      case FuncResultsKind:
        return ResultType::Vector(funcType().results());
#endif
      default:
        MOZ_CRASH("unexpected kind");
    }
  }

  bool operator==(BlockType rhs) const {
    if (kind() != rhs.kind()) {
      return false;
    }
    switch (kind()) {
      // Immediate encodings compare by raw bits.
      case VoidToVoidKind:
      case VoidToSingleKind:
        return tagged_.bits() == rhs.tagged_.bits();
#ifdef ENABLE_WASM_MULTI_VALUE
      // Pointer encodings compare structurally, not by pointer identity.
      case FuncKind:
        return funcType() == rhs.funcType();
      case FuncResultsKind:
        return EqualContainers(funcType().results(), rhs.funcType().results());
#endif
      default:
        MOZ_CRASH("unexpected kind");
    }
  }
  bool operator!=(BlockType rhs) const { return !(*this == rhs); }
};
// Structure type.
//
// The Module owns a dense array of StructType values that represent the
// structure types that the module knows about. It is created from the sparse
// array of types in the ModuleEnvironment when the Module is created.

// A single field of a struct type: its value type, its offset within the
// struct's storage (interpreted per StructType::isInline_), and whether it
// may be mutated.
struct StructField {
  ValType type;
  uint32_t offset;
  bool isMutable;
};
typedef Vector<StructField, 0, SystemAllocPolicy> StructFieldVector;
class StructType {
 public:
  StructFieldVector fields_;  // Field type, offset, and mutability
  uint32_t moduleIndex_;      // Index in a dense array of structs in the module
  bool isInline_;             // True if this is an InlineTypedObject and we
                              // interpret the offsets from the object pointer;
                              // if false this is an OutlineTypedObject and we
                              // interpret everything relative to the pointer to
                              // the attached storage.

  // Note: the original had a redundant second `public:` here; all members are
  // deliberately public.
  StructType() : fields_(), moduleIndex_(0), isInline_(true) {}
  StructType(StructFieldVector&& fields, uint32_t index, bool isInline)
      : fields_(std::move(fields)), moduleIndex_(index), isInline_(isInline) {}

  // Copies |src| into this StructType, returning false on OOM. Appends to
  // fields_, so this is normally invoked on a default-constructed object —
  // confirm callers before relying on other usage.
  bool copyFrom(const StructType& src) {
    if (!fields_.appendAll(src.fields_)) {
      return false;
    }
    moduleIndex_ = src.moduleIndex_;
    isInline_ = src.isInline_;
    return true;
  }

  // Defined out of line.
  bool hasPrefix(const StructType& other) const;

  WASM_DECLARE_SERIALIZABLE(StructType)
};
typedef Vector<StructType, 0, SystemAllocPolicy> StructTypeVector;
// An InitExpr describes a deferred initializer expression, used to initialize
// a global or a table element offset. Such expressions are created during
// decoding and actually executed on module instantiation.
class InitExpr {
 public:
  enum class Kind { Constant, GetGlobal, RefFunc };

 private:
  // Note: all this private data is currently (de)serialized via memcpy().
  Kind kind_;
  union U {
    LitVal val_;  // Kind::Constant: the constant value
    struct {
      uint32_t index_;  // Kind::GetGlobal: index of the referenced global...
      ValType type_;    // ...and its declared value type
    } global;
    uint32_t refFuncIndex_;  // Kind::RefFunc: index of the referenced function
    U() : global{} {}
  } u;

 public:
  // Leaves kind_ uninitialized; must be filled in by one of the factory
  // methods below or by (memcpy) deserialization before use.
  InitExpr() = default;

  static InitExpr fromConstant(LitVal val) {
    InitExpr expr;
    expr.kind_ = Kind::Constant;
    expr.u.val_ = val;
    return expr;
  }

  static InitExpr fromGetGlobal(uint32_t globalIndex, ValType type) {
    InitExpr expr;
    expr.kind_ = Kind::GetGlobal;
    expr.u.global.index_ = globalIndex;
    expr.u.global.type_ = type;
    return expr;
  }

  static InitExpr fromRefFunc(uint32_t refFuncIndex) {
    InitExpr expr;
    expr.kind_ = Kind::RefFunc;
    expr.u.refFuncIndex_ = refFuncIndex;
    return expr;
  }

  Kind kind() const { return kind_; }

  bool isVal() const { return kind() == Kind::Constant; }
  LitVal val() const {
    MOZ_ASSERT(isVal());
    return u.val_;
  }

  uint32_t globalIndex() const {
    MOZ_ASSERT(kind() == Kind::GetGlobal);
    return u.global.index_;
  }

  uint32_t refFuncIndex() const {
    MOZ_ASSERT(kind() == Kind::RefFunc);
    return u.refFuncIndex_;
  }

  // The value type the expression evaluates to; ref.func always yields a
  // funcref.
  ValType type() const {
    switch (kind()) {
      case Kind::Constant:
        return u.val_.type();
      case Kind::GetGlobal:
        return u.global.type_;
      case Kind::RefFunc:
        return ValType(RefType::func());
    }
    MOZ_CRASH("unexpected initExpr type");
  }
};
// CacheableChars is used to cacheably store UniqueChars.
struct CacheableChars : UniqueChars {
  CacheableChars() = default;
  // Takes ownership of |ptr|.
  explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
  // Implicit conversion from UniqueChars for convenient construction and
  // assignment.
  MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs)
      : UniqueChars(std::move(rhs)) {}
  WASM_DECLARE_SERIALIZABLE(CacheableChars)
};
typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
// Import describes a single wasm import. An ImportVector describes all
// of a single module's imports.
//
// ImportVector is built incrementally by ModuleGenerator and then stored
// immutably by Module.
struct Import {
  CacheableChars module;  // Name of the module the import comes from
  CacheableChars field;   // Name of the field within that module
  DefinitionKind kind;    // Kind of definition being imported

  Import() = default;
  Import(UniqueChars&& module, UniqueChars&& field, DefinitionKind kind)
      : module(std::move(module)), field(std::move(field)), kind(kind) {}

  WASM_DECLARE_SERIALIZABLE(Import)
};
typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
// Export describes the export of a definition in a Module to a field in the
// export object. The Export stores the index of the exported item in the
// appropriate type-specific module data structure (function table, global
// table, table table, and - eventually - memory table).
//
// Note a single definition can be exported by multiple Exports in the
// ExportVector.
//
// ExportVector is built incrementally by ModuleGenerator and then stored
// immutably by Module.
class Export {
  CacheableChars fieldName_;
  // Plain-old-data portion, grouped so it can be (de)serialized as raw bytes.
  struct CacheablePod {
    DefinitionKind kind_;
    uint32_t index_;
  } pod;

 public:
  Export() = default;
  // Constructors defined out of line; the second form covers kinds that carry
  // no index.
  explicit Export(UniqueChars fieldName, uint32_t index, DefinitionKind kind);
  explicit Export(UniqueChars fieldName, DefinitionKind kind);

  const char* fieldName() const { return fieldName_.get(); }

  DefinitionKind kind() const { return pod.kind_; }
  // Index accessors defined out of line; presumably each asserts the matching
  // kind — confirm in the implementation.
  uint32_t funcIndex() const;
  uint32_t globalIndex() const;
  uint32_t tableIndex() const;

  WASM_DECLARE_SERIALIZABLE(Export)
};
typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
// A GlobalDesc describes a single global variable.
//
// wasm can import and export mutable and immutable globals.
//
// asm.js can import mutable and immutable globals, but a mutable global has a
// location that is private to the module, and its initial value is copied into
// that cell from the environment. asm.js cannot export globals.

// Import: the value comes from the environment. Constant: immutable with a
// constant initializer. Variable: mutable, or initialized by a non-constant
// InitExpr (see GlobalDesc's constructor).
enum class GlobalKind { Import, Constant, Variable };
class GlobalDesc {
union V {
struct {
union U {
InitExpr initial_;
struct {
ValType type_;
uint32_t index_;
} import;
U() : import{} {}
} val;
unsigned offset_;
bool isMutable_;
bool isWasm_;
bool isExport_;
} var;
LitVal cst_;
V() {}
} u;
GlobalKind kind_;
// Private, as they have unusual semantics.
bool isExport() const { return !isConstant() && u.var.isExport_; }
bool isWasm() const { return !isConstant() && u.var.isWasm_; }
public:
GlobalDesc() = default;
explicit GlobalDesc(InitExpr initial, bool isMutable,
ModuleKind kind = ModuleKind::Wasm)
: kind_((isMutable || !initial.isVal()) ? GlobalKind::Variable
: GlobalKind::Constant) {
if (isVariable()) {
u.var.val.initial_ = initial;
u.var.isMutable_ = isMutable;
u.var.isWasm_ = kind == Wasm;
u.var.isExport_ = false;
u.var.offset_ = UINT32_MAX;
} else {
u.cst_ = initial.val();
}
}
explicit GlobalDesc(ValType type, bool isMutable, uint32_t importIndex,
ModuleKind kind = ModuleKind::Wasm)
: kind_(GlobalKind::Import) {
u.var.val.import.type_ = type;
u.var.val.import.index_ = importIndex;
u.var.isMutable_ = isMutable;
u.var.isWasm_ = kind == Wasm;
u.var.isExport_ = false;
u.var.offset_ = UINT32_MAX;
}
void setOffset(unsigned offset) {
MOZ_ASSERT(!isConstant());
MOZ_ASSERT(u.var.offset_ == UINT32_MAX);
u.var.offset_ = offset;