/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2016 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmValidate.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/Unused.h"
#include "mozilla/Utf8.h"
#include "builtin/TypedObject.h"
#include "jit/JitOptions.h"
#include "js/Printf.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "wasm/WasmOpIter.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::AsChars;
using mozilla::CheckedInt;
using mozilla::CheckedInt32;
using mozilla::IsUtf8;
using mozilla::MakeSpan;
using mozilla::Unused;
// Decoder implementation.
bool Decoder::failf(const char* msg, ...) {
va_list ap;
va_start(ap, msg);
UniqueChars str(JS_vsmprintf(msg, ap));
va_end(ap);
if (!str) {
return false;
}
return fail(str.get());
}
void Decoder::warnf(const char* msg, ...) {
if (!warnings_) {
return;
}
va_list ap;
va_start(ap, msg);
UniqueChars str(JS_vsmprintf(msg, ap));
va_end(ap);
if (!str) {
return;
}
Unused << warnings_->append(std::move(str));
}
bool Decoder::fail(size_t errorOffset, const char* msg) {
MOZ_ASSERT(error_);
UniqueChars strWithOffset(JS_smprintf("at offset %zu: %s", errorOffset, msg));
if (!strWithOffset) {
return false;
}
*error_ = std::move(strWithOffset);
return false;
}
bool Decoder::readSectionHeader(uint8_t* id, SectionRange* range) {
if (!readFixedU8(id)) {
return false;
}
uint32_t size;
if (!readVarU32(&size)) {
return false;
}
range->start = currentOffset();
range->size = size;
return true;
}
bool Decoder::startSection(SectionId id, ModuleEnvironment* env,
MaybeSectionRange* range, const char* sectionName) {
MOZ_ASSERT(!*range);
// Record state at beginning of section to allow rewinding to this point
// if, after skipping through several custom sections, we don't find the
// section 'id'.
const uint8_t* const initialCur = cur_;
const size_t initialCustomSectionsLength = env->customSections.length();
// Maintain a pointer to the current section that gets updated as custom
// sections are skipped.
const uint8_t* currentSectionStart = cur_;
// Only start a section with 'id', skipping any custom sections before it.
uint8_t idValue;
if (!readFixedU8(&idValue)) {
goto rewind;
}
while (idValue != uint8_t(id)) {
if (idValue != uint8_t(SectionId::Custom)) {
goto rewind;
}
// Rewind to the beginning of the current section since this is what
// skipCustomSection() assumes.
cur_ = currentSectionStart;
if (!skipCustomSection(env)) {
return false;
}
// Having successfully skipped a custom section, consider the next
// section.
currentSectionStart = cur_;
if (!readFixedU8(&idValue)) {
goto rewind;
}
}
// Don't check the size since the range of bytes being decoded might not
// contain the section body. (This is currently the case when streaming: the
// code section header is decoded with the module environment bytes, the
// body of the code section is streamed in separately.)
uint32_t size;
if (!readVarU32(&size)) {
goto fail;
}
range->emplace();
(*range)->start = currentOffset();
(*range)->size = size;
return true;
rewind:
cur_ = initialCur;
env->customSections.shrinkTo(initialCustomSectionsLength);
return true;
fail:
return failf("failed to start %s section", sectionName);
}
bool Decoder::finishSection(const SectionRange& range,
const char* sectionName) {
if (resilientMode_) {
return true;
}
if (range.size != currentOffset() - range.start) {
return failf("byte size mismatch in %s section", sectionName);
}
return true;
}
bool Decoder::startCustomSection(const char* expected, size_t expectedLength,
ModuleEnvironment* env,
MaybeSectionRange* range) {
// Record state at beginning of section to allow rewinding to this point
// if, after skipping through several custom sections, we don't find the
// section 'id'.
const uint8_t* const initialCur = cur_;
const size_t initialCustomSectionsLength = env->customSections.length();
while (true) {
// Try to start a custom section. If we can't, rewind to the beginning
// since we may have skipped several custom sections already looking for
// 'expected'.
if (!startSection(SectionId::Custom, env, range, "custom")) {
return false;
}
if (!*range) {
goto rewind;
}
if (bytesRemain() < (*range)->size) {
goto fail;
}
CustomSectionEnv sec;
if (!readVarU32(&sec.nameLength) || sec.nameLength > bytesRemain()) {
goto fail;
}
sec.nameOffset = currentOffset();
sec.payloadOffset = sec.nameOffset + sec.nameLength;
uint32_t payloadEnd = (*range)->start + (*range)->size;
if (sec.payloadOffset > payloadEnd) {
goto fail;
}
sec.payloadLength = payloadEnd - sec.payloadOffset;
// Now that we have a valid custom section, record its offsets in the
// metadata which can be queried by the user via Module.customSections.
// Note: after an entry is appended, it may be popped if this loop or
// the loop in startSection needs to rewind.
if (!env->customSections.append(sec)) {
return false;
}
// If this is the expected custom section, we're done.
if (!expected || (expectedLength == sec.nameLength &&
!memcmp(cur_, expected, sec.nameLength))) {
cur_ += sec.nameLength;
return true;
}
// Otherwise, blindly skip the custom section and keep looking.
skipAndFinishCustomSection(**range);
range->reset();
}
MOZ_CRASH("unreachable");
rewind:
cur_ = initialCur;
env->customSections.shrinkTo(initialCustomSectionsLength);
return true;
fail:
return fail("failed to start custom section");
}
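// Example (editor's sketch, not from the original sources): a caller looking
// for a particular custom section, say "name", would pair the helpers above
// and below roughly like this:
//
//   MaybeSectionRange range;
//   if (!d.startCustomSection("name", strlen("name"), env, &range)) {
//     return false;  // hard decode error
//   }
//   if (!range) {
//     return true;   // section absent; not an error
//   }
//   /* ...decode the payload... */
//   d.finishCustomSection("name", *range);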
void Decoder::finishCustomSection(const char* name, const SectionRange& range) {
MOZ_ASSERT(cur_ >= beg_);
MOZ_ASSERT(cur_ <= end_);
if (error_ && *error_) {
warnf("in the '%s' custom section: %s", name, error_->get());
skipAndFinishCustomSection(range);
return;
}
uint32_t actualSize = currentOffset() - range.start;
if (range.size != actualSize) {
if (actualSize < range.size) {
warnf("in the '%s' custom section: %" PRIu32 " unconsumed bytes", name,
uint32_t(range.size - actualSize));
} else {
warnf("in the '%s' custom section: %" PRIu32
" bytes consumed past the end",
name, uint32_t(actualSize - range.size));
}
skipAndFinishCustomSection(range);
return;
}
// Nothing to do! (c.f. skipAndFinishCustomSection())
}
void Decoder::skipAndFinishCustomSection(const SectionRange& range) {
MOZ_ASSERT(cur_ >= beg_);
MOZ_ASSERT(cur_ <= end_);
cur_ = (beg_ + (range.start - offsetInModule_)) + range.size;
MOZ_ASSERT(cur_ <= end_);
clearError();
}
bool Decoder::skipCustomSection(ModuleEnvironment* env) {
MaybeSectionRange range;
if (!startCustomSection(nullptr, 0, env, &range)) {
return false;
}
if (!range) {
return fail("expected custom section");
}
skipAndFinishCustomSection(*range);
return true;
}
bool Decoder::startNameSubsection(NameType nameType,
Maybe<uint32_t>* endOffset) {
MOZ_ASSERT(!*endOffset);
const uint8_t* const initialPosition = cur_;
uint8_t nameTypeValue;
if (!readFixedU8(&nameTypeValue)) {
goto rewind;
}
if (nameTypeValue != uint8_t(nameType)) {
goto rewind;
}
uint32_t payloadLength;
if (!readVarU32(&payloadLength) || payloadLength > bytesRemain()) {
return fail("bad name subsection payload length");
}
*endOffset = Some(currentOffset() + payloadLength);
return true;
rewind:
cur_ = initialPosition;
return true;
}
bool Decoder::finishNameSubsection(uint32_t expected) {
uint32_t actual = currentOffset();
if (expected != actual) {
return failf("bad name subsection length (expected: %" PRIu32
", actual: %" PRIu32 ")",
expected, actual);
}
return true;
}
bool Decoder::skipNameSubsection() {
uint8_t nameTypeValue;
if (!readFixedU8(&nameTypeValue)) {
return fail("unable to read name subsection id");
}
switch (nameTypeValue) {
case uint8_t(NameType::Module):
case uint8_t(NameType::Function):
return fail("out of order name subsections");
default:
break;
}
uint32_t payloadLength;
if (!readVarU32(&payloadLength) || !readBytes(payloadLength)) {
return fail("bad name subsection payload length");
}
return true;
}
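// Example (editor's sketch): a name-section decoder is expected to drive the
// subsection helpers above in order, e.g. for function names:
//
//   Maybe<uint32_t> endOffset;
//   if (!d.startNameSubsection(NameType::Function, &endOffset)) {
//     return false;
//   }
//   if (endOffset) {
//     /* ...read the function name map... */
//     if (!d.finishNameSubsection(*endOffset)) {
//       return false;
//     }
//   }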
// Misc helpers.
bool wasm::EncodeLocalEntries(Encoder& e, const ValTypeVector& locals) {
if (locals.length() > MaxLocals) {
return false;
}
uint32_t numLocalEntries = 0;
if (locals.length()) {
ValType prev = locals[0];
numLocalEntries++;
for (ValType t : locals) {
if (t != prev) {
numLocalEntries++;
prev = t;
}
}
}
if (!e.writeVarU32(numLocalEntries)) {
return false;
}
if (numLocalEntries) {
ValType prev = locals[0];
uint32_t count = 1;
for (uint32_t i = 1; i < locals.length(); i++, count++) {
if (prev != locals[i]) {
if (!e.writeVarU32(count)) {
return false;
}
if (!e.writeValType(prev)) {
return false;
}
prev = locals[i];
count = 0;
}
}
if (!e.writeVarU32(count)) {
return false;
}
if (!e.writeValType(prev)) {
return false;
}
}
return true;
}
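// Worked example (editor's note): local declarations are run-length encoded
// per the wasm binary format. For locals [i32, i32, i64, f64, f64, f64],
// EncodeLocalEntries above writes the entry count 3 followed by the pairs
// (2, i32), (1, i64), (3, f64); DecodeLocalEntries below expands such pairs
// back into a flat ValTypeVector, bounded by MaxLocals.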
bool wasm::DecodeLocalEntries(Decoder& d, const TypeDefVector& types,
bool refTypesEnabled, bool gcTypesEnabled,
ValTypeVector* locals) {
uint32_t numLocalEntries;
if (!d.readVarU32(&numLocalEntries)) {
return d.fail("failed to read number of local entries");
}
for (uint32_t i = 0; i < numLocalEntries; i++) {
uint32_t count;
if (!d.readVarU32(&count)) {
return d.fail("failed to read local entry count");
}
if (MaxLocals - locals->length() < count) {
return d.fail("too many locals");
}
ValType type;
if (!d.readValType(types, refTypesEnabled, gcTypesEnabled, &type)) {
return false;
}
if (!locals->appendN(type, count)) {
return false;
}
}
return true;
}
bool wasm::DecodeValidatedLocalEntries(Decoder& d, ValTypeVector* locals) {
uint32_t numLocalEntries;
MOZ_ALWAYS_TRUE(d.readVarU32(&numLocalEntries));
for (uint32_t i = 0; i < numLocalEntries; i++) {
uint32_t count = d.uncheckedReadVarU32();
MOZ_ASSERT(MaxLocals - locals->length() >= count);
if (!locals->appendN(d.uncheckedReadValType(), count)) {
return false;
}
}
return true;
}
// Function body validation.
class NothingVector {
Nothing unused_;
public:
bool resize(size_t length) { return true; }
Nothing& operator[](size_t) { return unused_; }
Nothing& back() { return unused_; }
};
struct ValidatingPolicy {
using Value = Nothing;
using ValueVector = NothingVector;
using ControlItem = Nothing;
};
using ValidatingOpIter = OpIter<ValidatingPolicy>;
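// Editor's note: OpIter is parameterized by a policy type. ValidatingPolicy
// instantiates it with placeholder Nothing values, so operands and control
// items are type-checked but never materialized; a compiler would supply a
// policy whose Value and ControlItem types carry real IR instead.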
static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
uint32_t funcIndex,
const ValTypeVector& locals,
const uint8_t* bodyEnd, Decoder* d) {
ValidatingOpIter iter(env, *d);
if (!iter.readFunctionStart(funcIndex)) {
return false;
}
#define CHECK(c) \
if (!(c)) return false; \
break
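// Each opcode case below delegates operand and type checking to the matching
// OpIter read*() method; CHECK returns false on failure and otherwise breaks
// out of the switch so the loop fetches the next opcode.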
while (true) {
OpBytes op;
if (!iter.readOp(&op)) {
return false;
}
Nothing nothing;
NothingVector nothings;
ResultType unusedType;
switch (op.b0) {
case uint16_t(Op::End): {
LabelKind unusedKind;
if (!iter.readEnd(&unusedKind, &unusedType, &nothings, &nothings)) {
return false;
}
iter.popEnd();
if (iter.controlStackEmpty()) {
return iter.readFunctionEnd(bodyEnd);
}
break;
}
case uint16_t(Op::Nop):
CHECK(iter.readNop());
case uint16_t(Op::Drop):
CHECK(iter.readDrop());
case uint16_t(Op::Call): {
uint32_t unusedIndex;
NothingVector unusedArgs;
CHECK(iter.readCall(&unusedIndex, &unusedArgs));
}
case uint16_t(Op::CallIndirect): {
uint32_t unusedIndex, unusedIndex2;
NothingVector unusedArgs;
CHECK(iter.readCallIndirect(&unusedIndex, &unusedIndex2, &nothing,
&unusedArgs));
}
case uint16_t(Op::I32Const): {
int32_t unused;
CHECK(iter.readI32Const(&unused));
}
case uint16_t(Op::I64Const): {
int64_t unused;
CHECK(iter.readI64Const(&unused));
}
case uint16_t(Op::F32Const): {
float unused;
CHECK(iter.readF32Const(&unused));
}
case uint16_t(Op::F64Const): {
double unused;
CHECK(iter.readF64Const(&unused));
}
case uint16_t(Op::GetLocal): {
uint32_t unused;
CHECK(iter.readGetLocal(locals, &unused));
}
case uint16_t(Op::SetLocal): {
uint32_t unused;
CHECK(iter.readSetLocal(locals, &unused, &nothing));
}
case uint16_t(Op::TeeLocal): {
uint32_t unused;
CHECK(iter.readTeeLocal(locals, &unused, &nothing));
}
case uint16_t(Op::GetGlobal): {
uint32_t unused;
CHECK(iter.readGetGlobal(&unused));
}
case uint16_t(Op::SetGlobal): {
uint32_t unused;
CHECK(iter.readSetGlobal(&unused, &nothing));
}
#ifdef ENABLE_WASM_REFTYPES
case uint16_t(Op::TableGet): {
if (!env.refTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedTableIndex;
CHECK(iter.readTableGet(&unusedTableIndex, &nothing));
}
case uint16_t(Op::TableSet): {
if (!env.refTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedTableIndex;
CHECK(iter.readTableSet(&unusedTableIndex, &nothing, &nothing));
}
#endif
case uint16_t(Op::SelectNumeric): {
StackType unused;
CHECK(iter.readSelect(/*typed*/ false, &unused, &nothing, &nothing,
&nothing));
}
case uint16_t(Op::SelectTyped): {
if (!env.refTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
StackType unused;
CHECK(iter.readSelect(/*typed*/ true, &unused, &nothing, &nothing,
&nothing));
}
case uint16_t(Op::Block):
CHECK(iter.readBlock(&unusedType));
case uint16_t(Op::Loop):
CHECK(iter.readLoop(&unusedType));
case uint16_t(Op::If):
CHECK(iter.readIf(&unusedType, &nothing));
case uint16_t(Op::Else):
CHECK(iter.readElse(&unusedType, &unusedType, &nothings));
case uint16_t(Op::I32Clz):
case uint16_t(Op::I32Ctz):
case uint16_t(Op::I32Popcnt):
CHECK(iter.readUnary(ValType::I32, &nothing));
case uint16_t(Op::I64Clz):
case uint16_t(Op::I64Ctz):
case uint16_t(Op::I64Popcnt):
CHECK(iter.readUnary(ValType::I64, &nothing));
case uint16_t(Op::F32Abs):
case uint16_t(Op::F32Neg):
case uint16_t(Op::F32Ceil):
case uint16_t(Op::F32Floor):
case uint16_t(Op::F32Sqrt):
case uint16_t(Op::F32Trunc):
case uint16_t(Op::F32Nearest):
CHECK(iter.readUnary(ValType::F32, &nothing));
case uint16_t(Op::F64Abs):
case uint16_t(Op::F64Neg):
case uint16_t(Op::F64Ceil):
case uint16_t(Op::F64Floor):
case uint16_t(Op::F64Sqrt):
case uint16_t(Op::F64Trunc):
case uint16_t(Op::F64Nearest):
CHECK(iter.readUnary(ValType::F64, &nothing));
case uint16_t(Op::I32Add):
case uint16_t(Op::I32Sub):
case uint16_t(Op::I32Mul):
case uint16_t(Op::I32DivS):
case uint16_t(Op::I32DivU):
case uint16_t(Op::I32RemS):
case uint16_t(Op::I32RemU):
case uint16_t(Op::I32And):
case uint16_t(Op::I32Or):
case uint16_t(Op::I32Xor):
case uint16_t(Op::I32Shl):
case uint16_t(Op::I32ShrS):
case uint16_t(Op::I32ShrU):
case uint16_t(Op::I32Rotl):
case uint16_t(Op::I32Rotr):
CHECK(iter.readBinary(ValType::I32, &nothing, &nothing));
case uint16_t(Op::I64Add):
case uint16_t(Op::I64Sub):
case uint16_t(Op::I64Mul):
case uint16_t(Op::I64DivS):
case uint16_t(Op::I64DivU):
case uint16_t(Op::I64RemS):
case uint16_t(Op::I64RemU):
case uint16_t(Op::I64And):
case uint16_t(Op::I64Or):
case uint16_t(Op::I64Xor):
case uint16_t(Op::I64Shl):
case uint16_t(Op::I64ShrS):
case uint16_t(Op::I64ShrU):
case uint16_t(Op::I64Rotl):
case uint16_t(Op::I64Rotr):
CHECK(iter.readBinary(ValType::I64, &nothing, &nothing));
case uint16_t(Op::F32Add):
case uint16_t(Op::F32Sub):
case uint16_t(Op::F32Mul):
case uint16_t(Op::F32Div):
case uint16_t(Op::F32Min):
case uint16_t(Op::F32Max):
case uint16_t(Op::F32CopySign):
CHECK(iter.readBinary(ValType::F32, &nothing, &nothing));
case uint16_t(Op::F64Add):
case uint16_t(Op::F64Sub):
case uint16_t(Op::F64Mul):
case uint16_t(Op::F64Div):
case uint16_t(Op::F64Min):
case uint16_t(Op::F64Max):
case uint16_t(Op::F64CopySign):
CHECK(iter.readBinary(ValType::F64, &nothing, &nothing));
case uint16_t(Op::I32Eq):
case uint16_t(Op::I32Ne):
case uint16_t(Op::I32LtS):
case uint16_t(Op::I32LtU):
case uint16_t(Op::I32LeS):
case uint16_t(Op::I32LeU):
case uint16_t(Op::I32GtS):
case uint16_t(Op::I32GtU):
case uint16_t(Op::I32GeS):
case uint16_t(Op::I32GeU):
CHECK(iter.readComparison(ValType::I32, &nothing, &nothing));
case uint16_t(Op::I64Eq):
case uint16_t(Op::I64Ne):
case uint16_t(Op::I64LtS):
case uint16_t(Op::I64LtU):
case uint16_t(Op::I64LeS):
case uint16_t(Op::I64LeU):
case uint16_t(Op::I64GtS):
case uint16_t(Op::I64GtU):
case uint16_t(Op::I64GeS):
case uint16_t(Op::I64GeU):
CHECK(iter.readComparison(ValType::I64, &nothing, &nothing));
case uint16_t(Op::F32Eq):
case uint16_t(Op::F32Ne):
case uint16_t(Op::F32Lt):
case uint16_t(Op::F32Le):
case uint16_t(Op::F32Gt):
case uint16_t(Op::F32Ge):
CHECK(iter.readComparison(ValType::F32, &nothing, &nothing));
case uint16_t(Op::F64Eq):
case uint16_t(Op::F64Ne):
case uint16_t(Op::F64Lt):
case uint16_t(Op::F64Le):
case uint16_t(Op::F64Gt):
case uint16_t(Op::F64Ge):
CHECK(iter.readComparison(ValType::F64, &nothing, &nothing));
case uint16_t(Op::I32Eqz):
CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
case uint16_t(Op::I64Eqz):
case uint16_t(Op::I32WrapI64):
CHECK(iter.readConversion(ValType::I64, ValType::I32, &nothing));
case uint16_t(Op::I32TruncSF32):
case uint16_t(Op::I32TruncUF32):
case uint16_t(Op::I32ReinterpretF32):
CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
case uint16_t(Op::I32TruncSF64):
case uint16_t(Op::I32TruncUF64):
CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
case uint16_t(Op::I64ExtendSI32):
case uint16_t(Op::I64ExtendUI32):
CHECK(iter.readConversion(ValType::I32, ValType::I64, &nothing));
case uint16_t(Op::I64TruncSF32):
case uint16_t(Op::I64TruncUF32):
CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
case uint16_t(Op::I64TruncSF64):
case uint16_t(Op::I64TruncUF64):
case uint16_t(Op::I64ReinterpretF64):
CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
case uint16_t(Op::F32ConvertSI32):
case uint16_t(Op::F32ConvertUI32):
case uint16_t(Op::F32ReinterpretI32):
CHECK(iter.readConversion(ValType::I32, ValType::F32, &nothing));
case uint16_t(Op::F32ConvertSI64):
case uint16_t(Op::F32ConvertUI64):
CHECK(iter.readConversion(ValType::I64, ValType::F32, &nothing));
case uint16_t(Op::F32DemoteF64):
CHECK(iter.readConversion(ValType::F64, ValType::F32, &nothing));
case uint16_t(Op::F64ConvertSI32):
case uint16_t(Op::F64ConvertUI32):
CHECK(iter.readConversion(ValType::I32, ValType::F64, &nothing));
case uint16_t(Op::F64ConvertSI64):
case uint16_t(Op::F64ConvertUI64):
case uint16_t(Op::F64ReinterpretI64):
CHECK(iter.readConversion(ValType::I64, ValType::F64, &nothing));
case uint16_t(Op::F64PromoteF32):
CHECK(iter.readConversion(ValType::F32, ValType::F64, &nothing));
case uint16_t(Op::I32Extend8S):
case uint16_t(Op::I32Extend16S):
CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
case uint16_t(Op::I64Extend8S):
case uint16_t(Op::I64Extend16S):
case uint16_t(Op::I64Extend32S):
CHECK(iter.readConversion(ValType::I64, ValType::I64, &nothing));
case uint16_t(Op::I32Load8S):
case uint16_t(Op::I32Load8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::I32, 1, &addr));
}
case uint16_t(Op::I32Load16S):
case uint16_t(Op::I32Load16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::I32, 2, &addr));
}
case uint16_t(Op::I32Load): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::I32, 4, &addr));
}
case uint16_t(Op::I64Load8S):
case uint16_t(Op::I64Load8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::I64, 1, &addr));
}
case uint16_t(Op::I64Load16S):
case uint16_t(Op::I64Load16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::I64, 2, &addr));
}
case uint16_t(Op::I64Load32S):
case uint16_t(Op::I64Load32U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::I64, 4, &addr));
}
case uint16_t(Op::I64Load): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::I64, 8, &addr));
}
case uint16_t(Op::F32Load): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::F32, 4, &addr));
}
case uint16_t(Op::F64Load): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::F64, 8, &addr));
}
case uint16_t(Op::I32Store8): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::I32, 1, &addr, &nothing));
}
case uint16_t(Op::I32Store16): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::I32, 2, &addr, &nothing));
}
case uint16_t(Op::I32Store): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::I32, 4, &addr, &nothing));
}
case uint16_t(Op::I64Store8): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::I64, 1, &addr, &nothing));
}
case uint16_t(Op::I64Store16): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::I64, 2, &addr, &nothing));
}
case uint16_t(Op::I64Store32): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::I64, 4, &addr, &nothing));
}
case uint16_t(Op::I64Store): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::I64, 8, &addr, &nothing));
}
case uint16_t(Op::F32Store): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::F32, 4, &addr, &nothing));
}
case uint16_t(Op::F64Store): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::F64, 8, &addr, &nothing));
}
case uint16_t(Op::MemoryGrow):
CHECK(iter.readMemoryGrow(&nothing));
case uint16_t(Op::MemorySize):
CHECK(iter.readMemorySize());
case uint16_t(Op::Br): {
uint32_t unusedDepth;
CHECK(iter.readBr(&unusedDepth, &unusedType, &nothings));
}
case uint16_t(Op::BrIf): {
uint32_t unusedDepth;
CHECK(iter.readBrIf(&unusedDepth, &unusedType, &nothings, &nothing));
}
case uint16_t(Op::BrTable): {
Uint32Vector unusedDepths;
uint32_t unusedDefault;
CHECK(iter.readBrTable(&unusedDepths, &unusedDefault, &unusedType,
&nothings, &nothing));
}
case uint16_t(Op::Return):
CHECK(iter.readReturn(&nothings));
case uint16_t(Op::Unreachable):
CHECK(iter.readUnreachable());
#ifdef ENABLE_WASM_GC
case uint16_t(Op::GcPrefix): {
switch (op.b1) {
case uint32_t(GcOp::StructNew): {
if (!env.gcTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedUint;
NothingVector unusedArgs;
CHECK(iter.readStructNew(&unusedUint, &unusedArgs));
}
case uint32_t(GcOp::StructGet): {
if (!env.gcTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedUint1, unusedUint2;
CHECK(iter.readStructGet(&unusedUint1, &unusedUint2, &nothing));
}
case uint32_t(GcOp::StructSet): {
if (!env.gcTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedUint1, unusedUint2;
CHECK(iter.readStructSet(&unusedUint1, &unusedUint2, &nothing,
&nothing));
}
case uint32_t(GcOp::StructNarrow): {
if (!env.gcTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
ValType unusedTy, unusedTy2;
CHECK(iter.readStructNarrow(&unusedTy, &unusedTy2, &nothing));
}
default:
return iter.unrecognizedOpcode(&op);
}
break;
}
#endif
#ifdef ENABLE_WASM_SIMD
case uint16_t(Op::SimdPrefix): {
if (!env.v128Enabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t noIndex;
switch (op.b1) {
case uint32_t(SimdOp::I8x16ExtractLaneS):
case uint32_t(SimdOp::I8x16ExtractLaneU):
CHECK(iter.readExtractLane(ValType::I32, 16, &noIndex, &nothing));
case uint32_t(SimdOp::I16x8ExtractLaneS):
case uint32_t(SimdOp::I16x8ExtractLaneU):
CHECK(iter.readExtractLane(ValType::I32, 8, &noIndex, &nothing));
case uint32_t(SimdOp::I32x4ExtractLane):
CHECK(iter.readExtractLane(ValType::I32, 4, &noIndex, &nothing));
case uint32_t(SimdOp::I64x2ExtractLane):
CHECK(iter.readExtractLane(ValType::I64, 2, &noIndex, &nothing));
case uint32_t(SimdOp::F32x4ExtractLane):
CHECK(iter.readExtractLane(ValType::F32, 4, &noIndex, &nothing));
case uint32_t(SimdOp::F64x2ExtractLane):
CHECK(iter.readExtractLane(ValType::F64, 2, &noIndex, &nothing));
case uint32_t(SimdOp::I8x16Splat):
case uint32_t(SimdOp::I16x8Splat):
case uint32_t(SimdOp::I32x4Splat):
CHECK(iter.readConversion(ValType::I32, ValType::V128, &nothing));
case uint32_t(SimdOp::I64x2Splat):
CHECK(iter.readConversion(ValType::I64, ValType::V128, &nothing));
case uint32_t(SimdOp::F32x4Splat):
CHECK(iter.readConversion(ValType::F32, ValType::V128, &nothing));
case uint32_t(SimdOp::F64x2Splat):
CHECK(iter.readConversion(ValType::F64, ValType::V128, &nothing));
case uint32_t(SimdOp::I8x16AnyTrue):
case uint32_t(SimdOp::I8x16AllTrue):
case uint32_t(SimdOp::I16x8AnyTrue):
case uint32_t(SimdOp::I16x8AllTrue):
case uint32_t(SimdOp::I32x4AnyTrue):
case uint32_t(SimdOp::I32x4AllTrue):
case uint32_t(SimdOp::I8x16Bitmask):
case uint32_t(SimdOp::I16x8Bitmask):
case uint32_t(SimdOp::I32x4Bitmask):
CHECK(iter.readConversion(ValType::V128, ValType::I32, &nothing));
case uint32_t(SimdOp::I8x16ReplaceLane):
CHECK(iter.readReplaceLane(ValType::I32, 16, &noIndex, &nothing,
&nothing));
case uint32_t(SimdOp::I16x8ReplaceLane):
CHECK(iter.readReplaceLane(ValType::I32, 8, &noIndex, &nothing,
&nothing));
case uint32_t(SimdOp::I32x4ReplaceLane):
CHECK(iter.readReplaceLane(ValType::I32, 4, &noIndex, &nothing,
&nothing));
case uint32_t(SimdOp::I64x2ReplaceLane):
CHECK(iter.readReplaceLane(ValType::I64, 2, &noIndex, &nothing,
&nothing));
case uint32_t(SimdOp::F32x4ReplaceLane):
CHECK(iter.readReplaceLane(ValType::F32, 4, &noIndex, &nothing,
&nothing));
case uint32_t(SimdOp::F64x2ReplaceLane):
CHECK(iter.readReplaceLane(ValType::F64, 2, &noIndex, &nothing,
&nothing));
case uint32_t(SimdOp::I8x16Eq):
case uint32_t(SimdOp::I8x16Ne):
case uint32_t(SimdOp::I8x16LtS):
case uint32_t(SimdOp::I8x16LtU):
case uint32_t(SimdOp::I8x16GtS):
case uint32_t(SimdOp::I8x16GtU):
case uint32_t(SimdOp::I8x16LeS):
case uint32_t(SimdOp::I8x16LeU):
case uint32_t(SimdOp::I8x16GeS):
case uint32_t(SimdOp::I8x16GeU):
case uint32_t(SimdOp::I16x8Eq):
case uint32_t(SimdOp::I16x8Ne):
case uint32_t(SimdOp::I16x8LtS):
case uint32_t(SimdOp::I16x8LtU):
case uint32_t(SimdOp::I16x8GtS):
case uint32_t(SimdOp::I16x8GtU):
case uint32_t(SimdOp::I16x8LeS):
case uint32_t(SimdOp::I16x8LeU):
case uint32_t(SimdOp::I16x8GeS):
case uint32_t(SimdOp::I16x8GeU):
case uint32_t(SimdOp::I32x4Eq):
case uint32_t(SimdOp::I32x4Ne):
case uint32_t(SimdOp::I32x4LtS):
case uint32_t(SimdOp::I32x4LtU):
case uint32_t(SimdOp::I32x4GtS):
case uint32_t(SimdOp::I32x4GtU):
case uint32_t(SimdOp::I32x4LeS):
case uint32_t(SimdOp::I32x4LeU):
case uint32_t(SimdOp::I32x4GeS):
case uint32_t(SimdOp::I32x4GeU):
case uint32_t(SimdOp::F32x4Eq):
case uint32_t(SimdOp::F32x4Ne):
case uint32_t(SimdOp::F32x4Lt):
case uint32_t(SimdOp::F32x4Gt):
case uint32_t(SimdOp::F32x4Le):
case uint32_t(SimdOp::F32x4Ge):
case uint32_t(SimdOp::F64x2Eq):
case uint32_t(SimdOp::F64x2Ne):
case uint32_t(SimdOp::F64x2Lt):
case uint32_t(SimdOp::F64x2Gt):
case uint32_t(SimdOp::F64x2Le):
case uint32_t(SimdOp::F64x2Ge):
case uint32_t(SimdOp::V128And):
case uint32_t(SimdOp::V128Or):
case uint32_t(SimdOp::V128Xor):
case uint32_t(SimdOp::V128AndNot):
case uint32_t(SimdOp::I8x16AvgrU):
case uint32_t(SimdOp::I16x8AvgrU):
case uint32_t(SimdOp::I8x16Add):
case uint32_t(SimdOp::I8x16AddSaturateS):
case uint32_t(SimdOp::I8x16AddSaturateU):
case uint32_t(SimdOp::I8x16Sub):
case uint32_t(SimdOp::I8x16SubSaturateS):
case uint32_t(SimdOp::I8x16SubSaturateU):
case uint32_t(SimdOp::I8x16MinS):
case uint32_t(SimdOp::I8x16MinU):
case uint32_t(SimdOp::I8x16MaxS):
case uint32_t(SimdOp::I8x16MaxU):
case uint32_t(SimdOp::I16x8Add):
case uint32_t(SimdOp::I16x8AddSaturateS):
case uint32_t(SimdOp::I16x8AddSaturateU):
case uint32_t(SimdOp::I16x8Sub):
case uint32_t(SimdOp::I16x8SubSaturateS):
case uint32_t(SimdOp::I16x8SubSaturateU):
case uint32_t(SimdOp::I16x8Mul):
case uint32_t(SimdOp::I16x8MinS):
case uint32_t(SimdOp::I16x8MinU):
case uint32_t(SimdOp::I16x8MaxS):
case uint32_t(SimdOp::I16x8MaxU):
case uint32_t(SimdOp::I32x4Add):
case uint32_t(SimdOp::I32x4Sub):
case uint32_t(SimdOp::I32x4Mul):
case uint32_t(SimdOp::I32x4MinS):
case uint32_t(SimdOp::I32x4MinU):
case uint32_t(SimdOp::I32x4MaxS):
case uint32_t(SimdOp::I32x4MaxU):
case uint32_t(SimdOp::I64x2Add):
case uint32_t(SimdOp::I64x2Sub):
case uint32_t(SimdOp::I64x2Mul):
case uint32_t(SimdOp::F32x4Add):
case uint32_t(SimdOp::F32x4Sub):
case uint32_t(SimdOp::F32x4Mul):
case uint32_t(SimdOp::F32x4Div):
case uint32_t(SimdOp::F32x4Min):
case uint32_t(SimdOp::F32x4Max):
case uint32_t(SimdOp::F64x2Add):
case uint32_t(SimdOp::F64x2Sub):
case uint32_t(SimdOp::F64x2Mul):
case uint32_t(SimdOp::F64x2Div):
case uint32_t(SimdOp::F64x2Min):
case uint32_t(SimdOp::F64x2Max):
case uint32_t(SimdOp::I8x16NarrowSI16x8):
case uint32_t(SimdOp::I8x16NarrowUI16x8):
case uint32_t(SimdOp::I16x8NarrowSI32x4):
case uint32_t(SimdOp::I16x8NarrowUI32x4):
case uint32_t(SimdOp::V8x16Swizzle):
CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
case uint32_t(SimdOp::I8x16Neg):
case uint32_t(SimdOp::I16x8Neg):
case uint32_t(SimdOp::I16x8WidenLowSI8x16):
case uint32_t(SimdOp::I16x8WidenHighSI8x16):
case uint32_t(SimdOp::I16x8WidenLowUI8x16):
case uint32_t(SimdOp::I16x8WidenHighUI8x16):
case uint32_t(SimdOp::I32x4Neg):
case uint32_t(SimdOp::I32x4WidenLowSI16x8):
case uint32_t(SimdOp::I32x4WidenHighSI16x8):
case uint32_t(SimdOp::I32x4WidenLowUI16x8):
case uint32_t(SimdOp::I32x4WidenHighUI16x8):
case uint32_t(SimdOp::I32x4TruncSSatF32x4):
case uint32_t(SimdOp::I32x4TruncUSatF32x4):
case uint32_t(SimdOp::I64x2Neg):
case uint32_t(SimdOp::F32x4Abs):
case uint32_t(SimdOp::F32x4Neg):
case uint32_t(SimdOp::F32x4Sqrt):
case uint32_t(SimdOp::F32x4ConvertSI32x4):
case uint32_t(SimdOp::F32x4ConvertUI32x4):
case uint32_t(SimdOp::F64x2Abs):
case uint32_t(SimdOp::F64x2Neg):
case uint32_t(SimdOp::F64x2Sqrt):
case uint32_t(SimdOp::V128Not):
case uint32_t(SimdOp::I8x16Abs):
case uint32_t(SimdOp::I16x8Abs):
case uint32_t(SimdOp::I32x4Abs):
CHECK(iter.readUnary(ValType::V128, &nothing));
case uint32_t(SimdOp::I8x16Shl):
case uint32_t(SimdOp::I8x16ShrS):
case uint32_t(SimdOp::I8x16ShrU):
case uint32_t(SimdOp::I16x8Shl):
case uint32_t(SimdOp::I16x8ShrS):
case uint32_t(SimdOp::I16x8ShrU):
case uint32_t(SimdOp::I32x4Shl):
case uint32_t(SimdOp::I32x4ShrS):
case uint32_t(SimdOp::I32x4ShrU):
case uint32_t(SimdOp::I64x2Shl):
case uint32_t(SimdOp::I64x2ShrS):
case uint32_t(SimdOp::I64x2ShrU):
CHECK(iter.readVectorShift(&nothing, &nothing));
case uint32_t(SimdOp::V128Bitselect):
CHECK(iter.readVectorSelect(&nothing, &nothing, &nothing));
case uint32_t(SimdOp::V8x16Shuffle): {
V128 mask;
CHECK(iter.readVectorShuffle(&nothing, &nothing, &mask));
}
case uint32_t(SimdOp::V128Const): {
V128 noVector;
CHECK(iter.readV128Const(&noVector));
}
case uint32_t(SimdOp::V128Load): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoad(ValType::V128, 16, &addr));
}
case uint32_t(SimdOp::V8x16LoadSplat): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoadSplat(1, &addr));
}
case uint32_t(SimdOp::V16x8LoadSplat): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoadSplat(2, &addr));
}
case uint32_t(SimdOp::V32x4LoadSplat): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoadSplat(4, &addr));
}
case uint32_t(SimdOp::V64x2LoadSplat): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoadSplat(8, &addr));
}
case uint32_t(SimdOp::I16x8LoadS8x8):
case uint32_t(SimdOp::I16x8LoadU8x8): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoadExtend(&addr));
}
case uint32_t(SimdOp::I32x4LoadS16x4):
case uint32_t(SimdOp::I32x4LoadU16x4): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoadExtend(&addr));
}
case uint32_t(SimdOp::I64x2LoadS32x2):
case uint32_t(SimdOp::I64x2LoadU32x2): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readLoadExtend(&addr));
}
case uint32_t(SimdOp::V128Store): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readStore(ValType::V128, 16, &addr, &nothing));
}
default:
return iter.unrecognizedOpcode(&op);
}
break;
}
#endif // ENABLE_WASM_SIMD
case uint16_t(Op::MiscPrefix): {
switch (op.b1) {
case uint32_t(MiscOp::I32TruncSSatF32):
case uint32_t(MiscOp::I32TruncUSatF32):
CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
case uint32_t(MiscOp::I32TruncSSatF64):
case uint32_t(MiscOp::I32TruncUSatF64):
CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
case uint32_t(MiscOp::I64TruncSSatF32):
case uint32_t(MiscOp::I64TruncUSatF32):
CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
case uint32_t(MiscOp::I64TruncSSatF64):
case uint32_t(MiscOp::I64TruncUSatF64):
CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
case uint32_t(MiscOp::MemCopy): {
#ifndef ENABLE_WASM_BULKMEM_OPS
// Bulk memory must be available if shared memory is enabled.
if (env.sharedMemoryEnabled == Shareable::False) {
return iter.fail("bulk memory ops disabled");
}
#endif
uint32_t unusedDestMemIndex;
uint32_t unusedSrcMemIndex;
CHECK(iter.readMemOrTableCopy(/*isMem=*/true, &unusedDestMemIndex,
&nothing, &unusedSrcMemIndex,
&nothing, &nothing));
}
case uint32_t(MiscOp::DataDrop): {
#ifndef ENABLE_WASM_BULKMEM_OPS
// Bulk memory must be available if shared memory is enabled.
if (env.sharedMemoryEnabled == Shareable::False) {
return iter.fail("bulk memory ops disabled");
}
#endif
uint32_t unusedSegIndex;
CHECK(iter.readDataOrElemDrop(/*isData=*/true, &unusedSegIndex));
}
case uint32_t(MiscOp::MemFill):
#ifndef ENABLE_WASM_BULKMEM_OPS
// Bulk memory must be available if shared memory is enabled.
if (env.sharedMemoryEnabled == Shareable::False) {
return iter.fail("bulk memory ops disabled");
}
#endif
CHECK(iter.readMemFill(&nothing, &nothing, &nothing));
case uint32_t(MiscOp::MemInit): {
#ifndef ENABLE_WASM_BULKMEM_OPS
// Bulk memory must be available if shared memory is enabled.
if (env.sharedMemoryEnabled == Shareable::False) {
return iter.fail("bulk memory ops disabled");
}
#endif
uint32_t unusedSegIndex;
uint32_t unusedTableIndex;
CHECK(iter.readMemOrTableInit(/*isMem=*/true, &unusedSegIndex,
&unusedTableIndex, &nothing, &nothing,
&nothing));
}
case uint32_t(MiscOp::TableCopy): {
#ifndef ENABLE_WASM_BULKMEM_OPS
// Bulk memory must be available if shared memory is enabled.
if (env.sharedMemoryEnabled == Shareable::False) {
return iter.fail("bulk memory ops disabled");
}
#endif
uint32_t unusedDestTableIndex;
uint32_t unusedSrcTableIndex;
CHECK(iter.readMemOrTableCopy(
/*isMem=*/false, &unusedDestTableIndex, &nothing,
&unusedSrcTableIndex, &nothing, &nothing));
}
case uint32_t(MiscOp::ElemDrop): {
#ifndef ENABLE_WASM_BULKMEM_OPS
// Bulk memory must be available if shared memory is enabled.
if (env.sharedMemoryEnabled == Shareable::False) {
return iter.fail("bulk memory ops disabled");
}
#endif
uint32_t unusedSegIndex;
CHECK(iter.readDataOrElemDrop(/*isData=*/false, &unusedSegIndex));
}
case uint32_t(MiscOp::TableInit): {
#ifndef ENABLE_WASM_BULKMEM_OPS
// Bulk memory must be available if shared memory is enabled.
if (env.sharedMemoryEnabled == Shareable::False) {
return iter.fail("bulk memory ops disabled");
}
#endif
uint32_t unusedSegIndex;
uint32_t unusedTableIndex;
CHECK(iter.readMemOrTableInit(/*isMem=*/false, &unusedSegIndex,
&unusedTableIndex, &nothing, &nothing,
&nothing));
}
#ifdef ENABLE_WASM_REFTYPES
case uint32_t(MiscOp::TableFill): {
if (!env.refTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedTableIndex;
CHECK(iter.readTableFill(&unusedTableIndex, &nothing, &nothing,
&nothing));
}
case uint32_t(MiscOp::TableGrow): {
if (!env.refTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedTableIndex;
CHECK(iter.readTableGrow(&unusedTableIndex, &nothing, &nothing));
}
case uint32_t(MiscOp::TableSize): {
if (!env.refTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedTableIndex;
CHECK(iter.readTableSize(&unusedTableIndex));
}
#endif
default:
return iter.unrecognizedOpcode(&op);
}
break;
}
#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefEq): {
if (!env.gcTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
CHECK(iter.readComparison(RefType::any(), &nothing, &nothing));
}
#endif
#ifdef ENABLE_WASM_REFTYPES
case uint16_t(Op::RefFunc): {
uint32_t unusedIndex;
CHECK(iter.readRefFunc(&unusedIndex));
}
case uint16_t(Op::RefNull): {
if (!env.refTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
CHECK(iter.readRefNull());
}
case uint16_t(Op::RefIsNull): {
if (!env.refTypesEnabled()) {
return iter.unrecognizedOpcode(&op);
}
Nothing nothing;
CHECK(iter.readRefIsNull(&nothing));
}
#endif
case uint16_t(Op::ThreadPrefix): {
if (env.sharedMemoryEnabled == Shareable::False) {
return iter.unrecognizedOpcode(&op);
}
switch (op.b1) {
case uint32_t(ThreadOp::Wake): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readWake(&addr, &nothing));
}
case uint32_t(ThreadOp::I32Wait): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readWait(&addr, ValType::I32, 4, &nothing, &nothing));
}
case uint32_t(ThreadOp::I64Wait): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readWait(&addr, ValType::I64, 8, &nothing, &nothing));
}
case uint32_t(ThreadOp::Fence): {
CHECK(iter.readFence());
}
case uint32_t(ThreadOp::I32AtomicLoad): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicLoad(&addr, ValType::I32, 4));
}
case uint32_t(ThreadOp::I64AtomicLoad): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicLoad(&addr, ValType::I64, 8));
}
case uint32_t(ThreadOp::I32AtomicLoad8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicLoad(&addr, ValType::I32, 1));
}
case uint32_t(ThreadOp::I32AtomicLoad16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicLoad(&addr, ValType::I32, 2));
}
case uint32_t(ThreadOp::I64AtomicLoad8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicLoad(&addr, ValType::I64, 1));
}
case uint32_t(ThreadOp::I64AtomicLoad16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicLoad(&addr, ValType::I64, 2));
}
case uint32_t(ThreadOp::I64AtomicLoad32U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicLoad(&addr, ValType::I64, 4));
}
case uint32_t(ThreadOp::I32AtomicStore): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicStore(&addr, ValType::I32, 4, &nothing));
}
case uint32_t(ThreadOp::I64AtomicStore): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicStore(&addr, ValType::I64, 8, &nothing));
}
case uint32_t(ThreadOp::I32AtomicStore8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicStore(&addr, ValType::I32, 1, &nothing));
}
case uint32_t(ThreadOp::I32AtomicStore16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicStore(&addr, ValType::I32, 2, &nothing));
}
case uint32_t(ThreadOp::I64AtomicStore8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicStore(&addr, ValType::I64, 1, &nothing));
}
case uint32_t(ThreadOp::I64AtomicStore16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicStore(&addr, ValType::I64, 2, &nothing));
}
case uint32_t(ThreadOp::I64AtomicStore32U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicStore(&addr, ValType::I64, 4, &nothing));
}
case uint32_t(ThreadOp::I32AtomicAdd):
case uint32_t(ThreadOp::I32AtomicSub):
case uint32_t(ThreadOp::I32AtomicAnd):
case uint32_t(ThreadOp::I32AtomicOr):
case uint32_t(ThreadOp::I32AtomicXor):
case uint32_t(ThreadOp::I32AtomicXchg): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicRMW(&addr, ValType::I32, 4, &nothing));
}
case uint32_t(ThreadOp::I64AtomicAdd):
case uint32_t(ThreadOp::I64AtomicSub):
case uint32_t(ThreadOp::I64AtomicAnd):
case uint32_t(ThreadOp::I64AtomicOr):
case uint32_t(ThreadOp::I64AtomicXor):
case uint32_t(ThreadOp::I64AtomicXchg): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicRMW(&addr, ValType::I64, 8, &nothing));
}
case uint32_t(ThreadOp::I32AtomicAdd8U):
case uint32_t(ThreadOp::I32AtomicSub8U):
case uint32_t(ThreadOp::I32AtomicAnd8U):
case uint32_t(ThreadOp::I32AtomicOr8U):
case uint32_t(ThreadOp::I32AtomicXor8U):
case uint32_t(ThreadOp::I32AtomicXchg8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicRMW(&addr, ValType::I32, 1, &nothing));
}
case uint32_t(ThreadOp::I32AtomicAdd16U):
case uint32_t(ThreadOp::I32AtomicSub16U):
case uint32_t(ThreadOp::I32AtomicAnd16U):
case uint32_t(ThreadOp::I32AtomicOr16U):
case uint32_t(ThreadOp::I32AtomicXor16U):
case uint32_t(ThreadOp::I32AtomicXchg16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicRMW(&addr, ValType::I32, 2, &nothing));
}
case uint32_t(ThreadOp::I64AtomicAdd8U):
case uint32_t(ThreadOp::I64AtomicSub8U):
case uint32_t(ThreadOp::I64AtomicAnd8U):
case uint32_t(ThreadOp::I64AtomicOr8U):
case uint32_t(ThreadOp::I64AtomicXor8U):
case uint32_t(ThreadOp::I64AtomicXchg8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicRMW(&addr, ValType::I64, 1, &nothing));
}
case uint32_t(ThreadOp::I64AtomicAdd16U):
case uint32_t(ThreadOp::I64AtomicSub16U):
case uint32_t(ThreadOp::I64AtomicAnd16U):
case uint32_t(ThreadOp::I64AtomicOr16U):
case uint32_t(ThreadOp::I64AtomicXor16U):
case uint32_t(ThreadOp::I64AtomicXchg16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicRMW(&addr, ValType::I64, 2, &nothing));
}
case uint32_t(ThreadOp::I64AtomicAdd32U):
case uint32_t(ThreadOp::I64AtomicSub32U):
case uint32_t(ThreadOp::I64AtomicAnd32U):
case uint32_t(ThreadOp::I64AtomicOr32U):
case uint32_t(ThreadOp::I64AtomicXor32U):
case uint32_t(ThreadOp::I64AtomicXchg32U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicRMW(&addr, ValType::I64, 4, &nothing));
}
case uint32_t(ThreadOp::I32AtomicCmpXchg): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 4, &nothing,
&nothing));
}
case uint32_t(ThreadOp::I64AtomicCmpXchg): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 8, &nothing,
&nothing));
}
case uint32_t(ThreadOp::I32AtomicCmpXchg8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 1, &nothing,
&nothing));
}
case uint32_t(ThreadOp::I32AtomicCmpXchg16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 2, &nothing,
&nothing));
}
case uint32_t(ThreadOp::I64AtomicCmpXchg8U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 1, &nothing,
&nothing));
}
case uint32_t(ThreadOp::I64AtomicCmpXchg16U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 2, &nothing,
&nothing));
}
case uint32_t(ThreadOp::I64AtomicCmpXchg32U): {
LinearMemoryAddress<Nothing> addr;
CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 4, &nothing,
&nothing));
}
default:
return iter.unrecognizedOpcode(&op);
}
break;
}
case uint16_t(Op::MozPrefix):
return iter.unrecognizedOpcode(&op);
default:
return iter.unrecognizedOpcode(&op);
}
}
MOZ_CRASH("unreachable");
#undef CHECK
}
bool wasm::ValidateFunctionBody(const ModuleEnvironment& env,
uint32_t funcIndex, uint32_t bodySize,
Decoder& d) {
ValTypeVector locals;
if (!locals.appendAll(env.funcTypes[funcIndex]->args())) {
return false;
}
const uint8_t* bodyBegin = d.currentPosition();
if (!DecodeLocalEntries(d, env.types, env.refTypesEnabled(),
env.gcTypesEnabled(), &locals)) {
return false;
}
if (!DecodeFunctionBodyExprs(env, funcIndex, locals, bodyBegin + bodySize,
&d)) {
return false;
}
return true;
}
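// Example (editor's sketch, error text illustrative): a code-section decoder
// positioned at the start of a function body would typically do:
//
//   uint32_t bodySize;
//   if (!d.readVarU32(&bodySize)) {
//     return d.fail("expected function body size");
//   }
//   if (!ValidateFunctionBody(env, funcIndex, bodySize, d)) {
//     return false;
//   }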
// Section macros.
static bool DecodePreamble(Decoder& d) {
if (d.bytesRemain() > MaxModuleBytes) {
return d.fail("module too big");
}
uint32_t u32;
if (!d.readFixedU32(&u32) || u32 != MagicNumber) {
return d.fail("failed to match magic number");
}
if (!d.readFixedU32(&u32) || u32 != EncodingVersion) {
return d.failf("binary version 0x%" PRIx32
" does not match expected version 0x%" PRIx32,
u32, EncodingVersion);
}
return true;
}
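// Editor's note: the preamble checked above is the 8-byte header required by
// the wasm binary format: the magic number "\0asm" (bytes 0x00 0x61 0x73
// 0x6d) followed by the encoding version as a little-endian u32 (currently
// version 1).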
enum class TypeState { None, Struct, ForwardStruct, Func };
typedef Vector<TypeState, 0, SystemAllocPolicy> TypeStateVector;
static bool ValidateTypeState(Decoder& d, TypeStateVector* typeState,
ValType type) {
if (!type.isTypeIndex()) {
return true;
}
uint32_t refTypeIndex = type.refType().typeIndex();
switch ((*typeState)[refTypeIndex]) {
case TypeState::None:
(*typeState)[refTypeIndex] = TypeState::ForwardStruct;
break;
case TypeState::Struct:
case TypeState::ForwardStruct:
break;
case TypeState::Func:
return d.fail("ref does not reference a struct type");
}
return true;
}
#ifdef WASM_PRIVATE_REFTYPES
static bool FuncTypeIsJSCompatible(Decoder& d, const FuncType& ft) {
if (ft.exposesTypeIndex()) {
return d.fail("cannot expose indexed reference type");
}
return true;
}
#endif
static bool DecodeTypeVector(Decoder& d, ModuleEnvironment* env,
TypeStateVector* typeState, uint32_t count,
ValTypeVector* types) {
if (!types->resize(count)) {
return false;
}
for (uint32_t i = 0; i < count; i++) {
if (!d.readValType(env->types.length(), env->refTypesEnabled(),
env->gcTypesEnabled(), &(*types)[i])) {
return false;
}
if (!ValidateTypeState(d, typeState, (*types)[i])) {
return false;
}
}
return true;
}
static bool DecodeFuncType(Decoder& d, ModuleEnvironment* env,
TypeStateVector* typeState, uint32_t typeIndex) {
uint32_t numArgs;
if (!d.readVarU32(&numArgs)) {
return d.fail("bad number of function args");
}
if (numArgs > MaxParams) {
return d.fail("too many arguments in signature");
}
ValTypeVector args;
if (!DecodeTypeVector(d, env, typeState, numArgs, &args)) {
return false;
}
uint32_t numResults;
if (!d.readVarU32(&numResults)) {
return d.fail("bad number of function returns");
}
if (numResults > env->funcMaxResults()) {
return d.fail("too many returns in signature");
}
ValTypeVector results;
if (!DecodeTypeVector(d, env, typeState, numResults, &results)) {
return false;
}
if ((*typeState)[typeIndex] != TypeState::None) {
return d.fail("function type entry referenced as struct");
}
env->types[typeIndex] =
TypeDef(FuncType(std::move(args), std::move(results)));
(*typeState)[typeIndex] = TypeState::Func;
return true;
}
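// Editor's note: in the binary format a function type entry is the form byte
// 0x60 followed by a vector of parameter types and a vector of result types;
// DecodeFuncType above is entered after its caller has already consumed the
// form byte.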
static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
TypeStateVector* typeState, uint32_t typeIndex) {
if (!env->gcTypesEnabled()) {
return d.fail("Structure types not enabled");
}
uint32_t numFields;
if (!d.readVarU32(&numFields)) {
return d.fail("Bad number of fields");
}
if (numFields > MaxStructFields) {
return d.fail("too many fields in structure");
}
StructFieldVector fields;
if (!fields.resize(numFields)) {
return false;
}
StructMetaTypeDescr::Layout layout;
for (uint32_t i = 0; i < numFields; i++) {
if (!d.readValType(env->types.length(), env->refTypesEnabled(),
env->gcTypesEnabled(), &fields[i].type)) {
return false;
}
uint8_t flags;
if (!d.readFixedU8(&flags)) {
return d.fail("expected flag");
}
if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
return d.fail("garbage flag bits");
}
fields[i].isMutable = flags & uint8_t(FieldFlags::Mutable);
if (!ValidateTypeState(d, typeState, fields[i].type)) {
return false;
}
CheckedInt32 offset;
switch (fields[i].type.kind()) {
case ValType::I32:
offset = layout.addScalar(Scalar::Int32);
break;
case ValType::I64:
offset = layout.addScalar(Scalar::Int64);
break;
case ValType::F32:
offset = layout.addScalar(Scalar::Float32);
break;
case ValType::F64:
offset = layout.addScalar(Scalar::Float64);
break;
case ValType::V128:
offset = layout.addScalar(Scalar::Simd128);
break;
case ValType::Ref:
switch (fields[i].type.refTypeKind()) {
case RefType::TypeIndex:
offset = layout.addReference(ReferenceType::TYPE_OBJECT);
break;
case RefType::Func:
case RefType::Any:
offset = layout.addReference(ReferenceType::TYPE_WASM_ANYREF);