/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmInstance.h"

#include <algorithm>

#include "jit/AtomicOperations.h"
#include "jit/Disassemble.h"
#include "jit/InlinableNatives.h"
#include "jit/JitCommon.h"
#include "jit/JitRealm.h"
#include "jit/JitScript.h"
#include "util/StringBuffer.h"
#include "util/Text.h"
#include "vm/BigIntType.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmModule.h"
#include "wasm/WasmStubs.h"

#include "gc/StoreBuffer-inl.h"
#include "vm/ArrayBufferObject-inl.h"
#include "vm/JSObject-inl.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::BitwiseCast;

typedef CheckedInt<uint32_t> CheckedU32;

class FuncTypeIdSet {
  typedef HashMap<const FuncType*, uint32_t, FuncTypeHashPolicy,
                  SystemAllocPolicy>
      Map;
  Map map_;

 public:
  ~FuncTypeIdSet() {
    MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), map_.empty());
  }

  bool allocateFuncTypeId(JSContext* cx, const FuncType& funcType,
                          const void** funcTypeId) {
    Map::AddPtr p = map_.lookupForAdd(funcType);
    if (p) {
      MOZ_ASSERT(p->value() > 0);
      p->value()++;
      *funcTypeId = p->key();
      return true;
    }

    UniquePtr<FuncType> clone = MakeUnique<FuncType>();
    if (!clone || !clone->clone(funcType) || !map_.add(p, clone.get(), 1)) {
      ReportOutOfMemory(cx);
      return false;
    }

    *funcTypeId = clone.release();
    MOZ_ASSERT(!(uintptr_t(*funcTypeId) & FuncTypeIdDesc::ImmediateBit));
    return true;
  }

  void deallocateFuncTypeId(const FuncType& funcType, const void* funcTypeId) {
    Map::Ptr p = map_.lookup(funcType);
    MOZ_RELEASE_ASSERT(p && p->key() == funcTypeId && p->value() > 0);

    p->value()--;
    if (!p->value()) {
      js_delete(p->key());
      map_.remove(p);
    }
  }
};

ExclusiveData<FuncTypeIdSet> funcTypeIdSet(mutexid::WasmFuncTypeIdSet);

const void** Instance::addressOfFuncTypeId(
    const FuncTypeIdDesc& funcTypeId) const {
  return (const void**)(globalData() + funcTypeId.globalDataOffset());
}

FuncImportTls& Instance::funcImportTls(const FuncImport& fi) {
  return *(FuncImportTls*)(globalData() + fi.tlsDataOffset());
}

TableTls& Instance::tableTls(const TableDesc& td) const {
  return *(TableTls*)(globalData() + td.globalDataOffset);
}

bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex,
                          unsigned argc, const uint64_t* argv,
                          MutableHandleValue rval) {
  AssertRealmUnchanged aru(cx);

  Tier tier = code().bestTier();

  const FuncImport& fi = metadata(tier).funcImports[funcImportIndex];

  InvokeArgs args(cx);
  if (!args.init(cx, argc)) {
    return false;
  }

  if (fi.funcType().hasI64ArgOrRet() && !HasI64BigIntSupport(cx)) {
    JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                             JSMSG_WASM_BAD_I64_TYPE);
    return false;
  }

  MOZ_ASSERT(fi.funcType().args().length() == argc);
  for (size_t i = 0; i < argc; i++) {
    switch (fi.funcType().args()[i].code()) {
      case ValType::I32:
        args[i].set(Int32Value(*(int32_t*)&argv[i]));
        break;
      case ValType::F32:
        args[i].set(JS::CanonicalizedDoubleValue(*(float*)&argv[i]));
        break;
      case ValType::F64:
        args[i].set(JS::CanonicalizedDoubleValue(*(double*)&argv[i]));
        break;
      case ValType::FuncRef:
        args[i].set(UnboxFuncRef(FuncRef::fromCompiledCode(*(void**)&argv[i])));
        break;
      case ValType::AnyRef:
        args[i].set(UnboxAnyRef(AnyRef::fromCompiledCode(*(void**)&argv[i])));
        break;
      case ValType::I64: {
#ifdef ENABLE_WASM_BIGINT
        MOZ_ASSERT(HasI64BigIntSupport(cx));
        // If bi is manipulated other than test & storing, it would need
        // to be rooted here.
        BigInt* bi = BigInt::createFromInt64(cx, *(int64_t*)&argv[i]);
        if (!bi) {
          return false;
        }
        args[i].set(BigIntValue(bi));
        break;
#else
        MOZ_CRASH("unhandled type in callImport");
#endif
      }
      case ValType::Ref:
        MOZ_CRASH("temporarily unsupported Ref type in callImport");
      case ValType::NullRef:
        MOZ_CRASH("NullRef not expressible");
    }
  }

  FuncImportTls& import = funcImportTls(fi);
  RootedFunction importFun(cx, import.fun);
  MOZ_ASSERT(cx->realm() == importFun->realm());

  RootedValue fval(cx, ObjectValue(*importFun));
  RootedValue thisv(cx, UndefinedValue());
  if (!Call(cx, fval, thisv, args, rval)) {
    return false;
  }

  if (!JitOptions.enableWasmJitExit) {
    return true;
  }

  // The import may already have become optimized.
  for (auto t : code().tiers()) {
    void* jitExitCode = codeBase(t) + fi.jitExitCodeOffset();
    if (import.code == jitExitCode) {
      return true;
    }
  }
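  // Otherwise, see whether it is now worthwhile to patch the import so that
  // it calls the jit exit for the current best tier directly.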

  void* jitExitCode = codeBase(tier) + fi.jitExitCodeOffset();

  // Test if the function is JIT compiled.
  if (!importFun->hasScript()) {
    return true;
  }

  JSScript* script = importFun->nonLazyScript();
  if (!script->hasJitScript()) {
    return true;
  }

  // Ensure the argument types are included in the argument TypeSets stored in
  // the JitScript. This is necessary for Ion, because the import will use
  // the skip-arg-checks entry point. When the JitScript is discarded the
  // import is patched back.
  AutoSweepJitScript sweep(script);
  JitScript* jitScript = script->jitScript();

  StackTypeSet* thisTypes = jitScript->thisTypes(sweep, script);
  if (!thisTypes->hasType(TypeSet::UndefinedType())) {
    return true;
  }

  // Functions with unsupported reference types in signature don't have a jit
  // exit at the moment.
  if (fi.funcType().temporarilyUnsupportedReftypeForExit()) {
    return true;
  }

  const ValTypeVector& importArgs = fi.funcType().args();

  size_t numKnownArgs = std::min(importArgs.length(), importFun->nargs());
  for (uint32_t i = 0; i < numKnownArgs; i++) {
    StackTypeSet* argTypes = jitScript->argTypes(sweep, script, i);
    switch (importArgs[i].code()) {
      case ValType::I32:
        if (!argTypes->hasType(TypeSet::Int32Type())) {
          return true;
        }
        break;
      case ValType::F32:
        if (!argTypes->hasType(TypeSet::DoubleType())) {
          return true;
        }
        break;
      case ValType::F64:
        if (!argTypes->hasType(TypeSet::DoubleType())) {
          return true;
        }
        break;
      case ValType::I64:
#ifdef ENABLE_WASM_BIGINT
        if (!argTypes->hasType(TypeSet::BigIntType())) {
          return true;
        }
        break;
#else
        MOZ_CRASH("NYI");
#endif
      case ValType::AnyRef:
        // We don't know what type the value will be, so we can't really check
        // whether the callee will accept it. It doesn't make much sense to see
        // if the callee accepts all of the types an AnyRef might represent
        // because most callees will not have been exposed to all those types
        // and so we'll never pass the test. Instead, we must use the callee's
        // arg-type-checking entry point, and not check anything here. See
        // FuncType::jitExitRequiresArgCheck().
        break;
      case ValType::FuncRef:
        // We handle FuncRef as we do AnyRef: by checking the type dynamically
        // in the callee. Code in the stubs layer must box up the FuncRef as a
        // Value.
        break;
      case ValType::Ref:
        MOZ_CRASH("case guarded above");
      case ValType::NullRef:
        MOZ_CRASH("NullRef not expressible");
    }
  }

  // These arguments will be filled with undefined at runtime by the
  // arguments rectifier: check that the imported function can handle
  // undefined there.
  for (uint32_t i = importArgs.length(); i < importFun->nargs(); i++) {
    StackTypeSet* argTypes = jitScript->argTypes(sweep, script, i);
    if (!argTypes->hasType(TypeSet::UndefinedType())) {
      return true;
    }
  }

  // Let's optimize it!
  if (!jitScript->addDependentWasmImport(cx, *this, funcImportIndex)) {
    return false;
  }

  import.code = jitExitCode;
  import.jitScript = jitScript;
  return true;
}

/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_void(Instance* instance, int32_t funcImportIndex,
                          int32_t argc, uint64_t* argv) {
  JSContext* cx = TlsContext.get();
  RootedValue rval(cx);
  return instance->callImport(cx, funcImportIndex, argc, argv, &rval);
}

/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_i32(Instance* instance, int32_t funcImportIndex,
                         int32_t argc, uint64_t* argv) {
  JSContext* cx = TlsContext.get();
  RootedValue rval(cx);
  if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval)) {
    return false;
  }

  return ToInt32(cx, rval, (int32_t*)argv);
}

/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_i64(Instance* instance, int32_t funcImportIndex,
                         int32_t argc, uint64_t* argv) {
  JSContext* cx = TlsContext.get();
#ifdef ENABLE_WASM_BIGINT
  RootedValue rval(cx);
  if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval)) {
    return false;
  }

  JS_TRY_VAR_OR_RETURN_FALSE(cx, *argv, ToBigInt64(cx, rval));
  return true;
#else
  JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                           JSMSG_WASM_BAD_I64_TYPE);
  return false;
#endif
}

/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_f64(Instance* instance, int32_t funcImportIndex,
                         int32_t argc, uint64_t* argv) {
  JSContext* cx = TlsContext.get();
  RootedValue rval(cx);
  if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval)) {
    return false;
  }

  return ToNumber(cx, rval, (double*)argv);
}

/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_anyref(Instance* instance, int32_t funcImportIndex,
                            int32_t argc, uint64_t* argv) {
  JSContext* cx = TlsContext.get();
  RootedValue rval(cx);
  if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval)) {
    return false;
  }
  RootedAnyRef result(cx, AnyRef::null());
  if (!BoxAnyRef(cx, rval, &result)) {
    return false;
  }
  static_assert(sizeof(argv[0]) >= sizeof(void*), "fits");
  *(void**)argv = result.get().forCompiledCode();
  return true;
}

/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_funcref(Instance* instance, int32_t funcImportIndex,
                             int32_t argc, uint64_t* argv) {
  JSContext* cx = TlsContext.get();
  RootedValue rval(cx);
  if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval)) {
    return false;
  }

  RootedFunction fun(cx);
  if (!CheckFuncRefValue(cx, rval, &fun)) {
    return false;
  }

  *(void**)argv = fun;
  return true;
}

/* static */ uint32_t Instance::memoryGrow_i32(Instance* instance,
                                               uint32_t delta) {
  MOZ_ASSERT(SASigMemoryGrow.failureMode == FailureMode::Infallible);
  MOZ_ASSERT(!instance->isAsmJS());

  JSContext* cx = TlsContext.get();
  RootedWasmMemoryObject memory(cx, instance->memory_);

  uint32_t ret = WasmMemoryObject::grow(memory, delta, cx);

  // If there has been a moving grow, this Instance should have been notified.
  MOZ_RELEASE_ASSERT(instance->tlsData()->memoryBase ==
                     instance->memory_->buffer().dataPointerEither());

  return ret;
}

/* static */ uint32_t Instance::memorySize_i32(Instance* instance) {
  MOZ_ASSERT(SASigMemorySize.failureMode == FailureMode::Infallible);

  // This invariant must hold when running Wasm code. Assert it here so we can
  // write tests for cross-realm calls.
  MOZ_ASSERT(TlsContext.get()->realm() == instance->realm());

  uint32_t byteLength = instance->memory()->volatileMemoryLength();
  MOZ_ASSERT(byteLength % wasm::PageSize == 0);
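  // wasm::PageSize is 64 KiB, so this yields the current memory size in wasm
  // pages, as required by memory.size.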
  return byteLength / wasm::PageSize;
}

template <typename T>
static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
                           int64_t timeout_ns) {
  JSContext* cx = TlsContext.get();

  if (byteOffset & (sizeof(T) - 1)) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_UNALIGNED_ACCESS);
    return -1;
  }

  if (byteOffset + sizeof(T) > instance->memory()->volatileMemoryLength()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  mozilla::Maybe<mozilla::TimeDuration> timeout;
  if (timeout_ns >= 0) {
    timeout = mozilla::Some(
        mozilla::TimeDuration::FromMicroseconds(timeout_ns / 1000));
  }
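  // A negative timeout_ns leaves `timeout` as Nothing, which atomics_wait_impl
  // treats as an unbounded wait.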

  switch (atomics_wait_impl(cx, instance->sharedMemoryBuffer(), byteOffset,
                            value, timeout)) {
    case FutexThread::WaitResult::OK:
      return 0;
    case FutexThread::WaitResult::NotEqual:
      return 1;
    case FutexThread::WaitResult::TimedOut:
      return 2;
    case FutexThread::WaitResult::Error:
      return -1;
    default:
      MOZ_CRASH();
  }
}

/* static */ int32_t Instance::wait_i32(Instance* instance, uint32_t byteOffset,
                                        int32_t value, int64_t timeout_ns) {
  MOZ_ASSERT(SASigWaitI32.failureMode == FailureMode::FailOnNegI32);
  return PerformWait<int32_t>(instance, byteOffset, value, timeout_ns);
}

/* static */ int32_t Instance::wait_i64(Instance* instance, uint32_t byteOffset,
                                        int64_t value, int64_t timeout_ns) {
  MOZ_ASSERT(SASigWaitI64.failureMode == FailureMode::FailOnNegI32);
  return PerformWait<int64_t>(instance, byteOffset, value, timeout_ns);
}

/* static */ int32_t Instance::wake(Instance* instance, uint32_t byteOffset,
                                    int32_t count) {
  MOZ_ASSERT(SASigWake.failureMode == FailureMode::FailOnNegI32);

  JSContext* cx = TlsContext.get();

  // The alignment guard is not in the wasm spec as of 2017-11-02, but is
  // considered likely to appear, as 4-byte alignment is required for WAKE by
  // the spec's validation algorithm.

  if (byteOffset & 3) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_UNALIGNED_ACCESS);
    return -1;
  }

  if (byteOffset >= instance->memory()->volatileMemoryLength()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(),
                                      byteOffset, int64_t(count));

  if (woken > INT32_MAX) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_WAKE_OVERFLOW);
    return -1;
  }

  return int32_t(woken);
}

template <typename T, typename F>
inline int32_t WasmMemoryCopy(T memBase, uint32_t memLen,
                              uint32_t dstByteOffset, uint32_t srcByteOffset,
                              uint32_t len, F memMove) {
  // Bounds check and deal with arithmetic overflow.
  uint64_t dstOffsetLimit = uint64_t(dstByteOffset) + uint64_t(len);
  uint64_t srcOffsetLimit = uint64_t(srcByteOffset) + uint64_t(len);
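  // Summing in 64 bits means that, e.g., dstByteOffset = 0xfffffff0 with
  // len = 0x20 yields 0x100000010, which exceeds any 32-bit memLen and is
  // rejected below instead of wrapping around to a small in-bounds offset.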

  if (dstOffsetLimit > memLen || srcOffsetLimit > memLen) {
    JSContext* cx = TlsContext.get();
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  memMove(memBase + dstByteOffset, memBase + srcByteOffset, size_t(len));
  return 0;
}

/* static */ int32_t Instance::memCopy(Instance* instance,
                                       uint32_t dstByteOffset,
                                       uint32_t srcByteOffset, uint32_t len,
                                       uint8_t* memBase) {
  MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);

  const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
  uint32_t memLen = rawBuf->byteLength();

  return WasmMemoryCopy(memBase, memLen, dstByteOffset, srcByteOffset, len,
                        memmove);
}

/* static */ int32_t Instance::memCopyShared(Instance* instance,
                                             uint32_t dstByteOffset,
                                             uint32_t srcByteOffset,
                                             uint32_t len, uint8_t* memBase) {
  MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);

  typedef void (*RacyMemMove)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);

  const SharedArrayRawBuffer* rawBuf =
      SharedArrayRawBuffer::fromDataPtr(memBase);
  uint32_t memLen = rawBuf->volatileByteLength();

  return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
      SharedMem<uint8_t*>::shared(memBase), memLen, dstByteOffset,
      srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
}

/* static */ int32_t Instance::dataDrop(Instance* instance, uint32_t segIndex) {
  MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);

  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
                     "ensured by validation");

  if (!instance->passiveDataSegments_[segIndex]) {
    return 0;
  }

  SharedDataSegment& segRefPtr = instance->passiveDataSegments_[segIndex];
  MOZ_RELEASE_ASSERT(!segRefPtr->active());

  // Drop this instance's reference to the DataSegment so it can be released.
  segRefPtr = nullptr;
  return 0;
}

template <typename T, typename F>
inline int32_t WasmMemoryFill(T memBase, uint32_t memLen, uint32_t byteOffset,
                              uint32_t value, uint32_t len, F memSet) {
  // Bounds check and deal with arithmetic overflow.
  uint64_t offsetLimit = uint64_t(byteOffset) + uint64_t(len);

  if (offsetLimit > memLen) {
    JSContext* cx = TlsContext.get();
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  // The required write direction is upward, but that is not currently
  // observable as there are no fences nor any read/write protect operation.
  memSet(memBase + byteOffset, int(value), size_t(len));
  return 0;
}

/* static */ int32_t Instance::memFill(Instance* instance, uint32_t byteOffset,
                                       uint32_t value, uint32_t len,
                                       uint8_t* memBase) {
  MOZ_ASSERT(SASigMemFill.failureMode == FailureMode::FailOnNegI32);

  const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
  uint32_t memLen = rawBuf->byteLength();

  return WasmMemoryFill(memBase, memLen, byteOffset, value, len, memset);
}

/* static */ int32_t Instance::memFillShared(Instance* instance,
                                             uint32_t byteOffset,
                                             uint32_t value, uint32_t len,
                                             uint8_t* memBase) {
  MOZ_ASSERT(SASigMemFill.failureMode == FailureMode::FailOnNegI32);

  const SharedArrayRawBuffer* rawBuf =
      SharedArrayRawBuffer::fromDataPtr(memBase);
  uint32_t memLen = rawBuf->volatileByteLength();

  return WasmMemoryFill(SharedMem<uint8_t*>::shared(memBase), memLen,
                        byteOffset, value, len,
                        AtomicOperations::memsetSafeWhenRacy);
}

/* static */ int32_t Instance::memInit(Instance* instance, uint32_t dstOffset,
                                       uint32_t srcOffset, uint32_t len,
                                       uint32_t segIndex) {
  MOZ_ASSERT(SASigMemInit.failureMode == FailureMode::FailOnNegI32);

  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
                     "ensured by validation");

  if (!instance->passiveDataSegments_[segIndex]) {
    if (len == 0 && srcOffset == 0) {
      return 0;
    }

    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  const DataSegment& seg = *instance->passiveDataSegments_[segIndex];
  MOZ_RELEASE_ASSERT(!seg.active());

  const uint32_t segLen = seg.bytes.length();

  WasmMemoryObject* mem = instance->memory();
  const uint32_t memLen = mem->volatileMemoryLength();

  // We are proposing to copy
  //
  //   seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
  // to
  //   memoryBase[ dstOffset .. dstOffset + len - 1 ]

  // Bounds check and deal with arithmetic overflow.
  uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
  uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);

  if (dstOffsetLimit > memLen || srcOffsetLimit > segLen) {
    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  // The required read/write direction is upward, but that is not currently
  // observable as there are no fences nor any read/write protect operation.
  SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
  if (mem->isShared()) {
    AtomicOperations::memcpySafeWhenRacy(
        dataPtr + dstOffset, (uint8_t*)seg.bytes.begin() + srcOffset, len);
  } else {
    uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
    memcpy(rawBuf + dstOffset, (const char*)seg.bytes.begin() + srcOffset, len);
  }
  return 0;
}

/* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
                                         uint32_t srcOffset, uint32_t len,
                                         uint32_t dstTableIndex,
                                         uint32_t srcTableIndex) {
  MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);

  const SharedTable& srcTable = instance->tables()[srcTableIndex];
  uint32_t srcTableLen = srcTable->length();

  const SharedTable& dstTable = instance->tables()[dstTableIndex];
  uint32_t dstTableLen = dstTable->length();

  // Bounds check and deal with arithmetic overflow.
  uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
  uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;

  if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  bool isOOM = false;
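  // When both ranges lie in the same table and dst > src, copy backwards so
  // each source entry is read before it can be overwritten (memmove-style
  // semantics); otherwise a forward copy is safe.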

  if (&srcTable == &dstTable && dstOffset > srcOffset) {
    for (uint32_t i = len; i > 0; i--) {
      if (!dstTable->copy(*srcTable, dstOffset + (i - 1),
                          srcOffset + (i - 1))) {
        isOOM = true;
        break;
      }
    }
  } else if (&srcTable == &dstTable && dstOffset == srcOffset) {
    // No-op
  } else {
    for (uint32_t i = 0; i < len; i++) {
      if (!dstTable->copy(*srcTable, dstOffset + i, srcOffset + i)) {
        isOOM = true;
        break;
      }
    }
  }

  if (isOOM) {
    return -1;
  }
  return 0;
}

/* static */ int32_t Instance::elemDrop(Instance* instance, uint32_t segIndex) {
  MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);

  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
                     "ensured by validation");

  if (!instance->passiveElemSegments_[segIndex]) {
    return 0;
  }

  SharedElemSegment& segRefPtr = instance->passiveElemSegments_[segIndex];
  MOZ_RELEASE_ASSERT(!segRefPtr->active());

  // Drop this instance's reference to the ElemSegment so it can be released.
  segRefPtr = nullptr;
  return 0;
}

bool Instance::initElems(uint32_t tableIndex, const ElemSegment& seg,
                         uint32_t dstOffset, uint32_t srcOffset, uint32_t len) {
  Table& table = *tables_[tableIndex];
  MOZ_ASSERT(dstOffset <= table.length());
  MOZ_ASSERT(len <= table.length() - dstOffset);

  Tier tier = code().bestTier();
  const MetadataTier& metadataTier = metadata(tier);
  const FuncImportVector& funcImports = metadataTier.funcImports;
  const CodeRangeVector& codeRanges = metadataTier.codeRanges;
  const Uint32Vector& funcToCodeRange = metadataTier.funcToCodeRange;
  const Uint32Vector& elemFuncIndices = seg.elemFuncIndices;
  MOZ_ASSERT(srcOffset <= elemFuncIndices.length());
  MOZ_ASSERT(len <= elemFuncIndices.length() - srcOffset);

  uint8_t* codeBaseTier = codeBase(tier);
  for (uint32_t i = 0; i < len; i++) {
    uint32_t funcIndex = elemFuncIndices[srcOffset + i];
    if (funcIndex == NullFuncIndex) {
      table.setNull(dstOffset + i);
    } else if (!table.isFunction()) {
      // Note, fnref must be rooted if we do anything more than just store it.
      void* fnref = Instance::funcRef(this, funcIndex);
      if (fnref == AnyRef::invalid().forCompiledCode()) {
        return false;  // OOM, which has already been reported.
      }
      table.fillAnyRef(dstOffset + i, 1, AnyRef::fromCompiledCode(fnref));
    } else {
      if (funcIndex < funcImports.length()) {
        FuncImportTls& import = funcImportTls(funcImports[funcIndex]);
        JSFunction* fun = import.fun;
        if (IsWasmExportedFunction(fun)) {
          // This element is a wasm function imported from another
          // instance. To preserve the === function identity required by
          // the JS embedding spec, we must set the element to the
          // imported function's underlying CodeRange.funcTableEntry and
          // Instance so that future Table.get()s produce the same
          // function object as was imported.
          WasmInstanceObject* calleeInstanceObj =
              ExportedFunctionToInstanceObject(fun);
          Instance& calleeInstance = calleeInstanceObj->instance();
          Tier calleeTier = calleeInstance.code().bestTier();
          const CodeRange& calleeCodeRange =
              calleeInstanceObj->getExportedFunctionCodeRange(fun, calleeTier);
          void* code = calleeInstance.codeBase(calleeTier) +
                       calleeCodeRange.funcTableEntry();
          table.setFuncRef(dstOffset + i, code, &calleeInstance);
          continue;
        }
      }
      void* code = codeBaseTier +
                   codeRanges[funcToCodeRange[funcIndex]].funcTableEntry();
      table.setFuncRef(dstOffset + i, code, this);
    }
  }
  return true;
}

/* static */ int32_t Instance::tableInit(Instance* instance, uint32_t dstOffset,
                                         uint32_t srcOffset, uint32_t len,
                                         uint32_t segIndex,
                                         uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableInit.failureMode == FailureMode::FailOnNegI32);

  MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
                     "ensured by validation");

  if (!instance->passiveElemSegments_[segIndex]) {
    if (len == 0 && srcOffset == 0) {
      return 0;
    }

    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  const ElemSegment& seg = *instance->passiveElemSegments_[segIndex];
  MOZ_RELEASE_ASSERT(!seg.active());
  const uint32_t segLen = seg.length();

  const Table& table = *instance->tables()[tableIndex];
  const uint32_t tableLen = table.length();

  // We are proposing to copy
  //
  //   seg[ srcOffset .. srcOffset + len - 1 ]
  // to
  //   tableBase[ dstOffset .. dstOffset + len - 1 ]

  // Bounds check and deal with arithmetic overflow.
  uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
  uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);

  if (dstOffsetLimit > tableLen || srcOffsetLimit > segLen) {
    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  if (!instance->initElems(tableIndex, seg, dstOffset, srcOffset, len)) {
    return -1;  // OOM, which has already been reported.
  }

  return 0;
}

/* static */ int32_t Instance::tableFill(Instance* instance, uint32_t start,
                                         void* value, uint32_t len,
                                         uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableFill.failureMode == FailureMode::FailOnNegI32);

  JSContext* cx = TlsContext.get();
  Table& table = *instance->tables()[tableIndex];

  // Bounds check and deal with arithmetic overflow.
  uint64_t offsetLimit = uint64_t(start) + uint64_t(len);

  if (offsetLimit > table.length()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_OUT_OF_BOUNDS);
    return -1;
  }

  switch (table.kind()) {
    case TableKind::AnyRef:
      table.fillAnyRef(start, len, AnyRef::fromCompiledCode(value));
      break;
    case TableKind::FuncRef:
      table.fillFuncRef(start, len, FuncRef::fromCompiledCode(value), cx);
      break;
    case TableKind::AsmJS:
      MOZ_CRASH("not asm.js");
  }

  return 0;
}

/* static */ void* Instance::tableGet(Instance* instance, uint32_t index,
                                      uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableGet.failureMode == FailureMode::FailOnInvalidRef);

  const Table& table = *instance->tables()[tableIndex];
  if (index >= table.length()) {
    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
                              JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
    return AnyRef::invalid().forCompiledCode();
  }

  if (table.kind() == TableKind::AnyRef) {
    return table.getAnyRef(index).forCompiledCode();
  }

  MOZ_RELEASE_ASSERT(table.kind() == TableKind::FuncRef);

  JSContext* cx = TlsContext.get();
  RootedFunction fun(cx);
  if (!table.getFuncRef(cx, index, &fun)) {
    return AnyRef::invalid().forCompiledCode();
  }

  return FuncRef::fromJSFunction(fun).forCompiledCode();
}

/* static */ uint32_t Instance::tableGrow(Instance* instance, void* initValue,
                                          uint32_t delta, uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableGrow.failureMode == FailureMode::Infallible);

  RootedAnyRef ref(TlsContext.get(), AnyRef::fromCompiledCode(initValue));
  Table& table = *instance->tables()[tableIndex];

  uint32_t oldSize = table.grow(delta);

  if (oldSize != uint32_t(-1) && initValue != nullptr) {
    switch (table.kind()) {
      case TableKind::AnyRef:
        table.fillAnyRef(oldSize, delta, ref);
        break;
      case TableKind::FuncRef:
        table.fillFuncRef(oldSize, delta, FuncRef::fromAnyRefUnchecked(ref),
                          TlsContext.get());
        break;
      case TableKind::AsmJS:
        MOZ_CRASH("not asm.js");
    }
  }

  return oldSize;
}

/* static */ int32_t Instance::tableSet(Instance* instance, uint32_t index,
                                        void* value, uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableSet.failureMode == FailureMode::FailOnNegI32);

  Table& table = *instance->tables()[tableIndex];
  if (index >= table.length()) {
    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
                              JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
    return -1;
  }

  switch (table.kind()) {
    case TableKind::AnyRef:
      table.fillAnyRef(index, 1, AnyRef::fromCompiledCode(value));
      break;
    case TableKind::FuncRef:
      table.fillFuncRef(index, 1, FuncRef::fromCompiledCode(value),
                        TlsContext.get());
      break;
    case TableKind::AsmJS:
      MOZ_CRASH("not asm.js");
  }

  return 0;
}

/* static */ uint32_t Instance::tableSize(Instance* instance,
                                          uint32_t tableIndex) {
  MOZ_ASSERT(SASigTableSize.failureMode == FailureMode::Infallible);
  Table& table = *instance->tables()[tableIndex];
  return table.length();
}

/* static */ void* Instance::funcRef(Instance* instance, uint32_t funcIndex) {
  MOZ_ASSERT(SASigFuncRef.failureMode == FailureMode::FailOnInvalidRef);
  JSContext* cx = TlsContext.get();

  Tier tier = instance->code().bestTier();
  const MetadataTier& metadataTier = instance->metadata(tier);
  const FuncImportVector& funcImports = metadataTier.funcImports;

  // If this is an import, we need to recover the original function to maintain
  // reference equality between a re-exported function and 'ref.func'. The
  // identity of the imported function object is stable across tiers, which is
  // what we want.
  //
  // Use the imported function only if it is an exported function, otherwise
  // fall through to get a (possibly new) exported function.
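  // For example, if module A exports a function that module B imports and
  // re-exports, a ref.func in B must yield the same JSFunction object that A's
  // exports object exposes, so we return the imported function here.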
  if (funcIndex < funcImports.length()) {
    FuncImportTls& import = instance->funcImportTls(funcImports[funcIndex]);
    if (IsWasmExportedFunction(import.fun)) {
      return FuncRef::fromJSFunction(import.fun).forCompiledCode();
    }
  }

  RootedFunction fun(cx);
  RootedWasmInstanceObject instanceObj(cx, instance->object());
  if (!WasmInstanceObject::getExportedFunction(cx, instanceObj, funcIndex,
                                               &fun)) {
    // Validation ensures that we always have a valid funcIndex, so we must
    // have OOM'ed.
    ReportOutOfMemory(cx);
    return AnyRef::invalid().forCompiledCode();
  }

  return FuncRef::fromJSFunction(fun).forCompiledCode();
}

/* static */ void Instance::postBarrier(Instance* instance,
                                        gc::Cell** location) {
  MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
  MOZ_ASSERT(location);
  TlsContext.get()->runtime()->gc.storeBuffer().putCell(
      reinterpret_cast<JSObject**>(location));
}

/* static */ void Instance::postBarrierFiltering(Instance* instance,
                                                 gc::Cell** location) {
  MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
  MOZ_ASSERT(location);
  if (*location == nullptr || !gc::IsInsideNursery(*location)) {
    return;
  }
  TlsContext.get()->runtime()->gc.storeBuffer().putCell(
      reinterpret_cast<JSObject**>(location));
}

// The typeIndex is an index into the structTypeDescrs_ table in the instance.
// That table holds TypeDescr objects.
//
// When we fail to allocate we return a nullptr; the wasm side must check this
// and propagate it as an error.

/* static */ void* Instance::structNew(Instance* instance, uint32_t typeIndex) {
  MOZ_ASSERT(SASigStructNew.failureMode == FailureMode::FailOnNullPtr);
  JSContext* cx = TlsContext.get();
  Rooted<TypeDescr*> typeDescr(cx, instance->structTypeDescrs_[typeIndex]);
  return TypedObject::createZeroed(cx, typeDescr);
}

/* static */ void* Instance::structNarrow(Instance* instance,
                                          uint32_t mustUnboxAnyref,
                                          uint32_t outputTypeIndex,
                                          void* maybeNullPtr) {
  MOZ_ASSERT(SASigStructNarrow.failureMode == FailureMode::Infallible);

  JSContext* cx = TlsContext.get();

  Rooted<TypedObject*> obj(cx);
  Rooted<StructTypeDescr*> typeDescr(cx);

  if (maybeNullPtr == nullptr) {
    return maybeNullPtr;
  }

  void* nonnullPtr = maybeNullPtr;
  if (mustUnboxAnyref) {
    // TODO/AnyRef-boxing: With boxed immediates and strings, unboxing
    // AnyRef is not a no-op.
    ASSERT_ANYREF_IS_JSOBJECT;

    Rooted<NativeObject*> no(cx, static_cast<NativeObject*>(nonnullPtr));
    if (!no->is<TypedObject>()) {
      return nullptr;
    }
    obj = &no->as<TypedObject>();
    Rooted<TypeDescr*> td(cx, &obj->typeDescr());
    if (td->kind() != type::Struct) {
      return nullptr;
    }
    typeDescr = &td->as<StructTypeDescr>();
  } else {
    obj = static_cast<TypedObject*>(nonnullPtr);
    typeDescr = &obj->typeDescr().as<StructTypeDescr>();
  }

  // Optimization opportunity: instead of this loop we could perhaps load an
  // index from `typeDescr` and use that to index into the structTypes table
  // of the instance. If the index is in bounds and the desc at that index is
  // the desc we have then we know the index is good, and we can use that for
  // the prefix check.

  uint32_t found = UINT32_MAX;
  for (uint32_t i = 0; i < instance->structTypeDescrs_.length(); i++) {
    if (instance->structTypeDescrs_[i] == typeDescr) {
      found = i;
      break;
    }
  }

  if (found == UINT32_MAX) {
    return nullptr;
  }

  // Also asserted in constructor; let's just be double sure.

  MOZ_ASSERT(instance->structTypeDescrs_.length() ==
             instance->structTypes().length());

  // Now we know that the object was created by the instance, and we know its
  // concrete type. We need to check that its type is an extension of the
  // type of outputTypeIndex.

  if (!instance->structTypes()[found].hasPrefix(
          instance->structTypes()[outputTypeIndex])) {
    return nullptr;
  }

  return nonnullPtr;
}

// Note: dst must point into nonmoveable storage that is not in the nursery;
// this matters for the write barriers. Furthermore, for pointer types the
// current value of *dst must be null so that only a post-barrier is required.
//
// Regarding the destination not being in the nursery, we have these cases.
// Either the written location is in the global data section in the
// WasmInstanceObject, or the Cell of a WasmGlobalObject:
//
// - WasmInstanceObjects are always tenured and u.ref_ may point to a
//   nursery object, so we need a post-barrier since the global data of an
//   instance is effectively a field of the WasmInstanceObject.
//
// - WasmGlobalObjects are always tenured, and they have a Cell field, so a
//   post-barrier may be needed for the same reason as above.
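//
// For example, when a nursery-allocated reference value is stored into a
// tenured WasmGlobalObject's cell, the writeBarrierPost call below records
// that cell in the store buffer so the next minor GC can find the edge.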

void CopyValPostBarriered(uint8_t* dst, const Val& src) {
  switch (src.type().code()) {
    case ValType::I32: {
      int32_t x = src.i32();
      memcpy(dst, &x, sizeof(x));
      break;
    }
    case ValType::F32: {
      float x = src.f32();
      memcpy(dst, &x, sizeof(x));
      break;
    }
    case ValType::I64: {
      int64_t x = src.i64();
      memcpy(dst, &x, sizeof(x));
      break;
    }
    case ValType::F64: {
      double x = src.f64();
      memcpy(dst, &x, sizeof(x));
      break;
    }
    case ValType::Ref:
    case ValType::FuncRef:
    case ValType::AnyRef: {
      // TODO/AnyRef-boxing: With boxed immediates and strings, the write
      // barrier is going to have to be more complicated.
      ASSERT_ANYREF_IS_JSOBJECT;
      MOZ_ASSERT(*(void**)dst == nullptr,
                 "should be null so no need for a pre-barrier");
      AnyRef x = src.ref();
      memcpy(dst, x.asJSObjectAddress(), sizeof(*x.asJSObjectAddress()));
      if (!x.isNull()) {
        JSObject::writeBarrierPost((JSObject**)dst, nullptr, x.asJSObject());
      }
      break;
    }
    case ValType::NullRef: {
      MOZ_CRASH("unexpected Val type");
    }
  }
}

Instance::Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
                   SharedCode code, UniqueTlsData tlsDataIn,
                   HandleWasmMemoryObject memory, SharedTableVector&& tables,
                   StructTypeDescrVector&& structTypeDescrs,
                   const JSFunctionVector& funcImports,
                   const ValVector& globalImportValues,
                   const WasmGlobalObjectVector& globalObjs,
                   UniqueDebugState maybeDebug)
    : realm_(cx->realm()),
      object_(object),
      jsJitArgsRectifier_(
          cx->runtime()->jitRuntime()->getArgumentsRectifier().value),
      jsJitExceptionHandler_(
          cx->runtime()->jitRuntime()->getExceptionTail().value),
      preBarrierCode_(
          cx->runtime()->jitRuntime()->preBarrier(MIRType::Object).value),
      code_(code),
      tlsData_(std::move(tlsDataIn)),
      memory_(memory),
      tables_(std::move(tables)),
      maybeDebug_(std::move(maybeDebug)),
      structTypeDescrs_(std::move(structTypeDescrs)) {
  MOZ_ASSERT(!!maybeDebug_ == metadata().debugEnabled);
  MOZ_ASSERT(structTypeDescrs_.length() == structTypes().length());

#ifdef DEBUG
  for (auto t : code_->tiers()) {
    MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
  }
#endif
  MOZ_ASSERT(tables_.length() == metadata().tables.length());

  tlsData()->memoryBase =
      memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
  tlsData()->boundsCheckLimit = memory ? memory->boundsCheckLimit() : 0;
  tlsData()->instance = this;
  tlsData()->realm = realm_;
  tlsData()->cx = cx;
  tlsData()->valueBoxClass = &WasmValueBox::class_;
  tlsData()->resetInterrupt(cx);
  tlsData()->jumpTable = code_->tieringJumpTable();
  tlsData()->addressOfNeedsIncrementalBarrier =
      (uint8_t*)cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();

  Tier callerTier = code_->bestTier();
  for (size_t i = 0; i < metadata(callerTier).funcImports.length(); i++) {
    JSFunction* f = funcImports[i];
    const FuncImport& fi = metadata(callerTier).funcImports[i];
    FuncImportTls& import = funcImportTls(fi);
    import.fun = f;
    if (!isAsmJS() && IsWasmExportedFunction(f)) {
      WasmInstanceObject* calleeInstanceObj =
          ExportedFunctionToInstanceObject(f);
      Instance& calleeInstance = calleeInstanceObj->instance();
      Tier calleeTier = calleeInstance.code().bestTier();
      const CodeRange& codeRange =
          calleeInstanceObj->getExportedFunctionCodeRange(f, calleeTier);
      import.tls = calleeInstance.tlsData();
      import.realm = f->realm();
      import.code =
          calleeInstance.codeBase(calleeTier) + codeRange.funcNormalEntry();
      import.jitScript = nullptr;
    } else if (void* thunk = MaybeGetBuiltinThunk(f, fi.funcType())) {
      import.tls = tlsData();
      import.realm = f->realm();
      import.code = thunk;
      import.jitScript = nullptr;
    } else {
      import.tls = tlsData();
      import.realm = f->realm();
      import.code = codeBase(callerTier) + fi.interpExitCodeOffset();
      import.jitScript = nullptr;
    }
  }

  for (size_t i = 0; i < tables_.length(); i++) {
    const TableDesc& td = metadata().tables[i];
    TableTls& table = tableTls(td);
    table.length = tables_[i]->length();
    table.functionBase = tables_[i]->functionBase();
  }

  for (size_t i = 0; i < metadata().globals.length(); i++) {
    const GlobalDesc& global = metadata().globals[i];

    // Constants are baked into the code, never stored in the global area.
    if (global.isConstant()) {
      continue;
    }

    uint8_t* globalAddr = globalData() + global.offset();
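    // For indirect globals the instance's global area stores a pointer to the
    // WasmGlobalObject's cell rather than the value itself, so the object and
    // the instance share a single storage location.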
    switch (global.kind()) {
      case GlobalKind::Import: {
        size_t imported = global.importIndex();
        if (global.isIndirect()) {
          *(void**)globalAddr = globalObjs[imported]->cell();
        } else {
          CopyValPostBarriered(globalAddr, globalImportValues[imported]);
        }
        break;
      }
      case GlobalKind::Variable: {
        const InitExpr& init = global.initExpr();
        switch (init.kind()) {
          case InitExpr::Kind::Constant: {
            if (global.isIndirect()) {
              *(void**)globalAddr = globalObjs[i]->cell();
            } else {
              CopyValPostBarriered(globalAddr, Val(init.val()));
            }
            break;
          }
          case InitExpr::Kind::GetGlobal: {
            const GlobalDesc& imported = metadata().globals[init.globalIndex()];

            // Global-ref initializers cannot reference mutable globals, so
            // the source global should never be indirect.
            MOZ_ASSERT(!imported.isIndirect());

            RootedVal dest(cx, globalImportValues[imported.importIndex()]);
            if (global.isIndirect()) {
              void* address = globalObjs[i]->cell();
              *(void**)globalAddr = address;
              CopyValPostBarriered((uint8_t*)address, dest.get());
            } else {
              CopyValPostBarriered(globalAddr, dest.get());
            }
            break;
          }
        }
        break;
      }
      case GlobalKind::Constant: {
        MOZ_CRASH("skipped at the top");
      }
    }
  }
}

bool Instance::init(JSContext* cx, const DataSegmentVector& dataSegments,
                    const ElemSegmentVector& elemSegments) {
  if (memory_ && memory_->movingGrowable() &&
      !memory_->addMovingGrowObserver(cx, object_)) {
    return false;
  }

  for (const SharedTable& table : tables_) {
    if (table->movingGrowable() && !table->addMovingGrowObserver(cx, object_)) {
      return false;
    }
  }

  if (!metadata().funcTypeIds.empty()) {
    ExclusiveData<FuncTypeIdSet>::Guard lockedFuncTypeIdSet =
        funcTypeIdSet.lock();

    for (const FuncTypeWithId& funcType : metadata().funcTypeIds) {
      const void* funcTypeId;
      if (!lockedFuncTypeIdSet->allocateFuncTypeId(cx, funcType, &funcTypeId)) {
        return false;
      }

      *addressOfFuncTypeId(funcType.id) = funcTypeId;
    }
  }

  if (!passiveDataSegments_.resize(dataSegments.length())) {
    return false;
  }
  for (size_t i = 0; i < dataSegments.length(); i++) {
    if (!dataSegments[i]->active()) {
      passiveDataSegments_[i] = dataSegments[i];
    }
  }

  if (!passiveElemSegments_.resize(elemSegments.length())) {
    return false;
  }
  for (size_t i = 0; i < elemSegments.length(); i++) {
    if (elemSegments[i]->kind == ElemSegment::Kind::Passive) {
      passiveElemSegments_[i] = elemSegments[i];
    }
  }

  return true;
}

Instance::~Instance() {
  realm_->wasm.unregisterInstance(*this);

  const FuncImportVector& funcImports =
      metadata(code().stableTier()).funcImports;

  for (unsigned i = 0; i < funcImports.length(); i++) {
    FuncImportTls& import = funcImportTls(funcImports[i]);
    if (import.jitScript) {
      import.jitScript->removeDependentWasmImport(*this, i);
    }
  }

  if (!metadata().funcTypeIds.empty()) {
    ExclusiveData<FuncTypeIdSet>::Guard lockedFuncTypeIdSet =
        funcTypeIdSet.lock();

    for (const FuncTypeWithId& funcType : metadata().funcTypeIds) {
      if (const void* funcTypeId = *addressOfFuncTypeId(funcType.id)) {
        lockedFuncTypeIdSet->deallocateFuncTypeId(funcType, funcTypeId);
      }
    }
  }
}

size_t Instance::memoryMappedSize() const {
  return memory_->buffer().wasmMappedSize();
}

bool Instance::memoryAccessInGuardRegion(uint8_t* addr,
                                         unsigned numBytes) const {
  MOZ_ASSERT(numBytes > 0);

  if (!metadata().usesMemory()) {
    return false;
  }

  uint8_t* base = memoryBase().unwrap(/* comparison */);
  if (addr < base) {
    return false;
  }

  size_t lastByteOffset = addr - base + (numBytes - 1);
  return lastByteOffset >= memory()->volatileMemoryLength() &&
         lastByteOffset < memoryMappedSize();
}

bool Instance::memoryAccessInBounds(uint8_t* addr, unsigned numBytes) const {
  MOZ_ASSERT(numBytes > 0 && numBytes <= sizeof(double));

  if (!metadata().usesMemory()) {
    return false;
  }

  uint8_t* base = memoryBase().unwrap(/* comparison */);
  if (addr < base) {
    return false;
  }

  uint32_t length = memory()->volatileMemoryLength();
  if (addr >= base + length) {
    return false;
  }

  // The pointer points into the memory. Now check for partial OOB.
  //
  // This calculation can't wrap around because the access is small and there
  // always is a guard page following the memory.
  size_t lastByteOffset = addr - base + (numBytes - 1);
  if (lastByteOffset >= length) {
    return false;
  }

  return true;
}

void Instance::tracePrivate(JSTracer* trc) {
  // This method is only called from WasmInstanceObject so the only reason why
  // TraceEdge is called is so that the pointer can be updated during a moving
  // GC.
  MOZ_ASSERT(!gc::IsAboutToBeFinalized(&object_));
  TraceEdge(trc, &object_, "wasm instance object");

  // OK to just do one tier here; though the tiers have different funcImports
  // tables, they share the tls object.
  for (const FuncImport& fi : metadata(code().stableTier()).funcImports) {
    TraceNullableEdge(trc, &funcImportTls(fi).fun, "wasm import");
  }

  for (const SharedTable& table : tables_) {
    table->trace(trc);
  }

  for (const GlobalDesc& global : code().metadata().globals) {
    // Indirect reference globals get traced by the owning WebAssembly.Global.
    if (!global.type().isReference() || global.isConstant() ||
        global.isIndirect()) {
      continue;
    }
    GCPtrObject* obj = (GCPtrObject*)(globalData() + global.offset());
    TraceNullableEdge(trc, obj, "wasm reference-typed global");
  }

  TraceNullableEdge(trc, &memory_, "wasm buffer");
  structTypeDescrs_.trace(trc);

  if (maybeDebug_) {
    maybeDebug_->trace(trc);
  }
}

void Instance::trace(JSTracer* trc) {
  // Technically, instead of having this method, the caller could use
  // Instance::object() to get the owning WasmInstanceObject to mark,
  // but this method is simpler and more efficient. The trace hook of
  // WasmInstanceObject will call Instance::tracePrivate at which point we
  // can mark the rest of the children.
  TraceEdge(trc, &object_, "wasm instance object");
}

uintptr_t Instance::traceFrame(JSTracer* trc, const wasm::WasmFrameIter& wfi,
                               uint8_t* nextPC,
                               uintptr_t highestByteVisitedInPrevFrame) {
  const StackMap* map = code().lookupStackMap(nextPC);
  if (!map) {
    return 0;
  }

  Frame* frame = wfi.frame();

  // |frame| points somewhere in the middle of the area described by |map|.
  // We have to calculate |scanStart|, the lowest address that is described by
  // |map|, by consulting |map->frameOffsetFromTop|.

  const size_t numMappedBytes = map->numMappedWords * sizeof(void*);
  const uintptr_t scanStart = uintptr_t(frame) +
                              (map->frameOffsetFromTop * sizeof(void*)) -
                              numMappedBytes;
  MOZ_ASSERT(0 == scanStart % sizeof(void*));

  // Do what we can to assert that, for consecutive wasm frames, their stack
  // maps also abut exactly. This is a useful sanity check on the sizing of
  // stack maps.
  //
  // In debug builds, the stackmap construction machinery goes to considerable
  // efforts to ensure that the stackmaps for consecutive frames abut exactly.
  // This is so as to ensure there are no areas of stack inadvertently ignored
  // by a stackmap, nor covered by two stackmaps. Hence any failure of this
  // assertion is serious and should be investigated.
  MOZ_ASSERT_IF(highestByteVisitedInPrevFrame != 0,
                highestByteVisitedInPrevFrame + 1 == scanStart);

  uintptr_t* stackWords = (uintptr_t*)scanStart;

  // If we have some exit stub words, this means the map also covers an area
  // created by a exit stub, and so the highest word of that should be a
  // constant created by (code created by) GenerateTrapExit.
  MOZ_ASSERT_IF(
      map->numExitStubWords > 0,
      stackWords[map->numExitStubWords - 1 - TrapExitDummyValueOffsetFromTop] ==
          TrapExitDummyValue);

  // And actually hand them off to the GC.
  for (uint32_t i = 0; i < map->numMappedWords; i++) {
    if (map->getBit(i) == 0) {
      continue;
    }

    // TODO/AnyRef-boxing: With boxed immediates and strings, the value may
    // not be a traceable JSObject*.
    ASSERT_ANYREF_IS_JSOBJECT;

    // This assertion seems at least moderately effective in detecting
    // discrepancies or misalignments between the map and reality.
    MOZ_ASSERT(js::gc::IsCellPointerValidOrNull((const void*)stackWords[i]));

    if (stackWords[i]) {
      TraceRoot(trc, (JSObject**)&stackWords[i],
                "Instance::traceWasmFrame: normal word");
    }
  }

  // Finally, deal with a ref-typed DebugFrame if it is present.
  if (map->hasRefTypedDebugFrame) {
    DebugFrame* debugFrame = DebugFrame::from(frame);
    char* debugFrameP = (char*)debugFrame;

    // TODO/AnyRef-boxing: With boxed immediates and strings, the value may
    // not be a traceable JSObject*.
    ASSERT_ANYREF_IS_JSOBJECT;

    char* resultRefP = debugFrameP + DebugFrame::offsetOfResults();
    if (*(intptr_t*)resultRefP) {
      TraceRoot(trc, (JSObject**)resultRefP,
                "Instance::traceWasmFrame: DebugFrame::resultRef_");
    }

    if (debugFrame->hasCachedReturnJSValue()) {
      char* cachedReturnJSValueP =
          debugFrameP + DebugFrame::offsetOfCachedReturnJSValue();
      TraceRoot(trc, (js::Value*)cachedReturnJSValueP,
                "Instance::traceWasmFrame: DebugFrame::cachedReturnJSValue_");
    }
  }

  return scanStart + numMappedBytes - 1;
}

WasmMemoryObject* Instance::memory() const { return memory_; }

SharedMem<uint8_t*> Instance::memoryBase() const {
  MOZ_ASSERT(metadata().usesMemory());
  MOZ_ASSERT(tlsData()->memoryBase == memory_->buffer().dataPointerEither());
  return memory_->buffer().dataPointerEither();
}

SharedArrayRawBuffer* Instance::sharedMemoryBuffer() const {
  MOZ_ASSERT(memory_->isShared());
  return memory_->sharedArrayRawBuffer();
}

WasmInstanceObject* Instance::objectUnbarriered() const {
  return object_.unbarrieredGet();
}

WasmInstanceObject* Instance::object() const { return object_; }

static bool EnsureEntryStubs(const Instance& instance, uint32_t funcIndex,
                             const FuncExport** funcExport,
                             void** interpEntry) {
  Tier tier = instance.code().bestTier();

  size_t funcExportIndex;
  *funcExport =
      &instance.metadata(tier).lookupFuncExport(funcIndex, &funcExportIndex);

  const FuncExport& fe = **funcExport;
  if (fe.hasEagerStubs()) {
    *interpEntry = instance.codeBase(tier) + fe.eagerInterpEntryOffset();
    return true;
  }

  MOZ_ASSERT(!instance.isAsmJS(), "only wasm can lazily export functions");

  // If the best tier is Ion, life is simple: background compilation has
  // already completed and has been committed, so there's no risk of race
  // conditions here.
  //
  // If the best tier is Baseline, there could be a background compilation
  // happening at the same time. The background compilation will lock the
  // first tier lazy stubs first to stop new baseline stubs from being
  // generated, then the second tier stubs to generate them.
  //
  // - either we take the tier1 lazy stub lock before the background
  //   compilation gets it, then we generate the lazy stub for tier1. When the
  //   background thread gets the tier1 lazy stub lock, it will see it has a
  //   lazy stub and will recompile it for tier2.
  // - or we don't take the lock here first. Background compilation won't
  //   find a lazy stub for this function, thus won't generate it. So we'll do
  //   it ourselves after taking the tier2 lock.
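  //
  // Either way, once this function returns, some tier's lazy stub table maps
  // fe.funcIndex() to an interpreter entry, which is what we hand back.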

  auto stubs = instance.code(tier).lazyStubs().lock();
  *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
  if (*interpEntry) {
    return true;
  }

  // The best tier might have changed after we've taken the lock.
  Tier prevTier = tier;
  tier = instance.code().bestTier();
  const CodeTier& codeTier = instance.code(tier);
  if (tier == prevTier) {
    if (!stubs->createOne(funcExportIndex, codeTier)) {
      return false;
    }

    *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
    MOZ_ASSERT(*interpEntry);
    return true;
  }

  MOZ_RELEASE_ASSERT(prevTier == Tier::Baseline && tier == Tier::Optimized);
  auto stubs2 = instance.code(tier).lazyStubs().lock();

  // If it didn't have a stub in the first tier, background compilation
  // shouldn't have made one in the second tier.
  MOZ_ASSERT(!stubs2->hasStub(fe.funcIndex()));

  if (!stubs2->createOne(funcExportIndex, codeTier)) {
    return false;
  }

  *interpEntry = stubs2->lookupInterpEntry(fe.funcIndex());
  MOZ_ASSERT(*interpEntry);
  return true;
}

static bool GetInterpEntry(Instance& instance, uint32_t funcIndex,
                           CallArgs args, void** interpEntry,
                           const FuncType** funcType) {
  const FuncExport* funcExport;
  if (!EnsureEntryStubs(instance, funcIndex, &funcExport, interpEntry)) {
    return false;
  }

  // EnsureEntryStubs() has ensured jit-entry stubs have been created and
  // installed in funcIndex's JumpTable entry, so we can now set the
  // JSFunction's jit-entry. See WasmInstanceObject::getExportedFunction().
  if (!funcExport->hasEagerStubs() && funcExport->canHaveJitEntry()) {
    JSFunction& callee = args.callee().as<JSFunction>();
    MOZ_ASSERT(!callee.isAsmJSNative(), "asm.js only has eager stubs");
    if (!callee.isWasmWithJitEntry()) {
      callee.setWasmJitEntry(instance.code().getAddressOfJitEntry(funcIndex));
    }
  }

  *funcType = &funcExport->funcType();
  return true;
}

bool Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args) {
  if (memory_) {
    // If there has been a moving grow, this Instance should have been notified.
    MOZ_RELEASE_ASSERT(memory_->buffer().dataPointerEither() == memoryBase());
  }

  void* interpEntry;
  const FuncType* funcType;
  if (!GetInterpEntry(*this, funcIndex, args, &interpEntry, &funcType)) {
    return false;
  }

  if (funcType->hasI64ArgOrRet() && !HasI64BigIntSupport(cx)) {
    JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                             JSMSG_WASM_BAD_I64_TYPE);
    return false;
  }

  // The calling convention for an external call into wasm is to pass an
  // array of 16-byte values where each value contains either a coerced int32
  // (in the low word) or a double value (in the low dword), with the
  // coercions specified by the wasm signature. The external entry point
  // unpacks this array into the system-ABI-specified registers and stack
  // memory and then calls into the internal entry point. The return value is
  // stored in the first element of the array (which, therefore, must have
  // length >= 1).
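  //
  // For instance, for a function of type (i32, f64) -> i32 the caller writes
  // a coerced int32 into exportArgs[0] and a double into exportArgs[1], and
  // after the call reads the i32 result back out of exportArgs[0].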
  Vector<ExportArg, 8> exportArgs(cx);
  if (!exportArgs.resize(std::max<size_t>(1, funcType->args().length()))) {
    return false;
  }

  ASSERT_ANYREF_IS_JSOBJECT;
  Rooted<GCVector<JSObject*, 8, SystemAllocPolicy>> refs(cx);

  DebugCodegen(DebugChannel::Function, "wasm-function[%d]; arguments ",
               funcIndex);
  RootedValue v(cx);
  for (size_t i = 0; i < funcType->args().length(); ++i) {
    v = i < args.length() ? args[i] : UndefinedValue();
    switch (funcType->arg(i).code()) {
      case ValType::I32:
        if (!ToInt32(cx, v, (int32_t*)&exportArgs[i])) {
          return false;
        }
        DebugCodegen(DebugChannel::Function, "i32(%d) ",
                     *(int32_t*)&exportArgs[i]);
        break;
      case ValType::I64: {
#ifdef ENABLE_WASM_BIGINT
        MOZ_ASSERT(HasI64BigIntSupport(cx),
                   "unexpected i64 flowing into callExport");
        RootedBigInt bigint(cx, ToBigInt(cx, v));
        if (!bigint) {
          return false;
        }

        int64_t* res = (int64_t*)&exportArgs[i];
        *res = BigInt::toInt64(bigint);
        DebugCodegen(DebugChannel::Function, "i64(%" PRId64 ") ",
                     *(int64_t*)&exportArgs[i]);
        break;
#else
        MOZ_CRASH("unexpected i64 flowing into callExport");
#endif
      }
      case ValType::F32:
        if (!RoundFloat32(cx, v, (float*)&exportArgs[i])) {
          return false;
        }
        DebugCodegen(DebugChannel::Function, "f32(%f) ",
                     *(float*)&exportArgs[i]);
        break;
      case ValType::F64:
        if (!ToNumber(cx, v, (double*)&exportArgs[i])) {
          return false;