/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmTypes.h"

#include "js/Printf.h"
#include "util/Memory.h"
#include "vm/ArrayBufferObject.h"
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmInstance.h"
#include "wasm/WasmSerialize.h"

#include "vm/JSObject-inl.h"
#include "vm/NativeObject-inl.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using mozilla::IsPowerOfTwo;
using mozilla::MakeEnumeratedRange;

// We have only tested huge memory on x64 and arm64.

#if defined(WASM_SUPPORTS_HUGE_MEMORY)
#  if !(defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64))
#    error "Not an expected configuration"
#  endif
#endif

// More sanity checks.

static_assert(MaxMemoryInitialPages <=
                  ArrayBufferObject::MaxBufferByteLength / PageSize,
              "Memory sizing constraint");

// All plausible targets must be able to do at least IEEE754 double
// loads/stores, hence the lower limit of 8. Some Intel processors support
// AVX-512 loads/stores, hence the upper limit of 64.
static_assert(MaxMemoryAccessSize >= 8, "MaxMemoryAccessSize too low");
static_assert(MaxMemoryAccessSize <= 64, "MaxMemoryAccessSize too high");
static_assert((MaxMemoryAccessSize & (MaxMemoryAccessSize - 1)) == 0,
              "MaxMemoryAccessSize is not a power of two");

#if defined(WASM_SUPPORTS_HUGE_MEMORY)
static_assert(HugeMappedSize > ArrayBufferObject::MaxBufferByteLength,
              "Normal array buffer could be confused with huge memory");
#endif

Val::Val(const LitVal& val) {
  type_ = val.type();
  switch (type_.code()) {
    case ValType::I32:
      u.i32_ = val.i32();
      return;
    case ValType::F32:
      u.f32_ = val.f32();
      return;
    case ValType::I64:
      u.i64_ = val.i64();
      return;
    case ValType::F64:
      u.f64_ = val.f64();
      return;
    case ValType::Ref:
    case ValType::FuncRef:
    case ValType::AnyRef:
      u.ref_ = val.ref();
      return;
    case ValType::NullRef:
      break;
  }
  MOZ_CRASH();
}

void Val::trace(JSTracer* trc) {
  if (type_.isValid() && type_.isReference() && !u.ref_.isNull()) {
    // TODO/AnyRef-boxing: With boxed immediates and strings, the write
    // barrier is going to have to be more complicated.
    ASSERT_ANYREF_IS_JSOBJECT;
    TraceManuallyBarrieredEdge(trc, u.ref_.asJSObjectAddress(),
                               "wasm reference-typed global");
  }
}

void AnyRef::trace(JSTracer* trc) {
  if (value_) {
    TraceManuallyBarrieredEdge(trc, &value_, "wasm anyref referent");
  }
}

const JSClass WasmValueBox::class_ = {
    "WasmValueBox", JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS)};

WasmValueBox* WasmValueBox::create(JSContext* cx, HandleValue val) {
  WasmValueBox* obj = (WasmValueBox*)NewObjectWithGivenProto(
      cx, &WasmValueBox::class_, nullptr);
  if (!obj) {
    return nullptr;
  }
  obj->setFixedSlot(VALUE_SLOT, val);
  return obj;
}
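
// Editor's note summarizing the boxing convention implemented by the
// functions below (derived from the code, not added behavior): a null JS
// value maps to a null AnyRef, a JS object is used directly as the AnyRef
// referent, and any other JS value is wrapped in a WasmValueBox.
// UnboxAnyRef reverses this mapping.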

bool wasm::BoxAnyRef(JSContext* cx, HandleValue val, MutableHandleAnyRef addr) {
  if (val.isNull()) {
    addr.set(AnyRef::null());
    return true;
  }

  if (val.isObject()) {
    JSObject* obj = &val.toObject();
    MOZ_ASSERT(!obj->is<WasmValueBox>());
    MOZ_ASSERT(obj->compartment() == cx->compartment());
    addr.set(AnyRef::fromJSObject(obj));
    return true;
  }

  WasmValueBox* box = WasmValueBox::create(cx, val);
  if (!box) return false;
  addr.set(AnyRef::fromJSObject(box));
  return true;
}

JSObject* wasm::BoxBoxableValue(JSContext* cx, HandleValue val) {
  MOZ_ASSERT(!val.isNull() && !val.isObject());
  return WasmValueBox::create(cx, val);
}

Value wasm::UnboxAnyRef(AnyRef val) {
  // If UnboxAnyRef needs to allocate then we need a more complicated API, and
  // we need to root the value in the callers, see comments in callExport().
  JSObject* obj = val.asJSObject();
  Value result;
  if (obj == nullptr) {
    result.setNull();
  } else if (obj->is<WasmValueBox>()) {
    result = obj->as<WasmValueBox>().value();
  } else {
    result.setObjectOrNull(obj);
  }
  return result;
}

Value wasm::UnboxFuncRef(FuncRef val) {
  JSFunction* fn = val.asJSFunction();
  Value result;
  MOZ_ASSERT_IF(fn, fn->is<JSFunction>());
  result.setObjectOrNull(fn);
  return result;
}

bool js::IsBoxedWasmAnyRef(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  MOZ_ASSERT(args.length() == 1);
  args.rval().setBoolean(args[0].isObject() &&
                         args[0].toObject().is<WasmValueBox>());
  return true;
}

bool js::IsBoxableWasmAnyRef(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  MOZ_ASSERT(args.length() == 1);
  args.rval().setBoolean(!(args[0].isObject() || args[0].isNull()));
  return true;
}

bool js::BoxWasmAnyRef(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  MOZ_ASSERT(args.length() == 1);
  WasmValueBox* box = WasmValueBox::create(cx, args[0]);
  if (!box) return false;
  args.rval().setObject(*box);
  return true;
}

bool js::UnboxBoxedWasmAnyRef(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  MOZ_ASSERT(args.length() == 1);
  WasmValueBox* box = &args[0].toObject().as<WasmValueBox>();
  args.rval().set(box->value());
  return true;
}

bool wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode) {
  switch (callee) {
    case SymbolicAddress::FloorD:
    case SymbolicAddress::FloorF:
      *mode = jit::RoundingMode::Down;
      return true;
    case SymbolicAddress::CeilD:
    case SymbolicAddress::CeilF:
      *mode = jit::RoundingMode::Up;
      return true;
    case SymbolicAddress::TruncD:
    case SymbolicAddress::TruncF:
      *mode = jit::RoundingMode::TowardsZero;
      return true;
    case SymbolicAddress::NearbyIntD:
    case SymbolicAddress::NearbyIntF:
      *mode = jit::RoundingMode::NearestTiesToEven;
      return true;
    default:
      return false;
  }
}

size_t FuncType::serializedSize() const {
  return SerializedPodVectorSize(results_) + SerializedPodVectorSize(args_);
}

uint8_t* FuncType::serialize(uint8_t* cursor) const {
  cursor = SerializePodVector(cursor, results_);
  cursor = SerializePodVector(cursor, args_);
  return cursor;
}

const uint8_t* FuncType::deserialize(const uint8_t* cursor) {
  cursor = DeserializePodVector(cursor, &results_);
  if (!cursor) {
    return nullptr;
  }
  return DeserializePodVector(cursor, &args_);
}

size_t FuncType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return args_.sizeOfExcludingThis(mallocSizeOf);
}

typedef uint32_t ImmediateType;  // for 32/64 consistency
static const unsigned sTotalBits = sizeof(ImmediateType) * 8;
static const unsigned sTagBits = 1;
static const unsigned sReturnBit = 1;
static const unsigned sLengthBits = 4;
static const unsigned sTypeBits = 3;
static const unsigned sMaxTypes =
    (sTotalBits - sTagBits - sReturnBit - sLengthBits) / sTypeBits;
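
// Worked arithmetic (editor's illustration, not from the original source):
// with a 32-bit ImmediateType the packing above leaves
// (32 - 1 - 1 - 4) / 3 = 8 three-bit slots, so at most 8 result/argument
// types can be encoded in an immediate id; longer or non-immediate
// signatures fall back to a global FuncTypeIdDesc (see isGlobal below).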

static bool IsImmediateType(ValType vt) {
  switch (vt.code()) {
    case ValType::I32:
    case ValType::I64:
    case ValType::F32:
    case ValType::F64:
    case ValType::FuncRef:
    case ValType::AnyRef:
      return true;
    case ValType::NullRef:
    case ValType::Ref:
      return false;
  }
  MOZ_CRASH("bad ValType");
}

static unsigned EncodeImmediateType(ValType vt) {
  static_assert(4 < (1 << sTypeBits), "fits");
  switch (vt.code()) {
    case ValType::I32:
      return 0;
    case ValType::I64:
      return 1;
    case ValType::F32:
      return 2;
    case ValType::F64:
      return 3;
    case ValType::FuncRef:
      return 4;
    case ValType::AnyRef:
      return 5;
    case ValType::NullRef:
    case ValType::Ref:
      break;
  }
  MOZ_CRASH("bad ValType");
}

/* static */
bool FuncTypeIdDesc::isGlobal(const FuncType& funcType) {
  const ValTypeVector& results = funcType.results();
  const ValTypeVector& args = funcType.args();
  if (results.length() + args.length() > sMaxTypes) {
    return true;
  }

  if (results.length() > 1) {
    return true;
  }

  for (ValType v : results) {
    if (!IsImmediateType(v)) {
      return true;
    }
  }

  for (ValType v : args) {
    if (!IsImmediateType(v)) {
      return true;
    }
  }

  return false;
}

/* static */
FuncTypeIdDesc FuncTypeIdDesc::global(const FuncType& funcType,
                                      uint32_t globalDataOffset) {
  MOZ_ASSERT(isGlobal(funcType));
  return FuncTypeIdDesc(FuncTypeIdDescKind::Global, globalDataOffset);
}

static ImmediateType LengthToBits(uint32_t length) {
  static_assert(sMaxTypes <= ((1 << sLengthBits) - 1), "fits");
  MOZ_ASSERT(length <= sMaxTypes);
  return length;
}

/* static */
FuncTypeIdDesc FuncTypeIdDesc::immediate(const FuncType& funcType) {
  ImmediateType immediate = ImmediateBit;
  uint32_t shift = sTagBits;

  if (funcType.results().length() > 0) {
    MOZ_ASSERT(funcType.results().length() == 1);
    immediate |= (1 << shift);
    shift += sReturnBit;

    immediate |= EncodeImmediateType(funcType.results()[0]) << shift;
    shift += sTypeBits;
  } else {
    shift += sReturnBit;
  }

  immediate |= LengthToBits(funcType.args().length()) << shift;
  shift += sLengthBits;

  for (ValType argType : funcType.args()) {
    immediate |= EncodeImmediateType(argType) << shift;
    shift += sTypeBits;
  }

  MOZ_ASSERT(shift <= sTotalBits);
  return FuncTypeIdDesc(FuncTypeIdDescKind::Immediate, immediate);
}
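
// Editor's sketch of the immediate id built above, from bit 0 upward:
//   [ tag: 1 ][ has-result: 1 ][ result type: 3, only if has-result ]
//   [ arg count: 4 ][ arg type 0: 3 ] ... [ arg type n-1: 3 ]
// For example, the signature (i32, f64) -> i64 would encode as
//   ImmediateBit | (1 << 1) | (EncodeImmediateType(ValType::I64) << 2) |
//   (2 << 5) | (EncodeImmediateType(ValType::I32) << 9) |
//   (EncodeImmediateType(ValType::F64) << 12)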

size_t FuncTypeWithId::serializedSize() const {
  return FuncType::serializedSize() + sizeof(id);
}

uint8_t* FuncTypeWithId::serialize(uint8_t* cursor) const {
  cursor = FuncType::serialize(cursor);
  cursor = WriteBytes(cursor, &id, sizeof(id));
  return cursor;
}

const uint8_t* FuncTypeWithId::deserialize(const uint8_t* cursor) {
  (cursor = FuncType::deserialize(cursor)) &&
      (cursor = ReadBytes(cursor, &id, sizeof(id)));
  return cursor;
}

size_t FuncTypeWithId::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return FuncType::sizeOfExcludingThis(mallocSizeOf);
}

// A simple notion of prefix: types and mutability must match exactly.

bool StructType::hasPrefix(const StructType& other) const {
  if (fields_.length() < other.fields_.length()) {
    return false;
  }
  uint32_t limit = other.fields_.length();
  for (uint32_t i = 0; i < limit; i++) {
    if (fields_[i].type != other.fields_[i].type ||
        fields_[i].isMutable != other.fields_[i].isMutable) {
      return false;
    }
  }
  return true;
}

size_t StructType::serializedSize() const {
  return SerializedPodVectorSize(fields_);
}

uint8_t* StructType::serialize(uint8_t* cursor) const {
  cursor = SerializePodVector(cursor, fields_);
  return cursor;
}

const uint8_t* StructType::deserialize(const uint8_t* cursor) {
  (cursor = DeserializePodVector(cursor, &fields_));
  return cursor;
}

size_t StructType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return fields_.sizeOfExcludingThis(mallocSizeOf);
}

size_t Import::serializedSize() const {
  return module.serializedSize() + field.serializedSize() + sizeof(kind);
}

uint8_t* Import::serialize(uint8_t* cursor) const {
  cursor = module.serialize(cursor);
  cursor = field.serialize(cursor);
  cursor = WriteScalar<DefinitionKind>(cursor, kind);
  return cursor;
}

const uint8_t* Import::deserialize(const uint8_t* cursor) {
  (cursor = module.deserialize(cursor)) &&
      (cursor = field.deserialize(cursor)) &&
      (cursor = ReadScalar<DefinitionKind>(cursor, &kind));
  return cursor;
}

size_t Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return module.sizeOfExcludingThis(mallocSizeOf) +
         field.sizeOfExcludingThis(mallocSizeOf);
}

Export::Export(UniqueChars fieldName, uint32_t index, DefinitionKind kind)
    : fieldName_(std::move(fieldName)) {
  pod.kind_ = kind;
  pod.index_ = index;
}

Export::Export(UniqueChars fieldName, DefinitionKind kind)
    : fieldName_(std::move(fieldName)) {
  pod.kind_ = kind;
  pod.index_ = 0;
}

uint32_t Export::funcIndex() const {
  MOZ_ASSERT(pod.kind_ == DefinitionKind::Function);
  return pod.index_;
}

uint32_t Export::globalIndex() const {
  MOZ_ASSERT(pod.kind_ == DefinitionKind::Global);
  return pod.index_;
}

uint32_t Export::tableIndex() const {
  MOZ_ASSERT(pod.kind_ == DefinitionKind::Table);
  return pod.index_;
}

size_t Export::serializedSize() const {
  return fieldName_.serializedSize() + sizeof(pod);
}

uint8_t* Export::serialize(uint8_t* cursor) const {
  cursor = fieldName_.serialize(cursor);
  cursor = WriteBytes(cursor, &pod, sizeof(pod));
  return cursor;
}

const uint8_t* Export::deserialize(const uint8_t* cursor) {
  (cursor = fieldName_.deserialize(cursor)) &&
      (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
  return cursor;
}

size_t Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return fieldName_.sizeOfExcludingThis(mallocSizeOf);
}

size_t ElemSegment::serializedSize() const {
  return sizeof(kind) + sizeof(tableIndex) + sizeof(elementType) +
         sizeof(offsetIfActive) + SerializedPodVectorSize(elemFuncIndices);
}

uint8_t* ElemSegment::serialize(uint8_t* cursor) const {
  cursor = WriteBytes(cursor, &kind, sizeof(kind));
  cursor = WriteBytes(cursor, &tableIndex, sizeof(tableIndex));
  cursor = WriteBytes(cursor, &elementType, sizeof(elementType));
  cursor = WriteBytes(cursor, &offsetIfActive, sizeof(offsetIfActive));
  cursor = SerializePodVector(cursor, elemFuncIndices);
  return cursor;
}

const uint8_t* ElemSegment::deserialize(const uint8_t* cursor) {
  (cursor = ReadBytes(cursor, &kind, sizeof(kind))) &&
      (cursor = ReadBytes(cursor, &tableIndex, sizeof(tableIndex))) &&
      (cursor = ReadBytes(cursor, &elementType, sizeof(elementType))) &&
      (cursor = ReadBytes(cursor, &offsetIfActive, sizeof(offsetIfActive))) &&
      (cursor = DeserializePodVector(cursor, &elemFuncIndices));
  return cursor;
}

size_t ElemSegment::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return elemFuncIndices.sizeOfExcludingThis(mallocSizeOf);
}

size_t DataSegment::serializedSize() const {
  return sizeof(offsetIfActive) + SerializedPodVectorSize(bytes);
}

uint8_t* DataSegment::serialize(uint8_t* cursor) const {
  cursor = WriteBytes(cursor, &offsetIfActive, sizeof(offsetIfActive));
  cursor = SerializePodVector(cursor, bytes);
  return cursor;
}

const uint8_t* DataSegment::deserialize(const uint8_t* cursor) {
  (cursor = ReadBytes(cursor, &offsetIfActive, sizeof(offsetIfActive))) &&
      (cursor = DeserializePodVector(cursor, &bytes));
  return cursor;
}

size_t DataSegment::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return bytes.sizeOfExcludingThis(mallocSizeOf);
}

size_t CustomSection::serializedSize() const {
  return SerializedPodVectorSize(name) +
         SerializedPodVectorSize(payload->bytes);
}

uint8_t* CustomSection::serialize(uint8_t* cursor) const {
  cursor = SerializePodVector(cursor, name);
  cursor = SerializePodVector(cursor, payload->bytes);
  return cursor;
}

const uint8_t* CustomSection::deserialize(const uint8_t* cursor) {
  cursor = DeserializePodVector(cursor, &name);
  if (!cursor) {
    return nullptr;
  }

  Bytes bytes;
  cursor = DeserializePodVector(cursor, &bytes);
  if (!cursor) {
    return nullptr;
  }
  payload = js_new<ShareableBytes>(std::move(bytes));
  if (!payload) {
    return nullptr;
  }

  return cursor;
}

size_t CustomSection::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return name.sizeOfExcludingThis(mallocSizeOf) + sizeof(*payload) +
         payload->sizeOfExcludingThis(mallocSizeOf);
}

// Heap length on ARM should fit in an ARM immediate. We approximate the set
// of valid ARM immediates with the predicate:
//   2^n for n in [16, 24)
// or
//   2^24 * n for n >= 1.
bool wasm::IsValidARMImmediate(uint32_t i) {
  bool valid = (IsPowerOfTwo(i) || (i & 0x00ffffff) == 0);

  MOZ_ASSERT_IF(valid, i % PageSize == 0);

  return valid;
}
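
// For instance (editor's illustration): 0x00010000 (2^16) and 0x03000000
// (3 * 2^24) satisfy the predicate above, while 0x00018000 (96 KiB) does
// not, since it is neither a power of two nor a multiple of 2^24.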

uint32_t wasm::RoundUpToNextValidARMImmediate(uint32_t i) {
  MOZ_ASSERT(i <= 0xff000000);

  if (i <= 16 * 1024 * 1024) {
    i = i ? mozilla::RoundUpPow2(i) : 0;
  } else {
    i = (i + 0x00ffffff) & ~0x00ffffff;
  }

  MOZ_ASSERT(IsValidARMImmediate(i));

  return i;
}
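
// E.g. (editor's illustration): 5 MiB rounds up to the power of two 8 MiB,
// while 17 MiB is above the 16 MiB threshold and rounds up to the next
// multiple of 2^24, i.e. 32 MiB.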

bool wasm::IsValidBoundsCheckImmediate(uint32_t i) {
#ifdef JS_CODEGEN_ARM
  return IsValidARMImmediate(i);
#else
  return true;
#endif
}

size_t wasm::ComputeMappedSize(uint32_t maxSize) {
  MOZ_ASSERT(maxSize % PageSize == 0);

  // It is the bounds-check limit, not the mapped size, that gets baked into
  // code. Thus round up the maxSize to the next valid immediate value
  // *before* adding in the guard page.

#ifdef JS_CODEGEN_ARM
  uint32_t boundsCheckLimit = RoundUpToNextValidARMImmediate(maxSize);
#else
  uint32_t boundsCheckLimit = maxSize;
#endif
  MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));

  MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
  MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
  return boundsCheckLimit + GuardSize;
}
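
// E.g. (editor's illustration, assuming the 64 KiB wasm page size): a
// 3-page maximum gives maxSize = 192 KiB; on ARM the bounds-check limit
// rounds up to 256 KiB and the mapped region is 256 KiB + GuardSize, while
// on other platforms it is simply 192 KiB + GuardSize.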

/* static */
DebugFrame* DebugFrame::from(Frame* fp) {
  MOZ_ASSERT(fp->tls->instance->code().metadata().debugEnabled);
  auto* df =
      reinterpret_cast<DebugFrame*>((uint8_t*)fp - DebugFrame::offsetOfFrame());
  MOZ_ASSERT(fp->instance() == df->instance());
  return df;
}

void DebugFrame::alignmentStaticAsserts() {
  // VS2017 doesn't consider offsetOfFrame() to be a constexpr, so we have
  // to use offsetof directly. These asserts can't be at class-level
  // because the type is incomplete.

  static_assert(WasmStackAlignment >= Alignment,
                "Aligned by ABI before pushing DebugFrame");
  static_assert((offsetof(DebugFrame, frame_) + sizeof(Frame)) % Alignment == 0,
                "Aligned after pushing DebugFrame");
#ifdef JS_CODEGEN_ARM64
  // This constraint may or may not be necessary. If you hit this because
  // you've changed the frame size then feel free to remove it, but be extra
  // aware of possible problems.
  static_assert(sizeof(DebugFrame) % 16 == 0, "ARM64 SP alignment");
#endif
}

GlobalObject* DebugFrame::global() const {
  return &instance()->object()->global();
}

bool DebugFrame::hasGlobal(const GlobalObject* global) const {
  return global == &instance()->objectUnbarriered()->global();
}

JSObject* DebugFrame::environmentChain() const {
  return &global()->lexicalEnvironment();
}

bool DebugFrame::getLocal(uint32_t localIndex, MutableHandleValue vp) {
  ValTypeVector locals;
  size_t argsLength;
  if (!instance()->debug().debugGetLocalTypes(funcIndex(), &locals,
                                              &argsLength)) {
    return false;
  }

  BaseLocalIter iter(locals, argsLength, /* debugEnabled = */ true);
  while (!iter.done() && iter.index() < localIndex) {
    iter++;
  }
  MOZ_ALWAYS_TRUE(!iter.done());

  uint8_t* frame = static_cast<uint8_t*>((void*)this) + offsetOfFrame();
  void* dataPtr = frame - iter.frameOffset();
  switch (iter.mirType()) {
    case jit::MIRType::Int32:
      vp.set(Int32Value(*static_cast<int32_t*>(dataPtr)));
      break;
    case jit::MIRType::Int64:
      // Just display as a Number; it's ok if we lose some precision
      vp.set(NumberValue((double)*static_cast<int64_t*>(dataPtr)));
      break;
    case jit::MIRType::Float32:
      vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<float*>(dataPtr))));
      break;
    case jit::MIRType::Double:
      vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
      break;
    case jit::MIRType::RefOrNull:
      vp.set(ObjectOrNullValue(*(JSObject**)dataPtr));
      break;
    default:
      MOZ_CRASH("local type");
  }
  return true;
}

bool DebugFrame::updateReturnJSValue() {
  hasCachedReturnJSValue_ = true;
  ValTypeVector results;
  if (!instance()->debug().debugGetResultTypes(funcIndex(), &results)) {
    return false;
  }
  if (results.length() == 0) {
    cachedReturnJSValue_.setUndefined();
    return true;
  }
  MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
  switch (results[0].code()) {
    case ValType::I32:
      cachedReturnJSValue_.setInt32(resultI32_);
      break;
    case ValType::I64:
      // Just display as a Number; it's ok if we lose some precision
      cachedReturnJSValue_.setDouble((double)resultI64_);
      break;
    case ValType::F32:
      cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF32_));
      break;
    case ValType::F64:
      cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF64_));
      break;
    case ValType::Ref:
      cachedReturnJSValue_ = ObjectOrNullValue((JSObject*)resultRef_);
      break;
    case ValType::FuncRef:
      cachedReturnJSValue_ =
          UnboxFuncRef(FuncRef::fromAnyRefUnchecked(resultAnyRef_));
      break;
    case ValType::AnyRef:
      cachedReturnJSValue_ = UnboxAnyRef(resultAnyRef_);
      break;
    default:
      MOZ_CRASH("result type");
  }
  return true;
}

HandleValue DebugFrame::returnValue() const {
  MOZ_ASSERT(hasCachedReturnJSValue_);
  return HandleValue::fromMarkedLocation(&cachedReturnJSValue_);
}

void DebugFrame::clearReturnJSValue() {
  hasCachedReturnJSValue_ = true;
  cachedReturnJSValue_.setUndefined();
}

void DebugFrame::observe(JSContext* cx) {
  if (!observing_) {
    instance()->debug().adjustEnterAndLeaveFrameTrapsState(
        cx, /* enabled = */ true);
    observing_ = true;
  }
}

void DebugFrame::leave(JSContext* cx) {
  if (observing_) {
    instance()->debug().adjustEnterAndLeaveFrameTrapsState(
        cx, /* enabled = */ false);
    observing_ = false;
  }
}

bool TrapSiteVectorArray::empty() const {
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    if (!(*this)[trap].empty()) {
      return false;
    }
  }

  return true;
}

void TrapSiteVectorArray::clear() {
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    (*this)[trap].clear();
  }
}

void TrapSiteVectorArray::swap(TrapSiteVectorArray& rhs) {
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    (*this)[trap].swap(rhs[trap]);
  }
}

void TrapSiteVectorArray::podResizeToFit() {
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    (*this)[trap].podResizeToFit();
  }
}

size_t TrapSiteVectorArray::serializedSize() const {
  size_t ret = 0;
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    ret += SerializedPodVectorSize((*this)[trap]);
  }
  return ret;
}

uint8_t* TrapSiteVectorArray::serialize(uint8_t* cursor) const {
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    cursor = SerializePodVector(cursor, (*this)[trap]);
  }
  return cursor;
}

const uint8_t* TrapSiteVectorArray::deserialize(const uint8_t* cursor) {
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    cursor = DeserializePodVector(cursor, &(*this)[trap]);
    if (!cursor) {
      return nullptr;
    }
  }
  return cursor;
}

size_t TrapSiteVectorArray::sizeOfExcludingThis(
    MallocSizeOf mallocSizeOf) const {
  size_t ret = 0;
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    ret += (*this)[trap].sizeOfExcludingThis(mallocSizeOf);
  }
  return ret;
}

CodeRange::CodeRange(Kind kind, Offsets offsets)
    : begin_(offsets.begin), ret_(0), end_(offsets.end), kind_(kind) {
  MOZ_ASSERT(begin_ <= end_);
  PodZero(&u);
#ifdef DEBUG
  switch (kind_) {
    case FarJumpIsland:
    case TrapExit:
    case Throw:
      break;
    default:
      MOZ_CRASH("should use more specific constructor");
  }
#endif
}

CodeRange::CodeRange(Kind kind, uint32_t funcIndex, Offsets offsets)
    : begin_(offsets.begin), ret_(0), end_(offsets.end), kind_(kind) {
  u.funcIndex_ = funcIndex;
  u.func.lineOrBytecode_ = 0;
  u.func.beginToNormalEntry_ = 0;
  u.func.beginToTierEntry_ = 0;
  MOZ_ASSERT(isEntry());
  MOZ_ASSERT(begin_ <= end_);
}

CodeRange::CodeRange(Kind kind, CallableOffsets offsets)
    : begin_(offsets.begin), ret_(offsets.ret), end_(offsets.end), kind_(kind) {
  MOZ_ASSERT(begin_ < ret_);
  MOZ_ASSERT(ret_ < end_);
  PodZero(&u);
#ifdef DEBUG
  switch (kind_) {
    case DebugTrap:
    case BuiltinThunk:
      break;
    default:
      MOZ_CRASH("should use more specific constructor");
  }
#endif
}

CodeRange::CodeRange(Kind kind, uint32_t funcIndex, CallableOffsets offsets)
    : begin_(offsets.begin), ret_(offsets.ret), end_(offsets.end), kind_(kind) {
  MOZ_ASSERT(isImportExit() && !isImportJitExit());
  MOZ_ASSERT(begin_ < ret_);
  MOZ_ASSERT(ret_ < end_);
  u.funcIndex_ = funcIndex;
  u.func.lineOrBytecode_ = 0;
  u.func.beginToNormalEntry_ = 0;
  u.func.beginToTierEntry_ = 0;
}

CodeRange::CodeRange(uint32_t funcIndex, JitExitOffsets offsets)
    : begin_(offsets.begin),
      ret_(offsets.ret),
      end_(offsets.end),
      kind_(ImportJitExit) {
  MOZ_ASSERT(isImportJitExit());
  MOZ_ASSERT(begin_ < ret_);
  MOZ_ASSERT(ret_ < end_);
  u.funcIndex_ = funcIndex;
  u.jitExit.beginToUntrustedFPStart_ = offsets.untrustedFPStart - begin_;
  u.jitExit.beginToUntrustedFPEnd_ = offsets.untrustedFPEnd - begin_;
  MOZ_ASSERT(jitExitUntrustedFPStart() == offsets.untrustedFPStart);
  MOZ_ASSERT(jitExitUntrustedFPEnd() == offsets.untrustedFPEnd);
}

CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode,
                     FuncOffsets offsets)
    : begin_(offsets.begin),
      ret_(offsets.ret),
      end_(offsets.end),
      kind_(Function) {
  MOZ_ASSERT(begin_ < ret_);
  MOZ_ASSERT(ret_ < end_);
  MOZ_ASSERT(offsets.normalEntry - begin_ <= UINT8_MAX);
  MOZ_ASSERT(offsets.tierEntry - begin_ <= UINT8_MAX);
  u.funcIndex_ = funcIndex;
  u.func.lineOrBytecode_ = funcLineOrBytecode;
  u.func.beginToNormalEntry_ = offsets.normalEntry - begin_;
  u.func.beginToTierEntry_ = offsets.tierEntry - begin_;
}

const CodeRange* wasm::LookupInSorted(const CodeRangeVector& codeRanges,
                                      CodeRange::OffsetInCode target) {
  size_t lowerBound = 0;
  size_t upperBound = codeRanges.length();

  size_t match;
  if (!BinarySearch(codeRanges, lowerBound, upperBound, target, &match)) {
    return nullptr;
  }

  return &codeRanges[match];
}

UniqueTlsData wasm::CreateTlsData(uint32_t globalDataLength) {
  void* allocatedBase = js_calloc(TlsDataAlign + offsetof(TlsData, globalArea) +
                                  globalDataLength);
  if (!allocatedBase) {
    return nullptr;
  }

  auto* tlsData = reinterpret_cast<TlsData*>(
      AlignBytes(uintptr_t(allocatedBase), TlsDataAlign));
  tlsData->allocatedBase = allocatedBase;

  return UniqueTlsData(tlsData);
}
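
// Editor's note: the allocation above is over-sized by TlsDataAlign bytes so
// that the TlsData* can be rounded up to a TlsDataAlign boundary, and the
// original pointer is stashed in allocatedBase so the block can be freed
// later.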

void TlsData::setInterrupt() {
  interrupt = true;
  stackLimit = UINTPTR_MAX;
}

bool TlsData::isInterrupted() const {
  return interrupt || stackLimit == UINTPTR_MAX;
}

void TlsData::resetInterrupt(JSContext* cx) {
  interrupt = false;
  stackLimit = cx->stackLimitForJitCode(JS::StackForUntrustedScript);
}

void wasm::Log(JSContext* cx, const char* fmt, ...) {
  MOZ_ASSERT(!cx->isExceptionPending());

  if (!cx->options().wasmVerbose()) {
    return;
  }

  va_list args;
  va_start(args, fmt);

  if (UniqueChars chars = JS_vsmprintf(fmt, args)) {
    JS_ReportErrorFlagsAndNumberASCII(cx, JSREPORT_WARNING, GetErrorMessage,
                                      nullptr, JSMSG_WASM_VERBOSE, chars.get());
    if (cx->isExceptionPending()) {
      cx->clearPendingException();
    }
  }

  va_end(args);
}

#ifdef WASM_CODEGEN_DEBUG
bool wasm::IsCodegenDebugEnabled(DebugChannel channel) {
  switch (channel) {
    case DebugChannel::Function:
      return JitOptions.enableWasmFuncCallSpew;
    case DebugChannel::Import:
      return JitOptions.enableWasmImportCallSpew;
  }
  return false;
}
#endif

void wasm::DebugCodegen(DebugChannel channel, const char* fmt, ...) {
#ifdef WASM_CODEGEN_DEBUG
  if (!IsCodegenDebugEnabled(channel)) {
    return;
  }
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
#endif
}