/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * JS Atomics pseudo-module.
 *
 * See "Spec: JavaScript Shared Memory, Atomics, and Locks" for the
 * full specification.
 *
 * In addition to what is specified there, we throw an Error object if
 * the futex API hooks have not been installed on the runtime.
 * Essentially that is an implementation error at a higher level.
 *
 * Note on the current implementation of atomic operations.
 *
 * The Mozilla atomics are not sufficient to implement these APIs
 * because we need to support 8-bit, 16-bit, and 32-bit data: the
 * Mozilla atomics only support 32-bit data.
 *
 * At the moment we include mozilla/Atomics.h, which will define
 * MOZ_HAVE_CXX11_ATOMICS and include <atomic> if we have C++11
 * atomics.
 *
 * If MOZ_HAVE_CXX11_ATOMICS is set, we'll use C++11 atomics.
 *
 * Otherwise, if the compiler has them, we'll fall back on gcc/Clang
 * intrinsics.
 *
 * Otherwise, if we're on VC++2012, we'll use C++11 atomics even if
 * MOZ_HAVE_CXX11_ATOMICS is not defined. The compiler has the
 * atomics but they are disabled in Mozilla due to a performance bug.
 * That performance bug does not affect the Atomics code. See
 * mozilla/Atomics.h for further comments on that bug.
 *
 * Otherwise, if we're on VC++2010 or VC++2008, we'll emulate the
 * gcc/Clang intrinsics with simple code below using the VC++
 * intrinsics; like the VC++2012 solution, this is a stopgap, since
 * we're about to start using VC++2013 anyway.
 *
 * If none of those options is available, the build must disable
 * shared memory, or compilation will fail with a predictable error.
 */
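/*
 * For orientation only -- this is an illustrative sketch, not the code path
 * actually used below (which goes through jit::AtomicOperations): a
 * sequentially consistent 32-bit compare-exchange maps onto the back ends
 * mentioned above roughly as follows.
 *
 *   C++11 <atomic>:
 *     std::atomic<int32_t>* p; int32_t expected = oldval;
 *     p->compare_exchange_strong(expected, newval);  // seq_cst by default
 *
 *   gcc/Clang intrinsics:
 *     __atomic_compare_exchange_n(addr, &expected, newval, false,
 *                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *
 *   VC++ intrinsics (32-bit case):
 *     _InterlockedCompareExchange((volatile long*)addr, newval, expected);
 */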
#include "builtin/AtomicsObject.h"

#include "mozilla/Atomics.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Unused.h"

#include "jsapi.h"
#include "jsfriendapi.h"
#include "jsnum.h"

#include "jit/AtomicOperations.h"
#include "jit/InlinableNatives.h"
#include "js/Class.h"
#include "js/PropertySpec.h"
#include "js/Result.h"
#include "vm/GlobalObject.h"
#include "vm/Time.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmInstance.h"

#include "vm/JSObject-inl.h"

using namespace js;

const JSClass AtomicsObject::class_ = {
    "Atomics", JSCLASS_HAS_CACHED_PROTO(JSProto_Atomics)};

static bool ReportBadArrayType(JSContext* cx) {
  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                            JSMSG_ATOMICS_BAD_ARRAY);
  return false;
}

static bool ReportOutOfRange(JSContext* cx) {
  // Use JSMSG_BAD_INDEX here; it is what ToIndex uses for some cases that it
  // reports directly.
  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
  return false;
}

static bool GetSharedTypedArray(JSContext* cx, HandleValue v, bool waitable,
                                MutableHandle<TypedArrayObject*> viewp) {
  if (!v.isObject()) {
    return ReportBadArrayType(cx);
  }
  if (!v.toObject().is<TypedArrayObject>()) {
    return ReportBadArrayType(cx);
  }
  viewp.set(&v.toObject().as<TypedArrayObject>());
  if (!viewp->isSharedMemory()) {
    return ReportBadArrayType(cx);
  }
  if (waitable) {
    switch (viewp->type()) {
      case Scalar::Int32:
      case Scalar::BigInt64:
        break;
      default:
        return ReportBadArrayType(cx);
    }
  } else {
    switch (viewp->type()) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        break;
      default:
        return ReportBadArrayType(cx);
    }
  }
  return true;
}

static bool GetTypedArrayIndex(JSContext* cx, HandleValue v,
                               Handle<TypedArrayObject*> view,
                               uint32_t* offset) {
  uint64_t index;
  if (!ToIndex(cx, v, &index)) {
    return false;
  }
  if (index >= view->length()) {
    return ReportOutOfRange(cx);
  }
  *offset = uint32_t(index);
  return true;
}

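// ArrayOps<T> is a small traits class used by the operations below.
// convertValue() coerces a JS value to the element type T (via ToInt32 /
// ToInteger for the small integer types and via ToBigInt for the 64-bit
// types), and storeResult() boxes a T back into a JS value (Int32, Number,
// or BigInt as appropriate).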
template <typename T>
struct ArrayOps {
  static JS::Result<T> convertValue(JSContext* cx, HandleValue v) {
    int32_t n;
    if (!ToInt32(cx, v, &n)) {
      return cx->alreadyReportedError();
    }
    return (T)n;
  }

  static JS::Result<T> convertValue(JSContext* cx, HandleValue v,
                                    MutableHandleValue result) {
    double d;
    if (!ToInteger(cx, v, &d)) {
      return cx->alreadyReportedError();
    }
    result.setNumber(d);
    return (T)JS::ToInt32(d);
  }

  static JS::Result<> storeResult(JSContext* cx, T v,
                                  MutableHandleValue result) {
    result.setInt32(v);
    return Ok();
  }
};

template <>
JS::Result<> ArrayOps<uint32_t>::storeResult(JSContext* cx, uint32_t v,
                                             MutableHandleValue result) {
  result.setNumber(v);
  return Ok();
}

template <>
struct ArrayOps<int64_t> {
  static JS::Result<int64_t> convertValue(JSContext* cx, HandleValue v) {
    BigInt* bi = ToBigInt(cx, v);
    if (!bi) {
      return cx->alreadyReportedError();
    }
    return BigInt::toInt64(bi);
  }

  static JS::Result<int64_t> convertValue(JSContext* cx, HandleValue v,
                                          MutableHandleValue result) {
    BigInt* bi = ToBigInt(cx, v);
    if (!bi) {
      return cx->alreadyReportedError();
    }
    result.setBigInt(bi);
    return BigInt::toInt64(bi);
  }

  static JS::Result<> storeResult(JSContext* cx, int64_t v,
                                  MutableHandleValue result) {
    BigInt* bi = BigInt::createFromInt64(cx, v);
    if (!bi) {
      return cx->alreadyReportedError();
    }
    result.setBigInt(bi);
    return Ok();
  }
};

template <>
struct ArrayOps<uint64_t> {
  static JS::Result<uint64_t> convertValue(JSContext* cx, HandleValue v) {
    BigInt* bi = ToBigInt(cx, v);
    if (!bi) {
      return cx->alreadyReportedError();
    }
    return BigInt::toUint64(bi);
  }

  static JS::Result<uint64_t> convertValue(JSContext* cx, HandleValue v,
                                           MutableHandleValue result) {
    BigInt* bi = ToBigInt(cx, v);
    if (!bi) {
      return cx->alreadyReportedError();
    }
    result.setBigInt(bi);
    return BigInt::toUint64(bi);
  }

  static JS::Result<> storeResult(JSContext* cx, uint64_t v,
                                  MutableHandleValue result) {
    BigInt* bi = BigInt::createFromUint64(cx, v);
    if (!bi) {
      return cx->alreadyReportedError();
    }
    result.setBigInt(bi);
    return Ok();
  }
};

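// perform() validates the shared typed array and the index, then
// instantiates the functor F for the array's element type and runs it on
// the addressed element. compareExchange, load, store, exchange, and the
// binary operations below all funnel through it.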
template <template <typename> class F, typename... Args>
bool perform(JSContext* cx, HandleValue objv, HandleValue idxv, Args... args) {
  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, objv, false, &view)) {
    return false;
  }
  uint32_t offset;
  if (!GetTypedArrayIndex(cx, idxv, view, &offset)) {
    return false;
  }
  SharedMem<void*> viewData = view->dataPointerShared();
  switch (view->type()) {
    case Scalar::Int8:
      return F<int8_t>::run(cx, viewData.cast<int8_t*>() + offset, args...);
    case Scalar::Uint8:
      return F<uint8_t>::run(cx, viewData.cast<uint8_t*>() + offset, args...);
    case Scalar::Int16:
      return F<int16_t>::run(cx, viewData.cast<int16_t*>() + offset, args...);
    case Scalar::Uint16:
      return F<uint16_t>::run(cx, viewData.cast<uint16_t*>() + offset, args...);
    case Scalar::Int32:
      return F<int32_t>::run(cx, viewData.cast<int32_t*>() + offset, args...);
    case Scalar::Uint32:
      return F<uint32_t>::run(cx, viewData.cast<uint32_t*>() + offset, args...);
    case Scalar::BigInt64:
      return F<int64_t>::run(cx, viewData.cast<int64_t*>() + offset, args...);
    case Scalar::BigUint64:
      return F<uint64_t>::run(cx, viewData.cast<uint64_t*>() + offset, args...);
    case Scalar::Float32:
    case Scalar::Float64:
    case Scalar::Uint8Clamped:
    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
      break;
  }
  MOZ_CRASH("Unsupported TypedArray type");
}

template <typename T>
struct DoCompareExchange {
  static bool run(JSContext* cx, SharedMem<T*> addr, HandleValue oldv,
                  HandleValue newv, MutableHandleValue result) {
    using Ops = ArrayOps<T>;
    T oldval;
    JS_TRY_VAR_OR_RETURN_FALSE(cx, oldval, Ops::convertValue(cx, oldv));
    T newval;
    JS_TRY_VAR_OR_RETURN_FALSE(cx, newval, Ops::convertValue(cx, newv));

    oldval = jit::AtomicOperations::compareExchangeSeqCst(addr, oldval, newval);

    JS_TRY_OR_RETURN_FALSE(cx, Ops::storeResult(cx, oldval, result));
    return true;
  }
};

bool js::atomics_compareExchange(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return perform<DoCompareExchange>(cx, args.get(0), args.get(1), args.get(2),
                                    args.get(3), args.rval());
}

template <typename T>
struct DoLoad {
  static bool run(JSContext* cx, SharedMem<T*> addr,
                  MutableHandleValue result) {
    using Ops = ArrayOps<T>;
    T v = jit::AtomicOperations::loadSeqCst(addr);
    JS_TRY_OR_RETURN_FALSE(cx, Ops::storeResult(cx, v, result));
    return true;
  }
};

bool js::atomics_load(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return perform<DoLoad>(cx, args.get(0), args.get(1), args.rval());
}

template <typename T>
struct DoExchange {
  static bool run(JSContext* cx, SharedMem<T*> addr, HandleValue valv,
                  MutableHandleValue result) {
    using Ops = ArrayOps<T>;
    T value;
    JS_TRY_VAR_OR_RETURN_FALSE(cx, value, Ops::convertValue(cx, valv));
    value = jit::AtomicOperations::exchangeSeqCst(addr, value);
    JS_TRY_OR_RETURN_FALSE(cx, Ops::storeResult(cx, value, result));
    return true;
  }
};

template <typename T>
struct DoStore {
  static bool run(JSContext* cx, SharedMem<T*> addr, HandleValue valv,
                  MutableHandleValue result) {
    using Ops = ArrayOps<T>;
    T value;
    JS_TRY_VAR_OR_RETURN_FALSE(cx, value, Ops::convertValue(cx, valv, result));
    jit::AtomicOperations::storeSeqCst(addr, value);
    return true;
  }
};

bool js::atomics_store(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return perform<DoStore>(cx, args.get(0), args.get(1), args.get(2),
                          args.rval());
}

bool js::atomics_exchange(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return perform<DoExchange>(cx, args.get(0), args.get(1), args.get(2),
                             args.rval());
}

template <typename Operate>
struct DoBinopWithOperation {
  template <typename T>
  struct DoBinop {
    static bool run(JSContext* cx, SharedMem<T*> addr, HandleValue valv,
                    MutableHandleValue result) {
      using Ops = ArrayOps<T>;
      T v;
      JS_TRY_VAR_OR_RETURN_FALSE(cx, v, Ops::convertValue(cx, valv));
      v = Operate::operate(addr, v);
      JS_TRY_OR_RETURN_FALSE(cx, Ops::storeResult(cx, v, result));
      return true;
    }
  };
};

template <typename Operate>
static bool AtomicsBinop(JSContext* cx, HandleValue objv, HandleValue idxv,
                         HandleValue valv, MutableHandleValue r) {
  return perform<DoBinopWithOperation<Operate>::template DoBinop>(
      cx, objv, idxv, valv, r);
}

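// INTEGRAL_TYPES_FOR_EACH(NAME) expands to eight 'operate' overloads, one
// per supported integer element type, each forwarding to NAME. For example,
// PerformAdd below uses it to forward every overload to
// jit::AtomicOperations::fetchAddSeqCst.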
#define INTEGRAL_TYPES_FOR_EACH(NAME)                                \
  static int8_t operate(SharedMem<int8_t*> addr, int8_t v) {         \
    return NAME(addr, v);                                            \
  }                                                                  \
  static uint8_t operate(SharedMem<uint8_t*> addr, uint8_t v) {      \
    return NAME(addr, v);                                            \
  }                                                                  \
  static int16_t operate(SharedMem<int16_t*> addr, int16_t v) {      \
    return NAME(addr, v);                                            \
  }                                                                  \
  static uint16_t operate(SharedMem<uint16_t*> addr, uint16_t v) {   \
    return NAME(addr, v);                                            \
  }                                                                  \
  static int32_t operate(SharedMem<int32_t*> addr, int32_t v) {      \
    return NAME(addr, v);                                            \
  }                                                                  \
  static uint32_t operate(SharedMem<uint32_t*> addr, uint32_t v) {   \
    return NAME(addr, v);                                            \
  }                                                                  \
  static int64_t operate(SharedMem<int64_t*> addr, int64_t v) {      \
    return NAME(addr, v);                                            \
  }                                                                  \
  static uint64_t operate(SharedMem<uint64_t*> addr, uint64_t v) {   \
    return NAME(addr, v);                                            \
  }

class PerformAdd {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAddSeqCst)
};

bool js::atomics_add(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return AtomicsBinop<PerformAdd>(cx, args.get(0), args.get(1), args.get(2),
                                  args.rval());
}

class PerformSub {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchSubSeqCst)
};

bool js::atomics_sub(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return AtomicsBinop<PerformSub>(cx, args.get(0), args.get(1), args.get(2),
                                  args.rval());
}

class PerformAnd {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAndSeqCst)
};

bool js::atomics_and(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return AtomicsBinop<PerformAnd>(cx, args.get(0), args.get(1), args.get(2),
                                  args.rval());
}

class PerformOr {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchOrSeqCst)
};

bool js::atomics_or(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return AtomicsBinop<PerformOr>(cx, args.get(0), args.get(1), args.get(2),
                                 args.rval());
}

class PerformXor {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchXorSeqCst)
};

bool js::atomics_xor(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return AtomicsBinop<PerformXor>(cx, args.get(0), args.get(1), args.get(2),
                                  args.rval());
}

bool js::atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue v = args.get(0);
  int32_t size;
  if (v.isInt32()) {
    size = v.toInt32();
  } else {
    double dsize;
    if (!ToInteger(cx, v, &dsize)) {
      return false;
    }
    if (!mozilla::NumberIsInt32(dsize, &size)) {
      args.rval().setBoolean(false);
      return true;
    }
  }
  args.rval().setBoolean(jit::AtomicOperations::isLockfreeJS(size));
  return true;
}

namespace js {

// Represents one waiting worker.
//
// The type is declared opaque in SharedArrayObject.h. Instances of
// js::FutexWaiter are stack-allocated and linked onto a list across a
// call to FutexThread::wait().
//
// The 'waiters' field of the SharedArrayRawBuffer points to the highest
// priority waiter in the list, and lower priority nodes are linked through
// the 'lower_pri' field. The 'back' field goes the other direction.
// The list is circular, so the 'lower_pri' field of the lowest priority
// node points to the first node in the list. The list has no dedicated
// header node.
// header node.
491
492
class FutexWaiter {
493
public:
494
FutexWaiter(uint32_t offset, JSContext* cx)
495
: offset(offset), cx(cx), lower_pri(nullptr), back(nullptr) {}
496
497
uint32_t offset; // int32 element index within the SharedArrayBuffer
498
JSContext* cx; // The waiting thread
499
FutexWaiter* lower_pri; // Lower priority nodes in circular doubly-linked
500
// list of waiters
501
FutexWaiter* back; // Other direction
502
};
503
504
class AutoLockFutexAPI {
505
// We have to wrap this in a Maybe because of the way loading
506
// mozilla::Atomic pointers works.
507
mozilla::Maybe<js::UniqueLock<js::Mutex>> unique_;
508
509
public:
510
AutoLockFutexAPI() {
511
js::Mutex* lock = FutexThread::lock_;
512
unique_.emplace(*lock);
513
}
514
515
~AutoLockFutexAPI() { unique_.reset(); }
516
517
js::UniqueLock<js::Mutex>& unique() { return *unique_; }
518
};
519
520
} // namespace js
521
522
template <typename T>
523
static FutexThread::WaitResult AtomicsWait(
524
JSContext* cx, SharedArrayRawBuffer* sarb, uint32_t byteOffset, T value,
525
const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
526
// Validation and other guards should ensure that this does not happen.
527
MOZ_ASSERT(sarb, "wait is only applicable to shared memory");
528
529
if (!cx->fx.canWait()) {
530
JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
531
JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
532
return FutexThread::WaitResult::Error;
533
}
534
535
SharedMem<T*> addr =
536
sarb->dataPointerShared().cast<T*>() + (byteOffset / sizeof(T));
537
538
// This lock also protects the "waiters" field on SharedArrayRawBuffer,
539
// and it provides the necessary memory fence.
540
AutoLockFutexAPI lock;
541
542
if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
543
return FutexThread::WaitResult::NotEqual;
544
}
545
546
FutexWaiter w(byteOffset, cx);
547
if (FutexWaiter* waiters = sarb->waiters()) {
548
w.lower_pri = waiters;
549
w.back = waiters->back;
550
waiters->back->lower_pri = &w;
551
waiters->back = &w;
552
} else {
553
w.lower_pri = w.back = &w;
554
sarb->setWaiters(&w);
555
}
556
557
FutexThread::WaitResult retval = cx->fx.wait(cx, lock.unique(), timeout);
558
559
if (w.lower_pri == &w) {
560
sarb->setWaiters(nullptr);
561
} else {
562
w.lower_pri->back = w.back;
563
w.back->lower_pri = w.lower_pri;
564
if (sarb->waiters() == &w) {
565
sarb->setWaiters(w.lower_pri);
566
}
567
}
568
569
return retval;
570
}

FutexThread::WaitResult js::atomics_wait_impl(
    JSContext* cx, SharedArrayRawBuffer* sarb, uint32_t byteOffset,
    int32_t value, const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  return AtomicsWait(cx, sarb, byteOffset, value, timeout);
}

FutexThread::WaitResult js::atomics_wait_impl(
    JSContext* cx, SharedArrayRawBuffer* sarb, uint32_t byteOffset,
    int64_t value, const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  return AtomicsWait(cx, sarb, byteOffset, value, timeout);
}

template <typename T>
static bool DoAtomicsWait(JSContext* cx, Handle<TypedArrayObject*> view,
                          uint32_t offset, T value, HandleValue timeoutv,
                          MutableHandleValue r) {
  mozilla::Maybe<mozilla::TimeDuration> timeout;
  if (!timeoutv.isUndefined()) {
    double timeout_ms;
    if (!ToNumber(cx, timeoutv, &timeout_ms)) {
      return false;
    }
    if (!mozilla::IsNaN(timeout_ms)) {
      if (timeout_ms < 0) {
        timeout = mozilla::Some(mozilla::TimeDuration::FromSeconds(0.0));
      } else if (!mozilla::IsInfinite(timeout_ms)) {
        timeout =
            mozilla::Some(mozilla::TimeDuration::FromMilliseconds(timeout_ms));
      }
    }
  }

  Rooted<SharedArrayBufferObject*> sab(cx, view->bufferShared());
  // The computation will not overflow because range checks have been
  // performed.
  uint32_t byteOffset =
      offset * sizeof(T) +
      (view->dataPointerShared().cast<uint8_t*>().unwrap(/* arithmetic */) -
       sab->dataPointerShared().unwrap(/* arithmetic */));

  switch (atomics_wait_impl(cx, sab->rawBufferObject(), byteOffset, value,
                            timeout)) {
    case FutexThread::WaitResult::NotEqual:
      r.setString(cx->names().futexNotEqual);
      return true;
    case FutexThread::WaitResult::OK:
      r.setString(cx->names().futexOK);
      return true;
    case FutexThread::WaitResult::TimedOut:
      r.setString(cx->names().futexTimedOut);
      return true;
    case FutexThread::WaitResult::Error:
      return false;
    default:
      MOZ_CRASH("Should not happen");
  }
}

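// Implements the JS-visible Atomics.wait(typedArray, index, value, timeout).
// The typed array must be an Int32Array or BigInt64Array on shared memory;
// the result is one of the strings "ok", "not-equal", or "timed-out"
// (cx->names().futexOK / futexNotEqual / futexTimedOut above). For example
// (illustrative JS, run in a worker since the main thread typically cannot
// block):
//
//   const ia = new Int32Array(new SharedArrayBuffer(4));
//   Atomics.wait(ia, 0, 0, 100);  // "timed-out" after ~100ms unless notified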
bool js::atomics_wait(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue objv = args.get(0);
  HandleValue idxv = args.get(1);
  HandleValue valv = args.get(2);
  HandleValue timeoutv = args.get(3);
  MutableHandleValue r = args.rval();

  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, objv, true, &view)) {
    return false;
  }
  MOZ_ASSERT(view->type() == Scalar::Int32 || view->type() == Scalar::BigInt64);

  uint32_t offset;
  if (!GetTypedArrayIndex(cx, idxv, view, &offset)) {
    return false;
  }

  if (view->type() == Scalar::Int32) {
    int32_t value;
    if (!ToInt32(cx, valv, &value)) {
      return false;
    }
    return DoAtomicsWait(cx, view, offset, value, timeoutv, r);
  }

  MOZ_ASSERT(view->type() == Scalar::BigInt64);
  RootedBigInt valbi(cx, ToBigInt(cx, valv));
  if (!valbi) {
    return false;
  }
  return DoAtomicsWait(cx, view, offset, BigInt::toInt64(valbi), timeoutv, r);
}

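// Back end of Atomics.notify (and its legacy alias Atomics.wake). A negative
// 'count' means "wake all waiters at this location"; the return value is the
// number of agents actually woken.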
int64_t js::atomics_notify_impl(SharedArrayRawBuffer* sarb, uint32_t byteOffset,
                                int64_t count) {
  // Validation should ensure this does not happen.
  MOZ_ASSERT(sarb, "notify is only applicable to shared memory");

  AutoLockFutexAPI lock;

  int64_t woken = 0;

  FutexWaiter* waiters = sarb->waiters();
  if (waiters && count) {
    FutexWaiter* iter = waiters;
    do {
      FutexWaiter* c = iter;
      iter = iter->lower_pri;
      if (c->offset != byteOffset || !c->cx->fx.isWaiting()) {
        continue;
      }
      c->cx->fx.notify(FutexThread::NotifyExplicit);
      // Overflow will be a problem only in two cases:
      // (1) 128-bit systems with substantially more than 2^64 bytes of
      //     memory per process, and a very lightweight
      //     Atomics.waitAsync(). Obviously a future problem.
      // (2) Bugs.
      MOZ_RELEASE_ASSERT(woken < INT64_MAX);
      ++woken;
      if (count > 0) {
        --count;
      }
    } while (count && iter != waiters);
  }

  return woken;
}

bool js::atomics_notify(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue objv = args.get(0);
  HandleValue idxv = args.get(1);
  HandleValue countv = args.get(2);
  MutableHandleValue r = args.rval();

  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, objv, true, &view)) {
    return false;
  }
  MOZ_ASSERT(view->type() == Scalar::Int32 || view->type() == Scalar::BigInt64);
  uint32_t elementSize =
      view->type() == Scalar::Int32 ? sizeof(int32_t) : sizeof(int64_t);
  uint32_t offset;
  if (!GetTypedArrayIndex(cx, idxv, view, &offset)) {
    return false;
  }
  int64_t count;
  if (countv.isUndefined()) {
    count = -1;
  } else {
    double dcount;
    if (!ToInteger(cx, countv, &dcount)) {
      return false;
    }
    if (dcount < 0.0) {
      dcount = 0.0;
    }
    count = dcount < double(1ULL << 63) ? int64_t(dcount) : -1;
  }

  Rooted<SharedArrayBufferObject*> sab(cx, view->bufferShared());
  // The computation will not overflow because range checks have been
  // performed.
  uint32_t byteOffset =
      offset * elementSize +
      (view->dataPointerShared().cast<uint8_t*>().unwrap(/* arithmetic */) -
       sab->dataPointerShared().unwrap(/* arithmetic */));

  r.setNumber(
      double(atomics_notify_impl(sab->rawBufferObject(), byteOffset, count)));

  return true;
}

/* static */
bool js::FutexThread::initialize() {
  MOZ_ASSERT(!lock_);
  lock_ = js_new<js::Mutex>(mutexid::FutexThread);
  return lock_ != nullptr;
}

/* static */
void js::FutexThread::destroy() {
  if (lock_) {
    js::Mutex* lock = lock_;
    js_delete(lock);
    lock_ = nullptr;
  }
}

/* static */
void js::FutexThread::lock() {
  // Load the atomic pointer.
  js::Mutex* lock = lock_;

  lock->lock();
}

/* static */ mozilla::Atomic<js::Mutex*, mozilla::SequentiallyConsistent,
                             mozilla::recordreplay::Behavior::DontPreserve>
    FutexThread::lock_;

/* static */
void js::FutexThread::unlock() {
  // Load the atomic pointer.
  js::Mutex* lock = lock_;

  lock->unlock();
}

js::FutexThread::FutexThread()
    : cond_(nullptr), state_(Idle), canWait_(false) {}

bool js::FutexThread::initInstance() {
  MOZ_ASSERT(lock_);
  cond_ = js_new<js::ConditionVariable>();
  return cond_ != nullptr;
}

void js::FutexThread::destroyInstance() {
  if (cond_) {
    js_delete(cond_);
  }
}

bool js::FutexThread::isWaiting() {
  // When a worker is awoken for an interrupt, it goes into state
  // WaitingNotifiedForInterrupt for a short time before it actually
  // wakes up and goes into WaitingInterrupted. In those states the
  // worker is still waiting, and if an explicit notify arrives the
  // worker transitions to Woken. See further comments in
  // FutexThread::wait().
  return state_ == Waiting || state_ == WaitingInterrupted ||
         state_ == WaitingNotifiedForInterrupt;
}

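// wait() below blocks the calling thread on cond_ until it is notified,
// interrupted, or times out. A rough sketch of the state transitions as they
// appear in this file (Idle and the Waiting* states are FutexState values on
// this FutexThread):
//
//   Idle -> Waiting                           on entry to the wait loop
//   Waiting -> Woken                          notify(NotifyExplicit)
//   Waiting -> WaitingNotifiedForInterrupt    notify(NotifyForJSInterrupt)
//   WaitingNotifiedForInterrupt -> WaitingInterrupted
//                                             while the interrupt handler runs
//   any -> Idle                               when wait() returns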
FutexThread::WaitResult js::FutexThread::wait(
    JSContext* cx, js::UniqueLock<js::Mutex>& locked,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  MOZ_ASSERT(&cx->fx == this);
  MOZ_ASSERT(cx->fx.canWait());
  MOZ_ASSERT(state_ == Idle || state_ == WaitingInterrupted);

  // Disallow waiting when a runtime is processing an interrupt.
  // See explanation below.

  if (state_ == WaitingInterrupted) {
    UnlockGuard<Mutex> unlock(locked);
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
    return WaitResult::Error;
  }

  // Go back to Idle after returning.
  auto onFinish = mozilla::MakeScopeExit([&] { state_ = Idle; });

  const bool isTimed = timeout.isSome();

  auto finalEnd = timeout.map([](const mozilla::TimeDuration& timeout) {
    return mozilla::TimeStamp::Now() + timeout;
  });

  // 4000s is about the longest timeout slice that is guaranteed to
  // work cross-platform.
  auto maxSlice = mozilla::TimeDuration::FromSeconds(4000.0);

  for (;;) {
    // If we are doing a timed wait, calculate the end time for this wait
    // slice.
    auto sliceEnd = finalEnd.map([&](mozilla::TimeStamp& finalEnd) {
      auto sliceEnd = mozilla::TimeStamp::Now() + maxSlice;
      if (finalEnd < sliceEnd) {
        sliceEnd = finalEnd;
      }
      return sliceEnd;
    });

    state_ = Waiting;

    if (isTimed) {
      mozilla::Unused << cond_->wait_until(locked, *sliceEnd);
    } else {
      cond_->wait(locked);
    }

    switch (state_) {
      case FutexThread::Waiting:
        // Timeout or spurious wakeup.
        if (isTimed) {
          auto now = mozilla::TimeStamp::Now();
          if (now >= *finalEnd) {
            return WaitResult::TimedOut;
          }
        }
        break;

      case FutexThread::Woken:
        return WaitResult::OK;

      case FutexThread::WaitingNotifiedForInterrupt:
        // The interrupt handler may reenter the engine. In that case
        // there are two complications:
        //
        // - The waiting thread is not actually waiting on the
        //   condition variable so we have to record that it
        //   should be woken when the interrupt handler returns.
        //   To that end, we flag the thread as interrupted around
        //   the interrupt and check state_ when the interrupt
        //   handler returns. A notify() call that reaches the
        //   runtime during the interrupt sets state_ to Woken.
        //
        // - It is in principle possible for wait() to be
        //   reentered on the same thread/runtime and waiting on the
        //   same location and to yet again be interrupted and enter
        //   the interrupt handler. In this case, it is important
        //   that when another agent notifies waiters, all waiters using
        //   the same runtime on the same location are woken in LIFO
        //   order; FIFO may be the required order, but FIFO would
        //   fail to wake up the innermost call. Interrupts are
        //   outside any spec anyway. Also, several such suspended
        //   waiters may be woken at a time.
        //
        // For the time being we disallow waiting from within code
        // that runs from within an interrupt handler; this may
        // occasionally (very rarely) be surprising but is
        // expedient. Other solutions exist; see bug #1131943. The
        // code that performs the check is above, at the head of
        // this function.

        state_ = WaitingInterrupted;
        {
          UnlockGuard<Mutex> unlock(locked);
          if (!cx->handleInterrupt()) {
            return WaitResult::Error;
          }
        }
        if (state_ == Woken) {
          return WaitResult::OK;
        }
        break;

      default:
        MOZ_CRASH("Bad FutexState in wait()");
    }
  }
}

void js::FutexThread::notify(NotifyReason reason) {
  MOZ_ASSERT(isWaiting());

  if ((state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt) &&
      reason == NotifyExplicit) {
    state_ = Woken;
    return;
  }
  switch (reason) {
    case NotifyExplicit:
      state_ = Woken;
      break;
    case NotifyForJSInterrupt:
      if (state_ == WaitingNotifiedForInterrupt) {
        return;
      }
      state_ = WaitingNotifiedForInterrupt;
      break;
    default:
      MOZ_CRASH("bad NotifyReason in FutexThread::notify()");
  }
  cond_->notify_all();
}

const JSFunctionSpec AtomicsMethods[] = {
    JS_INLINABLE_FN("compareExchange", atomics_compareExchange, 4, 0,
                    AtomicsCompareExchange),
    JS_INLINABLE_FN("load", atomics_load, 2, 0, AtomicsLoad),
    JS_INLINABLE_FN("store", atomics_store, 3, 0, AtomicsStore),
    JS_INLINABLE_FN("exchange", atomics_exchange, 3, 0, AtomicsExchange),
    JS_INLINABLE_FN("add", atomics_add, 3, 0, AtomicsAdd),
    JS_INLINABLE_FN("sub", atomics_sub, 3, 0, AtomicsSub),
    JS_INLINABLE_FN("and", atomics_and, 3, 0, AtomicsAnd),
    JS_INLINABLE_FN("or", atomics_or, 3, 0, AtomicsOr),
    JS_INLINABLE_FN("xor", atomics_xor, 3, 0, AtomicsXor),
    JS_INLINABLE_FN("isLockFree", atomics_isLockFree, 1, 0, AtomicsIsLockFree),
    JS_FN("wait", atomics_wait, 4, 0),
    JS_FN("notify", atomics_notify, 3, 0),
    JS_FN("wake", atomics_notify, 3, 0),  // Legacy name
    JS_FS_END};

JSObject* AtomicsObject::initClass(JSContext* cx,
                                   Handle<GlobalObject*> global) {
  // Create the Atomics object.
  RootedObject objProto(cx,
                        GlobalObject::getOrCreateObjectPrototype(cx, global));
  if (!objProto) {
    return nullptr;
  }
  RootedObject Atomics(cx, NewObjectWithGivenProto(cx, &AtomicsObject::class_,
                                                   objProto, SingletonObject));
  if (!Atomics) {
    return nullptr;
  }

  if (!JS_DefineFunctions(cx, Atomics, AtomicsMethods)) {
    return nullptr;
  }
  if (!DefineToStringTag(cx, Atomics, cx->names().Atomics)) {
    return nullptr;
  }

  RootedValue AtomicsValue(cx, ObjectValue(*Atomics));

  // Everything is set up; install Atomics on the global object.
  if (!DefineDataProperty(cx, global, cx->names().Atomics, AtomicsValue,
                          JSPROP_RESOLVING)) {
    return nullptr;
  }

  global->setConstructor(JSProto_Atomics, AtomicsValue);
  return Atomics;
}

JSObject* js::InitAtomicsClass(JSContext* cx, Handle<GlobalObject*> global) {
  return AtomicsObject::initClass(cx, global);
}

#undef CXX11_ATOMICS
#undef GNU_ATOMICS