/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "vm/HelperThreads.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Unused.h"
#include "mozilla/Utf8.h" // mozilla::Utf8Unit
#include <algorithm>
#include "frontend/BytecodeCompilation.h"
#include "frontend/CompilationStencil.h" // frontend::{CompilationStencil, ExtensibleCompilationStencil, CompilationInput, CompilationGCOutput, BorrowingCompilationStencil}
#include "frontend/ParserAtom.h" // frontend::ParserAtomsTable
#include "gc/GC.h" // gc::MergeRealms
#include "jit/IonCompileTask.h"
#include "jit/JitRuntime.h"
#include "js/ContextOptions.h" // JS::ContextOptions
#include "js/friend/StackLimits.h" // js::ReportOverRecursed
#include "js/OffThreadScriptCompilation.h" // JS::OffThreadToken, JS::OffThreadCompileCallback
#include "js/SourceText.h"
#include "js/UniquePtr.h"
#include "js/Utility.h"
#include "threading/CpuCount.h"
#include "util/NativeStack.h"
#include "vm/ErrorReporting.h"
#include "vm/HelperThreadState.h"
#include "vm/MutexIDs.h"
#include "vm/SharedImmutableStringsCache.h"
#include "vm/Time.h"
#include "vm/TraceLogging.h"
#include "vm/Xdr.h"
#include "wasm/WasmGenerator.h"
#include "debugger/DebugAPI-inl.h"
#include "gc/ArenaList-inl.h"
#include "vm/JSContext-inl.h"
#include "vm/JSObject-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/NativeObject-inl.h"
#include "vm/Realm-inl.h"
using namespace js;
using mozilla::Maybe;
using mozilla::TimeDuration;
using mozilla::TimeStamp;
using mozilla::Unused;
using mozilla::Utf8Unit;
using JS::CompileOptions;
using JS::ReadOnlyCompileOptions;
namespace js {
Mutex gHelperThreadLock(mutexid::GlobalHelperThreadState);
GlobalHelperThreadState* gHelperThreadState = nullptr;
} // namespace js
// These macros are identical in function to the same-named ones in
// GeckoProfiler.h, but they are defined separately because SpiderMonkey can't
// use GeckoProfiler.h.
#define PROFILER_RAII_PASTE(id, line) id##line
#define PROFILER_RAII_EXPAND(id, line) PROFILER_RAII_PASTE(id, line)
#define PROFILER_RAII PROFILER_RAII_EXPAND(raiiObject, __LINE__)
#define AUTO_PROFILER_LABEL(label, categoryPair) \
HelperThread::AutoProfilerLabel PROFILER_RAII( \
this, label, JS::ProfilingCategoryPair::categoryPair)
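// Create the process-wide helper thread state and pre-allocate a helper
// thread JSContext for each eventual thread. Called before any helper
// threads exist.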
bool js::CreateHelperThreadsState() {
MOZ_ASSERT(!gHelperThreadState);
UniquePtr<GlobalHelperThreadState> helperThreadState =
MakeUnique<GlobalHelperThreadState>();
if (!helperThreadState) {
return false;
}
gHelperThreadState = helperThreadState.release();
if (!gHelperThreadState->ensureContextList(gHelperThreadState->threadCount)) {
js_delete(gHelperThreadState);
gHelperThreadState = nullptr;
return false;
}
return true;
}
void js::DestroyHelperThreadsState() {
if (!gHelperThreadState) {
return;
}
gHelperThreadState->finish();
js_delete(gHelperThreadState);
gHelperThreadState = nullptr;
}
bool js::EnsureHelperThreadsInitialized() {
MOZ_ASSERT(gHelperThreadState);
return gHelperThreadState->ensureInitialized();
}
static size_t ClampDefaultCPUCount(size_t cpuCount) {
// It's extremely rare for SpiderMonkey to have more than a few cores' worth
// of work. At higher core counts, performance can even decrease due to NUMA
// (and SpiderMonkey's lack of NUMA-awareness), contention, and general lack
// of optimization for high core counts. So to avoid wasting thread stack
// resources (and cluttering gdb and core dumps), clamp to 8 cores for now.
return std::min<size_t>(cpuCount, 8);
}
static size_t ThreadCountForCPUCount(size_t cpuCount) {
// We need at least two threads for tier-2 wasm compilations, because
// there's a master task that holds a thread while other threads do the
// compilation.
return std::max<size_t>(cpuCount, 2);
}
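// Override the detected CPU count, and the thread count derived from it, with
// a fake value, e.g. for testing.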
bool js::SetFakeCPUCount(size_t count) {
// This must be called before the threads have been initialized.
AutoLockHelperThreadState lock;
MOZ_ASSERT(HelperThreadState().threads(lock).empty());
HelperThreadState().cpuCount = count;
HelperThreadState().threadCount = ThreadCountForCPUCount(count);
return true;
}
void JS::SetProfilingThreadCallbacks(
JS::RegisterThreadCallback registerThread,
JS::UnregisterThreadCallback unregisterThread) {
HelperThreadState().registerThread = registerThread;
HelperThreadState().unregisterThread = unregisterThread;
}
bool js::StartOffThreadWasmCompile(wasm::CompileTask* task,
wasm::CompileMode mode) {
return HelperThreadState().submitTask(task, mode);
}
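// Push a wasm compilation task onto the worklist for the given compile mode
// and wake a helper thread to service it.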
bool GlobalHelperThreadState::submitTask(wasm::CompileTask* task,
wasm::CompileMode mode) {
AutoLockHelperThreadState lock;
if (!wasmWorklist(lock, mode).pushBack(task)) {
return false;
}
dispatch(lock);
return true;
}
size_t js::RemovePendingWasmCompileTasks(
const wasm::CompileTaskState& taskState, wasm::CompileMode mode,
const AutoLockHelperThreadState& lock) {
wasm::CompileTaskPtrFifo& worklist =
HelperThreadState().wasmWorklist(lock, mode);
return worklist.eraseIf([&taskState](wasm::CompileTask* task) {
return &task->state == &taskState;
});
}
void js::StartOffThreadWasmTier2Generator(wasm::UniqueTier2GeneratorTask task) {
Unused << HelperThreadState().submitTask(std::move(task));
}
bool GlobalHelperThreadState::submitTask(wasm::UniqueTier2GeneratorTask task) {
MOZ_ASSERT(CanUseExtraThreads());
AutoLockHelperThreadState lock;
if (!wasmTier2GeneratorWorklist(lock).append(task.get())) {
return false;
}
Unused << task.release();
dispatch(lock);
return true;
}
static void CancelOffThreadWasmTier2GeneratorLocked(
AutoLockHelperThreadState& lock) {
if (HelperThreadState().threads(lock).empty()) {
return;
}
// Remove pending tasks from the tier2 generator worklist and cancel and
// delete them.
{
wasm::Tier2GeneratorTaskPtrVector& worklist =
HelperThreadState().wasmTier2GeneratorWorklist(lock);
for (size_t i = 0; i < worklist.length(); i++) {
wasm::Tier2GeneratorTask* task = worklist[i];
HelperThreadState().remove(worklist, &i);
js_delete(task);
}
}
// There is at most one running Tier2Generator task and we assume that
// below.
static_assert(GlobalHelperThreadState::MaxTier2GeneratorTasks == 1,
"code must be generalized");
// If there is a running Tier2 generator task, shut it down in a predictable
// way. The task will be deleted by the normal deletion logic.
for (auto* helper : HelperThreadState().helperTasks(lock)) {
if (helper->is<wasm::Tier2GeneratorTask>()) {
// Set a flag that causes compilation to shortcut itself.
helper->as<wasm::Tier2GeneratorTask>()->cancel();
// Wait for the generator task to finish. This avoids a shutdown race
// where the shutdown code is trying to shut down helper threads and the
// ongoing tier2 compilation is trying to finish, which requires it to
// have access to helper threads.
uint32_t oldFinishedCount =
HelperThreadState().wasmTier2GeneratorsFinished(lock);
while (HelperThreadState().wasmTier2GeneratorsFinished(lock) ==
oldFinishedCount) {
HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
}
// At most one of these tasks.
break;
}
}
}
void js::CancelOffThreadWasmTier2Generator() {
AutoLockHelperThreadState lock;
CancelOffThreadWasmTier2GeneratorLocked(lock);
}
bool js::StartOffThreadIonCompile(jit::IonCompileTask* task,
const AutoLockHelperThreadState& lock) {
return HelperThreadState().submitTask(task, lock);
}
bool GlobalHelperThreadState::submitTask(
jit::IonCompileTask* task, const AutoLockHelperThreadState& locked) {
MOZ_ASSERT(CanUseExtraThreads());
if (!ionWorklist(locked).append(task)) {
return false;
}
// The build is moving off-thread. Freeze the LifoAlloc to prevent any
// unwanted mutations.
task->alloc().lifoAlloc()->setReadOnly();
dispatch(locked);
return true;
}
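// Wrap a finished IonCompileTask in an IonFreeTask and queue it so that its
// memory is released on a helper thread instead of the current thread.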
bool js::StartOffThreadIonFree(jit::IonCompileTask* task,
const AutoLockHelperThreadState& lock) {
js::UniquePtr<jit::IonFreeTask> freeTask =
js::MakeUnique<jit::IonFreeTask>(task);
if (!freeTask) {
return false;
}
return HelperThreadState().submitTask(std::move(freeTask), lock);
}
bool GlobalHelperThreadState::submitTask(
UniquePtr<jit::IonFreeTask> task, const AutoLockHelperThreadState& locked) {
MOZ_ASSERT(CanUseExtraThreads());
if (!ionFreeList(locked).append(std::move(task))) {
return false;
}
dispatch(locked);
return true;
}
/*
* Move an IonCompileTask for which compilation has either finished, failed,
* or been cancelled into the global finished compilation list. All off-thread
* compilations which are started must eventually be finished.
*/
void js::FinishOffThreadIonCompile(jit::IonCompileTask* task,
const AutoLockHelperThreadState& lock) {
AutoEnterOOMUnsafeRegion oomUnsafe;
if (!HelperThreadState().ionFinishedList(lock).append(task)) {
oomUnsafe.crash("FinishOffThreadIonCompile");
}
task->script()
->runtimeFromAnyThread()
->jitRuntime()
->numFinishedOffThreadTasksRef(lock)++;
}
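// Map each CompilationSelector variant (script, realm, zone, zones-in-state,
// or runtime) to the JSRuntime it belongs to.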
static JSRuntime* GetSelectorRuntime(const CompilationSelector& selector) {
struct Matcher {
JSRuntime* operator()(JSScript* script) {
return script->runtimeFromMainThread();
}
JSRuntime* operator()(Realm* realm) {
return realm->runtimeFromMainThread();
}
JSRuntime* operator()(Zone* zone) { return zone->runtimeFromMainThread(); }
JSRuntime* operator()(ZonesInState zbs) { return zbs.runtime; }
JSRuntime* operator()(JSRuntime* runtime) { return runtime; }
};
return selector.match(Matcher());
}
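// Check whether any JIT data structures exist for the selector; if not, there
// cannot be any Ion compilations to cancel.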
static bool JitDataStructuresExist(const CompilationSelector& selector) {
struct Matcher {
bool operator()(JSScript* script) { return !!script->realm()->jitRealm(); }
bool operator()(Realm* realm) { return !!realm->jitRealm(); }
bool operator()(Zone* zone) { return !!zone->jitZone(); }
bool operator()(ZonesInState zbs) { return zbs.runtime->hasJitRuntime(); }
bool operator()(JSRuntime* runtime) { return runtime->hasJitRuntime(); }
};
return selector.match(Matcher());
}
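// Test whether an Ion compilation task's script is covered by the given
// selector (matching script, realm, zone, zone GC state, or runtime).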
static bool IonCompileTaskMatches(const CompilationSelector& selector,
jit::IonCompileTask* task) {
struct TaskMatches {
jit::IonCompileTask* task_;
bool operator()(JSScript* script) { return script == task_->script(); }
bool operator()(Realm* realm) { return realm == task_->script()->realm(); }
bool operator()(Zone* zone) {
return zone == task_->script()->zoneFromAnyThread();
}
bool operator()(JSRuntime* runtime) {
return runtime == task_->script()->runtimeFromAnyThread();
}
bool operator()(ZonesInState zbs) {
return zbs.runtime == task_->script()->runtimeFromAnyThread() &&
zbs.state == task_->script()->zoneFromAnyThread()->gcState();
}
};
return selector.match(TaskMatches{task});
}
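// Cancel matching Ion compilations in four stages: drop pending worklist
// entries, cancel in-progress compilations and wait for them to stop, discard
// finished compilations that have not been processed yet, and clear matching
// entries from the runtime's lazy link list.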
static void CancelOffThreadIonCompileLocked(const CompilationSelector& selector,
AutoLockHelperThreadState& lock) {
if (HelperThreadState().threads(lock).empty()) {
return;
}
/* Cancel any pending entries for which processing hasn't started. */
GlobalHelperThreadState::IonCompileTaskVector& worklist =
HelperThreadState().ionWorklist(lock);
for (size_t i = 0; i < worklist.length(); i++) {
jit::IonCompileTask* task = worklist[i];
if (IonCompileTaskMatches(selector, task)) {
// Once finished, tasks are added to a linked list which is
// allocated within the IonCompileTask. The IonCompileTask itself is
// allocated in the LifoAlloc, so we need the LifoAlloc to be mutable.
worklist[i]->alloc().lifoAlloc()->setReadWrite();
FinishOffThreadIonCompile(task, lock);
HelperThreadState().remove(worklist, &i);
}
}
/* Wait for in progress entries to finish up. */
bool cancelled;
do {
cancelled = false;
for (auto* helper : HelperThreadState().helperTasks(lock)) {
if (!helper->is<jit::IonCompileTask>()) {
continue;
}
jit::IonCompileTask* ionCompileTask = helper->as<jit::IonCompileTask>();
if (IonCompileTaskMatches(selector, ionCompileTask)) {
ionCompileTask->mirGen().cancel();
cancelled = true;
}
}
if (cancelled) {
HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
}
} while (cancelled);
/* Cancel code generation for any completed entries. */
GlobalHelperThreadState::IonCompileTaskVector& finished =
HelperThreadState().ionFinishedList(lock);
for (size_t i = 0; i < finished.length(); i++) {
jit::IonCompileTask* task = finished[i];
if (IonCompileTaskMatches(selector, task)) {
JSRuntime* rt = task->script()->runtimeFromAnyThread();
rt->jitRuntime()->numFinishedOffThreadTasksRef(lock)--;
jit::FinishOffThreadTask(rt, task, lock);
HelperThreadState().remove(finished, &i);
}
}
/* Cancel lazy linking for pending tasks (attached to the ionScript). */
JSRuntime* runtime = GetSelectorRuntime(selector);
jit::IonCompileTask* task =
runtime->jitRuntime()->ionLazyLinkList(runtime).getFirst();
while (task) {
jit::IonCompileTask* next = task->getNext();
if (IonCompileTaskMatches(selector, task)) {
jit::FinishOffThreadTask(runtime, task, lock);
}
task = next;
}
}
void js::CancelOffThreadIonCompile(const CompilationSelector& selector) {
if (!JitDataStructuresExist(selector)) {
return;
}
AutoLockHelperThreadState lock;
CancelOffThreadIonCompileLocked(selector, lock);
}
#ifdef DEBUG
bool js::HasOffThreadIonCompile(Realm* realm) {
AutoLockHelperThreadState lock;
if (HelperThreadState().threads(lock).empty()) {
return false;
}
GlobalHelperThreadState::IonCompileTaskVector& worklist =
HelperThreadState().ionWorklist(lock);
for (size_t i = 0; i < worklist.length(); i++) {
jit::IonCompileTask* task = worklist[i];
if (task->script()->realm() == realm) {
return true;
}
}
for (auto* helper : HelperThreadState().helperTasks(lock)) {
if (helper->is<jit::IonCompileTask>() &&
helper->as<jit::IonCompileTask>()->script()->realm() == realm) {
return true;
}
}
GlobalHelperThreadState::IonCompileTaskVector& finished =
HelperThreadState().ionFinishedList(lock);
for (size_t i = 0; i < finished.length(); i++) {
jit::IonCompileTask* task = finished[i];
if (task->script()->realm() == realm) {
return true;
}
}
JSRuntime* rt = realm->runtimeFromMainThread();
jit::IonCompileTask* task = rt->jitRuntime()->ionLazyLinkList(rt).getFirst();
while (task) {
if (task->script()->realm() == realm) {
return true;
}
task = task->getNext();
}
return false;
}
#endif
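// RAII class that associates the current thread's JSContext with a parse task
// for the duration of that task.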
struct MOZ_RAII AutoSetContextParse {
explicit AutoSetContextParse(ParseTask* task) {
TlsContext.get()->setParseTask(task);
}
~AutoSetContextParse() { TlsContext.get()->setParseTask(nullptr); }
};
// We want our default stack size limit to be approximately 2MB, to be safe, but
// expect most threads to use much less. On Linux, however, requesting a stack
// of 2MB or larger risks the kernel allocating an entire 2MB huge page for it
// on first access, which we do not want. To avoid this possibility, we subtract
// 2 standard VM page sizes from our default.
static const uint32_t kDefaultHelperStackSize = 2048 * 1024 - 2 * 4096;
static const uint32_t kDefaultHelperStackQuota = 1800 * 1024;
// TSan enforces a minimum stack size that's just slightly larger than our
// default helper stack size. It does this to store blobs of TSan-specific
// data on each thread's stack. Unfortunately, that means that even though
// we'll actually receive a larger stack than we requested, the effective
// usable space of that stack is significantly less than what we expect.
// To offset TSan stealing our stack space from underneath us, double the
// default.
//
// Note that we don't need this for ASan/MOZ_ASAN because ASan doesn't
// require all the thread-specific state that TSan does.
#if defined(MOZ_TSAN)
static const uint32_t HELPER_STACK_SIZE = 2 * kDefaultHelperStackSize;
static const uint32_t HELPER_STACK_QUOTA = 2 * kDefaultHelperStackQuota;
#else
static const uint32_t HELPER_STACK_SIZE = kDefaultHelperStackSize;
static const uint32_t HELPER_STACK_QUOTA = kDefaultHelperStackQuota;
#endif
AutoSetHelperThreadContext::AutoSetHelperThreadContext(
AutoLockHelperThreadState& lock)
: lock(lock) {
cx = HelperThreadState().getFirstUnusedContext(lock);
MOZ_ASSERT(cx);
cx->setHelperThread(lock);
cx->nativeStackBase = GetNativeStackBase();
// When we set the JSContext, we need to reset the computed stack limits for
// the current thread, so we also set the native stack quota.
JS_SetNativeStackQuota(cx, HELPER_STACK_QUOTA);
}
AutoSetHelperThreadContext::~AutoSetHelperThreadContext() {
cx->tempLifoAlloc().releaseAll();
if (cx->shouldFreeUnusedMemory()) {
cx->tempLifoAlloc().freeAll();
cx->setFreeUnusedMemory(false);
}
cx->clearHelperThread(lock);
cx = nullptr;
}
static const JSClass parseTaskGlobalClass = {"internal-parse-task-global",
JSCLASS_GLOBAL_FLAGS,
&JS::DefaultGlobalClassOps};
ParseTask::ParseTask(ParseTaskKind kind, JSContext* cx,
JS::OffThreadCompileCallback callback, void* callbackData)
: kind(kind),
options(cx),
parseGlobal(nullptr),
callback(callback),
callbackData(callbackData),
overRecursed(false),
outOfMemory(false) {
// Note that |cx| is the main thread context here, but the parse task will
// run with a different context on a helper thread.
MOZ_ASSERT(!cx->isHelperThreadContext());
MOZ_ALWAYS_TRUE(scripts.reserve(scripts.capacity()));
MOZ_ALWAYS_TRUE(sourceObjects.reserve(sourceObjects.capacity()));
}
bool ParseTask::init(JSContext* cx, const ReadOnlyCompileOptions& options,
JSObject* global) {
MOZ_ASSERT(!cx->isHelperThreadContext());
if (!this->options.copy(cx, options)) {
return false;
}
runtime = cx->runtime();
parseGlobal = global;
return true;
}
void ParseTask::activate(JSRuntime* rt) {
rt->addParseTaskRef();
if (parseGlobal) {
rt->setUsedByHelperThread(parseGlobal->zone());
}
}
ParseTask::~ParseTask() = default;
void ParseTask::trace(JSTracer* trc) {
if (runtime != trc->runtime()) {
return;
}
if (parseGlobal) {
Zone* zone = MaybeForwarded(parseGlobal)->zoneFromAnyThread();
if (zone->usedByHelperThread()) {
MOZ_ASSERT(!zone->isCollecting());
return;
}
}
TraceNullableRoot(trc, &parseGlobal, "ParseTask::parseGlobal");
scripts.trace(trc);
sourceObjects.trace(trc);
if (stencilInput_) {
stencilInput_->trace(trc);
}
gcOutput_.trace(trc);
}
size_t ParseTask::sizeOfExcludingThis(
mozilla::MallocSizeOf mallocSizeOf) const {
size_t stencilInputSize =
stencilInput_ ? stencilInput_->sizeOfIncludingThis(mallocSizeOf) : 0;
size_t stencilSize =
stencil_ ? stencil_->sizeOfIncludingThis(mallocSizeOf) : 0;
size_t extensibleStencilSize =
extensibleStencil_ ? extensibleStencil_->sizeOfIncludingThis(mallocSizeOf)
: 0;
// TODO: 'errors' requires adding support to `CompileError`. They are not
// common though.
return options.sizeOfExcludingThis(mallocSizeOf) +
scripts.sizeOfExcludingThis(mallocSizeOf) +
sourceObjects.sizeOfExcludingThis(mallocSizeOf) + stencilInputSize +
stencilSize + extensibleStencilSize +
gcOutput_.sizeOfExcludingThis(mallocSizeOf);
}
void ParseTask::runHelperThreadTask(AutoLockHelperThreadState& locked) {
#ifdef DEBUG
if (parseGlobal) {
runtime->incOffThreadParsesRunning();
}
#endif
runTask(locked);
// The callback is invoked while we are still off thread.
callback(this, callbackData);
// FinishOffThreadScript will need to be called on the script to
// migrate it into the correct compartment.
HelperThreadState().parseFinishedList(locked).insertBack(this);
#ifdef DEBUG
if (parseGlobal) {
runtime->decOffThreadParsesRunning();
}
#endif
}
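// Run the parse on a helper thread: claim an unused helper JSContext, enter
// the parse global's realm (if there is one), run the kind-specific parse()
// implementation, and then release temporary allocations made while parsing.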
void ParseTask::runTask(AutoLockHelperThreadState& lock) {
AutoSetHelperThreadContext usesContext(lock);
AutoUnlockHelperThreadState unlock(lock);
JSContext* cx = TlsContext.get();
AutoSetContextRuntime ascr(runtime);
AutoSetContextParse parsetask(this);
gc::AutoSuppressNurseryCellAlloc noNurseryAlloc(cx);
Zone* zone = nullptr;
if (parseGlobal) {
zone = parseGlobal->zoneFromAnyThread();
zone->setHelperThreadOwnerContext(cx);
}
auto resetOwnerContext = mozilla::MakeScopeExit([&] {
if (zone) {
zone->setHelperThreadOwnerContext(nullptr);
}
});
Maybe<AutoRealm> ar;
if (parseGlobal) {
ar.emplace(cx, parseGlobal);
}
parse(cx);
MOZ_ASSERT(cx->tempLifoAlloc().isEmpty());
cx->tempLifoAlloc().freeAll();
cx->frontendCollectionPool().purge();
cx->atomsZoneFreeLists().clear();
}
template <typename Unit>
struct ScriptParseTask : public ParseTask {
JS::SourceText<Unit> data;
ScriptParseTask(JSContext* cx, JS::SourceText<Unit>& srcBuf,
JS::OffThreadCompileCallback callback, void* callbackData);
void parse(JSContext* cx) override;
};
template <typename Unit>
ScriptParseTask<Unit>::ScriptParseTask(JSContext* cx,
JS::SourceText<Unit>& srcBuf,
JS::OffThreadCompileCallback callback,
void* callbackData)
: ParseTask(ParseTaskKind::Script, cx, callback, callbackData),
data(std::move(srcBuf)) {}
template <typename Unit>
void ScriptParseTask<Unit>::parse(JSContext* cx) {
MOZ_ASSERT(cx->isHelperThreadContext());
ScopeKind scopeKind =
options.nonSyntacticScope ? ScopeKind::NonSyntactic : ScopeKind::Global;
stencilInput_ = cx->make_unique<frontend::CompilationInput>(options);
if (stencilInput_) {
extensibleStencil_ = frontend::CompileGlobalScriptToExtensibleStencil(
cx, *stencilInput_, data, scopeKind);
}
if (extensibleStencil_) {
frontend::BorrowingCompilationStencil borrowingStencil(*extensibleStencil_);
if (!frontend::PrepareForInstantiate(cx, *stencilInput_, borrowingStencil,
gcOutput_)) {
extensibleStencil_ = nullptr;
}
}
if (options.useOffThreadParseGlobal) {
Unused << instantiateStencils(cx);
}
}
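// Instantiate the compiled (extensible) stencil into GC objects, recording
// the resulting source object and script so they are traced and can later be
// handed back to the main thread.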
bool ParseTask::instantiateStencils(JSContext* cx) {
if (!stencil_ && !extensibleStencil_) {
return false;
}
bool result;
if (stencil_) {
result =
frontend::InstantiateStencils(cx, *stencilInput_, *stencil_, gcOutput_);
} else {
frontend::BorrowingCompilationStencil borrowingStencil(*extensibleStencil_);
result = frontend::InstantiateStencils(cx, *stencilInput_, borrowingStencil,
gcOutput_);
}
// Whatever happens to the top-level script compilation (even if it fails),
// we must finish initializing the SSO. This is because there may be valid
// inner scripts observable by the debugger which reference the partially-
// initialized SSO.
if (gcOutput_.sourceObject) {
sourceObjects.infallibleAppend(gcOutput_.sourceObject);
}
if (result) {
MOZ_ASSERT(gcOutput_.script);
MOZ_ASSERT_IF(gcOutput_.module,
gcOutput_.module->script() == gcOutput_.script);
scripts.infallibleAppend(gcOutput_.script);
}
return result;
}
template <typename Unit>
struct ModuleParseTask : public ParseTask {
JS::SourceText<Unit> data;
ModuleParseTask(JSContext* cx, JS::SourceText<Unit>& srcBuf,
JS::OffThreadCompileCallback callback, void* callbackData);
void parse(JSContext* cx) override;
};
template <typename Unit>
ModuleParseTask<Unit>::ModuleParseTask(JSContext* cx,
JS::SourceText<Unit>& srcBuf,
JS::OffThreadCompileCallback callback,
void* callbackData)
: ParseTask(ParseTaskKind::Module, cx, callback, callbackData),
data(std::move(srcBuf)) {}
template <typename Unit>
void ModuleParseTask<Unit>::parse(JSContext* cx) {
MOZ_ASSERT(cx->isHelperThreadContext());
options.setModule();
stencilInput_ = cx->make_unique<frontend::CompilationInput>(options);
if (stencilInput_) {
extensibleStencil_ =
frontend::ParseModuleToExtensibleStencil(cx, *stencilInput_, data);
}
if (extensibleStencil_) {
frontend::BorrowingCompilationStencil borrowingStencil(*extensibleStencil_);
if (!frontend::PrepareForInstantiate(cx, *stencilInput_, borrowingStencil,
gcOutput_)) {
extensibleStencil_ = nullptr;
}
}
if (options.useOffThreadParseGlobal) {
Unused << instantiateStencils(cx);
}
}
ScriptDecodeTask::ScriptDecodeTask(JSContext* cx,
const JS::TranscodeRange& range,
JS::OffThreadCompileCallback callback,
void* callbackData)
: ParseTask(ParseTaskKind::ScriptDecode, cx, callback, callbackData),
range(range) {
MOZ_ASSERT(JS::IsTranscodingBytecodeAligned(range.begin().get()));
}
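// Decode a single script from the transcode buffer: as a stencil when the
// stencil XDR format is in use, otherwise directly as a JSScript.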
void ScriptDecodeTask::parse(JSContext* cx) {
MOZ_ASSERT(cx->isHelperThreadContext());
RootedScript resultScript(cx);
Rooted<ScriptSourceObject*> sourceObject(cx);
if (options.useStencilXDR) {
// The buffer contains stencil.
stencilInput_ = cx->make_unique<frontend::CompilationInput>(options);
if (!stencilInput_) {
return;
}
if (!stencilInput_->initForGlobal(cx)) {
return;
}
stencil_ =
cx->make_unique<frontend::CompilationStencil>(stencilInput_->source);
if (!stencil_) {
return;
}
XDRStencilDecoder decoder(cx, &options, range);
XDRResult res = decoder.codeStencil(*stencilInput_, *stencil_);
if (!res.isOk()) {
stencil_.reset();
return;
}
if (!frontend::PrepareForInstantiate(cx, *stencilInput_, *stencil_,
gcOutput_)) {
stencil_.reset();
}
if (options.useOffThreadParseGlobal) {
Unused << instantiateStencils(cx);
}
return;
}
// The buffer contains JSScript.
Rooted<UniquePtr<XDROffThreadDecoder>> decoder(
cx, js::MakeUnique<XDROffThreadDecoder>(
cx, &options, XDROffThreadDecoder::Type::Single,
/* sourceObjectOut = */ &sourceObject.get(), range));
if (!decoder) {
ReportOutOfMemory(cx);
return;
}
mozilla::DebugOnly<XDRResult> res = decoder->codeScript(&resultScript);
MOZ_ASSERT(bool(resultScript) == static_cast<const XDRResult&>(res).isOk());
if (sourceObject) {
sourceObjects.infallibleAppend(sourceObject);
}
if (resultScript) {
scripts.infallibleAppend(resultScript);
}
}
MultiScriptsDecodeTask::MultiScriptsDecodeTask(
JSContext* cx, JS::TranscodeSources& sources,
JS::OffThreadCompileCallback callback, void* callbackData)
: ParseTask(ParseTaskKind::MultiScriptsDecode, cx, callback, callbackData),
sources(&sources) {}
void MultiScriptsDecodeTask::parse(JSContext* cx) {
MOZ_ASSERT(cx->isHelperThreadContext());
if (!scripts.reserve(sources->length()) ||
!sourceObjects.reserve(sources->length())) {
ReportOutOfMemory(cx); // This sets |outOfMemory|.
return;
}
for (auto& source : *sources) {
CompileOptions opts(cx, options);
opts.setFileAndLine(source.filename, source.lineno);
RootedScript resultScript(cx);
Rooted<ScriptSourceObject*> sourceObject(cx);
Rooted<UniquePtr<XDROffThreadDecoder>> decoder(
cx, js::MakeUnique<XDROffThreadDecoder>(
cx, &opts, XDROffThreadDecoder::Type::Multi,
&sourceObject.get(), source.range));
if (!decoder) {
ReportOutOfMemory(cx);
return;
}
mozilla::DebugOnly<XDRResult> res = decoder->codeScript(&resultScript);
MOZ_ASSERT(bool(resultScript) == static_cast<const XDRResult&>(res).isOk());
if (sourceObject) {
sourceObjects.infallibleAppend(sourceObject);
}
if (resultScript) {
scripts.infallibleAppend(resultScript);
} else {
// If any decodes fail, don't process the rest. We likely are hitting OOM.
break;
}
}
}
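// Block until no parse tasks for |rt| remain on the worklist or are running
// on a helper thread.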
static void WaitForOffThreadParses(JSRuntime* rt,
AutoLockHelperThreadState& lock) {
if (HelperThreadState().threads(lock).empty()) {
return;
}
GlobalHelperThreadState::ParseTaskVector& worklist =
HelperThreadState().parseWorklist(lock);
while (true) {
bool pending = false;
for (const auto& task : worklist) {
if (task->runtimeMatches(rt)) {
pending = true;
break;
}
}
if (!pending) {
bool inProgress = false;
for (auto* helper : HelperThreadState().helperTasks(lock)) {
if (helper->is<ParseTask>() &&
helper->as<ParseTask>()->runtimeMatches(rt)) {
inProgress = true;
break;
}
}
if (!inProgress) {
break;
}
}
HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
}
#ifdef DEBUG
for (const auto& task : worklist) {
MOZ_ASSERT(!task->runtimeMatches(rt));
}
for (auto* helper : HelperThreadState().helperTasks(lock)) {
MOZ_ASSERT_IF(helper->is<ParseTask>(),
!helper->as<ParseTask>()->runtimeMatches(rt));
}
#endif
}
void js::WaitForOffThreadParses(JSRuntime* rt) {
AutoLockHelperThreadState lock;
WaitForOffThreadParses(rt, lock);
}
void js::CancelOffThreadParses(JSRuntime* rt) {
AutoLockHelperThreadState lock;
#ifdef DEBUG
for (const auto& task : HelperThreadState().parseWaitingOnGC(lock)) {
MOZ_ASSERT(!task->runtimeMatches(rt));
}
#endif
// Instead of forcibly canceling pending parse tasks, just wait for all
// scheduled and in progress ones to complete. Otherwise the final GC may not
// collect everything due to zones being used off thread.
WaitForOffThreadParses(rt, lock);
// Clean up any parse tasks which haven't been finished by the main thread.
auto& finished = HelperThreadState().parseFinishedList(lock);
while (true) {
bool found = false;
ParseTask* next;
ParseTask* task = finished.getFirst();
while (task) {
next = task->getNext();
if (task->runtimeMatches(rt)) {
found = true;
task->remove();
HelperThreadState().destroyParseTask(rt, task);
}
task = next;
}
if (!found) {
break;
}
}
#ifdef DEBUG
for (ParseTask* task : finished) {
MOZ_ASSERT(!task->runtimeMatches(rt));
}
#endif
}
bool js::OffThreadParsingMustWaitForGC(JSRuntime* rt) {
// Off thread parsing can't occur during incremental collections on the
// atoms zone, to avoid triggering barriers. (Outside the atoms zone, the
// compilation will use a new zone that is never collected.) If an
// atoms-zone GC is in progress, hold off on executing the parse task until
// the atoms-zone GC completes (see EnqueuePendingParseTasksAfterGC).
return rt->activeGCInAtomsZone();
}
static bool EnsureConstructor(JSContext* cx, Handle<GlobalObject*> global,
JSProtoKey key) {
if (!GlobalObject::ensureConstructor(cx, global, key)) {
return false;
}
MOZ_ASSERT(global->getPrototype(key).toObject().isDelegate(),
"standard class prototype wasn't a delegate from birth");
return true;
}
// Initialize all classes potentially created during parsing for use in parser
// data structures, template objects, &c.
static bool EnsureParserCreatedClasses(JSContext* cx, ParseTaskKind kind) {
Handle<GlobalObject*> global = cx->global();
if (!EnsureConstructor(cx, global, JSProto_Function)) {
return false; // needed by functions, also adds object literals' proto
}
if (!EnsureConstructor(cx, global, JSProto_Array)) {
return false; // needed by array literals
}
if (!EnsureConstructor(cx, global, JSProto_RegExp)) {
return false; // needed by regular expression literals
}
if (!EnsureConstructor(cx, global, JSProto_GeneratorFunction)) {
return false; // needed by function*() {}
}
if (!EnsureConstructor(cx, global, JSProto_AsyncFunction)) {
return false; // needed by async function() {}
}
if (!EnsureConstructor(cx, global, JSProto_AsyncGeneratorFunction)) {
return false; // needed by async function*() {}
}
if (kind == ParseTaskKind::Module &&
!GlobalObject::ensureModulePrototypesCreated(cx, global)) {
return false;
}
return true;
}
class MOZ_RAII AutoSetCreatedForHelperThread {
Zone* zone;
public:
explicit AutoSetCreatedForHelperThread(JSObject* global)
: zone(global ? global->zone() : nullptr) {
if (zone) {
zone->setCreatedForHelperThread();
}
}
void forget() { zone = nullptr; }
~AutoSetCreatedForHelperThread() {
if (zone) {
zone->clearUsedByHelperThread();
}
}
};
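// Create the temporary global used for an off-thread parse. It lives in a
// fresh compartment and zone, is invisible to the debugger, and is marked
// mergeable so its contents can later be merged into the target realm.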
static JSObject* CreateGlobalForOffThreadParse(JSContext* cx,
const gc::AutoSuppressGC& nogc) {
JS::Realm* currentRealm = cx->realm();
JS::RealmOptions realmOptions(currentRealm->creationOptions(),
currentRealm->behaviors());
auto& creationOptions = realmOptions.creationOptions();
creationOptions.setInvisibleToDebugger(true)
.setMergeable(true)
.setNewCompartmentAndZone();
// Don't falsely inherit the host's global trace hook.
creationOptions.setTrace(nullptr);
return JS_NewGlobalObject(cx, &parseTaskGlobalClass,
currentRealm->principals(),
JS::DontFireOnNewGlobalHook, realmOptions);
}
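// Queue a parse task: if it uses an off-thread parse global and an atoms-zone
// GC is in progress, park it on the waiting-on-GC list; otherwise submit it
// to the parse worklist right away.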
static bool QueueOffThreadParseTask(JSContext* cx, UniquePtr<ParseTask> task) {
AutoLockHelperThreadState lock;
bool mustWait = task->options.useOffThreadParseGlobal &&
OffThreadParsingMustWaitForGC(cx->runtime());
bool result;
if (mustWait) {
result = HelperThreadState().parseWaitingOnGC(lock).append(std::move(task));
} else {
result =
HelperThreadState().submitTask(cx->runtime(), std::move(task), lock);
}
if (!result) {
ReportOutOfMemory(cx);
}
return result;
}
bool GlobalHelperThreadState::submitTask(
JSRuntime* rt, UniquePtr<ParseTask> task,
const AutoLockHelperThreadState& locked) {
if (!parseWorklist(locked).append(std::move(task))) {
return false;
}
parseWorklist(locked).back()->activate(rt);
dispatch(locked);
return true;
}
static JS::OffThreadToken* StartOffThreadParseTask(
JSContext* cx, UniquePtr<ParseTask> task,
const ReadOnlyCompileOptions& options) {
// Suppress GC so that calls below do not trigger a new incremental GC
// which could require barriers on the atoms zone.
gc::AutoSuppressGC nogc(cx);
gc::AutoSuppressNurseryCellAlloc noNurseryAlloc(cx);
AutoSuppressAllocationMetadataBuilder suppressMetadata(cx);
JSObject* global = nullptr;
if (options.useOffThreadParseGlobal) {
global = CreateGlobalForOffThreadParse(cx, nogc);
if (!global) {
return nullptr;
}
}
// Mark the global's zone as created for a helper thread. This prevents it
// from being collected until clearUsedByHelperThread() is called after
// parsing is complete. If this function exits due to error this state is
// cleared automatically.
AutoSetCreatedForHelperThread createdForHelper(global);
if (!task->init(cx, options, global)) {
return nullptr;
}
JS::OffThreadToken* token = task.get();
if (!QueueOffThreadParseTask(cx, std::move(task))) {
return nullptr;
}
createdForHelper.forget();
// Return an opaque pointer to caller so that it may query/cancel the task
// before the callback is fired.
return token;
}
template <typename Unit>
static JS::OffThreadToken* StartOffThreadParseScriptInternal(
JSContext* cx, const ReadOnlyCompileOptions& options,
JS::SourceText<Unit>& srcBuf, JS::OffThreadCompileCallback callback,
void* callbackData) {
auto task = cx->make_unique<ScriptParseTask<Unit>>(cx, srcBuf, callback,
callbackData);
if (!task) {
return nullptr;
}
return StartOffThreadParseTask(cx, std::move(task), options);
}
JS::OffThreadToken* js::StartOffThreadParseScript(
JSContext* cx, const ReadOnlyCompileOptions& options,
JS::SourceText<char16_t>& srcBuf, JS::OffThreadCompileCallback callback,
void* callbackData) {
return StartOffThreadParseScriptInternal(cx, options, srcBuf, callback,
callbackData);
}
JS::OffThreadToken* js::StartOffThreadParseScript(
JSContext* cx, const ReadOnlyCompileOptions& options,
JS::SourceText<Utf8Unit>& srcBuf, JS::OffThreadCompileCallback callback,
void* callbackData) {
return StartOffThreadParseScriptInternal(cx, options, srcBuf, callback,
callbackData);
}
template <typename Unit>
static JS::OffThreadToken* StartOffThreadParseModuleInternal(
JSContext* cx, const ReadOnlyCompileOptions& options,
JS::SourceText<Unit>& srcBuf, JS::OffThreadCompileCallback callback,
void* callbackData) {
auto task = cx->make_unique<ModuleParseTask<Unit>>(cx, srcBuf, callback,
callbackData);
if (!task) {
return nullptr;
}
return StartOffThreadParseTask(cx, std::move(task), options);
}
JS::OffThreadToken* js::StartOffThreadParseModule(
JSContext* cx, const ReadOnlyCompileOptions& options,
JS::SourceText<char16_t>& srcBuf, JS::OffThreadCompileCallback callback,
void* callbackData) {
return StartOffThreadParseModuleInternal(cx, options, srcBuf, callback,
callbackData);
}
JS::OffThreadToken* js::StartOffThreadParseModule(
JSContext* cx, const ReadOnlyCompileOptions& options,
JS::SourceText<Utf8Unit>& srcBuf, JS::OffThreadCompileCallback callback,
void* callbackData) {
return StartOffThreadParseModuleInternal(cx, options, srcBuf, callback,
callbackData);
}
JS::OffThreadToken* js::StartOffThreadDecodeScript(
JSContext* cx, const ReadOnlyCompileOptions& options,
const JS::TranscodeRange& range, JS::OffThreadCompileCallback callback,
void* callbackData) {
// XDR data must be Stencil format, or a parse-global must be available.
MOZ_RELEASE_ASSERT(options.useStencilXDR || options.useOffThreadParseGlobal);
auto task =
cx->make_unique<ScriptDecodeTask>(cx, range, callback, callbackData);
if (!task) {
return nullptr;
}
return StartOffThreadParseTask(cx, std::move(task), options);
}
JS::OffThreadToken* js::StartOffThreadDecodeMultiScripts(
JSContext* cx, const ReadOnlyCompileOptions& options,
JS::TranscodeSources& sources, JS::OffThreadCompileCallback callback,
void* callbackData) {
auto task = cx->make_unique<MultiScriptsDecodeTask>(cx, sources, callback,
callbackData);
if (!task) {
return nullptr;
}
// NOTE: All uses of DecodeMulti are currently generated by non-incremental
// XDR and therefore do not support the stencil format. As a result,
// they must continue to use the off-thread-parse-global in order to
// decode.
CompileOptions optionsCopy(cx, options);
optionsCopy.useStencilXDR = false;
optionsCopy.useOffThreadParseGlobal = true;
return StartOffThreadParseTask(cx, std::move(task), optionsCopy);
}
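// Called once an atoms-zone GC has finished: move any parse tasks for this
// runtime that were waiting on the GC onto the regular parse worklist.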
void js::EnqueuePendingParseTasksAfterGC(JSRuntime* rt) {
MOZ_ASSERT(!OffThreadParsingMustWaitForGC(rt));
AutoLockHelperThreadState lock;
GlobalHelperThreadState::ParseTaskVector& waiting =
HelperThreadState().parseWaitingOnGC(lock);
for (size_t i = 0; i < waiting.length(); i++) {
if (!waiting[i]->runtimeMatches(rt)) {
continue;
}
{
AutoEnterOOMUnsafeRegion oomUnsafe;
if (!HelperThreadState().submitTask(rt, std::move(waiting[i]), lock)) {
oomUnsafe.crash("EnqueuePendingParseTasksAfterGC");
}
}
HelperThreadState().remove(waiting, &i);
}
}
#ifdef DEBUG
bool js::CurrentThreadIsParseThread() {
JSContext* cx = TlsContext.get();
return cx->isHelperThreadContext() && cx->parseTask();
}
#endif
bool GlobalHelperThreadState::ensureInitialized() {
MOZ_ASSERT(CanUseExtraThreads());
MOZ_ASSERT(this == &HelperThreadState());
return ensureThreadCount(threadCount);
}
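// Grow the helper thread pool to at least |count| threads, creating matching
// helper JSContexts first. threadCount is updated on exit to reflect how many
// threads actually exist.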
bool GlobalHelperThreadState::ensureThreadCount(size_t count) {
if (!ensureContextList(count)) {
return false;
}
AutoLockHelperThreadState lock;
if (threads(lock).length() >= count) {
return true;
}
if (!threads(lock).reserve(count)) {
return false;
}
if (!helperTasks_.reserve(count)) {
return false;
}
for (size_t& i : runningTaskCount) {
i = 0;
}
// Update threadCount on exit so this stays consistent with how many threads
// there are.
auto updateThreadCount =
mozilla::MakeScopeExit([&] { threadCount = threads(lock).length(); });
while (threads(lock).length() < count) {
auto thread = js::MakeUnique<HelperThread>();
if (!thread || !thread->init()) {
return false;
}
threads(lock).infallibleEmplaceBack(std::move(thread));
}
return true;
}
GlobalHelperThreadState::GlobalHelperThreadState()
: cpuCount(0),
threadCount(0),
totalCountRunningTasks(0),
registerThread(nullptr),
unregisterThread(nullptr),
wasmTier2GeneratorsFinished_(0) {
cpuCount = ClampDefaultCPUCount(GetCPUCount());
threadCount = ThreadCountForCPUCount(cpuCount);
gcParallelThreadCount = threadCount;
MOZ_ASSERT(cpuCount > 0, "GetCPUCount() seems broken");
}
void GlobalHelperThreadState::finish() {
finishThreads();
// Make sure there are no Ion free tasks left. We check this here because,
// unlike the other tasks, we don't explicitly block on this when
// destroying a runtime.
AutoLockHelperThreadState lock;
auto& freeList = ionFreeList(lock);
while (!freeList.empty()) {
UniquePtr<jit::IonFreeTask> task = std::move(freeList.back());
freeList.popBack();
jit::FreeIonCompileTask(task->compileTask());
}
destroyHelperContexts(lock);
}
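// Shut down all helper threads: wait for queued and active tasks to drain,
// flag each thread for termination, wake them all, and join them outside the
// helper thread lock.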
void GlobalHelperThreadState::finishThreads() {
HelperThreadVector oldThreads;
{
AutoLockHelperThreadState lock;
if (threads(lock).empty()) {
return;
}
MOZ_ASSERT(CanUseExtraThreads());
waitForAllThreadsLocked(lock);
for (auto& thread : threads(lock)) {
thread->setTerminate(lock);
}
notifyAll(GlobalHelperThreadState::PRODUCER, lock);
std::swap(threads_, oldThreads);
}
for (auto& thread : oldThreads) {
thread->join();
}
}
bool GlobalHelperThreadState::ensureContextList(size_t count) {
AutoLockHelperThreadState lock;
if (helperContexts_.length() >= count) {
return true;
}
while (helperContexts_.length() < count) {
auto cx = js::MakeUnique<JSContext>(nullptr, JS::ContextOptions());
if (!cx || !cx->init(ContextKind::HelperThread) ||
!helperContexts_.append(cx.release())) {
return false;
}
}
return true;
}
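// Return a helper JSContext that is not currently bound to a thread. The
// context list is sized to the thread count, so one is expected to be
// available; otherwise this crashes.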
JSContext* GlobalHelperThreadState::getFirstUnusedContext(
AutoLockHelperThreadState& locked) {
for (auto& cx : helperContexts_) {
if (cx->contextAvailable(locked)) {
return cx;
}
}
MOZ_CRASH("Expected available JSContext");
}
void GlobalHelperThreadState::destroyHelperContexts(
AutoLockHelperThreadState& lock) {
while (helperContexts_.length() > 0) {
js_delete(helperContexts_.popCopy());
}
}
#ifdef DEBUG
void GlobalHelperThreadState::assertIsLockedByCurrentThread() const {
gHelperThreadLock.assertOwnedByCurrentThread();
}
#endif // DEBUG
void GlobalHelperThreadState::dispatch(
const AutoLockHelperThreadState& locked) {
notifyOne(PRODUCER, locked);
}
void GlobalHelperThreadState::wait(
AutoLockHelperThreadState& locked, CondVar which,
TimeDuration timeout /* = TimeDuration::Forever() */) {
whichWakeup(which).wait_for(locked, timeout);
}
void GlobalHelperThreadState::notifyAll(CondVar which,
const AutoLockHelperThreadState&) {
whichWakeup(which).notify_all();
}
void GlobalHelperThreadState::notifyOne(CondVar which,
const AutoLockHelperThreadState&) {
whichWakeup(which).notify_one();
}
bool GlobalHelperThreadState::hasActiveThreads(
const AutoLockHelperThreadState& lock) {
return !helperTasks(lock).empty();
}
void js::WaitForAllHelperThreads() { HelperThreadState().waitForAllThreads(); }
void js::WaitForAllHelperThreads(AutoLockHelperThreadState& lock) {
HelperThreadState().waitForAllThreadsLocked(lock);
}
void GlobalHelperThreadState::waitForAllThreads() {
AutoLockHelperThreadState lock;
waitForAllThreadsLocked(lock);
}
void GlobalHelperThreadState::waitForAllThreadsLocked(
AutoLockHelperThreadState& lock) {
CancelOffThreadWasmTier2GeneratorLocked(lock);
while (hasActiveThreads(lock) || hasQueuedTasks(lock)) {
wait(lock, CONSUMER);
}
MOZ_ASSERT(!hasActiveThreads(lock));
MOZ_ASSERT(!hasQueuedTasks(lock));
}
// A task can be a "master" task, i.e., it will block waiting for other worker
// threads that perform work on its behalf. If so it must not take the last
// available thread; there must always be at least one worker thread able to do
// the actual work. (Or the system may deadlock.)
//
// If a task is a master task it *must* pass isMaster=true here, or perform a
// similar calculation to avoid deadlock from starvation.
//
// isMaster should only be true if the thread calling checkTaskThreadLimit() is
// a helper thread.
//
// NOTE: Calling checkTaskThreadLimit() from a helper thread in the dynamic
// region after currentTask.emplace() and before currentTask.reset() may cause
// it to return a different result than if it is called outside that dynamic
// region, as the predicate inspects the values of the threads' currentTask
// members.
bool GlobalHelperThreadState::checkTaskThreadLimit(
ThreadType threadType, size_t maxThreads, bool isMaster,
const AutoLockHelperThreadState& lock) const {
MOZ_ASSERT(maxThreads > 0);
if (!isMaster && maxThreads >= threadCount) {
return true;
}
size_t count = runningTaskCount[threadType];
if (count >= maxThreads) {
return false;
}
MOZ_ASSERT(threadCount >= totalCountRunningTasks);
size_t idle = threadCount - totalCountRunningTasks;
// It is possible for the number of idle threads to be zero here, because
// checkTaskThreadLimit() can be called from non-helper threads. Notably,
// the compression task scheduler invokes it, and runs off a helper thread.
if (idle == 0) {
return false;
}
// A master thread that's the last available thread must not be allowed to
// run.
if (isMaster && idle == 1) {
return false;
}
return true;
}
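// Two-phase memory release for helper contexts: contexts flagged on a
// previous call that have stayed idle have their temporary LifoAlloc freed
// here; all other contexts are flagged so they free it themselves the next
// time they finish a task.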
void GlobalHelperThreadState::triggerFreeUnusedMemory() {
if (!CanUseExtraThreads()) {
return;
}
AutoLockHelperThreadState lock;
for (auto& context : helperContexts_) {
if (context->shouldFreeUnusedMemory() && context->contextAvailable(lock)) {
// This context hasn't been used since the last time freeUnusedMemory
// was set. Free the temp LifoAlloc from the main thread.
context->tempLifoAllocNoCheck().freeAll();
context->setFreeUnusedMemory(false);
} else {
context->setFreeUnusedMemory(true);
}
}
}
static inline bool IsHelperThreadSimulatingOOM(js::ThreadType threadType) {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
return js::oom::simulator.targetThread() == threadType;
#else
return false;
#endif
}
void GlobalHelperThreadState::addSizeOfIncludingThis(
JS::GlobalStats* stats, AutoLockHelperThreadState& lock) const {
#ifdef DEBUG
assertIsLockedByCurrentThread();
#endif
mozilla::MallocSizeOf mallocSizeOf = stats->mallocSizeOf_;
JS::HelperThreadStats& htStats = stats->helperThread;
htStats.stateData += mallocSizeOf(this);
htStats.stateData += threads(lock).sizeOfExcludingThis(mallocSizeOf);
// Report memory used by various containers
htStats.stateData +=
ionWorklist_.sizeOfExcludingThis(mallocSizeOf) +
ionFinishedList_.sizeOfExcludingThis(mallocSizeOf) +
ionFreeList_.sizeOfExcludingThis(mallocSizeOf) +
wasmWorklist_tier1_.sizeOfExcludingThis(mallocSizeOf) +
wasmWorklist_tier2_.sizeOfExcludingThis(mallocSizeOf) +
wasmTier2GeneratorWorklist_.sizeOfExcludingThis(mallocSizeOf) +
promiseHelperTasks_.sizeOfExcludingThis(mallocSizeOf) +
parseWorklist_.sizeOfExcludingThis(mallocSizeOf) +
parseFinishedList_.sizeOfExcludingThis(mallocSizeOf) +
parseWaitingOnGC_.sizeOfExcludingThis(mallocSizeOf) +
compressionPendingList_.sizeOfExcludingThis(mallocSizeOf) +
compressionWorklist_.sizeOfExcludingThis(mallocSizeOf) +
compressionFinishedList_.sizeOfExcludingThis(mallocSizeOf) +
gcParallelWorklist_.sizeOfExcludingThis(mallocSizeOf) +
helperContexts_.sizeOfExcludingThis(mallocSizeOf) +
helperTasks_.sizeOfExcludingThis(mallocSizeOf);
// Report ParseTasks on wait lists
for (const auto& task : parseWorklist_) {
htStats.parseTask += task->sizeOfIncludingThis(mallocSizeOf);
}
for (auto task : parseFinishedList_) {
htStats.parseTask += task->sizeOfIncludingThis(mallocSizeOf);
}
for (const auto& task : parseWaitingOnGC_) {
htStats.parseTask += task->sizeOfIncludingThis(mallocSizeOf);
}
// Report IonCompileTasks on wait lists
for (auto task : ionWorklist_) {
htStats.ionCompileTask += task->sizeOfExcludingThis(mallocSizeOf);
}
for (auto task : ionFinishedList_) {
htStats.ionCompileTask += task->sizeOfExcludingThis(mallocSizeOf);
}
for (const auto& task : ionFreeList_) {
htStats.ionCompileTask +=
task->compileTask()->sizeOfExcludingThis(mallocSizeOf);
}
// Report wasm::CompileTasks on wait lists
for (auto task : wasmWorklist_tier1_) {
htStats.wasmCompile += task->sizeOfExcludingThis(mallocSizeOf);
}
for (auto task : wasmWorklist_tier2_) {
htStats.wasmCompile += task->sizeOfExcludingThis(mallocSizeOf);
}
{
// Report memory used by the JSContexts.
// We're holding the helper state lock, and the JSContext memory reporter
// won't do anything more substantial than traversing data structures and
// getting their size, so disable ProtectedData checks.
AutoNoteSingleThreadedRegion anstr;
for (auto* cx : helperContexts_) {
htStats.contexts += cx->sizeOfIncludingThis(mallocSizeOf);
}
}
// Report number of helper threads.
MOZ_ASSERT(htStats.idleThreadCount == 0);
MOZ_ASSERT(threadCount >= totalCountRunningTasks);
htStats.activeThreadCount = totalCountRunningTasks;
htStats.idleThreadCount = threadCount - totalCountRunningTasks;
}
size_t GlobalHelperThreadState::maxIonCompilationThreads() const {
if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_ION)) {
return 1;
}
return threadCount;
}
size_t