/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "prio.h"
#include "PLDHashTable.h"
#include "mozilla/IOInterposer.h"
#include "mozilla/AutoMemMap.h"
#include "mozilla/IOBuffers.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/MemUtils.h"
#include "mozilla/ResultExtensions.h"
#include "mozilla/scache/StartupCache.h"
#include "mozilla/ScopeExit.h"

#include "nsClassHashtable.h"
#include "nsComponentManagerUtils.h"
#include "nsCRT.h"
#include "nsDirectoryServiceUtils.h"
#include "nsIClassInfo.h"
#include "nsIFile.h"
#include "nsIObserver.h"
#include "nsIOutputStream.h"
#include "nsISupports.h"
#include "nsITimer.h"
#include "nsZipArchive.h"
#include "mozilla/Omnijar.h"
#include "prenv.h"
#include "mozilla/Telemetry.h"
#include "nsThreadUtils.h"
#include "nsXULAppAPI.h"
#include "nsIProtocolHandler.h"
#include "GeckoProfiler.h"

#ifdef IS_BIG_ENDIAN
#  define SC_ENDIAN "big"
#else
#  define SC_ENDIAN "little"
#endif

#if PR_BYTES_PER_WORD == 4
#  define SC_WORDSIZE "4"
#else
#  define SC_WORDSIZE "8"
#endif

using namespace mozilla::Compression;

namespace mozilla {
namespace scache {

MOZ_DEFINE_MALLOC_SIZE_OF(StartupCacheMallocSizeOf)

NS_IMETHODIMP
StartupCache::CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData, bool aAnonymize) {
  MOZ_COLLECT_REPORT(
      "explicit/startup-cache/mapping", KIND_NONHEAP, UNITS_BYTES,
      mCacheData.nonHeapSizeOfExcludingThis(),
      "Memory used to hold the mapping of the startup cache from file. "
      "This memory is likely to be swapped out shortly after start-up.");

  MOZ_COLLECT_REPORT("explicit/startup-cache/data", KIND_HEAP, UNITS_BYTES,
                     HeapSizeOfIncludingThis(StartupCacheMallocSizeOf),
                     "Memory used by the startup cache for things other than "
                     "the file mapping.");

  return NS_OK;
}

static const uint8_t MAGIC[] = "startupcache0002";
// This is a heuristic value for how much to reserve for mTable to avoid
// rehashing. This is not a hard limit in release builds, but it is in
// debug builds as it should be stable. If we exceed this number we should
// just increase it.
static const size_t STARTUP_CACHE_RESERVE_CAPACITY = 450;
// This is a hard limit which we will assert on, to ensure that we don't
// have some bug causing runaway cache growth.
static const size_t STARTUP_CACHE_MAX_CAPACITY = 5000;

#define STARTUP_CACHE_NAME "startupCache." SC_WORDSIZE "." SC_ENDIAN
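
// On-disk layout (see LoadArchive() and WriteToDisk()): the MAGIC string, a
// little-endian uint32 giving the header length, a header made up of one
// record per entry (offset, compressed size, uncompressed size, key), and
// then the entries themselves, each stored as an LZ4 frame at its recorded
// offset relative to mCacheEntriesBaseOffset.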

static inline Result<Ok, nsresult> Write(PRFileDesc* fd, const void* data,
                                         int32_t len) {
  if (PR_Write(fd, data, len) != len) {
    return Err(NS_ERROR_FAILURE);
  }
  return Ok();
}

static inline Result<Ok, nsresult> Seek(PRFileDesc* fd, int32_t offset) {
  if (PR_Seek(fd, offset, PR_SEEK_SET) == -1) {
    return Err(NS_ERROR_FAILURE);
  }
  return Ok();
}

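// The specific LZ4 error code isn't actionable by our callers, so collapse
// every compression/decompression failure into a generic error.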
static nsresult MapLZ4ErrorToNsresult(size_t aError) {
  return NS_ERROR_FAILURE;
}

StartupCache* StartupCache::GetSingletonNoInit() {
  return StartupCache::gStartupCache;
}

StartupCache* StartupCache::GetSingleton() {
  if (!gStartupCache) {
    if (!XRE_IsParentProcess()) {
      return nullptr;
    }
#ifdef MOZ_DISABLE_STARTUPCACHE
    return nullptr;
#else
    StartupCache::InitSingleton();
#endif
  }

  return StartupCache::gStartupCache;
}

void StartupCache::DeleteSingleton() { StartupCache::gStartupCache = nullptr; }

nsresult StartupCache::InitSingleton() {
  nsresult rv;
  StartupCache::gStartupCache = new StartupCache();

  rv = StartupCache::gStartupCache->Init();
  if (NS_FAILED(rv)) {
    StartupCache::gStartupCache = nullptr;
  }
  return rv;
}

StaticRefPtr<StartupCache> StartupCache::gStartupCache;
bool StartupCache::gShutdownInitiated;
bool StartupCache::gIgnoreDiskCache;
bool StartupCache::gFoundDiskCacheOnInit;

NS_IMPL_ISUPPORTS(StartupCache, nsIMemoryReporter)

StartupCache::StartupCache()
    : mDirty(false),
      mWrittenOnce(false),
      mStartupWriteInitiated(false),
      mCurTableReferenced(false),
      mRequestedCount(0),
      mCacheEntriesBaseOffset(0),
      mWriteThread(nullptr),
      mPrefetchThread(nullptr) {}

StartupCache::~StartupCache() {
  WaitOnWriteThread();
  UnregisterWeakMemoryReporter(this);
}

nsresult StartupCache::Init() {
  // workaround for bug 653936
  nsCOMPtr<nsIProtocolHandler> jarInitializer(
      do_GetService(NS_NETWORK_PROTOCOL_CONTRACTID_PREFIX "jar"));

  nsresult rv;

  // This allows overriding the startup cache filename, which is useful from
  // xpcshell when there is no ProfLDS directory to keep the cache in.
  char* env = PR_GetEnv("MOZ_STARTUP_CACHE");
  if (env && *env) {
    rv = NS_NewLocalFile(NS_ConvertUTF8toUTF16(env), false,
                         getter_AddRefs(mFile));
  } else {
    nsCOMPtr<nsIFile> file;
    rv = NS_GetSpecialDirectory("ProfLDS", getter_AddRefs(file));
    if (NS_FAILED(rv)) {
      // Return silently; this will fail in mochitest's xpcshell process.
      return rv;
    }

    rv = file->AppendNative(NS_LITERAL_CSTRING("startupCache"));
    NS_ENSURE_SUCCESS(rv, rv);

    // Try to create the directory if it's not there yet
    rv = file->Create(nsIFile::DIRECTORY_TYPE, 0777);
    if (NS_FAILED(rv) && rv != NS_ERROR_FILE_ALREADY_EXISTS) return rv;

    rv = file->AppendNative(NS_LITERAL_CSTRING(STARTUP_CACHE_NAME));

    NS_ENSURE_SUCCESS(rv, rv);

    mFile = file;
  }

  NS_ENSURE_TRUE(mFile, NS_ERROR_UNEXPECTED);

  mObserverService = do_GetService("@mozilla.org/observer-service;1");

  if (!mObserverService) {
    NS_WARNING("Could not get observerService.");
    return NS_ERROR_UNEXPECTED;
  }

  mListener = new StartupCacheListener();
  rv = mObserverService->AddObserver(mListener, NS_XPCOM_SHUTDOWN_OBSERVER_ID,
                                     false);
  NS_ENSURE_SUCCESS(rv, rv);
  rv = mObserverService->AddObserver(mListener, "startupcache-invalidate",
                                     false);
  NS_ENSURE_SUCCESS(rv, rv);

  auto result = LoadArchive();
  rv = result.isErr() ? result.unwrapErr() : NS_OK;

  gFoundDiskCacheOnInit = rv != NS_ERROR_FILE_NOT_FOUND;

  // Sometimes we don't have a cache yet, that's ok.
  // If it's corrupted, just remove it and start over.
  if (gIgnoreDiskCache || (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND)) {
    NS_WARNING("Failed to load startupcache file correctly, removing!");
    InvalidateCache();
  }

  RegisterWeakMemoryReporter(this);
  mDecompressionContext = MakeUnique<LZ4FrameDecompressionContext>(true);

  return NS_OK;
}

void StartupCache::StartPrefetchMemoryThread() {
  // XXX: It would be great for this to not create its own thread;
  // unfortunately there doesn't seem to be an existing thread that makes sense
  // for this, so barring a coordinated global scheduling system this is the
  // best we get.
  mPrefetchThread = PR_CreateThread(
      PR_USER_THREAD, StartupCache::ThreadedPrefetch, this, PR_PRIORITY_NORMAL,
      PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 256 * 1024);
}

/**
 * LoadArchive can be called from the main thread or while reloading cache on
 * write thread.
 */
Result<Ok, nsresult> StartupCache::LoadArchive() {
  if (gIgnoreDiskCache) return Err(NS_ERROR_FAILURE);

  MOZ_TRY(mCacheData.init(mFile));
  auto size = mCacheData.size();
  if (CanPrefetchMemory()) {
    StartPrefetchMemoryThread();
  }

  uint32_t headerSize;
  if (size < sizeof(MAGIC) + sizeof(headerSize)) {
    return Err(NS_ERROR_UNEXPECTED);
  }

  auto data = mCacheData.get<uint8_t>();
  auto end = data + size;

  if (memcmp(MAGIC, data.get(), sizeof(MAGIC))) {
    return Err(NS_ERROR_UNEXPECTED);
  }
  data += sizeof(MAGIC);

  headerSize = LittleEndian::readUint32(data.get());
  data += sizeof(headerSize);

  if (headerSize > end - data) {
    MOZ_ASSERT(false, "StartupCache file is corrupt.");
    return Err(NS_ERROR_UNEXPECTED);
  }

  Range<uint8_t> header(data, data + headerSize);
  data += headerSize;

  mCacheEntriesBaseOffset = sizeof(MAGIC) + sizeof(headerSize) + headerSize;
  {
    if (!mTable.reserve(STARTUP_CACHE_RESERVE_CAPACITY)) {
      return Err(NS_ERROR_UNEXPECTED);
    }
    auto cleanup = MakeScopeExit([&]() {
      mTable.clear();
      mCacheData.reset();
    });
    loader::InputBuffer buf(header);

    uint32_t currentOffset = 0;
    while (!buf.finished()) {
      uint32_t offset = 0;
      uint32_t compressedSize = 0;
      uint32_t uncompressedSize = 0;
      nsCString key;
      buf.codeUint32(offset);
      buf.codeUint32(compressedSize);
      buf.codeUint32(uncompressedSize);
      buf.codeString(key);

      if (offset + compressedSize > end - data) {
        MOZ_ASSERT(false, "StartupCache file is corrupt.");
        return Err(NS_ERROR_UNEXPECTED);
      }

      // Make sure offsets match what we'd expect based on script ordering and
      // size, as a basic sanity check.
      if (offset != currentOffset) {
        return Err(NS_ERROR_UNEXPECTED);
      }
      currentOffset += compressedSize;

      // We could use mTable.putNew if we knew the file we're loading weren't
      // corrupt. However, we don't know that, so check if the key already
      // exists. If it does, we know the file must be corrupt.
      decltype(mTable)::AddPtr p = mTable.lookupForAdd(key);
      if (p) {
        return Err(NS_ERROR_UNEXPECTED);
      }

      if (!mTable.add(
              p, key,
              StartupCacheEntry(offset, compressedSize, uncompressedSize))) {
        return Err(NS_ERROR_UNEXPECTED);
      }
    }

    if (buf.error()) {
      return Err(NS_ERROR_UNEXPECTED);
    }

    cleanup.release();
  }

  return Ok();
}

bool StartupCache::HasEntry(const char* id) {
  AUTO_PROFILER_LABEL("StartupCache::HasEntry", OTHER);

  MOZ_ASSERT(NS_IsMainThread(), "Startup cache only available on main thread");
  WaitOnWriteThread();

  return mTable.has(nsDependentCString(id));
}

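// Illustrative usage sketch (the "myKey" name is hypothetical; real callers
// wrap GetBuffer/PutBuffer in their own serialization helpers):
//
//   StartupCache* sc = StartupCache::GetSingleton();
//   const char* buf = nullptr;
//   uint32_t len = 0;
//   if (sc && NS_SUCCEEDED(sc->GetBuffer("myKey", &buf, &len))) {
//     // buf/len point at decompressed data owned by the cache.
//   }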
nsresult StartupCache::GetBuffer(const char* id, const char** outbuf,
                                 uint32_t* length) {
  AUTO_PROFILER_LABEL("StartupCache::GetBuffer", OTHER);

  NS_ASSERTION(NS_IsMainThread(),
               "Startup cache only available on main thread");

  WaitOnWriteThread();
  Telemetry::LABELS_STARTUP_CACHE_REQUESTS label =
      Telemetry::LABELS_STARTUP_CACHE_REQUESTS::Miss;
  auto telemetry =
      MakeScopeExit([&label] { Telemetry::AccumulateCategorical(label); });

  decltype(mTable)::Ptr p = mTable.lookup(nsDependentCString(id));
  if (!p) {
    return NS_ERROR_NOT_AVAILABLE;
  }

  auto& value = p->value();
  if (value.mData) {
    label = Telemetry::LABELS_STARTUP_CACHE_REQUESTS::HitMemory;
  } else {
    if (!mCacheData.initialized()) {
      return NS_ERROR_NOT_AVAILABLE;
    }

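    // The entry is still compressed in the memory-mapped cache file; inflate
    // it into a freshly allocated buffer the first time it is requested.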
    size_t totalRead = 0;
    size_t totalWritten = 0;
    Span<const char> compressed = MakeSpan(
        mCacheData.get<char>().get() + mCacheEntriesBaseOffset + value.mOffset,
        value.mCompressedSize);
    value.mData = MakeUnique<char[]>(value.mUncompressedSize);
    Span<char> uncompressed =
        MakeSpan(value.mData.get(), value.mUncompressedSize);
    bool finished = false;
    while (!finished) {
      auto result = mDecompressionContext->Decompress(
          uncompressed.From(totalWritten), compressed.From(totalRead));
      if (NS_WARN_IF(result.isErr())) {
        value.mData = nullptr;
        InvalidateCache();
        return NS_ERROR_FAILURE;
      }
      auto decompressionResult = result.unwrap();
      totalRead += decompressionResult.mSizeRead;
      totalWritten += decompressionResult.mSizeWritten;
      finished = decompressionResult.mFinished;
    }

    label = Telemetry::LABELS_STARTUP_CACHE_REQUESTS::HitDisk;
  }

  if (!value.mRequested) {
    value.mRequested = true;
    value.mRequestedOrder = ++mRequestedCount;
    MOZ_ASSERT(mRequestedCount <= mTable.count(),
               "Somehow we requested more StartupCache items than exist.");
    ResetStartupWriteTimerCheckingReadCount();
  }

  // Track that something holds a reference into mTable, so we know to hold
  // onto it in case the cache is invalidated.
  mCurTableReferenced = true;
  *outbuf = value.mData.get();
  *length = value.mUncompressedSize;
  return NS_OK;
}

// Takes ownership of the passed-in buffer and stores it in the table.
nsresult StartupCache::PutBuffer(const char* id, UniquePtr<char[]>&& inbuf,
                                 uint32_t len) {
  NS_ASSERTION(NS_IsMainThread(),
               "Startup cache only available on main thread");
  WaitOnWriteThread();
  if (StartupCache::gShutdownInitiated) {
    return NS_ERROR_NOT_AVAILABLE;
  }

  bool exists = mTable.has(nsDependentCString(id));

  if (exists) {
    NS_WARNING("Existing entry in StartupCache.");
    // Double-caching is undesirable but not an error.
    return NS_OK;
  }

  // putNew returns false on alloc failure - in the very unlikely event we hit
  // that and aren't going to crash elsewhere, there's no reason we need to
  // crash here.
  if (mTable.putNew(nsCString(id), StartupCacheEntry(std::move(inbuf), len,
                                                     ++mRequestedCount))) {
    return ResetStartupWriteTimer();
  }
  MOZ_DIAGNOSTIC_ASSERT(mTable.count() < STARTUP_CACHE_MAX_CAPACITY,
                        "Too many StartupCache entries.");
  return NS_OK;
}

size_t StartupCache::HeapSizeOfIncludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  // This function could measure more members, but they haven't been found by
  // DMD to be significant. They can be added later if necessary.

  size_t n = aMallocSizeOf(this);

  n += mTable.shallowSizeOfExcludingThis(aMallocSizeOf);
  for (auto iter = mTable.iter(); !iter.done(); iter.next()) {
    if (iter.get().value().mData) {
      n += aMallocSizeOf(iter.get().value().mData.get());
    }
    n += iter.get().key().SizeOfExcludingThisIfUnshared(aMallocSizeOf);
  }

  return n;
}

/**
 * WriteToDisk writes the cache out to disk. Callers of WriteToDisk need to
 * call WaitOnWriteThread to make sure there isn't a write happening on
 * another thread.
 */
Result<Ok, nsresult> StartupCache::WriteToDisk() {
  mStartupWriteInitiated = true;
  if (!mDirty || mWrittenOnce) {
    return Ok();
  }

  if (!mFile) {
    return Err(NS_ERROR_UNEXPECTED);
  }

  AutoFDClose fd;
  MOZ_TRY(mFile->OpenNSPRFileDesc(PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
                                  0644, &fd.rwget()));

  nsTArray<std::pair<const nsCString*, StartupCacheEntry*>> entries;
  for (auto iter = mTable.iter(); !iter.done(); iter.next()) {
    if (iter.get().value().mRequested) {
      entries.AppendElement(
          std::make_pair(&iter.get().key(), &iter.get().value()));
    }
  }

  if (entries.IsEmpty()) {
    return Ok();
  }

  entries.Sort(StartupCacheEntry::Comparator());
  loader::OutputBuffer buf;
  for (auto& e : entries) {
    auto key = e.first;
    auto value = e.second;
    auto uncompressedSize = value->mUncompressedSize;
    // Set the mHeaderOffsetInFile so we can go back and edit the offset.
    value->mHeaderOffsetInFile = buf.cursor();
    // Write a 0 offset/compressed size as a placeholder until we get the real
    // offset after compressing.
    buf.codeUint32(0);
    buf.codeUint32(0);
    buf.codeUint32(uncompressedSize);
    buf.codeString(*key);
  }

  uint8_t headerSize[4];
  LittleEndian::writeUint32(headerSize, buf.cursor());

  MOZ_TRY(Write(fd, MAGIC, sizeof(MAGIC)));
  MOZ_TRY(Write(fd, headerSize, sizeof(headerSize)));
  size_t headerStart = sizeof(MAGIC) + sizeof(headerSize);
  size_t dataStart = headerStart + buf.cursor();
  MOZ_TRY(Seek(fd, dataStart));

  size_t offset = 0;

  const size_t chunkSize = 1024 * 16;
  LZ4FrameCompressionContext ctx(6, /* aCompressionLevel */
                                 chunkSize, /* aReadBufLen */
                                 true, /* aChecksum */
                                 true); /* aStableSrc */
  size_t writeBufLen = ctx.GetRequiredWriteBufferLength();
  auto writeBuffer = MakeUnique<char[]>(writeBufLen);
  auto writeSpan = MakeSpan(writeBuffer.get(), writeBufLen);

  for (auto& e : entries) {
    auto value = e.second;
    value->mOffset = offset;
    Span<const char> result;
    MOZ_TRY_VAR(result,
                ctx.BeginCompressing(writeSpan).mapErr(MapLZ4ErrorToNsresult));
    MOZ_TRY(Write(fd, result.Elements(), result.Length()));
    offset += result.Length();

    for (size_t i = 0; i < value->mUncompressedSize; i += chunkSize) {
      size_t size = std::min(chunkSize, value->mUncompressedSize - i);
      char* uncompressed = value->mData.get() + i;
      MOZ_TRY_VAR(result, ctx.ContinueCompressing(MakeSpan(uncompressed, size))
                              .mapErr(MapLZ4ErrorToNsresult));
      MOZ_TRY(Write(fd, result.Elements(), result.Length()));
      offset += result.Length();
    }

    MOZ_TRY_VAR(result, ctx.EndCompressing().mapErr(MapLZ4ErrorToNsresult));
    MOZ_TRY(Write(fd, result.Elements(), result.Length()));
    offset += result.Length();
    value->mCompressedSize = offset - value->mOffset;
    MOZ_TRY(Seek(fd, dataStart + offset));
  }

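  // Now that the real offsets and compressed sizes are known, patch them into
  // the in-memory header and rewrite the header at the front of the file.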
  for (auto& e : entries) {
    auto value = e.second;
    uint8_t* headerEntry = buf.Get() + value->mHeaderOffsetInFile;
    LittleEndian::writeUint32(headerEntry, value->mOffset);
    LittleEndian::writeUint32(headerEntry + sizeof(value->mOffset),
                              value->mCompressedSize);
  }
  MOZ_TRY(Seek(fd, headerStart));
  MOZ_TRY(Write(fd, buf.Get(), buf.cursor()));

  mDirty = false;
  mWrittenOnce = true;

  return Ok();
}

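// Discards the in-memory table (stashing it in mOldTables if callers still
// hold pointers into it) and then reloads the archive via LoadArchive(). With
// memoryOnly=true the current entries are first flushed to disk and the cache
// file is kept; otherwise the cache file itself is removed.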
void StartupCache::InvalidateCache(bool memoryOnly) {
  WaitOnWriteThread();
  mWrittenOnce = false;
  if (memoryOnly) {
    auto writeResult = WriteToDisk();
    if (NS_WARN_IF(writeResult.isErr())) {
      gIgnoreDiskCache = true;
      return;
    }
  }
  if (mCurTableReferenced) {
    // There should be no way for this assert to fail other than a user
    // manually sending startupcache-invalidate messages through the Browser
    // Toolbox.
    MOZ_DIAGNOSTIC_ASSERT(xpc::IsInAutomation() || mOldTables.Length() < 10,
                          "Startup cache invalidated too many times.");
    mOldTables.AppendElement(std::move(mTable));
    mCurTableReferenced = false;
  } else {
    mTable.clear();
  }
  mRequestedCount = 0;
  if (!memoryOnly) {
    mCacheData.reset();
    nsresult rv = mFile->Remove(false);
    if (NS_FAILED(rv) && rv != NS_ERROR_FILE_TARGET_DOES_NOT_EXIST &&
        rv != NS_ERROR_FILE_NOT_FOUND) {
      gIgnoreDiskCache = true;
      return;
    }
  }
  gIgnoreDiskCache = false;
  auto result = LoadArchive();
  if (NS_WARN_IF(result.isErr())) {
    gIgnoreDiskCache = true;
  }
}

void StartupCache::MaybeInitShutdownWrite() {
  if (mTimer) {
    mTimer->Cancel();
  }
  gShutdownInitiated = true;

  MaybeSpawnWriteThread();
}

void StartupCache::IgnoreDiskCache() {
  gIgnoreDiskCache = true;
  if (gStartupCache) gStartupCache->InvalidateCache();
}

/*
 * WaitOnWriteThread() is called from the main thread to wait for the worker
 * thread to finish. However, since the same code is used on both the worker
 * thread and the main thread, the worker thread can also call
 * WaitOnWriteThread(), in which case it is a no-op.
 */
void StartupCache::WaitOnWriteThread() {
  NS_ASSERTION(NS_IsMainThread(),
               "Startup cache should only wait for io thread on main thread");
  if (!mWriteThread || mWriteThread == PR_GetCurrentThread()) return;

  PR_JoinThread(mWriteThread);
  mWriteThread = nullptr;
}

void StartupCache::WaitOnPrefetchThread() {
  if (!mPrefetchThread || mPrefetchThread == PR_GetCurrentThread()) return;

  PR_JoinThread(mPrefetchThread);
  mPrefetchThread = nullptr;
}

void StartupCache::ThreadedPrefetch(void* aClosure) {
  AUTO_PROFILER_REGISTER_THREAD("StartupCache");
  NS_SetCurrentThreadName("StartupCache");
  mozilla::IOInterposer::RegisterCurrentThread();
  StartupCache* startupCacheObj = static_cast<StartupCache*>(aClosure);
  PrefetchMemory(startupCacheObj->mCacheData.get<uint8_t>().get(),
                 startupCacheObj->mCacheData.size());
  mozilla::IOInterposer::UnregisterCurrentThread();
}

void StartupCache::ThreadedWrite(void* aClosure) {
  AUTO_PROFILER_REGISTER_THREAD("StartupCache");
  NS_SetCurrentThreadName("StartupCache");
  mozilla::IOInterposer::RegisterCurrentThread();
  /*
   * It is safe to use the pointer passed in aClosure to reference the
   * StartupCache object because the thread's lifetime is tightly coupled to
   * the lifetime of the StartupCache object; this thread is joined in the
   * StartupCache destructor, guaranteeing that this function runs if and only
   * if the StartupCache object is valid.
   */
  StartupCache* startupCacheObj = static_cast<StartupCache*>(aClosure);
  auto result = startupCacheObj->WriteToDisk();
  Unused << NS_WARN_IF(result.isErr());
  mozilla::IOInterposer::UnregisterCurrentThread();
}

bool StartupCache::ShouldCompactCache() {
  // If we've requested less than 4/5 of the startup cache, then we should
  // probably compact it down. This can happen quite easily after the first
  // run, which seems to request quite a few more things than subsequent runs.
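  // For example, with 100 entries in the table the threshold is 80, so we
  // compact if fewer than 80 entries were requested this run.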
  CheckedInt<uint32_t> threshold = CheckedInt<uint32_t>(mTable.count()) * 4 / 5;
  MOZ_RELEASE_ASSERT(threshold.isValid(), "Runaway StartupCache size");
  return mRequestedCount < threshold.value();
}

/*
 * The write thread is spawned on a timeout (which is reset with every write).
 * This can avoid a slow shutdown. After writing out the cache, the zipreader
 * is reloaded on the worker thread.
 */
void StartupCache::WriteTimeout(nsITimer* aTimer, void* aClosure) {
  /*
   * It is safe to use the pointer passed in aClosure to reference the
   * StartupCache object because the timer's lifetime is tightly coupled to
   * the lifetime of the StartupCache object; this timer is canceled in the
   * StartupCache destructor, guaranteeing that this function runs if and only
   * if the StartupCache object is valid.
   */
  StartupCache* startupCacheObj = static_cast<StartupCache*>(aClosure);
  startupCacheObj->MaybeSpawnWriteThread();
}

/*
 * See StartupCache::WriteTimeout above - this is just the non-static body.
 */
void StartupCache::MaybeSpawnWriteThread() {
  if (mWriteThread || mWrittenOnce) {
    return;
  }

  if (mCacheData.initialized() && !ShouldCompactCache()) {
    return;
  }

  WaitOnPrefetchThread();
  mStartupWriteInitiated = false;
  mDirty = true;
  mCacheData.reset();
  mWriteThread = PR_CreateThread(PR_USER_THREAD, StartupCache::ThreadedWrite,
                                 this, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                                 PR_JOINABLE_THREAD, 512 * 1024);
}

// We don't want to refcount StartupCache, so we'll just
// hold a ref to this and pass it to observerService instead.
NS_IMPL_ISUPPORTS(StartupCacheListener, nsIObserver)

nsresult StartupCacheListener::Observe(nsISupports* subject, const char* topic,
                                       const char16_t* data) {
  StartupCache* sc = StartupCache::GetSingleton();
  if (!sc) return NS_OK;

  if (strcmp(topic, NS_XPCOM_SHUTDOWN_OBSERVER_ID) == 0) {
    // Do not leave the thread running past xpcom shutdown
    sc->WaitOnWriteThread();
    sc->WaitOnPrefetchThread();
    StartupCache::gShutdownInitiated = true;
  } else if (strcmp(topic, "startupcache-invalidate") == 0) {
    sc->InvalidateCache(data && nsCRT::strcmp(data, u"memoryOnly") == 0);
  }
  return NS_OK;
}

nsresult StartupCache::GetDebugObjectOutputStream(
    nsIObjectOutputStream* aStream, nsIObjectOutputStream** aOutStream) {
  NS_ENSURE_ARG_POINTER(aStream);
#ifdef DEBUG
  auto* stream = new StartupCacheDebugOutputStream(aStream, &mWriteObjectMap);
  NS_ADDREF(*aOutStream = stream);
#else
  NS_ADDREF(*aOutStream = aStream);
#endif

  return NS_OK;
}

nsresult StartupCache::ResetStartupWriteTimerCheckingReadCount() {
  nsresult rv = NS_OK;
  if (!mTimer)
    mTimer = NS_NewTimer();
  else
    rv = mTimer->Cancel();
  NS_ENSURE_SUCCESS(rv, rv);
  // Wait for 60 seconds, then write out the cache.
  mTimer->InitWithNamedFuncCallback(StartupCache::WriteTimeout, this, 60000,
                                    nsITimer::TYPE_ONE_SHOT,
                                    "StartupCache::WriteTimeout");
  return NS_OK;
}

nsresult StartupCache::ResetStartupWriteTimer() {
  mStartupWriteInitiated = false;
  mDirty = true;
  nsresult rv = NS_OK;
  if (!mTimer)
    mTimer = NS_NewTimer();
  else
    rv = mTimer->Cancel();
  NS_ENSURE_SUCCESS(rv, rv);
  // Wait for 60 seconds, then write out the cache.
  mTimer->InitWithNamedFuncCallback(StartupCache::WriteTimeout, this, 60000,
                                    nsITimer::TYPE_ONE_SHOT,
                                    "StartupCache::WriteTimeout");
  return NS_OK;
}

bool StartupCache::StartupWriteComplete() {
  WaitOnWriteThread();
  return mStartupWriteInitiated && !mDirty;
}

// StartupCacheDebugOutputStream implementation
#ifdef DEBUG
NS_IMPL_ISUPPORTS(StartupCacheDebugOutputStream, nsIObjectOutputStream,
                  nsIBinaryOutputStream, nsIOutputStream)

bool StartupCacheDebugOutputStream::CheckReferences(nsISupports* aObject) {
  nsresult rv;

  nsCOMPtr<nsIClassInfo> classInfo = do_QueryInterface(aObject);
  if (!classInfo) {
    NS_ERROR("aObject must implement nsIClassInfo");
    return false;
  }

  uint32_t flags;
  rv = classInfo->GetFlags(&flags);
  NS_ENSURE_SUCCESS(rv, false);
  if (flags & nsIClassInfo::SINGLETON) return true;

  nsISupportsHashKey* key = mObjectMap->GetEntry(aObject);
  if (key) {
    NS_ERROR(
"non-singleton aObject is referenced multiple times in this"
805
"serialization, we don't support that.");
806
    return false;
  }

  mObjectMap->PutEntry(aObject);
  return true;
}

// nsIObjectOutputStream implementation
nsresult StartupCacheDebugOutputStream::WriteObject(nsISupports* aObject,
                                                    bool aIsStrongRef) {
  nsCOMPtr<nsISupports> rootObject(do_QueryInterface(aObject));

  NS_ASSERTION(rootObject.get() == aObject,
               "bad call to WriteObject -- call WriteCompoundObject!");
  bool check = CheckReferences(aObject);
  NS_ENSURE_TRUE(check, NS_ERROR_FAILURE);
  return mBinaryStream->WriteObject(aObject, aIsStrongRef);
}

nsresult StartupCacheDebugOutputStream::WriteSingleRefObject(
    nsISupports* aObject) {
  nsCOMPtr<nsISupports> rootObject(do_QueryInterface(aObject));

  NS_ASSERTION(rootObject.get() == aObject,
               "bad call to WriteSingleRefObject -- call WriteCompoundObject!");
  bool check = CheckReferences(aObject);
  NS_ENSURE_TRUE(check, NS_ERROR_FAILURE);
  return mBinaryStream->WriteSingleRefObject(aObject);
}

nsresult StartupCacheDebugOutputStream::WriteCompoundObject(
    nsISupports* aObject, const nsIID& aIID, bool aIsStrongRef) {
  nsCOMPtr<nsISupports> rootObject(do_QueryInterface(aObject));

  nsCOMPtr<nsISupports> roundtrip;
  rootObject->QueryInterface(aIID, getter_AddRefs(roundtrip));
  NS_ASSERTION(roundtrip.get() == aObject,
               "bad aggregation or multiple inheritance detected by call to "
               "WriteCompoundObject!");

  bool check = CheckReferences(aObject);
  NS_ENSURE_TRUE(check, NS_ERROR_FAILURE);
  return mBinaryStream->WriteCompoundObject(aObject, aIID, aIsStrongRef);
}

nsresult StartupCacheDebugOutputStream::WriteID(nsID const& aID) {
  return mBinaryStream->WriteID(aID);
}

char* StartupCacheDebugOutputStream::GetBuffer(uint32_t aLength,
                                               uint32_t aAlignMask) {
  return mBinaryStream->GetBuffer(aLength, aAlignMask);
}

void StartupCacheDebugOutputStream::PutBuffer(char* aBuffer, uint32_t aLength) {
  mBinaryStream->PutBuffer(aBuffer, aLength);
}
#endif  // DEBUG

}  // namespace scache
}  // namespace mozilla