/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <algorithm>
#include <math.h>
#include "CacheEntry.h"
#include "CacheFileUtils.h"
#include "CacheIndex.h"
#include "CacheLog.h"
#include "CacheObserver.h"
#include "CacheStorageService.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/Telemetry.h"
#include "mozilla/psm/TransportSecurityInfo.h"
#include "nsComponentManagerUtils.h"
#include "nsIAsyncOutputStream.h"
#include "nsICacheEntryOpenCallback.h"
#include "nsICacheStorage.h"
#include "nsIInputStream.h"
#include "nsIOutputStream.h"
#include "nsISeekableStream.h"
#include "nsISizeOf.h"
#include "nsIURI.h"
#include "nsNetCID.h"
#include "nsProxyRelease.h"
#include "nsServiceManagerUtils.h"
#include "nsString.h"
#include "nsThreadUtils.h"
namespace mozilla::net {
static uint32_t const ENTRY_WANTED = nsICacheEntryOpenCallback::ENTRY_WANTED;
static uint32_t const RECHECK_AFTER_WRITE_FINISHED =
nsICacheEntryOpenCallback::RECHECK_AFTER_WRITE_FINISHED;
static uint32_t const ENTRY_NEEDS_REVALIDATION =
nsICacheEntryOpenCallback::ENTRY_NEEDS_REVALIDATION;
static uint32_t const ENTRY_NOT_WANTED =
nsICacheEntryOpenCallback::ENTRY_NOT_WANTED;
NS_IMPL_ISUPPORTS(CacheEntryHandle, nsICacheEntry)
// CacheEntryHandle
CacheEntryHandle::CacheEntryHandle(CacheEntry* aEntry) : mEntry(aEntry) {
#ifdef DEBUG
if (!mEntry->HandlesCount()) {
// CacheEntry.mHandlesCount must go from zero to one only under
// the service lock. Can access CacheStorageService::Self() w/o a check
// since CacheEntry hard-refers it.
CacheStorageService::Self()->Lock().AssertCurrentThreadOwns();
}
#endif
mEntry->AddHandleRef();
LOG(("New CacheEntryHandle %p for entry %p", this, aEntry));
}
NS_IMETHODIMP CacheEntryHandle::Dismiss() {
LOG(("CacheEntryHandle::Dismiss %p", this));
if (mClosed.compareExchange(false, true)) {
mEntry->OnHandleClosed(this);
return NS_OK;
}
LOG((" already dropped"));
return NS_ERROR_UNEXPECTED;
}
CacheEntryHandle::~CacheEntryHandle() {
mEntry->ReleaseHandleRef();
Dismiss();
LOG(("CacheEntryHandle::~CacheEntryHandle %p", this));
}
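// Typical handle lifecycle (illustrative sketch): a consumer obtains a
// handle, uses it as an nsICacheEntry, and either dismisses it explicitly
// or lets the destructor do it:
//
//   RefPtr<CacheEntryHandle> handle = entry->NewHandle();
//   // ... use the handle as an nsICacheEntry ...
//   handle->Dismiss();  // optional; ~CacheEntryHandle() calls it anyway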
// CacheEntry::Callback
CacheEntry::Callback::Callback(CacheEntry* aEntry,
nsICacheEntryOpenCallback* aCallback,
bool aReadOnly, bool aCheckOnAnyThread,
bool aSecret)
: mEntry(aEntry),
mCallback(aCallback),
mTarget(GetCurrentSerialEventTarget()),
mReadOnly(aReadOnly),
mRevalidating(false),
mCheckOnAnyThread(aCheckOnAnyThread),
mRecheckAfterWrite(false),
mNotWanted(false),
mSecret(aSecret),
mDoomWhenFoundPinned(false),
mDoomWhenFoundNonPinned(false) {
MOZ_COUNT_CTOR(CacheEntry::Callback);
// The counter may go from zero to non-zero only under the service lock,
// but here we expect it to be already positive.
MOZ_ASSERT(mEntry->HandlesCount());
mEntry->AddHandleRef();
}
CacheEntry::Callback::Callback(CacheEntry* aEntry,
bool aDoomWhenFoundInPinStatus)
: mEntry(aEntry),
mReadOnly(false),
mRevalidating(false),
mCheckOnAnyThread(true),
mRecheckAfterWrite(false),
mNotWanted(false),
mSecret(false),
mDoomWhenFoundPinned(aDoomWhenFoundInPinStatus),
mDoomWhenFoundNonPinned(!aDoomWhenFoundInPinStatus) {
MOZ_COUNT_CTOR(CacheEntry::Callback);
MOZ_ASSERT(mEntry->HandlesCount());
mEntry->AddHandleRef();
}
CacheEntry::Callback::Callback(CacheEntry::Callback const& aThat)
: mEntry(aThat.mEntry),
mCallback(aThat.mCallback),
mTarget(aThat.mTarget),
mReadOnly(aThat.mReadOnly),
mRevalidating(aThat.mRevalidating),
mCheckOnAnyThread(aThat.mCheckOnAnyThread),
mRecheckAfterWrite(aThat.mRecheckAfterWrite),
mNotWanted(aThat.mNotWanted),
mSecret(aThat.mSecret),
mDoomWhenFoundPinned(aThat.mDoomWhenFoundPinned),
mDoomWhenFoundNonPinned(aThat.mDoomWhenFoundNonPinned) {
MOZ_COUNT_CTOR(CacheEntry::Callback);
// The counter may go from zero to non-zero only under the service lock,
// but here we expect it to be already positive.
MOZ_ASSERT(mEntry->HandlesCount());
mEntry->AddHandleRef();
}
CacheEntry::Callback::~Callback() {
ProxyRelease("CacheEntry::Callback::mCallback", mCallback, mTarget);
mEntry->ReleaseHandleRef();
MOZ_COUNT_DTOR(CacheEntry::Callback);
}
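// Note: ProxyRelease is used because nsICacheEntryOpenCallback
// implementations are generally not thread-safe; the last reference to
// mCallback must be released on mTarget, the serial event target captured
// at construction time.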
// We have locks on both this and aEntry
void CacheEntry::Callback::ExchangeEntry(CacheEntry* aEntry) {
aEntry->mLock.AssertCurrentThreadOwns();
mEntry->mLock.AssertCurrentThreadOwns();
if (mEntry == aEntry) return;
// The counter may go from zero to non-zero only under the service lock,
// but here we expect it to be already positive.
MOZ_ASSERT(aEntry->HandlesCount());
aEntry->AddHandleRef();
mEntry->ReleaseHandleRef();
mEntry = aEntry;
}
// This is called on entries in another entry's mCallback array, under the lock
// of that other entry. No other threads can access this entry at this time.
bool CacheEntry::Callback::DeferDoom(bool* aDoom) const
MOZ_NO_THREAD_SAFETY_ANALYSIS {
MOZ_ASSERT(mEntry->mPinningKnown);
if (MOZ_UNLIKELY(mDoomWhenFoundNonPinned) ||
MOZ_UNLIKELY(mDoomWhenFoundPinned)) {
*aDoom =
(MOZ_UNLIKELY(mDoomWhenFoundNonPinned) &&
MOZ_LIKELY(!mEntry->mPinned)) ||
(MOZ_UNLIKELY(mDoomWhenFoundPinned) && MOZ_UNLIKELY(mEntry->mPinned));
return true;
}
return false;
}
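// Summary: for a doom-marker callback created with the pin-status
// constructor above, *aDoom ends up true exactly when the entry's
// now-known pin state matches the state the marker was created to doom.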
nsresult CacheEntry::Callback::OnCheckThread(bool* aOnCheckThread) const {
if (!mCheckOnAnyThread) {
// Check we are on the target
return mTarget->IsOnCurrentThread(aOnCheckThread);
}
// We can invoke check anywhere
*aOnCheckThread = true;
return NS_OK;
}
nsresult CacheEntry::Callback::OnAvailThread(bool* aOnAvailThread) const {
return mTarget->IsOnCurrentThread(aOnAvailThread);
}
// CacheEntry
NS_IMPL_ISUPPORTS(CacheEntry, nsIRunnable, CacheFileListener)
/* static */
uint64_t CacheEntry::GetNextId() {
static Atomic<uint64_t, Relaxed> id(0);
return ++id;
}
CacheEntry::CacheEntry(const nsACString& aStorageID, const nsACString& aURI,
const nsACString& aEnhanceID, bool aUseDisk,
bool aSkipSizeCheck, bool aPin)
: mURI(aURI),
mEnhanceID(aEnhanceID),
mStorageID(aStorageID),
mUseDisk(aUseDisk),
mSkipSizeCheck(aSkipSizeCheck),
mPinned(aPin),
mSecurityInfoLoaded(false),
mPreventCallbacks(false),
mHasData(false),
mPinningKnown(false),
mCacheEntryId(GetNextId()) {
LOG(("CacheEntry::CacheEntry [this=%p]", this));
mService = CacheStorageService::Self();
CacheStorageService::Self()->RecordMemoryOnlyEntry(this, !aUseDisk,
true /* overwrite */);
}
CacheEntry::~CacheEntry() { LOG(("CacheEntry::~CacheEntry [this=%p]", this)); }
char const* CacheEntry::StateString(uint32_t aState) {
switch (aState) {
case NOTLOADED:
return "NOTLOADED";
case LOADING:
return "LOADING";
case EMPTY:
return "EMPTY";
case WRITING:
return "WRITING";
case READY:
return "READY";
case REVALIDATING:
return "REVALIDATING";
}
return "?";
}
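// State-machine sketch (reconstructed from the code in this file; all
// transitions happen under mLock):
//
//   NOTLOADED -> LOADING        (Load() starts an async file open)
//   LOADING   -> EMPTY          (new entry, failed load, or direct load)
//   LOADING   -> READY          (existing entry found by OnFileReady)
//   EMPTY     -> WRITING        (InvokeCallback lets a r/w consumer fill it)
//   READY     -> REVALIDATING   (consumer answered ENTRY_NEEDS_REVALIDATION)
//
// Transitions back to READY from WRITING/REVALIDATING happen in code
// outside this excerpt.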
nsresult CacheEntry::HashingKeyWithStorage(nsACString& aResult) const {
return HashingKey(mStorageID, mEnhanceID, mURI, aResult);
}
nsresult CacheEntry::HashingKey(nsACString& aResult) const {
return HashingKey(""_ns, mEnhanceID, mURI, aResult);
}
// static
nsresult CacheEntry::HashingKey(const nsACString& aStorageID,
const nsACString& aEnhanceID, nsIURI* aURI,
nsACString& aResult) {
nsAutoCString spec;
nsresult rv = aURI->GetAsciiSpec(spec);
NS_ENSURE_SUCCESS(rv, rv);
return HashingKey(aStorageID, aEnhanceID, spec, aResult);
}
// static
nsresult CacheEntry::HashingKey(const nsACString& aStorageID,
const nsACString& aEnhanceID,
const nsACString& aURISpec,
nsACString& aResult) {
/**
* This key is used to salt the hash that forms the base of the disk file
* name. Changing it would make files already written to disk unfindable.
*/
aResult.Assign(aStorageID);
if (!aEnhanceID.IsEmpty()) {
CacheFileUtils::AppendTagWithValue(aResult, '~', aEnhanceID);
}
// Appending directly
aResult.Append(':');
aResult.Append(aURISpec);
return NS_OK;
}
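// Resulting key shape (illustrative; the exact tag syntax and escaping are
// implemented by CacheFileUtils::AppendTagWithValue):
//
//   <storageID>[~<enhanceID>]:<URI spec>
//
// e.g. roughly "a~pre:http://example.com/" for storage "a" and enhance
// id "pre".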
void CacheEntry::AsyncOpen(nsICacheEntryOpenCallback* aCallback,
uint32_t aFlags) {
bool readonly = aFlags & nsICacheStorage::OPEN_READONLY;
bool bypassIfBusy = aFlags & nsICacheStorage::OPEN_BYPASS_IF_BUSY;
bool truncate = aFlags & nsICacheStorage::OPEN_TRUNCATE;
bool priority = aFlags & nsICacheStorage::OPEN_PRIORITY;
bool multithread = aFlags & nsICacheStorage::CHECK_MULTITHREADED;
bool secret = aFlags & nsICacheStorage::OPEN_SECRETLY;
if (MOZ_LOG_TEST(gCache2Log, LogLevel::Debug)) {
MutexAutoLock lock(mLock);
LOG(("CacheEntry::AsyncOpen [this=%p, state=%s, flags=%d, callback=%p]",
this, StateString(mState), aFlags, aCallback));
}
#ifdef DEBUG
{
// yes, if logging is on in DEBUG we'll take the lock twice in a row
MutexAutoLock lock(mLock);
MOZ_ASSERT(!readonly || !truncate, "Bad flags combination");
MOZ_ASSERT(!(truncate && mState > LOADING),
"Must not call truncate on already loaded entry");
}
#endif
Callback callback(this, aCallback, readonly, multithread, secret);
if (!Open(callback, truncate, priority, bypassIfBusy)) {
// We get here when the callback wants to bypass cache when it's busy.
LOG((" writing or revalidating, callback wants to bypass cache"));
callback.mNotWanted = true;
InvokeAvailableCallback(callback);
}
}
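// Caller-side sketch (illustrative, not from the original source): aFlags
// is a bit-wise OR of the nsICacheStorage values decoded above, e.g.
//
//   entry->AsyncOpen(myCallback, nsICacheStorage::OPEN_READONLY |
//                                    nsICacheStorage::CHECK_MULTITHREADED);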
bool CacheEntry::Open(Callback& aCallback, bool aTruncate, bool aPriority,
bool aBypassIfBusy) {
mozilla::MutexAutoLock lock(mLock);
// Check state under the lock
if (aBypassIfBusy && (mState == WRITING || mState == REVALIDATING)) {
return false;
}
RememberCallback(aCallback);
// Load() temporarily releases (unlocks) the lock while doing file I/O
if (Load(aTruncate, aPriority)) {
// Loading is in progress...
return true;
}
InvokeCallbacks();
return true;
}
bool CacheEntry::Load(bool aTruncate, bool aPriority) MOZ_REQUIRES(mLock) {
LOG(("CacheEntry::Load [this=%p, trunc=%d]", this, aTruncate));
mLock.AssertCurrentThreadOwns();
if (mState > LOADING) {
LOG((" already loaded"));
return false;
}
if (mState == LOADING) {
LOG((" already loading"));
return true;
}
mState = LOADING;
MOZ_ASSERT(!mFile);
nsresult rv;
nsAutoCString fileKey;
rv = HashingKeyWithStorage(fileKey);
bool reportMiss = false;
// Check the index under two conditions for two states and take appropriate
// action:
// 1. When this is a disk entry and not told to truncate, check there is a
// disk file.
// If not, set the 'truncate' flag to true so that this entry will open
// instantly as a new one.
// 2. When this is a memory-only entry, check there is a disk file.
// If there is or could be, doom that file.
if ((!aTruncate || !mUseDisk) && NS_SUCCEEDED(rv)) {
// Check the index right now so we learn, as soon as possible, whether
// we have the entry or not.
CacheIndex::EntryStatus status;
if (NS_SUCCEEDED(CacheIndex::HasEntry(fileKey, &status))) {
switch (status) {
case CacheIndex::DOES_NOT_EXIST:
// Doesn't apply to memory-only entries, Load() is called only once
// for them and never again for their session lifetime.
if (!aTruncate && mUseDisk) {
LOG(
(" entry doesn't exist according information from the index, "
"truncating"));
reportMiss = true;
aTruncate = true;
}
break;
case CacheIndex::EXISTS:
case CacheIndex::DO_NOT_KNOW:
if (!mUseDisk) {
LOG(
(" entry open as memory-only, but there is a file, status=%d, "
"dooming it",
status));
CacheFileIOManager::DoomFileByKey(fileKey, nullptr);
}
break;
}
}
}
mFile = new CacheFile();
BackgroundOp(Ops::REGISTER);
bool directLoad = aTruncate || !mUseDisk;
if (directLoad) {
// mLoadStart will be used to calculate telemetry of life-time of this
// entry. Low resolution is then enough.
mLoadStart = TimeStamp::NowLoRes();
mPinningKnown = true;
} else {
mLoadStart = TimeStamp::Now();
}
{
mozilla::MutexAutoUnlock unlock(mLock);
if (reportMiss) {
CacheFileUtils::DetailedCacheHitTelemetry::AddRecord(
CacheFileUtils::DetailedCacheHitTelemetry::MISS, mLoadStart);
}
LOG((" performing load, file=%p", mFile.get()));
if (NS_SUCCEEDED(rv)) {
rv = mFile->Init(fileKey, aTruncate, !mUseDisk, mSkipSizeCheck, aPriority,
mPinned, directLoad ? nullptr : this);
}
if (NS_FAILED(rv)) {
mFileStatus = rv;
AsyncDoom(nullptr);
return false;
}
}
if (directLoad) {
// Just pretend the load has already completed, as "new".
mFileStatus = NS_OK;
mState = EMPTY;
}
return mState == LOADING;
}
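// Return-value note: Load() returns true only while an asynchronous file
// load is actually in flight (the caller then waits for OnFileReady);
// false means the entry is immediately actionable: already loaded, opened
// directly as new, or doomed on failure.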
NS_IMETHODIMP CacheEntry::OnFileReady(nsresult aResult, bool aIsNew) {
LOG(("CacheEntry::OnFileReady [this=%p, rv=0x%08" PRIx32 ", new=%d]", this,
static_cast<uint32_t>(aResult), aIsNew));
MOZ_ASSERT(!mLoadStart.IsNull());
if (NS_SUCCEEDED(aResult)) {
if (aIsNew) {
CacheFileUtils::DetailedCacheHitTelemetry::AddRecord(
CacheFileUtils::DetailedCacheHitTelemetry::MISS, mLoadStart);
} else {
CacheFileUtils::DetailedCacheHitTelemetry::AddRecord(
CacheFileUtils::DetailedCacheHitTelemetry::HIT, mLoadStart);
}
}
// OnFileReady is the only code that can transition from LOADING
// to any follow-on state, and it can be invoked only once on an entry.
// Until this moment there is no consumer that could manipulate
// the entry state.
mozilla::MutexAutoLock lock(mLock);
MOZ_ASSERT(mState == LOADING);
mState = (aIsNew || NS_FAILED(aResult)) ? EMPTY : READY;
mFileStatus = aResult;
mPinned = mFile->IsPinned();
mPinningKnown = true;
LOG((" pinning=%d", (bool)mPinned));
if (mState == READY) {
mHasData = true;
uint32_t frecency;
mFile->GetFrecency(&frecency);
// mFrecency is held in a double to increase computation precision.
// It is ok to persist frecency only as a uint32 with some math involved.
mFrecency = INT2FRECENCY(frecency);
}
InvokeCallbacks();
return NS_OK;
}
NS_IMETHODIMP CacheEntry::OnFileDoomed(nsresult aResult) {
if (mDoomCallback) {
RefPtr<DoomCallbackRunnable> event =
new DoomCallbackRunnable(this, aResult);
NS_DispatchToMainThread(event);
}
return NS_OK;
}
already_AddRefed<CacheEntryHandle> CacheEntry::ReopenTruncated(
bool aMemoryOnly, nsICacheEntryOpenCallback* aCallback)
MOZ_REQUIRES(mLock) {
LOG(("CacheEntry::ReopenTruncated [this=%p]", this));
mLock.AssertCurrentThreadOwns();
// Hold back callback invocation; AddStorageEntry would otherwise invoke
// them prematurely from the doom
mPreventCallbacks = true;
RefPtr<CacheEntryHandle> handle;
RefPtr<CacheEntry> newEntry;
{
if (mPinned) {
MOZ_ASSERT(mUseDisk);
// We want to pin even no-store entries (the case where we recreate a
// disk entry as a memory-only entry.)
aMemoryOnly = false;
}
mozilla::MutexAutoUnlock unlock(mLock);
// The following call dooms this entry (calls DoomAlreadyRemoved on us)
nsresult rv = CacheStorageService::Self()->AddStorageEntry(
GetStorageID(), GetURI(), GetEnhanceID(), mUseDisk && !aMemoryOnly,
mSkipSizeCheck, mPinned,
nsICacheStorage::OPEN_TRUNCATE, // truncate existing (this one)
getter_AddRefs(handle));
if (NS_SUCCEEDED(rv)) {
newEntry = handle->Entry();
LOG((" exchanged entry %p by entry %p, rv=0x%08" PRIx32, this,
newEntry.get(), static_cast<uint32_t>(rv)));
newEntry->AsyncOpen(aCallback, nsICacheStorage::OPEN_TRUNCATE);
} else {
LOG((" exchanged of entry %p failed, rv=0x%08" PRIx32, this,
static_cast<uint32_t>(rv)));
AsyncDoom(nullptr);
}
}
mPreventCallbacks = false;
if (!newEntry) return nullptr;
newEntry->TransferCallbacks(*this);
mCallbacks.Clear();
// Must return a new write handle, since the consumer is expected to
// write to this newly recreated entry. The |handle| is only a common
// reference counter and doesn't revert entry state back when write
// fails and also doesn't update the entry frecency. Not updating
// frecency causes entries to not be purged from our memory pools.
RefPtr<CacheEntryHandle> writeHandle = newEntry->NewWriteHandle();
return writeHandle.forget();
}
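// Flow summary of ReopenTruncated (sketch): doom this entry via
// AddStorageEntry(OPEN_TRUNCATE), move all pending callbacks over to the
// replacement entry, and return a write handle to the replacement so the
// caller can start filling it.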
void CacheEntry::TransferCallbacks(CacheEntry& aFromEntry) {
mozilla::MutexAutoLock lock(mLock);
aFromEntry.mLock.AssertCurrentThreadOwns();
LOG(("CacheEntry::TransferCallbacks [entry=%p, from=%p]", this, &aFromEntry));
if (!mCallbacks.Length()) {
mCallbacks.SwapElements(aFromEntry.mCallbacks);
} else {
mCallbacks.AppendElements(aFromEntry.mCallbacks);
}
uint32_t callbacksLength = mCallbacks.Length();
if (callbacksLength) {
// Carry the entry reference (unfortunately, needs to be done manually...)
for (uint32_t i = 0; i < callbacksLength; ++i) {
mCallbacks[i].ExchangeEntry(this);
}
BackgroundOp(Ops::CALLBACKS, true);
}
}
void CacheEntry::RememberCallback(Callback& aCallback) {
mLock.AssertCurrentThreadOwns();
LOG(("CacheEntry::RememberCallback [this=%p, cb=%p, state=%s]", this,
aCallback.mCallback.get(), StateString(mState)));
mCallbacks.AppendElement(aCallback);
}
void CacheEntry::InvokeCallbacksLock() {
mozilla::MutexAutoLock lock(mLock);
InvokeCallbacks();
}
void CacheEntry::InvokeCallbacks() {
mLock.AssertCurrentThreadOwns();
LOG(("CacheEntry::InvokeCallbacks BEGIN [this=%p]", this));
// Invoke first all r/w callbacks, then all r/o callbacks.
if (InvokeCallbacks(false)) InvokeCallbacks(true);
LOG(("CacheEntry::InvokeCallbacks END [this=%p]", this));
}
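// Ordering rationale (as read from the code): r/w callbacks go first so
// one of them can claim the WRITING (or REVALIDATING) state; r/o callbacks
// then either get the ready entry or are held until the write finishes.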
bool CacheEntry::InvokeCallbacks(bool aReadOnly) MOZ_REQUIRES(mLock) {
mLock.AssertCurrentThreadOwns();
RefPtr<CacheEntryHandle> recreatedHandle;
uint32_t i = 0;
while (i < mCallbacks.Length()) {
if (mPreventCallbacks) {
LOG((" callbacks prevented!"));
return false;
}
if (!mIsDoomed && (mState == WRITING || mState == REVALIDATING)) {
LOG((" entry is being written/revalidated"));
return false;
}
bool recreate;
if (mCallbacks[i].DeferDoom(&recreate)) {
mCallbacks.RemoveElementAt(i);
if (!recreate) {
continue;
}
LOG((" defer doom marker callback hit positive, recreating"));
recreatedHandle = ReopenTruncated(!mUseDisk, nullptr);
break;
}
if (mCallbacks[i].mReadOnly != aReadOnly) {
// Callback doesn't match this pass (r/w vs. r/o), go to another one in line
++i;
continue;
}
bool onCheckThread;
nsresult rv = mCallbacks[i].OnCheckThread(&onCheckThread);
if (NS_SUCCEEDED(rv) && !onCheckThread) {
// Redispatch to the target thread
rv = mCallbacks[i].mTarget->Dispatch(
NewRunnableMethod("net::CacheEntry::InvokeCallbacksLock", this,
&CacheEntry::InvokeCallbacksLock),
nsIEventTarget::DISPATCH_NORMAL);
if (NS_SUCCEEDED(rv)) {
LOG((" re-dispatching to target thread"));
return false;
}
}
Callback callback = mCallbacks[i];
mCallbacks.RemoveElementAt(i);
if (NS_SUCCEEDED(rv) && !InvokeCallback(callback)) {
// Callback didn't fire, put it back and go to another one in line.
// The only reason InvokeCallback returns false is that onCacheEntryCheck
// returned RECHECK_AFTER_WRITE_FINISHED. If we stopped the loop, other
// readers or potential writers would be unnecessarily kept from being
// invoked.
size_t pos = std::min(mCallbacks.Length(), static_cast<size_t>(i));
mCallbacks.InsertElementAt(pos, callback);
++i;
}
}
if (recreatedHandle) {
// Must be released outside of the lock, enters InvokeCallback on the new
// entry
mozilla::MutexAutoUnlock unlock(mLock);
recreatedHandle = nullptr;
}
return true;
}
bool CacheEntry::InvokeCallback(Callback& aCallback) MOZ_REQUIRES(mLock) {
mLock.AssertCurrentThreadOwns();
LOG(("CacheEntry::InvokeCallback [this=%p, state=%s, cb=%p]", this,
StateString(mState), aCallback.mCallback.get()));
// When this entry is doomed we want to notify the callback in any case
if (!mIsDoomed) {
// When we are here, the entry must be loaded from disk
MOZ_ASSERT(mState > LOADING);
if (mState == WRITING || mState == REVALIDATING) {
// Prevent invoking other callbacks since one of them is now writing
// or revalidating this entry. No consumers should get this entry
// until its metadata has been filled with values downloaded from the
// server, or the entry has been revalidated and its output stream opened.
LOG((" entry is being written/revalidated, callback bypassed"));
return false;
}
// The mRecheckAfterWrite flag being already set means the callback has
// already passed the onCacheEntryCheck call. Until the current write is
// finished this callback will be bypassed.
if (!aCallback.mRecheckAfterWrite) {
if (!aCallback.mReadOnly) {
if (mState == EMPTY) {
// Advance to writing state, we expect to invoke the callback and let
// it fill the content of this entry. Must set and check the state here
// to prevent more than one consumer from becoming the writer.
mState = WRITING;
LOG((" advancing to WRITING state"));
}
if (!aCallback.mCallback) {
// We can be given no callback only in the case of recreate; it is ok
// to advance to WRITING state since the caller of recreate is
// expected to write this entry now.
return true;
}
}
if (mState == READY) {
// Metadata present, validate the entry
uint32_t checkResult;
{
// mayhemer: TODO check and solve any potential races of concurrent
// OnCacheEntryCheck
mozilla::MutexAutoUnlock unlock(mLock);
RefPtr<CacheEntryHandle> handle = NewHandle();
nsresult rv =
aCallback.mCallback->OnCacheEntryCheck(handle, &checkResult);
LOG((" OnCacheEntryCheck: rv=0x%08" PRIx32 ", result=%" PRId32,
static_cast<uint32_t>(rv), static_cast<uint32_t>(checkResult)));
if (NS_FAILED(rv)) checkResult = ENTRY_NOT_WANTED;
}
aCallback.mRevalidating = checkResult == ENTRY_NEEDS_REVALIDATION;
switch (checkResult) {
case ENTRY_WANTED:
// Nothing more to do here, the consumer is responsible for handling
// the result of OnCacheEntryCheck itself.
// Proceed to callback...
break;
case RECHECK_AFTER_WRITE_FINISHED:
LOG(
(" consumer will check on the entry again after write is "
"done"));
// The consumer wants the entry to complete first.
aCallback.mRecheckAfterWrite = true;
break;
case ENTRY_NEEDS_REVALIDATION:
LOG((" will be holding callbacks until entry is revalidated"));
// State is READY now, and from that state the entry cannot transition
// to any state other than REVALIDATING, for which concurrency is not
// an issue. Potentially no need to lock here.
mState = REVALIDATING;
break;
case ENTRY_NOT_WANTED:
LOG((" consumer not interested in the entry"));
// Do not give this entry to the consumer, it is not interested in
// us.
aCallback.mNotWanted = true;
break;
}
}
}
}
if (aCallback.mCallback) {
if (!mIsDoomed && aCallback.mRecheckAfterWrite) {
// If we don't have data and the callback wants a complete entry,
// don't invoke now.
bool bypass = !mHasData;
if (!bypass && NS_SUCCEEDED(mFileStatus)) {
int64_t _unused;
bypass = !mFile->DataSize(&_unused);
}
if (bypass) {
LOG((" bypassing, entry data still being written"));
return false;
}
// Entry is complete now, do the check+avail call again
aCallback.mRecheckAfterWrite = false;
return InvokeCallback(aCallback);
}
mozilla::MutexAutoUnlock unlock(mLock);
InvokeAvailableCallback(aCallback);
}
return true;
}
void CacheEntry::InvokeAvailableCallback(Callback const& aCallback) {
nsresult rv;
uint32_t state;
{
mozilla::MutexAutoLock lock(mLock);
state = mState;
LOG(
("CacheEntry::InvokeAvailableCallback [this=%p, state=%s, cb=%p, "
"r/o=%d, "
"n/w=%d]",
this, StateString(mState), aCallback.mCallback.get(),
aCallback.mReadOnly, aCallback.mNotWanted));
// When we are here, the entry must be loaded from disk
MOZ_ASSERT(state > LOADING || mIsDoomed);
}
bool onAvailThread;
rv = aCallback.OnAvailThread(&onAvailThread);
if (NS_FAILED(rv)) {
LOG((" target thread dead?"));
return;
}
if (!onAvailThread) {
// Dispatch to the right thread
RefPtr<AvailableCallbackRunnable> event =
new AvailableCallbackRunnable(this, aCallback);
rv = aCallback.mTarget->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
LOG((" redispatched, (rv = 0x%08" PRIx32 ")", static_cast<uint32_t>(rv)));
return;
}
if (mIsDoomed || aCallback.mNotWanted) {
LOG(
(" doomed or not wanted, notifying OCEA with "
"NS_ERROR_CACHE_KEY_NOT_FOUND"));
aCallback.mCallback->OnCacheEntryAvailable(nullptr, false,
NS_ERROR_CACHE_KEY_NOT_FOUND);
return;
}
if (state == READY) {
LOG((" ready/has-meta, notifying OCEA with entry and NS_OK"));