/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_layers_SurfacePoolCA_h
#define mozilla_layers_SurfacePoolCA_h

#include <IOSurface/IOSurface.h>

#include <deque>
#include <unordered_map>

#include "mozilla/Atomics.h"
#include "mozilla/DataMutex.h"

#include "mozilla/layers/SurfacePool.h"
#include "CFTypeRefPtr.h"
#include "nsISupportsImpl.h"

namespace mozilla {

namespace gl {
class MozFramebuffer;
}  // namespace gl

namespace layers {

class SurfacePoolHandleCA;
struct SurfacePoolCAWrapperForGL;

// An implementation of SurfacePool for IOSurfaces and GL framebuffers.
// The goal of having this pool is to avoid creating and destroying IOSurfaces
// and framebuffers frequently, because doing so is expensive.
// SurfacePoolCA is threadsafe. All its data is wrapped inside LockedPool, and
// each access to LockedPool is guarded with a lock through DataMutex.
//
// The pool satisfies the following requirements:
//  - It can be shared across windows, even across windows with different
//    GLContexts.
//  - The number of unused surfaces that are available for recycling is capped
//    to a fixed value per pool, regardless of how many windows use that pool.
//  - When all windows are closed (all handles are gone), no surfaces are kept
//    alive (the pool is destroyed).
//  - There is an explicit way of deleting GL resources for a GLContext so that
//    it can happen at a deterministic time on the right thread.
//  - Additionally, once a GLContext is no longer being used in any window
//    (really: any pool handle), all surface-associated GL resources of that
//    context are destroyed.
//  - For every IOSurface, only one set of GL resources is in existence at any
//    given time. We don't want there to be framebuffers in two different
//    GLContexts for one surface.
//  - We do not want to recycle an IOSurface that currently has GL resources of
//    context A for a pool handle that uses context B.
//  - We need to delay IOSurface recycling until the window server is done with
//    the surface (`!IOSurfaceIsInUse(surf)`)
class SurfacePoolCA final : public SurfacePool {
 public:
  // Get a handle for a new window. aGL can be nullptr.
  RefPtr<SurfacePoolHandle> GetHandleForGL(gl::GLContext* aGL) override;

  // Destroy all GL resources associated with aGL managed by this pool.
  void DestroyGLResourcesForContext(gl::GLContext* aGL) override;

 private:
  friend struct SurfacePoolCAWrapperForGL;
  friend class SurfacePoolHandleCA;
  friend RefPtr<SurfacePool> SurfacePool::Create(size_t aPoolSizeLimit);

  explicit SurfacePoolCA(size_t aPoolSizeLimit);
  ~SurfacePoolCA() override;

  // Get an existing surface of aSize from the pool or create a new surface.
  // The returned surface is guaranteed not to be in use by the window server.
  CFTypeRefPtr<IOSurfaceRef> ObtainSurfaceFromPool(const gfx::IntSize& aSize,
                                                   gl::GLContext* aGL);

  // Place a surface that was previously obtained from this pool back into the
  // pool. aSurface may or may not be in use by the window server.
  void ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface);

  // Re-run checks whether the window server still uses IOSurfaces which are
  // eligible for recycling. The purpose of the "generation" counter is to
  // reduce the number of calls to IOSurfaceIsInUse in a scenario where many
  // windows / handles are calling CollectPendingSurfaces in the same frame
  // (i.e. multiple simultaneously-animating windows).
  uint64_t CollectPendingSurfaces(uint64_t aCheckGenerationsUpTo);

  // Enforce the pool size limit by evicting surfaces as necessary. This should
  // happen at the end of the frame so that we can temporarily exceed the limit
  // within a frame.
  void EnforcePoolSizeLimit();

  // Get or create the framebuffer for the given surface and GL context.
  // The returned framebuffer handle will become invalid once
  // DestroyGLResourcesForContext or DecrementGLContextHandleCount are called.
  // The framebuffer's depth buffer (if present) may be shared between multiple
  // framebuffers! Do not assume anything about the depth buffer's existing
  // contents (i.e. clear it at the beginning of the draw), and do not
  // interleave drawing commands to different framebuffers in such a way that
  // the shared depth buffer could cause trouble.
  Maybe<GLuint> GetFramebufferForSurface(CFTypeRefPtr<IOSurfaceRef> aSurface,
                                         gl::GLContext* aGL,
                                         bool aNeedsDepthBuffer);

  // Called by the destructor of SurfacePoolCAWrapperForGL so that we can clear
  // our weak reference to it and delete GL resources.
  void OnWrapperDestroyed(gl::GLContext* aGL,
                          SurfacePoolCAWrapperForGL* aWrapper);

  // The actual pool implementation lives in LockedPool, which is accessed in
  // a thread-safe manner.
  struct LockedPool {
    explicit LockedPool(size_t aPoolSizeLimit);
    LockedPool(LockedPool&&) = default;
    ~LockedPool();

    RefPtr<SurfacePoolCAWrapperForGL> GetWrapperForGL(SurfacePoolCA* aPool,
                                                      gl::GLContext* aGL);
    void DestroyGLResourcesForContext(gl::GLContext* aGL);

    CFTypeRefPtr<IOSurfaceRef> ObtainSurfaceFromPool(const gfx::IntSize& aSize,
                                                     gl::GLContext* aGL);
    void ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface);
    uint64_t CollectPendingSurfaces(uint64_t aCheckGenerationsUpTo);
    void EnforcePoolSizeLimit();
    Maybe<GLuint> GetFramebufferForSurface(CFTypeRefPtr<IOSurfaceRef> aSurface,
                                           gl::GLContext* aGL,
                                           bool aNeedsDepthBuffer);
    void OnWrapperDestroyed(gl::GLContext* aGL,
                            SurfacePoolCAWrapperForGL* aWrapper);
    uint64_t EstimateTotalMemory();

    uint64_t mCollectionGeneration = 0;

   protected:
    struct GLResourcesForSurface {
      RefPtr<gl::GLContext> mGLContext;            // non-null
      UniquePtr<gl::MozFramebuffer> mFramebuffer;  // non-null
    };

    struct SurfacePoolEntry {
      gfx::IntSize mSize;
      CFTypeRefPtr<IOSurfaceRef> mIOSurface;  // non-null
      Maybe<GLResourcesForSurface> mGLResources;
    };

    struct PendingSurfaceEntry {
      SurfacePoolEntry mEntry;
      // The value of LockedPool::mCollectionGeneration at the time
      // IOSurfaceIsInUse was last called for mEntry.mIOSurface.
      uint64_t mPreviousCheckGeneration;
      // The number of times an IOSurfaceIsInUse check has been performed.
      uint64_t mCheckCount;
    };

    template <typename F>
    void MutateEntryStorage(const char* aMutationType,
                            const gfx::IntSize& aSize, F aFn);

    template <typename F>
    void ForEachEntry(F aFn);

    bool CanRecycleSurfaceForRequest(const SurfacePoolEntry& aEntry,
                                     const gfx::IntSize& aSize,
                                     gl::GLContext* aGL);

    RefPtr<gl::DepthAndStencilBuffer> GetDepthBufferForSharing(
        gl::GLContext* aGL, const gfx::IntSize& aSize);
    UniquePtr<gl::MozFramebuffer> CreateFramebufferForTexture(
        gl::GLContext* aGL, const gfx::IntSize& aSize, GLuint aTexture,
        bool aNeedsDepthBuffer);

    // Every IOSurface that is managed by the pool is wrapped in a
    // SurfacePoolEntry object. Every entry is stored in one of three buckets at
    // any given time: mInUseEntries, mPendingEntries, or mAvailableEntries. All
    // mutations to these buckets are performed via calls to
    // MutateEntryStorage(). Entries can move between the buckets in the
    // following ways:
    //
    //  [new]
    //    | Create
    //    v
    // +----------------------------------------------------------------+
    // | mInUseEntries                                                   |
    // +------+-------------+----------------------+--------------------+
    //        |             ^                      | Start waiting for
    //        |             | Recycle              v
    //        |             |                  +-----------------------------+
    //        |             |                  | mPendingEntries             |
    //        |             |                  +--+--------------------+-----+
    //        | Retain      |                     | Stop waiting for   |
    //        v             |                     v                    |
    // +--------------------+------------------------+                 |
    // | mAvailableEntries                           |                 |
    // +----------------------------+----------------+                 |
    //                              | Evict                            | Eject
    //                              v                                  v
    //                         [destroyed]                        [destroyed]
    //
    // Each arrow corresponds to one invocation of MutateEntryStorage() with the
    // arrow's label passed as the aMutationType string parameter.

    // Stores the entries for surfaces that are in use by NativeLayerCA, i.e. an
    // entry is inside mInUseEntries between calls to ObtainSurfaceFromPool()
    // and ReturnSurfaceToPool().
    std::unordered_map<CFTypeRefPtr<IOSurfaceRef>, SurfacePoolEntry>
        mInUseEntries;

    // Stores entries which are no longer in use by NativeLayerCA but are still
    // in use by the window server, i.e. for which
    // IOSurfaceIsInUse(pendingSurfaceEntry.mEntry.mIOSurface.get()) still
    // returns true. These entries are checked once per frame inside
    // CollectPendingSurfaces(), and returned to mAvailableEntries once the
    // window server is done.
    nsTArray<PendingSurfaceEntry> mPendingEntries;

    // Stores entries which are available for recycling. These entries are not
    // in use by a NativeLayerCA or by the window server.
    nsTArray<SurfacePoolEntry> mAvailableEntries;

    // Keeps weak references to SurfacePoolCAWrapperForGL instances.
    // For each GLContext* value (including nullptr), only one wrapper can
    // exist at any time. The wrapper keeps a strong reference to us and
    // notifies us when it gets destroyed. At that point we can call
    // DestroyGLResourcesForContext because we know no other SurfaceHandles for
    // that context exist.
    std::unordered_map<gl::GLContext*, SurfacePoolCAWrapperForGL*> mWrappers;
    size_t mPoolSizeLimit = 0;

    struct DepthBufferEntry {
      RefPtr<gl::GLContext> mGLContext;
      gfx::IntSize mSize;
      WeakPtr<gl::DepthAndStencilBuffer> mBuffer;
    };

    nsTArray<DepthBufferEntry> mDepthBuffers;
  };

  DataMutex<LockedPool> mPool;
};
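
// Illustrative usage sketch (not part of the original header): how a caller
// might create a process-wide pool and hand out per-window handles via the
// public SurfacePool API declared above. The names `gSurfacePool`,
// `kPoolSizeLimit` and `aWindowGL` are hypothetical.
//
//   static RefPtr<SurfacePool> gSurfacePool;      // shared across windows
//   static constexpr size_t kPoolSizeLimit = 12;  // arbitrary example cap
//
//   RefPtr<SurfacePoolHandle> CreateHandleForWindow(gl::GLContext* aWindowGL) {
//     if (!gSurfacePool) {
//       gSurfacePool = SurfacePool::Create(kPoolSizeLimit);
//     }
//     // aWindowGL may be nullptr, see GetHandleForGL above.
//     return gSurfacePool->GetHandleForGL(aWindowGL);
//   }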

// One process-wide instance per (SurfacePoolCA*, GLContext*) pair.
// Keeps the SurfacePool alive, and the SurfacePool has a weak reference to the
// wrapper so that it can ensure that there's only one wrapper for it per
// GLContext* at any time.
struct SurfacePoolCAWrapperForGL {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SurfacePoolCAWrapperForGL);

  const RefPtr<SurfacePoolCA> mPool;  // non-null
  const RefPtr<gl::GLContext> mGL;    // can be null

  SurfacePoolCAWrapperForGL(SurfacePoolCA* aPool, gl::GLContext* aGL)
      : mPool(aPool), mGL(aGL) {}

 protected:
  ~SurfacePoolCAWrapperForGL() { mPool->OnWrapperDestroyed(mGL, this); }
};

// A surface pool handle that is stored on NativeLayerCA and keeps the
// SurfacePool alive.
class SurfacePoolHandleCA final : public SurfacePoolHandle {
 public:
  SurfacePoolHandleCA* AsSurfacePoolHandleCA() override { return this; }
  const auto& gl() { return mPoolWrapper->mGL; }
  CFTypeRefPtr<IOSurfaceRef> ObtainSurfaceFromPool(const gfx::IntSize& aSize);
  void ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface);
  Maybe<GLuint> GetFramebufferForSurface(CFTypeRefPtr<IOSurfaceRef> aSurface,
                                         bool aNeedsDepthBuffer);
  RefPtr<SurfacePool> Pool() override { return mPoolWrapper->mPool; }
  void OnBeginFrame() override;
  void OnEndFrame() override;

 private:
  friend class SurfacePoolCA;
  SurfacePoolHandleCA(RefPtr<SurfacePoolCAWrapperForGL>&& aPoolWrapper,
                      uint64_t aCurrentCollectionGeneration);
  ~SurfacePoolHandleCA() override;

  const RefPtr<SurfacePoolCAWrapperForGL> mPoolWrapper;
  DataMutex<uint64_t> mPreviousFrameCollectionGeneration;
};
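
// Illustrative usage sketch (not part of the original header): the per-frame
// pattern a NativeLayerCA-like consumer could follow with a
// SurfacePoolHandleCA, based on the methods declared above. `aHandle` (a
// SurfacePoolHandleCA*), `aSize` and the drawing code are hypothetical.
//
//   aHandle->OnBeginFrame();
//   CFTypeRefPtr<IOSurfaceRef> surf = aHandle->ObtainSurfaceFromPool(aSize);
//   if (Maybe<GLuint> fbo = aHandle->GetFramebufferForSurface(surf, true)) {
//     // Bind *fbo and draw. The depth buffer may be shared, so clear it at
//     // the start of the draw and don't interleave draws to other
//     // framebuffers that share it (see GetFramebufferForSurface above).
//   }
//   // ... later, once the consumer no longer needs the surface:
//   aHandle->ReturnSurfaceToPool(surf);
//   aHandle->OnEndFrame();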

}  // namespace layers
}  // namespace mozilla

#endif  // mozilla_layers_SurfacePoolCA_h