Source code

Revision control

Other Tools

1
/* This Source Code Form is subject to the terms of the Mozilla Public
2
* License, v. 2.0. If a copy of the MPL was not distributed with this
3
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5
//! Implements parallel traversal over the DOM tree.
6
//!
7
//! This traversal is based on Rayon, and therefore its safety is largely
8
//! verified by the type system.
9
//!
10
//! The primary trickiness and fine print for the above relates to the
11
//! thread safety of the DOM nodes themselves. Accessing a DOM element
12
//! concurrently on multiple threads is actually mostly "safe", since all
13
//! the mutable state is protected by an AtomicRefCell, and so we'll
14
//! generally panic if something goes wrong. Still, we try to enforce our
15
//! thread invariants at compile time whenever possible. As such, TNode and
16
//! TElement are not Send, so ordinary style system code cannot accidentally
17
//! share them with other threads. In the parallel traversal, we explicitly
18
//! invoke |unsafe { SendNode::new(n) }| to put nodes in containers that may
19
//! be sent to other threads. This occurs in only a handful of places and is
20
//! easy to grep for. At the time of this writing, there is no other unsafe
21
//! code in the parallel traversal.
22
23
#![deny(missing_docs)]
24
25
use crate::context::{StyleContext, ThreadLocalStyleContext};
26
use crate::dom::{OpaqueNode, SendNode, TElement};
27
use crate::scoped_tls::ScopedTLS;
28
use crate::traversal::{DomTraversal, PerLevelTraversalData};
29
use arrayvec::ArrayVec;
30
use itertools::Itertools;
31
use rayon;
32
use smallvec::SmallVec;
33
34
/// The minimum stack size for a thread in the styling pool, in kilobytes.
pub const STYLE_THREAD_STACK_SIZE_KB: usize = 256;

/// The stack margin. If we get this deep in the stack, we will skip recursive
/// optimizations to ensure that there is sufficient room for non-recursive work.
///
/// We allocate large safety margins because certain OS calls can use very
/// large amounts of stack space. Reserving a larger-than-necessary stack
/// costs us address space, but if we keep our safety margin big, we will
/// generally avoid committing those extra pages, and only use them in edge
/// cases that would otherwise cause crashes.
///
/// When measured with 128KB stacks and a 40KB margin, we could support 53
/// levels of recursion before the limiter kicks in, on x86_64-Linux (see
/// Gecko bug 1376883 for more discussion on the measurements). When we
/// doubled the stack size, we added it all to the safety margin, so we
/// should be able to get the same amount of recursion.
pub const STACK_SAFETY_MARGIN_KB: usize = 168;

/// The maximum number of child nodes that we will process as a single unit.
///
/// Larger values will increase style sharing cache hits and general DOM
/// locality at the expense of decreased opportunities for parallelism.
/// There are measurements (in the Gecko bug on this topic; the link was
/// lost in this copy) that investigate some slightly different values for
/// the work unit size. If the size is significantly increased, make sure
/// to adjust the condition for kicking off a new work unit in top_down_dom,
/// because otherwise we're likely to end up doing too much work serially.
/// For example, the condition there could become some fraction of
/// WORK_UNIT_MAX instead of WORK_UNIT_MAX itself.
pub const WORK_UNIT_MAX: usize = 16;
69
70
/// A set of nodes, sized to the work unit. This gets copied when sent to
/// other threads, so we keep it compact: ArrayVec stores its elements
/// inline (no heap allocation) with capacity WORK_UNIT_MAX.
type WorkUnit<N> = ArrayVec<[SendNode<N>; WORK_UNIT_MAX]>;
73
74
/// A callback to create our thread local context. This needs to be
75
/// out of line so we don't allocate stack space for the entire struct
76
/// in the caller.
77
#[inline(never)]
78
fn create_thread_local_context<'scope, E, D>(
79
traversal: &'scope D,
80
slot: &mut Option<ThreadLocalStyleContext<E>>,
81
) where
82
E: TElement + 'scope,
83
D: DomTraversal<E>,
84
{
85
*slot = Some(ThreadLocalStyleContext::new(traversal.shared_context()));
86
}
87
88
/// A parallel top-down DOM traversal.
///
/// This algorithm traverses the DOM in a breadth-first, top-down manner. The
/// goals are:
/// * Never process a child before its parent (since child style depends on
///   parent style). If this were to happen, the styling algorithm would panic.
/// * Prioritize discovering nodes as quickly as possible to maximize
///   opportunities for parallelism. But this needs to be weighed against
///   styling cousins on a single thread to improve sharing.
/// * Style all the children of a given node (i.e. all sibling nodes) on
///   a single thread (with an upper bound to handle nodes with an
///   abnormally large number of children). This is important because we use
///   a thread-local cache to share styles between siblings.
#[inline(always)]
#[allow(unsafe_code)]
fn top_down_dom<'a, 'scope, E, D>(
    nodes: &'a [SendNode<E::ConcreteNode>],
    root: OpaqueNode,
    mut traversal_data: PerLevelTraversalData,
    scope: &'a rayon::ScopeFifo<'scope>,
    pool: &'scope rayon::ThreadPool,
    traversal: &'scope D,
    tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
) where
    E: TElement + 'scope,
    D: DomTraversal<E>,
{
    // Callers must hand us at most one work unit's worth of nodes.
    debug_assert!(nodes.len() <= WORK_UNIT_MAX);

    // We set this below, when we have a borrow of the thread-local-context
    // available.
    let recursion_ok;

    // Collect all the children of the elements in our work unit. This will
    // contain the combined children of up to WORK_UNIT_MAX nodes, which may
    // be numerous. As such, we store it in a large SmallVec to minimize heap-
    // spilling, and never move it.
    let mut discovered_child_nodes = SmallVec::<[SendNode<E::ConcreteNode>; 128]>::new();
    {
        // Scope the borrow of the TLS so that the borrow is dropped before
        // a potential recursive call when we pass TailCall.
        let mut tlc = tls.ensure(|slot: &mut Option<ThreadLocalStyleContext<E>>| {
            create_thread_local_context(traversal, slot)
        });

        // Check that we're not in danger of running out of stack.
        recursion_ok = !tlc.stack_limit_checker.limit_exceeded();

        let mut context = StyleContext {
            shared: traversal.shared_context(),
            thread_local: &mut *tlc,
        };

        for n in nodes {
            // If the last node we processed produced children, we may want to
            // spawn them off into a work item. We do this at the beginning of
            // the loop (rather than at the end) so that we can traverse our
            // last bits of work directly on this thread without a spawn call.
            //
            // This has the important effect of removing the allocation and
            // context-switching overhead of the parallel traversal for perfectly
            // linear regions of the DOM, i.e.:
            //
            // <russian><doll><tag><nesting></nesting></tag></doll></russian>
            //
            // which are not at all uncommon.
            //
            // There's a tension here between spawning off a work item as soon
            // as discovered_child_nodes is nonempty and waiting until we have a
            // full work item to do so. The former optimizes for speed of
            // discovery (we'll start discovering the kids of the things in
            // "nodes" ASAP). The latter gives us better sharing (e.g. we can
            // share between cousins much better, because we don't hand them off
            // as separate work items, which are likely to end up on separate
            // threads) and gives us a chance to just handle everything on this
            // thread for small DOM subtrees, as in the linear example above.
            //
            // There are performance and "number of ComputedValues"
            // measurements for various testcases in the Gecko bug on this
            // topic (the link was lost in this copy of the file).
            //
            // The worst case behavior for waiting until we have a full work
            // item is a deep tree which has WORK_UNIT_MAX "linear" branches,
            // hence WORK_UNIT_MAX elements at each level. Such a tree would
            // end up getting processed entirely sequentially, because we would
            // process each level one at a time as a single work unit, whether
            // via our end-of-loop tail call or not. If we kicked off a
            // traversal as soon as we discovered kids, we would instead
            // process such a tree more or less with a thread-per-branch,
            // multiplexed across our actual threadpool.
            if discovered_child_nodes.len() >= WORK_UNIT_MAX {
                let mut traversal_data_copy = traversal_data.clone();
                traversal_data_copy.current_dom_depth += 1;
                traverse_nodes(
                    discovered_child_nodes.drain(),
                    DispatchMode::NotTailCall,
                    recursion_ok,
                    root,
                    traversal_data_copy,
                    scope,
                    pool,
                    traversal,
                    tls,
                );
            }

            let node = **n;
            let mut children_to_process = 0isize;
            traversal.process_preorder(&traversal_data, &mut context, node, |n| {
                children_to_process += 1;
                // SAFETY: wrapping the node in SendNode so it can cross
                // threads is the explicit, grep-able escape hatch described
                // in the module docs; the parallel traversal never processes
                // a child before its parent.
                let send_n = unsafe { SendNode::new(n) };
                discovered_child_nodes.push(send_n);
            });

            traversal.handle_postorder_traversal(&mut context, root, node, children_to_process);
        }
    }

    // Handle whatever elements we have queued up but not kicked off traversals
    // for yet. If any exist, we can process them (or at least one work unit's
    // worth of them) directly on this thread by passing TailCall.
    if !discovered_child_nodes.is_empty() {
        traversal_data.current_dom_depth += 1;
        traverse_nodes(
            discovered_child_nodes.drain(),
            DispatchMode::TailCall,
            recursion_ok,
            root,
            traversal_data,
            scope,
            pool,
            traversal,
            tls,
        );
    }
}
225
226
/// Controls whether traverse_nodes may make a recursive call to continue
/// doing work, or whether it should always dispatch work asynchronously.
#[derive(Clone, Copy, PartialEq)]
pub enum DispatchMode {
    /// This is the last operation by the caller.
    TailCall,
    /// This is not the last operation by the caller.
    NotTailCall,
}

impl DispatchMode {
    /// Whether this dispatch is the caller's final operation, and thus
    /// eligible to be run synchronously on the current thread.
    fn is_tail_call(&self) -> bool {
        match *self {
            DispatchMode::TailCall => true,
            DispatchMode::NotTailCall => false,
        }
    }
}
241
242
/// Enqueues |nodes| for processing, possibly on this thread if the tail call
/// conditions are met.
#[inline]
pub fn traverse_nodes<'a, 'scope, E, D, I>(
    nodes: I,
    mode: DispatchMode,
    recursion_ok: bool,
    root: OpaqueNode,
    traversal_data: PerLevelTraversalData,
    scope: &'a rayon::ScopeFifo<'scope>,
    pool: &'scope rayon::ThreadPool,
    traversal: &'scope D,
    tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
) where
    E: TElement + 'scope,
    D: DomTraversal<E>,
    I: ExactSizeIterator<Item = SendNode<E::ConcreteNode>>,
{
    // Callers never hand us an empty set of nodes.
    debug_assert_ne!(nodes.len(), 0);

    // This is a tail call from the perspective of the caller. However, we only
    // want to actually dispatch the job as a tail call if there's nothing left
    // in our local queue. Otherwise we need to return to it to maintain proper
    // breadth-first ordering. We also need to take care to avoid stack
    // overflow due to excessive tail recursion. The stack overflow avoidance
    // isn't observable to content -- we're still completely correct, just not
    // using tail recursion any more. See Gecko bugs 1368302 and 1376883.
    //
    // NOTE(review): current_thread_has_pending_tasks() returns an Option,
    // and this unwrap assumes we're always invoked from a thread belonging
    // to |pool| -- TODO confirm against the callers that drive the traversal.
    let may_dispatch_tail =
        mode.is_tail_call() && recursion_ok && !pool.current_thread_has_pending_tasks().unwrap();

    // In the common case, our children fit within a single work unit, in which
    // case we can pass the SmallVec directly and avoid extra allocation.
    if nodes.len() <= WORK_UNIT_MAX {
        let work: WorkUnit<E::ConcreteNode> = nodes.collect();
        if may_dispatch_tail {
            // Process the unit synchronously on this thread.
            top_down_dom(&work, root, traversal_data, scope, pool, traversal, tls);
        } else {
            scope.spawn_fifo(move |scope| {
                profiler_label!(Style);
                // Move the work unit into the spawned closure.
                let work = work;
                top_down_dom(&work, root, traversal_data, scope, pool, traversal, tls);
            });
        }
    } else {
        // Too many nodes for one unit: split into WORK_UNIT_MAX-sized chunks
        // (itertools' chunks on the iterator) and spawn each asynchronously.
        for chunk in nodes.chunks(WORK_UNIT_MAX).into_iter() {
            let nodes: WorkUnit<E::ConcreteNode> = chunk.collect();
            let traversal_data_copy = traversal_data.clone();
            scope.spawn_fifo(move |scope| {
                profiler_label!(Style);
                // Move this chunk's nodes into the spawned closure.
                let n = nodes;
                top_down_dom(&*n, root, traversal_data_copy, scope, pool, traversal, tls)
            });
        }
    }
}