//! This module contains the render task graph.
//!
//! Code associated with creating specific render tasks is in the render_task
//! module.

use api::ImageFormat;
use api::units::*;
use crate::internal_types::{CacheTextureId, FastHashMap, SavedTargetIndex};
use crate::render_backend::FrameId;
use crate::render_target::{RenderTarget, RenderTargetKind, RenderTargetList, ColorRenderTarget};
use crate::render_target::{PictureCacheTarget, TextureCacheRenderTarget, AlphaRenderTarget};
use crate::render_task::{BlitSource, RenderTask, RenderTaskKind, RenderTaskAddress, RenderTaskData};
use crate::render_task::{RenderTaskLocation};
use crate::util::{VecHelper, Allocation};
use std::{cmp, usize, f32, i32, u32};

#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskGraph {
    pub tasks: Vec<RenderTask>,
    pub task_data: Vec<RenderTaskData>,
    /// Tasks that don't have dependencies, and that may be shared between
    /// picture tasks.
    ///
    /// We render these unconditionally before rendering the rest of the tree.
    pub cacheable_render_tasks: Vec<RenderTaskId>,
    next_saved: SavedTargetIndex,
    frame_id: FrameId,
}

/// Allows initializing a render task directly into the render task buffer.
///
/// See util::VecHelper. RenderTask is fairly large, so avoiding the move when
/// pushing into the vector can save a lot of expensive memcpys on pages with many
/// render tasks.
pub struct RenderTaskAllocation<'a> {
    alloc: Allocation<'a, RenderTask>,
    #[cfg(debug_assertions)]
    frame_id: FrameId,
}

impl<'l> RenderTaskAllocation<'l> {
    #[inline(always)]
    pub fn init(self, value: RenderTask) -> RenderTaskId {
        RenderTaskId {
            index: self.alloc.init(value) as u32,
            #[cfg(debug_assertions)]
            frame_id: self.frame_id,
        }
    }
}
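
// Illustrative usage (a sketch, not additional API): `RenderTaskGraph::add`
// below hands out a `RenderTaskAllocation`, and `init` writes the task into
// the buffer in place rather than moving it through the stack. Assuming
// `graph` is a `RenderTaskGraph` and `task` is a constructed `RenderTask`:
//
//     let task_id = graph.add().init(task);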

impl RenderTaskGraph {
    pub fn new(frame_id: FrameId, counters: &RenderTaskGraphCounters) -> Self {
        // Preallocate a little more than what we needed in the previous frame so that small variations
        // in the number of items don't cause us to constantly reallocate.
        let extra_items = 8;
        RenderTaskGraph {
            tasks: Vec::with_capacity(counters.tasks_len + extra_items),
            task_data: Vec::with_capacity(counters.task_data_len + extra_items),
            cacheable_render_tasks: Vec::with_capacity(counters.cacheable_render_tasks_len + extra_items),
            next_saved: SavedTargetIndex(0),
            frame_id,
        }
    }

    pub fn counters(&self) -> RenderTaskGraphCounters {
        RenderTaskGraphCounters {
            tasks_len: self.tasks.len(),
            task_data_len: self.task_data.len(),
            cacheable_render_tasks_len: self.cacheable_render_tasks.len(),
        }
    }
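
    // Illustrative frame-to-frame flow (a sketch; `previous_graph` and
    // `next_frame_id` are hypothetical names): carry the previous frame's
    // counters into the next graph so the vectors start near their final
    // sizes:
    //
    //     let counters = previous_graph.counters();
    //     let graph = RenderTaskGraph::new(next_frame_id, &counters);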

    pub fn add(&mut self) -> RenderTaskAllocation {
        RenderTaskAllocation {
            alloc: self.tasks.alloc(),
            #[cfg(debug_assertions)]
            frame_id: self.frame_id,
        }
    }

    /// Express a render task dependency between a parent and child task.
    /// This is used to assign tasks to render passes.
    pub fn add_dependency(
        &mut self,
        parent_id: RenderTaskId,
        child_id: RenderTaskId,
    ) {
        let parent = &mut self[parent_id];
        parent.children.push(child_id);
    }

    /// Assign this frame's render tasks to render passes ordered so that passes appear
    /// earlier than the ones that depend on them.
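    ///
    /// Illustrative call (a sketch; the names are placeholders, mirroring the
    /// tests at the bottom of this file):
    ///
    /// ```ignore
    /// let passes = graph.generate_passes(
    ///     Some(main_task_id),        // root of the frame's task tree, if any
    ///     size2(1920, 1080),         // screen size in device pixels
    ///     gpu_supports_fast_clears,
    /// );
    /// ```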
    pub fn generate_passes(
        &mut self,
        main_render_task: Option<RenderTaskId>,
        screen_size: DeviceIntSize,
        gpu_supports_fast_clears: bool,
    ) -> Vec<RenderPass> {
        let mut passes = Vec::new();

        if !self.cacheable_render_tasks.is_empty() {
            self.generate_passes_impl(
                &self.cacheable_render_tasks[..],
                screen_size,
                gpu_supports_fast_clears,
                false,
                &mut passes,
            );
        }

        if let Some(main_task) = main_render_task {
            self.generate_passes_impl(
                &[main_task],
                screen_size,
                gpu_supports_fast_clears,
                true,
                &mut passes,
            );
        }

        self.resolve_target_conflicts(&mut passes);

        passes
    }

    /// Assign the render tasks from the tree rooted at root_task to render passes and
    /// append them to the `passes` vector so that the passes that we depend on end up
    /// _earlier_ in the pass list.
    fn generate_passes_impl(
        &self,
        root_tasks: &[RenderTaskId],
        screen_size: DeviceIntSize,
        gpu_supports_fast_clears: bool,
        for_main_framebuffer: bool,
        passes: &mut Vec<RenderPass>,
    ) {
        // We recursively visit tasks from the roots (main and cached render tasks), to figure out
        // which ones affect the frame and which passes they should be assigned to.
        //
        // We track the maximum depth of each task (how far it is from the roots) as well as the total
        // maximum depth of the graph to determine each task's pass index. In a nutshell, depth 0 is
        // for the last render pass (for example the main framebuffer), while the highest depth
        // corresponds to the first pass.
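        //
        // For example (mirroring the diamond test at the bottom of this file),
        // with main_pic -> {b1, b2} -> a, task `a` gets depth 2, b1 and b2 get
        // depth 1, and main_pic gets depth 0, so with max_depth = 2 the pass
        // order comes out as [a], [b1, b2], [main_pic].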

        fn assign_task_depth(
            tasks: &[RenderTask],
            task_id: RenderTaskId,
            task_depth: i32,
            task_max_depths: &mut [i32],
            max_depth: &mut i32,
        ) {
            *max_depth = std::cmp::max(*max_depth, task_depth);

            let task_max_depth = &mut task_max_depths[task_id.index as usize];
            if task_depth > *task_max_depth {
                *task_max_depth = task_depth;
            } else {
                // If this task has already been processed at a larger depth,
                // there is no need to process it again.
                return;
            }

            let task = &tasks[task_id.index as usize];
            for child in &task.children {
                assign_task_depth(
                    tasks,
                    *child,
                    task_depth + 1,
                    task_max_depths,
                    max_depth,
                );
            }
        }

        // The maximum depth of each task. Values that are still equal to -1 after recursively visiting
        // the nodes correspond to tasks that don't contribute to the frame.
        let mut task_max_depths = vec![-1; self.tasks.len()];
        let mut max_depth = 0;

        for root_task in root_tasks {
            assign_task_depth(
                &self.tasks,
                *root_task,
                0,
                &mut task_max_depths,
                &mut max_depth,
            );
        }

        let offset = passes.len();

        passes.reserve(max_depth as usize + 1);
        for _ in 0..max_depth {
            passes.alloc().init(RenderPass::new_off_screen(screen_size, gpu_supports_fast_clears));
        }

        if for_main_framebuffer {
            passes.alloc().init(RenderPass::new_main_framebuffer(screen_size, gpu_supports_fast_clears));
        } else {
            passes.alloc().init(RenderPass::new_off_screen(screen_size, gpu_supports_fast_clears));
        }

        // Assign tasks to their render passes.
        for task_index in 0..self.tasks.len() {
            if task_max_depths[task_index] < 0 {
                // The task wasn't visited, which means it doesn't contribute to this frame.
                continue;
            }
            let pass_index = offset + (max_depth - task_max_depths[task_index]) as usize;
            let task_id = RenderTaskId {
                index: task_index as u32,
                #[cfg(debug_assertions)]
                frame_id: self.frame_id,
            };
            let task = &self.tasks[task_index];
            passes[pass_index].add_render_task(
                task_id,
                task.get_dynamic_size(),
                task.target_kind(),
                &task.location,
            );
        }
    }

    /// Resolve conflicts between the generated passes and the limitations of our target
    /// allocation scheme.
    ///
    /// The render task graph operates with a ping-pong target allocation scheme where
    /// a set of targets is written to by even passes and a different set of targets is
    /// written to by odd passes.
    /// Since tasks cannot read and write the same target, we can run into issues if a
    /// task in pass N + 2 reads the result of a task in pass N.
    /// To avoid such cases we have to insert a blit task that copies the content of the
    /// task into pass N + 1, which is readable by pass N + 2.
    ///
    /// In addition, allocated rects of pass N are currently not tracked and can be
    /// overwritten by allocations in later passes on the same target, unless the task
    /// has been marked for saving, which preserves the allocated rect until the end of
    /// the frame. This is a big hammer, hopefully we won't need to mark many passes
    /// for saving. A better solution would be to track allocations through the entire
    /// graph, there is a prototype of that in https://github.com/nical/toy-render-graph/
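    ///
    /// Illustrative sketch of the constraint: even passes write one set of
    /// targets (call it A) and odd passes write the other (B):
    ///
    /// ```ignore
    /// pass 0 -> A,  pass 1 -> B,  pass 2 -> A,  ...
    /// ```
    ///
    /// A task in pass 2 that reads a dependency rendered in pass 0 would read
    /// and write the A targets in the same pass, so the dependency's output is
    /// blitted into pass 1 (a B target) and the task reads the blit instead.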
    fn resolve_target_conflicts(&mut self, passes: &mut [RenderPass]) {
        // Keep track of blit tasks we inserted to avoid adding several blits for the same
        // task.
        let mut task_redirects = vec![None; self.tasks.len()];

        let mut task_passes = vec![-1; self.tasks.len()];
        for pass_index in 0..passes.len() {
            for task in &passes[pass_index].tasks {
                task_passes[task.index as usize] = pass_index as i32;
            }
        }

        for task_index in 0..self.tasks.len() {
            if task_passes[task_index] < 0 {
                // The task doesn't contribute to this frame.
                continue;
            }

            let pass_index = task_passes[task_index];

            // Go through each dependency and check whether they belong
            // to a pass that uses the same targets and/or are more than
            // one pass behind.
            for nth_child in 0..self.tasks[task_index].children.len() {
                let child_task_index = self.tasks[task_index].children[nth_child].index as usize;
                let child_pass_index = task_passes[child_task_index];

                if child_pass_index == pass_index - 1 {
                    // This should be the most common case.
                    continue;
                }

                // TODO: Picture tasks don't support having their dependency tasks redirected.
                // Pictures store their respective render task(s) on their SurfaceInfo.
                // We cannot blit the picture task here because we would need to update the
                // surface's render tasks, but we don't have access to that info here.
                // Also a surface may be expecting a picture task and not a blit task, so
                // even if we could update the surface's render task(s), it might cause other issues.
                // For now we mark the task to be saved rather than trying to redirect to a blit task.
                let task_is_picture = if let RenderTaskKind::Picture(..) = self.tasks[task_index].kind {
                    true
                } else {
                    false
                };

                if child_pass_index % 2 != pass_index % 2 || task_is_picture {
                    // The task and its dependency aren't on the same targets,
                    // but the dependency needs to be kept alive.
                    self.tasks[child_task_index].mark_for_saving();
                    continue;
                }

                if let Some(blit_id) = task_redirects[child_task_index] {
                    // We already resolved a similar conflict with a blit task,
                    // reuse the same blit instead of creating a new one.
                    self.tasks[task_index].children[nth_child] = blit_id;

                    // Mark for saving if the blit is more than one pass apart from
                    // our task.
                    if child_pass_index < pass_index - 2 {
                        self.tasks[blit_id.index as usize].mark_for_saving();
                    }

                    continue;
                }

                // Our dependency is an even number of passes behind, need
                // to insert a blit to ensure we don't read and write from
                // the same target.

                let child_task_id = RenderTaskId {
                    index: child_task_index as u32,
                    #[cfg(debug_assertions)]
                    frame_id: self.frame_id,
                };

                let mut blit = RenderTask::new_blit(
                    self.tasks[child_task_index].location.size(),
                    BlitSource::RenderTask { task_id: child_task_id },
                );

                // Mark for saving if the blit is more than one pass apart from
                // our task.
                if child_pass_index < pass_index - 2 {
                    blit.mark_for_saving();
                }

                let blit_id = RenderTaskId {
                    index: self.tasks.len() as u32,
                    #[cfg(debug_assertions)]
                    frame_id: self.frame_id,
                };

                self.tasks.alloc().init(blit);

                passes[child_pass_index as usize + 1].tasks.push(blit_id);

                self.tasks[task_index].children[nth_child] = blit_id;
                task_redirects[child_task_index] = Some(blit_id);
            }
        }
    }

    pub fn get_task_address(&self, id: RenderTaskId) -> RenderTaskAddress {
        #[cfg(all(debug_assertions, not(feature = "replay")))]
        debug_assert_eq!(self.frame_id, id.frame_id);
        RenderTaskAddress(id.index as u16)
    }

    pub fn write_task_data(&mut self) {
        for task in &self.tasks {
            self.task_data.push(task.write_task_data());
        }
    }

    pub fn save_target(&mut self) -> SavedTargetIndex {
        let id = self.next_saved;
        self.next_saved.0 += 1;
        id
    }

    #[cfg(debug_assertions)]
    pub fn frame_id(&self) -> FrameId {
        self.frame_id
    }
}

impl std::ops::Index<RenderTaskId> for RenderTaskGraph {
    type Output = RenderTask;
    fn index(&self, id: RenderTaskId) -> &RenderTask {
        #[cfg(all(debug_assertions, not(feature = "replay")))]
        debug_assert_eq!(self.frame_id, id.frame_id);
        &self.tasks[id.index as usize]
    }
}

impl std::ops::IndexMut<RenderTaskId> for RenderTaskGraph {
    fn index_mut(&mut self, id: RenderTaskId) -> &mut RenderTask {
        #[cfg(all(debug_assertions, not(feature = "replay")))]
        debug_assert_eq!(self.frame_id, id.frame_id);
        &mut self.tasks[id.index as usize]
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskId {
    pub index: u32,

    #[cfg(debug_assertions)]
    #[cfg_attr(feature = "replay", serde(default = "FrameId::first"))]
    frame_id: FrameId,
}

#[derive(Debug)]
pub struct RenderTaskGraphCounters {
    tasks_len: usize,
    task_data_len: usize,
    cacheable_render_tasks_len: usize,
}

impl RenderTaskGraphCounters {
    pub fn new() -> Self {
        RenderTaskGraphCounters {
            tasks_len: 0,
            task_data_len: 0,
            cacheable_render_tasks_len: 0,
        }
    }
}

impl RenderTaskId {
    pub const INVALID: RenderTaskId = RenderTaskId {
        index: u32::MAX,
        #[cfg(debug_assertions)]
        frame_id: FrameId::INVALID,
    };
}

/// Contains the set of `RenderTarget`s specific to the kind of pass.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum RenderPassKind {
    /// The final pass to the main frame buffer, where we have a single color
    /// target for display to the user.
    MainFramebuffer {
        main_target: ColorRenderTarget,
    },
    /// An intermediate pass, where we may have multiple targets.
    OffScreen {
        alpha: RenderTargetList<AlphaRenderTarget>,
        color: RenderTargetList<ColorRenderTarget>,
        texture_cache: FastHashMap<(CacheTextureId, usize), TextureCacheRenderTarget>,
        picture_cache: Vec<PictureCacheTarget>,
    },
}

/// A render pass represents a set of rendering operations that don't depend on one
/// another.
///
/// A render pass can have several render targets if there wasn't enough space in one
/// target to do all of the rendering for that pass. See `RenderTargetList`.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderPass {
    /// The kind of pass, as well as the set of targets associated with that
    /// kind of pass.
    pub kind: RenderPassKind,
    /// The set of tasks to be performed in this pass, as indices into the
    /// `RenderTaskGraph`.
    pub tasks: Vec<RenderTaskId>,
    /// Screen size in device pixels - used for opaque alpha batch break threshold.
    pub screen_size: DeviceIntSize,
}

impl RenderPass {
    /// Creates a pass for the main framebuffer. There is only one of these, and
    /// it is always the last pass.
    pub fn new_main_framebuffer(
        screen_size: DeviceIntSize,
        gpu_supports_fast_clears: bool,
    ) -> Self {
        let main_target = ColorRenderTarget::new(screen_size, gpu_supports_fast_clears);
        RenderPass {
            kind: RenderPassKind::MainFramebuffer {
                main_target,
            },
            tasks: vec![],
            screen_size,
        }
    }

    /// Creates an intermediate off-screen pass.
    pub fn new_off_screen(
        screen_size: DeviceIntSize,
        gpu_supports_fast_clears: bool,
    ) -> Self {
        RenderPass {
            kind: RenderPassKind::OffScreen {
                color: RenderTargetList::new(
                    screen_size,
                    ImageFormat::RGBA8,
                    gpu_supports_fast_clears,
                ),
                alpha: RenderTargetList::new(
                    screen_size,
                    ImageFormat::R8,
                    gpu_supports_fast_clears,
                ),
                texture_cache: FastHashMap::default(),
                picture_cache: Vec::new(),
            },
            tasks: vec![],
            screen_size,
        }
    }

    /// Adds a task to this pass.
    pub fn add_render_task(
        &mut self,
        task_id: RenderTaskId,
        size: DeviceIntSize,
        target_kind: RenderTargetKind,
        location: &RenderTaskLocation,
    ) {
        if let RenderPassKind::OffScreen { ref mut color, ref mut alpha, .. } = self.kind {
            // If this will be rendered to a dynamically-allocated region on an
            // off-screen render target, update the max-encountered size. We don't
            // need to do this for things drawn to the texture cache, since those
            // don't affect our render target allocation.
            if location.is_dynamic() {
                let max_size = match target_kind {
                    RenderTargetKind::Color => &mut color.max_dynamic_size,
                    RenderTargetKind::Alpha => &mut alpha.max_dynamic_size,
                };
                max_size.width = cmp::max(max_size.width, size.width);
                max_size.height = cmp::max(max_size.height, size.height);
            }
        }

        self.tasks.push(task_id);
    }
}

// Dump an SVG visualization of the render graph for debugging purposes.
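//
// Illustrative usage (a sketch; assumes you already have this frame's
// `render_tasks` and `passes`, and any writer implementing `std::io::Write`):
//
//     let mut svg = Vec::new();
//     dump_render_tasks_as_svg(&render_tasks, &passes, &mut svg).unwrap();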
#[allow(dead_code)]
pub fn dump_render_tasks_as_svg(
    render_tasks: &RenderTaskGraph,
    passes: &[RenderPass],
    output: &mut dyn std::io::Write,
) -> std::io::Result<()> {
    use svg_fmt::*;

    let node_width = 80.0;
    let node_height = 30.0;
    let vertical_spacing = 8.0;
    let horizontal_spacing = 20.0;
    let margin = 10.0;
    let text_size = 10.0;

    let mut pass_rects = Vec::new();
    let mut nodes = vec![None; render_tasks.tasks.len()];

    let mut x = margin;
    let mut max_y: f32 = 0.0;

    #[derive(Clone)]
    struct Node {
        rect: Rectangle,
        label: Text,
        size: Text,
    }

    for pass in passes {
        let mut layout = VerticalLayout::new(x, margin, node_width);

        for task_id in &pass.tasks {
            let task_index = task_id.index as usize;
            let task = &render_tasks.tasks[task_index];

            let rect = layout.push_rectangle(node_height);

            let tx = rect.x + rect.w / 2.0;
            let ty = rect.y + 10.0;

            let saved = if task.saved_index.is_some() { " (Saved)" } else { "" };
            let label = text(tx, ty, format!("{}{}", task.kind.as_str(), saved));
            let size = text(tx, ty + 12.0, format!("{}", task.location.size()));

            nodes[task_index] = Some(Node { rect, label, size });

            layout.advance(vertical_spacing);
        }

        pass_rects.push(layout.total_rectangle());

        x += node_width + horizontal_spacing;
        max_y = max_y.max(layout.y + margin);
    }

    let mut links = Vec::new();
    for node_index in 0..nodes.len() {
        if nodes[node_index].is_none() {
            continue;
        }

        let task = &render_tasks.tasks[node_index];
        for dep in &task.children {
            let dep_index = dep.index as usize;

            if let (&Some(ref node), &Some(ref dep_node)) = (&nodes[node_index], &nodes[dep_index]) {
                links.push((
                    dep_node.rect.x + dep_node.rect.w,
                    dep_node.rect.y + dep_node.rect.h / 2.0,
                    node.rect.x,
                    node.rect.y + node.rect.h / 2.0,
                ));
            }
        }
    }

    let svg_w = x + margin;
    let svg_h = max_y + margin;
    writeln!(output, "{}", BeginSvg { w: svg_w, h: svg_h })?;

    // Background.
    writeln!(output,
        "    {}",
        rectangle(0.0, 0.0, svg_w, svg_h)
            .inflate(1.0, 1.0)
            .fill(rgb(50, 50, 50))
    )?;

    // Passes.
    for rect in pass_rects {
        writeln!(output,
            "    {}",
            rect.inflate(3.0, 3.0)
                .border_radius(4.0)
                .opacity(0.4)
                .fill(black())
        )?;
    }

    // Links.
    for (x1, y1, x2, y2) in links {
        dump_task_dependency_link(output, x1, y1, x2, y2);
    }

    // Tasks.
    for node in &nodes {
        if let Some(node) = node {
            writeln!(output,
                "    {}",
                node.rect
                    .clone()
                    .fill(black())
                    .border_radius(3.0)
                    .opacity(0.5)
                    .offset(0.0, 2.0)
            )?;
            writeln!(output,
                "    {}",
                node.rect
                    .clone()
                    .fill(rgb(200, 200, 200))
                    .border_radius(3.0)
                    .opacity(0.8)
            )?;

            writeln!(output,
                "    {}",
                node.label
                    .clone()
                    .size(text_size)
                    .align(Align::Center)
                    .color(rgb(50, 50, 50))
            )?;
            writeln!(output,
                "    {}",
                node.size
                    .clone()
                    .size(text_size * 0.7)
                    .align(Align::Center)
                    .color(rgb(50, 50, 50))
            )?;
        }
    }

    writeln!(output, "{}", EndSvg)
}

#[allow(dead_code)]
fn dump_task_dependency_link(
    output: &mut dyn std::io::Write,
    x1: f32, y1: f32,
    x2: f32, y2: f32,
) {
    use svg_fmt::*;

    // If the link is a straight horizontal line and spans over multiple passes, it
    // is likely to go straight through unrelated nodes in a way that makes it look like
    // they are connected, so we bend the line upward a bit to avoid that.
    let simple_path = (y1 - y2).abs() > 1.0 || (x2 - x1) < 45.0;

    let mid_x = (x1 + x2) / 2.0;
    if simple_path {
        write!(output, "    {}",
            path().move_to(x1, y1)
                .cubic_bezier_to(mid_x, y1, mid_x, y2, x2, y2)
                .fill(Fill::None)
                .stroke(Stroke::Color(rgb(100, 100, 100), 3.0))
        ).unwrap();
    } else {
        let ctrl1_x = (mid_x + x1) / 2.0;
        let ctrl2_x = (mid_x + x2) / 2.0;
        let ctrl_y = y1 - 25.0;
        write!(output, "    {}",
            path().move_to(x1, y1)
                .cubic_bezier_to(ctrl1_x, y1, ctrl1_x, ctrl_y, mid_x, ctrl_y)
                .cubic_bezier_to(ctrl2_x, ctrl_y, ctrl2_x, y2, x2, y2)
                .fill(Fill::None)
                .stroke(Stroke::Color(rgb(100, 100, 100), 3.0))
        ).unwrap();
    }
}

#[cfg(test)]
use euclid::{size2, rect};
#[cfg(test)]
use smallvec::SmallVec;

#[cfg(test)]
fn dyn_location(w: i32, h: i32) -> RenderTaskLocation {
    RenderTaskLocation::Dynamic(None, size2(w, h))
}

#[test]
fn diamond_task_graph() {
    // A simple diamond shaped task graph.
    //
    //     [b1]
    //    /    \
    // [a]      [main_pic]
    //    \    /
    //     [b2]

    let color = RenderTargetKind::Color;

    let counters = RenderTaskGraphCounters::new();
    let mut tasks = RenderTaskGraph::new(FrameId::first(), &counters);

    let a = tasks.add().init(RenderTask::new_test(color, dyn_location(640, 640), SmallVec::new()));
    let b1 = tasks.add().init(RenderTask::new_test(color, dyn_location(320, 320), smallvec![a]));
    let b2 = tasks.add().init(RenderTask::new_test(color, dyn_location(320, 320), smallvec![a]));

    let main_pic = tasks.add().init(RenderTask::new_test(
        color,
        RenderTaskLocation::Fixed(rect(0, 0, 3200, 1800)),
        smallvec![b1, b2],
    ));

    let initial_number_of_tasks = tasks.tasks.len();

    let passes = tasks.generate_passes(Some(main_pic), size2(3200, 1800), true);

    // We should not have added any blits.
    assert_eq!(tasks.tasks.len(), initial_number_of_tasks);

    assert_eq!(passes.len(), 3);
    assert_eq!(passes[0].tasks, vec![a]);

    assert_eq!(passes[1].tasks.len(), 2);
    assert!(passes[1].tasks.contains(&b1));
    assert!(passes[1].tasks.contains(&b2));

    assert_eq!(passes[2].tasks, vec![main_pic]);
}

#[test]
fn blur_task_graph() {
    // This test simulates a complicated shadow stack effect with target allocation
    // conflicts to resolve.

    let color = RenderTargetKind::Color;

    let counters = RenderTaskGraphCounters::new();
    let mut tasks = RenderTaskGraph::new(FrameId::first(), &counters);

    let pic = tasks.add().init(RenderTask::new_test(color, dyn_location(640, 640), SmallVec::new()));
    let scale1 = tasks.add().init(RenderTask::new_test(color, dyn_location(320, 320), smallvec![pic]));
    let scale2 = tasks.add().init(RenderTask::new_test(color, dyn_location(160, 160), smallvec![scale1]));
    let scale3 = tasks.add().init(RenderTask::new_test(color, dyn_location(80, 80), smallvec![scale2]));
    let scale4 = tasks.add().init(RenderTask::new_test(color, dyn_location(40, 40), smallvec![scale3]));

    let vblur1 = tasks.add().init(RenderTask::new_test(color, dyn_location(40, 40), smallvec![scale4]));
    let hblur1 = tasks.add().init(RenderTask::new_test(color, dyn_location(40, 40), smallvec![vblur1]));

    let vblur2 = tasks.add().init(RenderTask::new_test(color, dyn_location(40, 40), smallvec![scale4]));
    let hblur2 = tasks.add().init(RenderTask::new_test(color, dyn_location(40, 40), smallvec![vblur2]));

    // Insert a task that is an even number of passes away from its dependency.
    // This means the source and destination are on the same target and we have to resolve
    // this conflict by automatically inserting a blit task.
    let vblur3 = tasks.add().init(RenderTask::new_test(color, dyn_location(80, 80), smallvec![scale3]));
    let hblur3 = tasks.add().init(RenderTask::new_test(color, dyn_location(80, 80), smallvec![vblur3]));

    // Insert a task that is an odd number > 1 of passes away from its dependency.
    // This should force us to mark the dependency "for saving" to keep its content valid
    // until the task can access it.
    let vblur4 = tasks.add().init(RenderTask::new_test(color, dyn_location(160, 160), smallvec![scale2]));
    let hblur4 = tasks.add().init(RenderTask::new_test(color, dyn_location(160, 160), smallvec![vblur4]));

    let main_pic = tasks.add().init(RenderTask::new_test(
        color,
        RenderTaskLocation::Fixed(rect(0, 0, 3200, 1800)),
        smallvec![hblur1, hblur2, hblur3, hblur4],
    ));

    let initial_number_of_tasks = tasks.tasks.len();

    let passes = tasks.generate_passes(Some(main_pic), size2(3200, 1800), true);

    // We should have added a single blit task.
    assert_eq!(tasks.tasks.len(), initial_number_of_tasks + 1);

    // vblur3's dependency on scale3 should be replaced by a blit.
    let blit = tasks[vblur3].children[0];
    assert!(blit != scale3);

    match tasks[blit].kind {
        RenderTaskKind::Blit(..) => {}
        _ => { panic!("This should be a blit task."); }
    }

    assert_eq!(passes.len(), 8);

    assert_eq!(passes[0].tasks, vec![pic]);
    assert_eq!(passes[1].tasks, vec![scale1]);
    assert_eq!(passes[2].tasks, vec![scale2]);
    assert_eq!(passes[3].tasks, vec![scale3]);

    assert_eq!(passes[4].tasks.len(), 2);
    assert!(passes[4].tasks.contains(&scale4));
    assert!(passes[4].tasks.contains(&blit));

    assert_eq!(passes[5].tasks.len(), 4);
    assert!(passes[5].tasks.contains(&vblur1));
    assert!(passes[5].tasks.contains(&vblur2));
    assert!(passes[5].tasks.contains(&vblur3));
    assert!(passes[5].tasks.contains(&vblur4));

    assert_eq!(passes[6].tasks.len(), 4);
    assert!(passes[6].tasks.contains(&hblur1));
    assert!(passes[6].tasks.contains(&hblur2));
    assert!(passes[6].tasks.contains(&hblur3));
    assert!(passes[6].tasks.contains(&hblur4));

    assert_eq!(passes[7].tasks, vec![main_pic]);

    // See vblur4's comment above.
    assert!(tasks[scale2].saved_index.is_some());
}

#[test]
fn culled_tasks() {
    // This test checks that tasks that do not contribute to the frame don't appear in the
    // generated passes.

    let color = RenderTargetKind::Color;

    let counters = RenderTaskGraphCounters::new();
    let mut tasks = RenderTaskGraph::new(FrameId::first(), &counters);

    let a1 = tasks.add().init(RenderTask::new_test(color, dyn_location(640, 640), SmallVec::new()));
    let _a2 = tasks.add().init(RenderTask::new_test(color, dyn_location(320, 320), smallvec![a1]));

    let b1 = tasks.add().init(RenderTask::new_test(color, dyn_location(640, 640), SmallVec::new()));
    let b2 = tasks.add().init(RenderTask::new_test(color, dyn_location(320, 320), smallvec![b1]));
    let _b3 = tasks.add().init(RenderTask::new_test(color, dyn_location(320, 320), smallvec![b2]));

    let main_pic = tasks.add().init(RenderTask::new_test(
        color,
        RenderTaskLocation::Fixed(rect(0, 0, 3200, 1800)),
        smallvec![b2],
    ));

    let initial_number_of_tasks = tasks.tasks.len();

    let passes = tasks.generate_passes(Some(main_pic), size2(3200, 1800), true);

    // We should not have added any blits.
    assert_eq!(tasks.tasks.len(), initial_number_of_tasks);

    assert_eq!(passes.len(), 3);
    assert_eq!(passes[0].tasks, vec![b1]);
    assert_eq!(passes[1].tasks, vec![b2]);
    assert_eq!(passes[2].tasks, vec![main_pic]);
}