Source code

Revision control

Other Tools

1
/* This Source Code Form is subject to the terms of the Mozilla Public
2
* License, v. 2.0. If a copy of the MPL was not distributed with this
3
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5
use api::{AlphaType, ClipMode, ExternalImageType, ImageRendering};
6
use api::{YuvColorSpace, YuvFormat, ColorDepth, ColorRange, PremultipliedColorF};
7
use api::units::*;
8
use crate::clip::{ClipDataStore, ClipNodeFlags, ClipNodeRange, ClipItemKind, ClipStore};
9
use crate::spatial_tree::{SpatialTree, ROOT_SPATIAL_NODE_INDEX, SpatialNodeIndex, CoordinateSystemId};
10
use crate::composite::{CompositeState};
11
use crate::glyph_rasterizer::GlyphFormat;
12
use crate::gpu_cache::{GpuBlockData, GpuCache, GpuCacheHandle, GpuCacheAddress};
13
use crate::gpu_types::{BrushFlags, BrushInstance, PrimitiveHeaders, ZBufferId, ZBufferIdGenerator};
14
use crate::gpu_types::{ClipMaskInstance, SplitCompositeInstance, BrushShaderKind};
15
use crate::gpu_types::{PrimitiveInstanceData, RasterizationSpace, GlyphInstance};
16
use crate::gpu_types::{PrimitiveHeader, PrimitiveHeaderIndex, TransformPaletteId, TransformPalette};
17
use crate::gpu_types::{ImageBrushData, get_shader_opacity};
18
use crate::internal_types::{FastHashMap, SavedTargetIndex, Swizzle, TextureSource, Filter};
19
use crate::picture::{Picture3DContext, PictureCompositeMode, PicturePrimitive};
20
use crate::prim_store::{DeferredResolve, EdgeAaSegmentMask, PrimitiveInstanceKind, PrimitiveVisibilityIndex, PrimitiveVisibilityMask};
21
use crate::prim_store::{VisibleGradientTile, PrimitiveInstance, PrimitiveOpacity, SegmentInstanceIndex};
22
use crate::prim_store::{BrushSegment, ClipMaskKind, ClipTaskIndex, PrimitiveVisibilityFlags};
23
use crate::prim_store::{VECS_PER_SEGMENT, SpaceMapper};
24
use crate::prim_store::image::ImageSource;
25
use crate::render_target::RenderTargetContext;
26
use crate::render_task_graph::{RenderTaskId, RenderTaskGraph};
27
use crate::render_task::RenderTaskAddress;
28
use crate::renderer::{BlendMode, ImageBufferKind, ShaderColorMode};
29
use crate::renderer::{BLOCKS_PER_UV_RECT, MAX_VERTEX_TEXTURE_WIDTH};
30
use crate::resource_cache::{CacheItem, GlyphFetchResult, ImageRequest, ResourceCache};
31
use smallvec::SmallVec;
32
use std::{f32, i32, usize};
33
use crate::util::{project_rect, TransformedRectKind};
34
35
// Special sentinel value recognized by the shader. It is considered to be
// a dummy task that doesn't mask out anything.
const OPAQUE_TASK_ADDRESS: RenderTaskAddress = RenderTaskAddress(0x7fff);

/// Used to signal there are no segments provided with this primitive.
const INVALID_SEGMENT_INDEX: i32 = 0xffff;

/// Size in device pixels for tiles that clip masks are drawn in.
const CLIP_RECTANGLE_TILE_SIZE: i32 = 128;

/// The minimum size of a clip mask before trying to draw in tiles.
/// This is four tiles' worth of pixels at `CLIP_RECTANGLE_TILE_SIZE`.
const CLIP_RECTANGLE_AREA_THRESHOLD: i32 = CLIP_RECTANGLE_TILE_SIZE * CLIP_RECTANGLE_TILE_SIZE * 4;
47
48
/// The categories of brush batches. Each category selects a brush shader
/// variant (see `BatchKind::shader_kind`) and carries whatever parameters
/// that shader needs to be keyed on.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BrushBatchKind {
    Solid,
    Image(ImageBufferKind),
    Blend,
    MixBlend {
        task_id: RenderTaskId,
        source_id: RenderTaskId,
        backdrop_id: RenderTaskId,
    },
    YuvImage(ImageBufferKind, YuvFormat, ColorDepth, YuvColorSpace, ColorRange),
    ConicGradient,
    RadialGradient,
    LinearGradient,
    Opacity,
}
66
67
/// Top-level classification of a primitive batch: 3D split compositing,
/// text runs (keyed by glyph format), or one of the brush kinds.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BatchKind {
    SplitComposite,
    TextRun(GlyphFormat),
    Brush(BrushBatchKind),
}
75
76
impl BatchKind {
77
fn shader_kind(&self) -> BrushShaderKind {
78
match self {
79
BatchKind::Brush(BrushBatchKind::Solid) => BrushShaderKind::Solid,
80
BatchKind::Brush(BrushBatchKind::Image(..)) => BrushShaderKind::Image,
81
BatchKind::Brush(BrushBatchKind::LinearGradient) => BrushShaderKind::LinearGradient,
82
BatchKind::Brush(BrushBatchKind::RadialGradient) => BrushShaderKind::RadialGradient,
83
BatchKind::Brush(BrushBatchKind::ConicGradient) => BrushShaderKind::ConicGradient,
84
BatchKind::Brush(BrushBatchKind::Blend) => BrushShaderKind::Blend,
85
BatchKind::Brush(BrushBatchKind::MixBlend { .. }) => BrushShaderKind::MixBlend,
86
BatchKind::Brush(BrushBatchKind::YuvImage(..)) => BrushShaderKind::Yuv,
87
BatchKind::Brush(BrushBatchKind::Opacity) => BrushShaderKind::Opacity,
88
BatchKind::TextRun(..) => BrushShaderKind::Text,
89
_ => BrushShaderKind::None,
90
}
91
}
92
}
93
94
/// Optional textures that can be used as a source in the shaders.
/// Textures that are not used by the batch are equal to TextureId::invalid().
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BatchTextures {
    /// The three color texture slots available to a batch; unused slots
    /// hold `TextureSource::Invalid`.
    pub colors: [TextureSource; 3],
}
102
103
impl BatchTextures {
104
pub fn no_texture() -> Self {
105
BatchTextures {
106
colors: [TextureSource::Invalid; 3],
107
}
108
}
109
110
pub fn render_target_cache() -> Self {
111
BatchTextures {
112
colors: [
113
TextureSource::PrevPassColor,
114
TextureSource::PrevPassAlpha,
115
TextureSource::Invalid,
116
],
117
}
118
}
119
120
pub fn color(texture: TextureSource) -> Self {
121
BatchTextures {
122
colors: [texture, texture, TextureSource::Invalid],
123
}
124
}
125
126
pub fn is_compatible_with(&self, other: &BatchTextures) -> bool {
127
self.colors.iter().zip(other.colors.iter()).all(|(t1, t2)| textures_compatible(*t1, *t2))
128
}
129
130
pub fn combine_textures(&self, other: BatchTextures) -> Option<BatchTextures> {
131
if !self.is_compatible_with(&other) {
132
return None;
133
}
134
135
let mut new_textures = BatchTextures::no_texture();
136
for (i, (color, other_color)) in self.colors.iter().zip(other.colors.iter()).enumerate() {
137
// If these textures are compatible, for each source either both sources are invalid or only one is not invalid.
138
new_textures.colors[i] = if *color == TextureSource::Invalid {
139
*other_color
140
} else {
141
*color
142
};
143
}
144
Some(new_textures)
145
}
146
}
147
148
/// The full key a batch is identified by: what is being drawn, how it is
/// blended, and which textures are bound. Two instances can share a batch
/// only when their keys are compatible (see `is_compatible_with`).
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BatchKey {
    pub kind: BatchKind,
    pub blend_mode: BlendMode,
    pub textures: BatchTextures,
}
156
157
impl BatchKey {
158
pub fn new(kind: BatchKind, blend_mode: BlendMode, textures: BatchTextures) -> Self {
159
BatchKey {
160
kind,
161
blend_mode,
162
textures,
163
}
164
}
165
166
pub fn is_compatible_with(&self, other: &BatchKey) -> bool {
167
self.kind == other.kind && self.blend_mode == other.blend_mode && self.textures.is_compatible_with(&other.textures)
168
}
169
}
170
171
#[inline]
172
fn textures_compatible(t1: TextureSource, t2: TextureSource) -> bool {
173
t1 == TextureSource::Invalid || t2 == TextureSource::Invalid || t1 == t2
174
}
175
176
/// An ordered list of blended (alpha-pass) batches, with per-batch overlap
/// tracking so new items are only merged into earlier batches when that
/// cannot change blended output.
pub struct AlphaBatchList {
    pub batches: Vec<PrimitiveBatch>,
    /// For each batch, the picture-space rects of the items added to it.
    /// Used to test whether a candidate item overlaps an existing batch.
    pub item_rects: Vec<Vec<PictureRect>>,
    /// Index into `batches` of the batch currently being appended to, or
    /// `usize::MAX` when there is none.
    current_batch_index: usize,
    /// The z id of the most recently added item; multiple segments of the
    /// same primitive arrive with the same z id.
    current_z_id: ZBufferId,
    /// When set, items with `BlendMode::Advanced` never merge into an
    /// existing batch.
    break_advanced_blend_batches: bool,
    /// How many of the most recent batches to scan when searching for a
    /// compatible batch to merge into.
    lookback_count: usize,
}
184
185
impl AlphaBatchList {
    /// Create an empty list with the given batching policy.
    fn new(break_advanced_blend_batches: bool, lookback_count: usize) -> Self {
        AlphaBatchList {
            batches: Vec::new(),
            item_rects: Vec::new(),
            current_z_id: ZBufferId::invalid(),
            current_batch_index: usize::MAX,
            break_advanced_blend_batches,
            lookback_count,
        }
    }

    /// Clear all current batches in this list. This is typically used
    /// when a primitive is encountered that occludes all previous
    /// content in this batch list.
    fn clear(&mut self) {
        self.current_batch_index = usize::MAX;
        self.current_z_id = ZBufferId::invalid();
        self.batches.clear();
        self.item_rects.clear();
    }

    /// Find (or create) a batch compatible with `key` that the caller may
    /// push instances into, recording `z_bounding_rect` for future overlap
    /// tests. Returns the selected batch's instance list.
    pub fn set_params_and_get_batch(
        &mut self,
        key: BatchKey,
        features: BatchFeatures,
        // The bounding box of everything at this Z plane. We expect potentially
        // multiple primitive segments coming with the same `z_id`.
        z_bounding_rect: &PictureRect,
        z_id: ZBufferId,
    ) -> &mut Vec<PrimitiveInstanceData> {
        // Fast path: further segments of the same primitive (same z id)
        // going into the same compatible batch skip the search entirely.
        if z_id != self.current_z_id ||
           self.current_batch_index == usize::MAX ||
           !self.batches[self.current_batch_index].key.is_compatible_with(&key)
        {
            let mut selected_batch_index = None;

            match key.blend_mode {
                BlendMode::SubpixelWithBgColor => {
                    'outer_multipass: for (batch_index, batch) in self.batches.iter().enumerate().rev().take(self.lookback_count) {
                        // Some subpixel batches are drawn in two passes. Because of this, we need
                        // to check for overlaps with every batch (which is a bit different
                        // than the normal batching below).
                        for item_rect in &self.item_rects[batch_index] {
                            if item_rect.intersects(z_bounding_rect) {
                                break 'outer_multipass;
                            }
                        }

                        if batch.key.is_compatible_with(&key) {
                            selected_batch_index = Some(batch_index);
                            break;
                        }
                    }
                }
                BlendMode::Advanced(_) if self.break_advanced_blend_batches => {
                    // don't try to find a batch
                }
                _ => {
                    'outer_default: for (batch_index, batch) in self.batches.iter().enumerate().rev().take(self.lookback_count) {
                        // For normal batches, we only need to check for overlaps for batches
                        // other than the first batch we consider. If the first batch
                        // is compatible, then we know there isn't any potential overlap
                        // issues to worry about.
                        if batch.key.is_compatible_with(&key) {
                            selected_batch_index = Some(batch_index);
                            break;
                        }

                        // check for intersections
                        for item_rect in &self.item_rects[batch_index] {
                            if item_rect.intersects(z_bounding_rect) {
                                break 'outer_default;
                            }
                        }
                    }
                }
            }

            // No suitable existing batch: start a new one for this key.
            if selected_batch_index.is_none() {
                let new_batch = PrimitiveBatch::new(key);
                selected_batch_index = Some(self.batches.len());
                self.batches.push(new_batch);
                self.item_rects.push(Vec::new());
            }

            self.current_batch_index = selected_batch_index.unwrap();
            self.item_rects[self.current_batch_index].push(*z_bounding_rect);
            self.current_z_id = z_id;
        } else if cfg!(debug_assertions) {
            // If it's a different segment of the same (larger) primitive, we expect the bounding box
            // to be the same - coming from the primitive itself, not the segment.
            assert_eq!(self.item_rects[self.current_batch_index].last(), Some(z_bounding_rect));
        }

        let batch = &mut self.batches[self.current_batch_index];
        batch.features |= features;

        &mut batch.instances
    }
}
286
287
/// An ordered list of opaque batches. Order within the list matters less
/// than for alpha batches, since opaque content is z-tested.
pub struct OpaqueBatchList {
    /// Items whose pixel area exceeds this threshold only try to merge
    /// into the most recent batch rather than searching back through
    /// several (see `set_params_and_get_batch`).
    pub pixel_area_threshold_for_new_batch: f32,
    pub batches: Vec<PrimitiveBatch>,
    /// Index into `batches` of the batch currently being appended to, or
    /// `usize::MAX` when there is none.
    pub current_batch_index: usize,
    /// How many of the most recent batches to scan when searching for a
    /// compatible batch to merge into.
    lookback_count: usize,
}
293
294
impl OpaqueBatchList {
    /// Create an empty list with the given area threshold and lookback
    /// policy.
    fn new(pixel_area_threshold_for_new_batch: f32, lookback_count: usize) -> Self {
        OpaqueBatchList {
            batches: Vec::new(),
            pixel_area_threshold_for_new_batch,
            current_batch_index: usize::MAX,
            lookback_count,
        }
    }

    /// Clear all current batches in this list. This is typically used
    /// when a primitive is encountered that occludes all previous
    /// content in this batch list.
    fn clear(&mut self) {
        self.current_batch_index = usize::MAX;
        self.batches.clear();
    }

    /// Find (or create) a batch compatible with `key` and return its
    /// instance list for the caller to push into.
    pub fn set_params_and_get_batch(
        &mut self,
        key: BatchKey,
        features: BatchFeatures,
        // The bounding box of everything at the current Z, whatever it is. We expect potentially
        // multiple primitive segments produced by a primitive, which we allow to check
        // `current_batch_index` instead of iterating the batches.
        z_bounding_rect: &PictureRect,
    ) -> &mut Vec<PrimitiveInstanceData> {
        if self.current_batch_index == usize::MAX ||
           !self.batches[self.current_batch_index].key.is_compatible_with(&key) {
            let mut selected_batch_index = None;
            let item_area = z_bounding_rect.size.area();

            // If the area of this primitive is larger than the given threshold,
            // then it is large enough to warrant breaking a batch for. In this
            // case we just see if it can be added to the existing batch or
            // create a new one.
            if item_area > self.pixel_area_threshold_for_new_batch {
                if let Some(batch) = self.batches.last() {
                    if batch.key.is_compatible_with(&key) {
                        selected_batch_index = Some(self.batches.len() - 1);
                    }
                }
            } else {
                // Otherwise, look back through a reasonable number of batches.
                for (batch_index, batch) in self.batches.iter().enumerate().rev().take(self.lookback_count) {
                    if batch.key.is_compatible_with(&key) {
                        selected_batch_index = Some(batch_index);
                        break;
                    }
                }
            }

            // No compatible batch found: start a new one for this key.
            if selected_batch_index.is_none() {
                let new_batch = PrimitiveBatch::new(key);
                selected_batch_index = Some(self.batches.len());
                self.batches.push(new_batch);
            }

            self.current_batch_index = selected_batch_index.unwrap();
        }

        let batch = &mut self.batches[self.current_batch_index];
        batch.features |= features;

        &mut batch.instances
    }

    /// Finish building this list, reordering instances for best z-buffer
    /// utilization.
    fn finalize(&mut self) {
        // Reverse the instance arrays in the opaque batches
        // to get maximum z-buffer efficiency by drawing
        // front-to-back.
        // TODO(gw): Maybe we can change the batch code to
        //           build these in reverse and avoid having
        //           to reverse the instance array here.
        for batch in &mut self.batches {
            batch.instances.reverse();
        }
    }
}
373
374
/// A set of primitive instances that share one `BatchKey`, and can
/// therefore be drawn together.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PrimitiveBatch {
    pub key: BatchKey,
    /// Per-instance GPU data for each primitive added to this batch.
    pub instances: Vec<PrimitiveInstanceData>,
    /// Union of the features requested by every instance in the batch.
    pub features: BatchFeatures,
}
381
382
bitflags! {
    /// Features of the batch that, if not requested, may allow a fast-path.
    ///
    /// Rather than breaking batches when primitives request different features,
    /// we always request the minimum amount of features to satisfy all items in
    /// the batch.
    /// The goal is to let the renderer optionally select more specialized
    /// versions of a shader if the batch doesn't require certain code paths.
    /// Not all shaders necessarily implement all of these features.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    #[cfg_attr(feature = "replay", derive(Deserialize))]
    pub struct BatchFeatures: u8 {
        const ALPHA_PASS = 1 << 0;
        const ANTIALIASING = 1 << 1;
        const REPETITION = 1 << 2;
    }
}
399
400
impl PrimitiveBatch {
401
fn new(key: BatchKey) -> PrimitiveBatch {
402
PrimitiveBatch {
403
key,
404
instances: Vec::new(),
405
features: BatchFeatures::empty(),
406
}
407
}
408
409
fn merge(&mut self, other: PrimitiveBatch) {
410
self.instances.extend(other.instances);
411
self.features |= other.features;
412
}
413
}
414
415
/// The opaque and alpha batches produced for a single render task, along
/// with that task's scissor information.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct AlphaBatchContainer {
    pub opaque_batches: Vec<PrimitiveBatch>,
    pub alpha_batches: Vec<PrimitiveBatch>,
    /// The overall scissor rect for this render task, if one
    /// is required.
    pub task_scissor_rect: Option<DeviceIntRect>,
    /// The rectangle of the owning render target that this
    /// set of batches affects.
    pub task_rect: DeviceIntRect,
}
427
428
impl AlphaBatchContainer {
    /// Create an empty container, optionally scoped to a scissor rect.
    pub fn new(
        task_scissor_rect: Option<DeviceIntRect>,
    ) -> AlphaBatchContainer {
        AlphaBatchContainer {
            opaque_batches: Vec::new(),
            alpha_batches: Vec::new(),
            task_scissor_rect,
            task_rect: DeviceIntRect::zero(),
        }
    }

    /// True when neither pass holds any batches.
    pub fn is_empty(&self) -> bool {
        self.opaque_batches.is_empty() &&
        self.alpha_batches.is_empty()
    }

    /// Merge the batches built by `builder` into this container, growing
    /// `task_rect` to also cover the builder's task rect.
    fn merge(&mut self, builder: AlphaBatchBuilder, task_rect: &DeviceIntRect) {
        self.task_rect = self.task_rect.union(task_rect);

        // Opaque content is z-tested, so an incoming batch can merge into
        // any key-compatible existing batch regardless of position.
        for other_batch in builder.opaque_batch_list.batches {
            let batch_index = self.opaque_batches.iter().position(|batch| {
                batch.key.is_compatible_with(&other_batch.key)
            });

            match batch_index {
                Some(batch_index) => {
                    self.opaque_batches[batch_index].merge(other_batch);
                }
                None => {
                    self.opaque_batches.push(other_batch);
                }
            }
        }

        // Alpha batches depend on draw order, so we only search forward
        // from the last merge point: a later incoming batch must never land
        // in a batch that precedes where an earlier one was merged.
        let mut min_batch_index = 0;

        for other_batch in builder.alpha_batch_list.batches {
            let batch_index = self.alpha_batches.iter().skip(min_batch_index).position(|batch| {
                batch.key.is_compatible_with(&other_batch.key)
            });

            match batch_index {
                Some(batch_index) => {
                    // `position` is relative to the skipped prefix, so
                    // translate it back to an absolute index.
                    let index = batch_index + min_batch_index;
                    self.alpha_batches[index].merge(other_batch);
                    min_batch_index = index;
                }
                None => {
                    self.alpha_batches.push(other_batch);
                    min_batch_index = self.alpha_batches.len();
                }
            }
        }
    }
}
484
485
/// Each segment can optionally specify a per-segment
/// texture set and one user data field.
#[derive(Debug, Copy, Clone)]
struct SegmentInstanceData {
    /// Texture set bound when drawing this segment.
    textures: BatchTextures,
    /// Per-segment resource address passed through as the instance's
    /// `resource_address`.
    specific_resource_address: i32,
}
492
493
/// Encapsulates the logic of building batches for items that are blended.
pub struct AlphaBatchBuilder {
    pub alpha_batch_list: AlphaBatchList,
    pub opaque_batch_list: OpaqueBatchList,
    /// The render task these batches are being built for.
    pub render_task_id: RenderTaskId,
    /// Address of that task, copied into every instance pushed here.
    render_task_address: RenderTaskAddress,
    /// Primitives are only added to this builder when their visibility
    /// mask intersects this one.
    pub vis_mask: PrimitiveVisibilityMask,
}
501
502
impl AlphaBatchBuilder {
    /// Create a builder for one render task. `screen_size` is only used to
    /// derive the opaque-list area threshold.
    pub fn new(
        screen_size: DeviceIntSize,
        break_advanced_blend_batches: bool,
        lookback_count: usize,
        render_task_id: RenderTaskId,
        render_task_address: RenderTaskAddress,
        vis_mask: PrimitiveVisibilityMask,
    ) -> Self {
        // The threshold for creating a new batch is
        // one quarter the screen size.
        let batch_area_threshold = (screen_size.width * screen_size.height) as f32 / 4.0;

        AlphaBatchBuilder {
            alpha_batch_list: AlphaBatchList::new(break_advanced_blend_batches, lookback_count),
            opaque_batch_list: OpaqueBatchList::new(batch_area_threshold, lookback_count),
            render_task_id,
            render_task_address,
            vis_mask,
        }
    }

    /// Clear all current batches in this builder. This is typically used
    /// when a primitive is encountered that occludes all previous
    /// content in this batch list.
    fn clear(&mut self) {
        self.alpha_batch_list.clear();
        self.opaque_batch_list.clear();
    }

    /// Finish building: finalize the opaque list (reversing it for
    /// front-to-back draw order), then either fold the results into
    /// `merged_batches` (no scissor rect) or emit a standalone container.
    pub fn build(
        mut self,
        batch_containers: &mut Vec<AlphaBatchContainer>,
        merged_batches: &mut AlphaBatchContainer,
        task_rect: DeviceIntRect,
        task_scissor_rect: Option<DeviceIntRect>,
    ) {
        self.opaque_batch_list.finalize();

        if task_scissor_rect.is_none() {
            merged_batches.merge(self, &task_rect);
        } else {
            batch_containers.push(AlphaBatchContainer {
                alpha_batches: self.alpha_batch_list.batches,
                opaque_batches: self.opaque_batch_list.batches,
                task_scissor_rect,
                task_rect,
            });
        }
    }

    /// Convenience wrapper: select the batch for `key` and push a single
    /// instance into it.
    pub fn push_single_instance(
        &mut self,
        key: BatchKey,
        features: BatchFeatures,
        bounding_rect: &PictureRect,
        z_id: ZBufferId,
        instance: PrimitiveInstanceData,
    ) {
        self.set_params_and_get_batch(key, features, bounding_rect, z_id)
            .push(instance);
    }

    /// Route to the opaque list for `BlendMode::None`, otherwise to the
    /// alpha (blended) list, and return the selected batch's instances.
    pub fn set_params_and_get_batch(
        &mut self,
        key: BatchKey,
        features: BatchFeatures,
        bounding_rect: &PictureRect,
        z_id: ZBufferId,
    ) -> &mut Vec<PrimitiveInstanceData> {
        match key.blend_mode {
            BlendMode::None => {
                self.opaque_batch_list
                    .set_params_and_get_batch(key, features, bounding_rect)
            }
            BlendMode::Alpha |
            BlendMode::PremultipliedAlpha |
            BlendMode::PremultipliedDestOut |
            BlendMode::SubpixelConstantTextColor(..) |
            BlendMode::SubpixelWithBgColor |
            BlendMode::SubpixelDualSource |
            BlendMode::Advanced(_) => {
                self.alpha_batch_list
                    .set_params_and_get_batch(key, features, bounding_rect, z_id)
            }
        }
    }
}
590
591
/// Supports (recursively) adding a list of primitives and pictures to an alpha batch
/// builder. In future, it will support multiple dirty regions / slices, allowing the
/// contents of a picture to be spliced into multiple batch builders.
pub struct BatchBuilder {
    /// A temporary buffer that is used during glyph fetching, stored here
    /// to reduce memory allocations.
    glyph_fetch_buffer: Vec<GlyphFetchResult>,

    /// The per-task batchers driven by this builder; each primitive is
    /// dispatched to every batcher whose visibility mask it intersects.
    pub batchers: Vec<AlphaBatchBuilder>,
}
601
602
impl BatchBuilder {
603
pub fn new(batchers: Vec<AlphaBatchBuilder>) -> Self {
604
BatchBuilder {
605
glyph_fetch_buffer: Vec::new(),
606
batchers,
607
}
608
}
609
610
pub fn finalize(self) -> Vec<AlphaBatchBuilder> {
611
self.batchers
612
}
613
614
fn add_brush_instance_to_batches(
615
&mut self,
616
batch_key: BatchKey,
617
features: BatchFeatures,
618
bounding_rect: &PictureRect,
619
z_id: ZBufferId,
620
segment_index: i32,
621
edge_flags: EdgeAaSegmentMask,
622
clip_task_address: RenderTaskAddress,
623
brush_flags: BrushFlags,
624
prim_header_index: PrimitiveHeaderIndex,
625
resource_address: i32,
626
prim_vis_mask: PrimitiveVisibilityMask,
627
) {
628
for batcher in &mut self.batchers {
629
if batcher.vis_mask.intersects(prim_vis_mask) {
630
let render_task_address = batcher.render_task_address;
631
632
let instance = BrushInstance {
633
segment_index,
634
edge_flags,
635
clip_task_address,
636
render_task_address,
637
brush_flags,
638
prim_header_index,
639
resource_address,
640
brush_kind: batch_key.kind.shader_kind(),
641
};
642
643
batcher.push_single_instance(
644
batch_key,
645
features,
646
bounding_rect,
647
z_id,
648
PrimitiveInstanceData::from(instance),
649
);
650
}
651
}
652
}
653
654
fn add_split_composite_instance_to_batches(
655
&mut self,
656
batch_key: BatchKey,
657
bounding_rect: &PictureRect,
658
z_id: ZBufferId,
659
prim_header_index: PrimitiveHeaderIndex,
660
polygons_address: GpuCacheAddress,
661
prim_vis_mask: PrimitiveVisibilityMask,
662
) {
663
for batcher in &mut self.batchers {
664
if batcher.vis_mask.intersects(prim_vis_mask) {
665
let render_task_address = batcher.render_task_address;
666
667
batcher.push_single_instance(
668
batch_key,
669
BatchFeatures::empty(),
670
bounding_rect,
671
z_id,
672
PrimitiveInstanceData::from(SplitCompositeInstance {
673
prim_header_index,
674
render_task_address,
675
polygons_address,
676
z: z_id,
677
}),
678
);
679
}
680
}
681
}
682
683
/// Clear all current batchers. This is typically used when a primitive
684
/// is encountered that occludes all previous content in this batch list.
685
fn clear_batches(&mut self) {
686
for batcher in &mut self.batchers {
687
batcher.clear();
688
}
689
}
690
691
/// Add a picture to a given batch builder.
///
/// Walks every cluster in the picture's primitive list and forwards each
/// primitive instance to `add_prim_to_batch`, paired with its cluster's
/// spatial node index.
pub fn add_pic_to_batch(
    &mut self,
    pic: &PicturePrimitive,
    ctx: &RenderTargetContext,
    gpu_cache: &mut GpuCache,
    render_tasks: &RenderTaskGraph,
    deferred_resolves: &mut Vec<DeferredResolve>,
    prim_headers: &mut PrimitiveHeaders,
    transforms: &mut TransformPalette,
    root_spatial_node_index: SpatialNodeIndex,
    surface_spatial_node_index: SpatialNodeIndex,
    z_generator: &mut ZBufferIdGenerator,
    composite_state: &mut CompositeState,
) {
    for cluster in &pic.prim_list.clusters {
        // Add each run in this picture to the batch.
        for prim_instance in &cluster.prim_instances {
            self.add_prim_to_batch(
                prim_instance,
                cluster.spatial_node_index,
                ctx,
                gpu_cache,
                render_tasks,
                deferred_resolves,
                prim_headers,
                transforms,
                root_spatial_node_index,
                surface_spatial_node_index,
                z_generator,
                composite_state,
            );
        }
    }
}
726
727
// Adds a primitive to a batch.
728
// It can recursively call itself in some situations, for
729
// example if it encounters a picture where the items
730
// in that picture are being drawn into the same target.
731
fn add_prim_to_batch(
732
&mut self,
733
prim_instance: &PrimitiveInstance,
734
prim_spatial_node_index: SpatialNodeIndex,
735
ctx: &RenderTargetContext,
736
gpu_cache: &mut GpuCache,
737
render_tasks: &RenderTaskGraph,
738
deferred_resolves: &mut Vec<DeferredResolve>,
739
prim_headers: &mut PrimitiveHeaders,
740
transforms: &mut TransformPalette,
741
root_spatial_node_index: SpatialNodeIndex,
742
surface_spatial_node_index: SpatialNodeIndex,
743
z_generator: &mut ZBufferIdGenerator,
744
composite_state: &mut CompositeState,
745
) {
746
if prim_instance.visibility_info == PrimitiveVisibilityIndex::INVALID {
747
return;
748
}
749
750
#[cfg(debug_assertions)] //TODO: why is this needed?
751
debug_assert_eq!(prim_instance.prepared_frame_id, render_tasks.frame_id());
752
753
let is_chased = prim_instance.is_chased();
754
755
let transform_id = transforms
756
.get_id(
757
prim_spatial_node_index,
758
root_spatial_node_index,
759
ctx.spatial_tree,
760
);
761
762
// TODO(gw): Calculating this for every primitive is a bit
763
// wasteful. We should probably cache this in
764
// the scroll node...
765
let transform_kind = transform_id.transform_kind();
766
let prim_info = &ctx.scratch.prim_info[prim_instance.visibility_info.0 as usize];
767
let bounding_rect = &prim_info.clip_chain.pic_clip_rect;
768
769
// If this primitive is a backdrop, that means that it is known to cover
770
// the entire picture cache background. In that case, the renderer will
771
// use the backdrop color as a clear color, and so we can drop this
772
// primitive and any prior primitives from the batch lists for this
773
// picture cache slice.
774
if prim_info.flags.contains(PrimitiveVisibilityFlags::IS_BACKDROP) {
775
self.clear_batches();
776
return;
777
}
778
779
let z_id = z_generator.next();
780
781
let prim_common_data = &ctx.data_stores.as_common_data(&prim_instance);
782
let prim_rect = LayoutRect::new(
783
prim_instance.prim_origin,
784
prim_common_data.prim_size
785
);
786
787
let mut batch_features = BatchFeatures::empty();
788
if prim_common_data.may_need_repetition {
789
batch_features |= BatchFeatures::REPETITION;
790
}
791
792
if transform_kind != TransformedRectKind::AxisAligned {
793
batch_features |= BatchFeatures::ANTIALIASING;
794
}
795
796
let prim_vis_mask = prim_info.visibility_mask;
797
let clip_task_address = ctx.get_prim_clip_task_address(
798
prim_info.clip_task_index,
799
render_tasks,
800
);
801
802
if is_chased {
803
println!("\tbatch {:?} with bound {:?} and clip task {:?}", prim_rect, bounding_rect, clip_task_address);
804
}
805
806
if !bounding_rect.is_empty() {
807
debug_assert_eq!(prim_info.clip_chain.pic_spatial_node_index, surface_spatial_node_index,
808
"The primitive's bounding box is specified in a different coordinate system from the current batch!");
809
}
810
811
match prim_instance.kind {
812
PrimitiveInstanceKind::PushClipChain |
813
PrimitiveInstanceKind::PopClipChain => {}
814
815
PrimitiveInstanceKind::Clear { data_handle } => {
816
let prim_data = &ctx.data_stores.prim[data_handle];
817
let prim_cache_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
818
819
// TODO(gw): We can abstract some of the common code below into
820
// helper methods, as we port more primitives to make
821
// use of interning.
822
823
let prim_header = PrimitiveHeader {
824
local_rect: prim_rect,
825
local_clip_rect: prim_info.combined_local_clip_rect,
826
specific_prim_address: prim_cache_address,
827
transform_id,
828
};
829
830
let prim_header_index = prim_headers.push(
831
&prim_header,
832
z_id,
833
[get_shader_opacity(1.0), 0, 0, 0],
834
);
835
836
let batch_key = BatchKey {
837
blend_mode: BlendMode::PremultipliedDestOut,
838
kind: BatchKind::Brush(BrushBatchKind::Solid),
839
textures: BatchTextures::no_texture(),
840
};
841
842
self.add_brush_instance_to_batches(
843
batch_key,
844
batch_features,
845
bounding_rect,
846
z_id,
847
INVALID_SEGMENT_INDEX,
848
EdgeAaSegmentMask::all(),
849
clip_task_address.unwrap(),
850
BrushFlags::PERSPECTIVE_INTERPOLATION,
851
prim_header_index,
852
0,
853
prim_vis_mask,
854
);
855
}
856
PrimitiveInstanceKind::NormalBorder { data_handle, ref cache_handles, .. } => {
857
let prim_data = &ctx.data_stores.normal_border[data_handle];
858
let common_data = &prim_data.common;
859
let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
860
let cache_handles = &ctx.scratch.border_cache_handles[*cache_handles];
861
let specified_blend_mode = BlendMode::PremultipliedAlpha;
862
let mut segment_data: SmallVec<[SegmentInstanceData; 8]> = SmallVec::new();
863
864
// Collect the segment instance data from each render
865
// task for each valid edge / corner of the border.
866
867
for handle in cache_handles {
868
let rt_cache_entry = ctx.resource_cache
869
.get_cached_render_task(handle);
870
let cache_item = ctx.resource_cache
871
.get_texture_cache_item(&rt_cache_entry.handle);
872
segment_data.push(
873
SegmentInstanceData {
874
textures: BatchTextures::color(cache_item.texture_id),
875
specific_resource_address: cache_item.uv_rect_handle.as_int(gpu_cache),
876
}
877
);
878
}
879
880
let non_segmented_blend_mode = if !common_data.opacity.is_opaque ||
881
prim_info.clip_task_index != ClipTaskIndex::INVALID ||
882
transform_kind == TransformedRectKind::Complex
883
{
884
specified_blend_mode
885
} else {
886
BlendMode::None
887
};
888
889
let prim_header = PrimitiveHeader {
890
local_rect: prim_rect,
891
local_clip_rect: prim_info.combined_local_clip_rect,
892
specific_prim_address: prim_cache_address,
893
transform_id,
894
};
895
896
let batch_params = BrushBatchParameters::instanced(
897
BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
898
ImageBrushData {
899
color_mode: ShaderColorMode::Image,
900
alpha_type: AlphaType::PremultipliedAlpha,
901
raster_space: RasterizationSpace::Local,
902
opacity: 1.0,
903
}.encode(),
904
segment_data,
905
);
906
907
let prim_header_index = prim_headers.push(
908
&prim_header,
909
z_id,
910
batch_params.prim_user_data,
911
);
912
913
let border_data = &prim_data.kind;
914
self.add_segmented_prim_to_batch(
915
Some(border_data.brush_segments.as_slice()),
916
common_data.opacity,
917
&batch_params,
918
specified_blend_mode,
919
non_segmented_blend_mode,
920
batch_features,
921
prim_header_index,
922
bounding_rect,
923
transform_kind,
924
render_tasks,
925
z_id,
926
prim_info.clip_task_index,
927
prim_vis_mask,
928
ctx,
929
);
930
}
931
PrimitiveInstanceKind::TextRun { data_handle, run_index, .. } => {
932
let run = &ctx.prim_store.text_runs[run_index];
933
let subpx_dir = run.used_font.get_subpx_dir();
934
935
// The GPU cache data is stored in the template and reused across
936
// frames and display lists.
937
let prim_data = &ctx.data_stores.text_run[data_handle];
938
let prim_cache_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
939
940
// The local prim rect is only informative for text primitives, as
941
// thus is not directly necessary for any drawing of the text run.
942
// However the glyph offsets are relative to the prim rect origin
943
// less the unsnapped reference frame offset. We also want the
944
// the snapped reference frame offset, because cannot recalculate
945
// it as it ignores the animated components for the transform. As
946
// such, we adjust the prim rect origin here, and replace the size
947
// with the unsnapped and snapped offsets respectively. This has
948
// the added bonus of avoiding quantization effects when storing
949
// floats in the extra header integers.
950
let prim_header = PrimitiveHeader {
951
local_rect: LayoutRect::new(
952
prim_rect.origin - run.reference_frame_relative_offset,
953
run.snapped_reference_frame_relative_offset.to_size(),
954
),
955
local_clip_rect: prim_info.combined_local_clip_rect,
956
specific_prim_address: prim_cache_address,
957
transform_id,
958
};
959
960
let glyph_keys = &ctx.scratch.glyph_keys[run.glyph_keys_range];
961
let raster_scale = run.raster_space.local_scale().unwrap_or(1.0).max(0.001);
962
let prim_header_index = prim_headers.push(
963
&prim_header,
964
z_id,
965
[
966
(raster_scale * 65535.0).round() as i32,
967
0,
968
0,
969
0,
970
],
971
);
972
let base_instance = GlyphInstance::new(
973
prim_header_index,
974
);
975
let batchers = &mut self.batchers;
976
977
ctx.resource_cache.fetch_glyphs(
978
run.used_font.clone(),
979
&glyph_keys,
980
&mut self.glyph_fetch_buffer,
981
gpu_cache,
982
|texture_id, mut glyph_format, glyphs| {
983
debug_assert_ne!(texture_id, TextureSource::Invalid);
984
985
// Ignore color and only sample alpha when shadowing.
986
if run.shadow {
987
glyph_format = glyph_format.ignore_color();
988
}
989
990
let subpx_dir = subpx_dir.limit_by(glyph_format);
991
992
let textures = BatchTextures {
993
colors: [
994
texture_id,
995
TextureSource::Invalid,
996
TextureSource::Invalid,
997
],
998
};
999
1000
let kind = BatchKind::TextRun(glyph_format);
1001
1002
let (blend_mode, color_mode) = match glyph_format {
1003
GlyphFormat::Subpixel |
1004
GlyphFormat::TransformedSubpixel => {
1005
if run.used_font.bg_color.a != 0 {
1006
(
1007
BlendMode::SubpixelWithBgColor,
1008
ShaderColorMode::FromRenderPassMode,
1009
)
1010
} else if ctx.use_dual_source_blending {
1011
(
1012
BlendMode::SubpixelDualSource,
1013
ShaderColorMode::SubpixelDualSource,
1014
)
1015
} else {
1016
(
1017
BlendMode::SubpixelConstantTextColor(run.used_font.color.into()),
1018
ShaderColorMode::SubpixelConstantTextColor,
1019
)
1020
}
1021
}
1022
GlyphFormat::Alpha |
1023
GlyphFormat::TransformedAlpha => {
1024
(
1025
BlendMode::PremultipliedAlpha,
1026
ShaderColorMode::Alpha,
1027
)
1028
}
1029
GlyphFormat::Bitmap => {
1030
(
1031
BlendMode::PremultipliedAlpha,
1032
ShaderColorMode::Bitmap,
1033
)
1034
}
1035
GlyphFormat::ColorBitmap => {
1036
(
1037
BlendMode::PremultipliedAlpha,
1038
ShaderColorMode::ColorBitmap,
1039
)
1040
}
1041
};
1042
1043
let key = BatchKey::new(kind, blend_mode, textures);
1044
1045
for batcher in batchers.iter_mut() {
1046
if batcher.vis_mask.intersects(prim_vis_mask) {
1047
let render_task_address = batcher.render_task_address;
1048
let batch = batcher.alpha_batch_list.set_params_and_get_batch(
1049
key,
1050
BatchFeatures::empty(),
1051
bounding_rect,
1052
z_id,
1053
);
1054
1055
for glyph in glyphs {
1056
batch.push(base_instance.build(
1057
render_task_address,
1058
clip_task_address.unwrap(),
1059
subpx_dir,
1060
glyph.index_in_text_run,
1061
glyph.uv_rect_address,
1062
color_mode,
1063
));
1064
}
1065
}
1066
}
1067
},
1068
);
1069
}
1070
PrimitiveInstanceKind::LineDecoration { data_handle, ref cache_handle, .. } => {
1071
// The GPU cache data is stored in the template and reused across
1072
// frames and display lists.
1073
let common_data = &ctx.data_stores.line_decoration[data_handle].common;
1074
let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
1075
1076
let (batch_kind, textures, prim_user_data, specific_resource_address) = match cache_handle {
1077
Some(cache_handle) => {
1078
let rt_cache_entry = ctx
1079
.resource_cache
1080
.get_cached_render_task(cache_handle);
1081
let cache_item = ctx
1082
.resource_cache
1083
.get_texture_cache_item(&rt_cache_entry.handle);
1084
let textures = BatchTextures::color(cache_item.texture_id);
1085
(
1086
BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
1087
textures,
1088
ImageBrushData {
1089
color_mode: ShaderColorMode::Image,
1090
alpha_type: AlphaType::PremultipliedAlpha,
1091
raster_space: RasterizationSpace::Local,
1092
opacity: 1.0,
1093
}.encode(),
1094
cache_item.uv_rect_handle.as_int(gpu_cache),
1095
)
1096
}
1097
None => {
1098
(
1099
BrushBatchKind::Solid,
1100
BatchTextures::no_texture(),
1101
[get_shader_opacity(1.0), 0, 0, 0],
1102
0,
1103
)
1104
}
1105
};
1106
1107
// TODO(gw): We can abstract some of the common code below into
1108
// helper methods, as we port more primitives to make
1109
// use of interning.
1110
let blend_mode = if !common_data.opacity.is_opaque ||
1111
prim_info.clip_task_index != ClipTaskIndex::INVALID ||
1112
transform_kind == TransformedRectKind::Complex
1113
{
1114
BlendMode::PremultipliedAlpha
1115
} else {
1116
BlendMode::None
1117
};
1118
1119
let prim_header = PrimitiveHeader {
1120
local_rect: prim_rect,
1121
local_clip_rect: prim_info.combined_local_clip_rect,
1122
specific_prim_address: prim_cache_address,
1123
transform_id,
1124
};
1125
1126
let prim_header_index = prim_headers.push(
1127
&prim_header,
1128
z_id,
1129
prim_user_data,
1130
);
1131
1132
let batch_key = BatchKey {
1133
blend_mode,
1134
kind: BatchKind::Brush(batch_kind),
1135
textures,
1136
};
1137
1138
self.add_brush_instance_to_batches(
1139
batch_key,
1140
batch_features,
1141
bounding_rect,
1142
z_id,
1143
INVALID_SEGMENT_INDEX,
1144
EdgeAaSegmentMask::all(),
1145
clip_task_address.unwrap(),
1146
BrushFlags::PERSPECTIVE_INTERPOLATION,
1147
prim_header_index,
1148
specific_resource_address,
1149
prim_vis_mask,
1150
);
1151
}
1152
PrimitiveInstanceKind::Picture { pic_index, segment_instance_index, .. } => {
1153
let picture = &ctx.prim_store.pictures[pic_index.0];
1154
let non_segmented_blend_mode = BlendMode::PremultipliedAlpha;
1155
let prim_cache_address = gpu_cache.get_address(&ctx.globals.default_image_handle);
1156
1157
let prim_header = PrimitiveHeader {
1158
local_rect: picture.precise_local_rect,
1159
local_clip_rect: prim_info.combined_local_clip_rect,
1160
specific_prim_address: prim_cache_address,
1161
transform_id,
1162
};
1163
1164
match picture.context_3d {
1165
// Convert all children of the 3D hierarchy root into batches.
1166
Picture3DContext::In { root_data: Some(ref list), .. } => {
1167
for child in list {
1168
let cluster = &picture.prim_list.clusters[child.anchor.cluster_index];
1169
let child_prim_instance = &cluster.prim_instances[child.anchor.instance_index];
1170
let child_prim_info = &ctx.scratch.prim_info[child_prim_instance.visibility_info.0 as usize];
1171
1172
let child_pic_index = match child_prim_instance.kind {
1173
PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index,
1174
_ => unreachable!(),
1175
};
1176
let pic = &ctx.prim_store.pictures[child_pic_index.0];
1177
1178
// Get clip task, if set, for the picture primitive.
1179
let child_clip_task_address = ctx.get_prim_clip_task_address(
1180
child_prim_info.clip_task_index,
1181
render_tasks,
1182
);
1183
1184
let prim_header = PrimitiveHeader {
1185
local_rect: pic.precise_local_rect,
1186
local_clip_rect: child_prim_info.combined_local_clip_rect,
1187
specific_prim_address: GpuCacheAddress::INVALID,
1188
transform_id: transforms
1189
.get_id(
1190
child.spatial_node_index,
1191
root_spatial_node_index,
1192
ctx.spatial_tree,
1193
),
1194
};
1195
1196
let raster_config = pic
1197
.raster_config
1198
.as_ref()
1199
.expect("BUG: 3d primitive was not assigned a surface");
1200
let (uv_rect_address, _) = render_tasks.resolve_surface(
1201
ctx.surfaces[raster_config.surface_index.0]
1202
.render_tasks
1203
.expect("BUG: no surface")
1204
.root,
1205
gpu_cache,
1206
);
1207
1208
// Need a new z-id for each child preserve-3d context added
1209
// by this inner loop.
1210
let z_id = z_generator.next();
1211
1212
let prim_header_index = prim_headers.push(&prim_header, z_id, [
1213
uv_rect_address.as_int(),
1214
if raster_config.establishes_raster_root { 1 } else { 0 },
1215
0,
1216
child_clip_task_address.unwrap().0 as i32,
1217
]);
1218
1219
let key = BatchKey::new(
1220
BatchKind::SplitComposite,
1221
BlendMode::PremultipliedAlpha,
1222
BatchTextures::no_texture(),
1223
);
1224
1225
self.add_split_composite_instance_to_batches(
1226
key,
1227
&child_prim_info.clip_chain.pic_clip_rect,
1228
z_id,
1229
prim_header_index,
1230
child.gpu_address,
1231
child_prim_info.visibility_mask,
1232
);
1233
}
1234
}
1235
// Ignore the 3D pictures that are not in the root of preserve-3D
1236
// hierarchy, since we process them with the root.
1237
Picture3DContext::In { root_data: None, .. } => return,
1238
// Proceed for non-3D pictures.
1239
Picture3DContext::Out => ()
1240
}
1241
1242
match picture.raster_config {
1243
Some(ref raster_config) => {
1244
// If the child picture was rendered in local space, we can safely
1245
// interpolate the UV coordinates with perspective correction.
1246
let brush_flags = if raster_config.establishes_raster_root {
1247
BrushFlags::PERSPECTIVE_INTERPOLATION
1248
} else {
1249
BrushFlags::empty()
1250
};
1251
1252
let surface = &ctx.surfaces[raster_config.surface_index.0];
1253
let surface_task = surface.render_tasks.map(|s| s.root);
1254
1255
match raster_config.composite_mode {
1256
PictureCompositeMode::TileCache { .. } => {
1257
// Tile cache instances are added to the composite config, rather than
1258
// directly added to batches. This allows them to be drawn with various
1259
// present modes during render, such as partial present etc.
1260
let tile_cache = picture.tile_cache.as_ref().unwrap();
1261
let map_local_to_world = SpaceMapper::new_with_target(
1262
ROOT_SPATIAL_NODE_INDEX,
1263
tile_cache.spatial_node_index,
1264
ctx.screen_world_rect,
1265
ctx.spatial_tree,
1266
);
1267
// TODO(gw): As a follow up to the valid_rect work, see why we use
1268
// prim_info.combined_local_clip_rect here instead of the
1269
// local_clip_rect built in the TileCacheInstance. Perhaps
1270
// these can be unified or are different for a good reason?
1271
let world_clip_rect = map_local_to_world
1272
.map(&prim_info.combined_local_clip_rect)
1273
.expect("bug: unable to map clip rect");
1274
let device_clip_rect = (world_clip_rect * ctx.global_device_pixel_scale).round();
1275
1276
composite_state.push_surface(
1277
tile_cache,
1278
device_clip_rect,
1279
ctx.global_device_pixel_scale,
1280
ctx.resource_cache,
1281
gpu_cache,
1282
deferred_resolves,
1283
);
1284
}
1285
PictureCompositeMode::Filter(ref filter) => {
1286
assert!(filter.is_visible());
1287
match filter {
1288
Filter::Blur(..) => {
1289
let kind = BatchKind::Brush(
1290
BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
1291
);
1292
let (uv_rect_address, textures) = render_tasks.resolve_surface(
1293
surface_task.expect("bug: surface must be allocated by now"),
1294
gpu_cache,
1295
);
1296
let key = BatchKey::new(
1297
kind,
1298
non_segmented_blend_mode,
1299
textures,
1300
);
1301
let prim_header_index = prim_headers.push(
1302
&prim_header,
1303
z_id,
1304
ImageBrushData {
1305
color_mode: ShaderColorMode::Image,
1306
alpha_type: AlphaType::PremultipliedAlpha,
1307
raster_space: RasterizationSpace::Screen,
1308
opacity: 1.0,
1309
}.encode(),
1310
);
1311
1312
self.add_brush_instance_to_batches(
1313
key,
1314
batch_features,
1315
bounding_rect,
1316
z_id,
1317
INVALID_SEGMENT_INDEX,
1318
EdgeAaSegmentMask::empty(),
1319
clip_task_address.unwrap(),
1320
brush_flags,
1321
prim_header_index,
1322
uv_rect_address.as_int(),
1323
prim_vis_mask,
1324
);
1325
}
1326
Filter::DropShadows(shadows) => {
1327
// Draw an instance per shadow first, following by the content.
1328
1329
// The shadows and the content get drawn as a brush image.
1330
let kind = BatchKind::Brush(
1331
BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
1332
);
1333
1334
// Gets the saved render task ID of the content, which is
1335
// deeper in the render task graph than the direct child.
1336
let secondary_id = picture.secondary_render_task_id.expect("no secondary!?");
1337
let content_source = {
1338
let secondary_task = &render_tasks[secondary_id];
1339
let saved_index = secondary_task.saved_index.expect("no saved index!?");
1340
debug_assert_ne!(saved_index, SavedTargetIndex::PENDING);
1341
TextureSource::RenderTaskCache(saved_index, Swizzle::default())
1342
};
1343
1344
// Build BatchTextures for shadow/content
1345
let shadow_textures = BatchTextures::render_target_cache();
1346
let content_textures = BatchTextures {
1347
colors: [
1348
content_source,
1349
TextureSource::Invalid,
1350
TextureSource::Invalid,
1351
],
1352
};
1353
1354
// Build batch keys for shadow/content
1355
let shadow_key = BatchKey::new(kind, non_segmented_blend_mode, shadow_textures);
1356
let content_key = BatchKey::new(kind, non_segmented_blend_mode, content_textures);
1357
1358
// Retrieve the UV rect addresses for shadow/content.
1359
let cache_task_id = surface_task
1360
.expect("bug: surface must be allocated by now");
1361
let shadow_uv_rect_address = render_tasks[cache_task_id]
1362
.get_texture_address(gpu_cache)
1363
.as_int();
1364
let content_uv_rect_address = render_tasks[secondary_id]
1365
.get_texture_address(gpu_cache)
1366
.as_int();
1367
1368
for (shadow, shadow_gpu_data) in shadows.iter().zip(picture.extra_gpu_data_handles.iter()) {
1369
// Get the GPU cache address of the extra data handle.
1370
let shadow_prim_address = gpu_cache.get_address(shadow_gpu_data);
1371
1372
let shadow_rect = prim_header.local_rect.translate(shadow.offset);
1373
1374
let shadow_prim_header = PrimitiveHeader {
1375
local_rect: shadow_rect,
1376
specific_prim_address: shadow_prim_address,
1377
..prim_header
1378
};
1379
1380
let shadow_prim_header_index = prim_headers.push(
1381
&shadow_prim_header,
1382
z_id,
1383
ImageBrushData {
1384
color_mode: ShaderColorMode::Alpha,
1385
alpha_type: AlphaType::PremultipliedAlpha,
1386
raster_space: RasterizationSpace::Screen,
1387
opacity: 1.0,
1388
}.encode(),
1389
);
1390
1391
self.add_brush_instance_to_batches(
1392
shadow_key,
1393
batch_features,
1394
bounding_rect,
1395
z_id,
1396
INVALID_SEGMENT_INDEX,
1397
EdgeAaSegmentMask::empty(),
1398
clip_task_address.unwrap(),
1399
brush_flags,
1400
shadow_prim_header_index,
1401
shadow_uv_rect_address,
1402
prim_vis_mask,
1403
);
1404
}
1405
let z_id_content = z_generator.next();
1406
1407
let content_prim_header_index = prim_headers.push(
1408
&prim_header,
1409
z_id_content,
1410
ImageBrushData {
1411
color_mode: ShaderColorMode::Image,
1412
alpha_type: AlphaType::PremultipliedAlpha,
1413
raster_space: RasterizationSpace::Screen,
1414
opacity: 1.0,
1415
}.encode(),
1416
);
1417
1418
self.add_brush_instance_to_batches(
1419
content_key,
1420
batch_features,
1421
bounding_rect,
1422
z_id_content,
1423
INVALID_SEGMENT_INDEX,
1424
EdgeAaSegmentMask::empty(),
1425
clip_task_address.unwrap(),
1426
brush_flags,
1427
content_prim_header_index,
1428
content_uv_rect_address,
1429
prim_vis_mask,
1430
);
1431
}
1432
Filter::Opacity(_, amount) => {
1433
let amount = (amount * 65536.0) as i32;
1434
1435
let (uv_rect_address, textures) = render_tasks.resolve_surface(
1436
surface_task.expect("bug: surface must be allocated by now"),
1437
gpu_cache,
1438
);
1439
1440
let key = BatchKey::new(
1441
BatchKind::Brush(BrushBatchKind::Opacity),
1442
BlendMode::PremultipliedAlpha,
1443
textures,
1444
);
1445
1446
let prim_header_index = prim_headers.push(&prim_header, z_id, [
1447
uv_rect_address.as_int(),
1448
amount,
1449
0,
1450
0,
1451
]);
1452
1453
self.add_brush_instance_to_batches(
1454
key,
1455
batch_features,
1456
bounding_rect,
1457
z_id,
1458
INVALID_SEGMENT_INDEX,
1459
EdgeAaSegmentMask::empty(),
1460
clip_task_address.unwrap(),
1461
brush_flags,
1462
prim_header_index,
1463
0,
1464
prim_vis_mask,
1465
);
1466
}
1467
_ => {
1468
// Must be kept in sync with brush_blend.glsl
1469
let filter_mode = filter.as_int();
1470
1471
let user_data = match filter {
1472
Filter::Identity => 0x10000i32, // matches `Contrast(1)`
1473
Filter::Contrast(amount) |
1474
Filter::Grayscale(amount) |
1475
Filter::Invert(amount) |
1476
Filter::Saturate(amount) |
1477
Filter::Sepia(amount) |
1478
Filter::Brightness(amount) => {
1479
(amount * 65536.0) as i32
1480
}
1481
Filter::SrgbToLinear | Filter::LinearToSrgb => 0,
1482
Filter::HueRotate(angle) => {
1483
(0.01745329251 * angle * 65536.0) as i32
1484
}
1485
Filter::ColorMatrix(_) => {
1486
picture.extra_gpu_data_handles[0].as_int(gpu_cache)
1487
}
1488
Filter::Flood(_) => {
1489
picture.extra_gpu_data_handles[0].as_int(gpu_cache)
1490
}
1491
1492
// These filters are handled via different paths.
1493
Filter::ComponentTransfer |
1494
Filter::Blur(..) |
1495
Filter::DropShadows(..) |
1496
Filter::Opacity(..) => unreachable!(),
1497
};
1498
1499
let (uv_rect_address, textures) = render_tasks.resolve_surface(
1500
surface_task.expect("bug: surface must be allocated by now"),
1501
gpu_cache,
1502
);
1503
1504
let key = BatchKey::new(
1505
BatchKind::Brush(BrushBatchKind::Blend),
1506
BlendMode::PremultipliedAlpha,
1507
textures,
1508
);
1509
1510
let prim_header_index = prim_headers.push(&prim_header, z_id, [
1511
uv_rect_address.as_int(),
1512
filter_mode,
1513
user_data,
1514
0,
1515
]);
1516
1517
self.add_brush_instance_to_batches(
1518
key,
1519
batch_features,
1520
bounding_rect,
1521
z_id,
1522
INVALID_SEGMENT_INDEX,
1523
EdgeAaSegmentMask::empty(),
1524
clip_task_address.unwrap(),
1525
brush_flags,
1526
prim_header_index,
1527
0,
1528
prim_vis_mask,
1529
);
1530
}
1531
}
1532
}
1533
PictureCompositeMode::ComponentTransferFilter(handle) => {
1534
// This is basically the same as the general filter case above
1535
// except we store a little more data in the filter mode and
1536
// a gpu cache handle in the user data.
1537
let filter_data = &ctx.data_stores.filter_data[handle];
1538
let filter_mode : i32 = Filter::ComponentTransfer.as_int() |
1539
((filter_data.data.r_func.to_int() << 28 |
1540
filter_data.data.g_func.to_int() << 24 |
1541
filter_data.data.b_func.to_int() << 20 |
1542
filter_data.data.a_func.to_int() << 16) as i32);
1543
1544
let user_data = filter_data.gpu_cache_handle.as_int(gpu_cache);
1545
1546
let (uv_rect_address, textures) = render_tasks.resolve_surface(
1547
surface_task.expect("bug: surface must be allocated by now"),
1548
gpu_cache,
1549
);
1550
1551
let key = BatchKey::new(
1552
BatchKind::Brush(BrushBatchKind::Blend),
1553
BlendMode::PremultipliedAlpha,
1554
textures,
1555
);
1556
1557
let prim_header_index = prim_headers.push(&prim_header, z_id, [
1558
uv_rect_address.as_int(),
1559
filter_mode,
1560
user_data,
1561
0,
1562
]);
1563
1564
self.add_brush_instance_to_batches(
1565
key,
1566
batch_features,
1567
bounding_rect,
1568
z_id,
1569
INVALID_SEGMENT_INDEX,
1570
EdgeAaSegmentMask::empty(),
1571
clip_task_address.unwrap(),
1572
brush_flags,
1573
prim_header_index,
1574
0,
1575
prim_vis_mask,
1576
);
1577
}
1578
PictureCompositeMode::MixBlend(mode) if ctx.use_advanced_blending => {
1579
let (uv_rect_address, textures) = render_tasks.resolve_surface(
1580
surface_task.expect("bug: surface must be allocated by now"),
1581
gpu_cache,
1582
);
1583
let key = BatchKey::new(
1584
BatchKind::Brush(
1585
BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
1586
),
1587
BlendMode::Advanced(mode),
1588
textures,
1589
);
1590
let prim_header_index = prim_headers.push(
1591
&prim_header,
1592
z_id,
1593
ImageBrushData {
1594
color_mode: ShaderColorMode::Image,
1595
alpha_type: AlphaType::PremultipliedAlpha,
1596
raster_space: RasterizationSpace::Local,
1597
opacity: 1.0,
1598
}.encode(),
1599
);
1600
1601
self.add_brush_instance_to_batches(
1602
key,
1603
batch_features,
1604
bounding_rect,
1605
z_id,
1606
INVALID_SEGMENT_INDEX,
1607
EdgeAaSegmentMask::empty(),
1608
clip_task_address.unwrap(),
1609
brush_flags,
1610
prim_header_index,
1611
uv_rect_address.as_int(),
1612
prim_vis_mask,
1613
);
1614
}
1615
PictureCompositeMode::MixBlend(mode) => {
1616
let cache_task_id = surface_task.expect("bug: surface must be allocated by now");
1617
let backdrop_id = picture.secondary_render_task_id.expect("no backdrop!?");
1618
1619
// TODO(gw): For now, mix-blend is not supported as a picture
1620
// caching root, so we can safely assume there is
1621
// only a single batcher present.
1622
assert_eq!(self.batchers.len(), 1);
1623
1624
let key = BatchKey::new(
1625
BatchKind::Brush(
1626
BrushBatchKind::MixBlend {
1627
task_id: self.batchers[0].render_task_id,
1628
source_id: cache_task_id,
1629
backdrop_id,
1630
},
1631
),
1632
BlendMode::PremultipliedAlpha,
1633
BatchTextures::no_texture(),
1634
);
1635
let backdrop_task_address = render_tasks.get_task_address(backdrop_id);
1636
let source_task_address = render_tasks.get_task_address(cache_task_id);
1637
let prim_header_index = prim_headers.push(&prim_header, z_id, [
1638
mode as u32 as i32,
1639
backdrop_task_address.0 as i32,
1640
source_task_address.0 as i32,
1641
0,
1642
]);
1643
1644
self.add_brush_instance_to_batches(
1645
key,
1646
batch_features,
1647
bounding_rect,
1648
z_id,
1649
INVALID_SEGMENT_INDEX,
1650
EdgeAaSegmentMask::empty(),
1651
clip_task_address.unwrap(),
1652
brush_flags,
1653
prim_header_index,
1654
0,
1655
prim_vis_mask,
1656
);
1657
}
1658
PictureCompositeMode::Blit(_) => {
1659
let cache_task_id = surface_task.expect("bug: surface must be allocated by now");
1660
let uv_rect_address = render_tasks[cache_task_id]
1661
.get_texture_address(gpu_cache)
1662
.as_int();
1663
let textures = match render_tasks[cache_task_id].saved_index {
1664
Some(saved_index) => BatchTextures {
1665
colors: [
1666
TextureSource::RenderTaskCache(saved_index, Swizzle::default()),
1667
TextureSource::PrevPassAlpha,
1668
TextureSource::Invalid,
1669
]
1670
},
1671
None => BatchTextures::render_target_cache(),
1672
};
1673
let batch_params = BrushBatchParameters::shared(
1674
BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
1675
textures,
1676
ImageBrushData {
1677
color_mode: ShaderColorMode::Image,
1678
alpha_type: AlphaType::PremultipliedAlpha,
1679
raster_space: RasterizationSpace::Screen,
1680
opacity: 1.0,
1681
}.encode(),
1682
uv_rect_address,
1683
);
1684
1685
let is_segmented =
1686
segment_instance_index != SegmentInstanceIndex::INVALID &&
1687
segment_instance_index != SegmentInstanceIndex::UNUSED;
1688
1689
let (prim_cache_address, segments) = if is_segmented {
1690
let segment_instance = &ctx.scratch.segment_instances[segment_instance_index];
1691
let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]);
1692
(gpu_cache.get_address(&segment_instance.gpu_cache_handle), segments)
1693
} else {
1694
(prim_cache_address, None)
1695
};
1696
1697
let prim_header = PrimitiveHeader {
1698
local_rect: picture.precise_local_rect,
1699
local_clip_rect: prim_info.combined_local_clip_rect,
1700
specific_prim_address: prim_cache_address,
1701
transform_id,
1702
};
1703
1704
let prim_header_index = prim_headers.push(
1705
&prim_header,
1706
z_id,
1707
batch_params.prim_user_data,
1708
);
1709
1710
// TODO(gw): As before, all pictures that get blitted are assumed
1711
// to have alpha. However, we could determine (at least for
1712
// simple, common cases) if the picture content is opaque.
1713
// That would allow inner segments of pictures to be drawn
1714
// with blend disabled, which is a big performance win on
1715
// integrated GPUs.
1716
let opacity = PrimitiveOpacity::translucent();
1717
let specified_blend_mode = BlendMode::PremultipliedAlpha;
1718
1719
self.add_segmented_prim_to_batch(
1720
segments,
1721
opacity,
1722
&batch_params,
1723
specified_blend_mode,
1724
non_segmented_blend_mode,
1725
batch_features,
1726
prim_header_index,
1727
bounding_rect,
1728
transform_kind,
1729
render_tasks,
1730
z_id,
1731
prim_info.clip_task_index,
1732
prim_vis_mask,
1733
ctx,
1734
);
1735
}
1736
PictureCompositeMode::SvgFilter(..) => {
1737
let kind = BatchKind::Brush(
1738
BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
1739
);
1740
let (uv_rect_address, textures) = render_tasks.resolve_surface(
1741
surface_task.expect("bug: surface must be allocated by now"),
1742
gpu_cache,
1743
);
1744
let key = BatchKey::new(
1745
kind,
1746
non_segmented_blend_mode,
1747
textures,
1748
);
1749
let prim_header_index = prim_headers.push(
1750
&prim_header,
1751
z_id,
1752
ImageBrushData {
1753
color_mode: ShaderColorMode::Image,
1754
alpha_type: AlphaType::PremultipliedAlpha,
1755
raster_space: RasterizationSpace::Screen,
1756
opacity: 1.0,
1757
}.encode(),
1758
);
1759
1760
self.add_brush_instance_to_batches(
1761
key,
1762
batch_features,
1763
bounding_rect,
1764
z_id,
1765
INVALID_SEGMENT_INDEX,
1766
EdgeAaSegmentMask::empty(),
1767
clip_task_address.unwrap(),
1768
brush_flags,
1769
prim_header_index,
1770
uv_rect_address.as_int(),
1771
prim_vis_mask,
1772
);
1773
}
1774
}
1775
}
1776
None => {
1777
// If this picture is being drawn into an existing target (i.e. with
1778
// no composition operation), recurse and add to the current batch list.
1779
self.add_pic_to_batch(
1780
picture,
1781
ctx,
1782
gpu_cache,
1783
render_tasks,
1784
deferred_resolves,
1785
prim_headers,
1786
transforms,
1787
root_spatial_node_index,
1788
surface_spatial_node_index,
1789
z_generator,
1790
composite_state,
1791
);
1792
}
1793
}
1794
}
1795
PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
1796
let prim_data = &ctx.data_stores.image_border[data_handle];
1797
let common_data = &prim_data.common;
1798
let border_data = &prim_data.kind;
1799
1800
let cache_item = resolve_image(
1801
border_data.request,
1802
ctx.resource_cache,
1803
gpu_cache,
1804
deferred_resolves,
1805
);
1806
if cache_item.texture_id == TextureSource::Invalid {
1807
return;
1808
}
1809
1810
let textures = BatchTextures::color(cache_item.texture_id);
1811
let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
1812
let specified_blend_mode = BlendMode::PremultipliedAlpha;
1813
let non_segmented_blend_mode = if !common_data.opacity.is_opaque ||
1814
prim_info.clip_task_index != ClipTaskIndex::INVALID ||
1815
transform_kind == TransformedRectKind::Complex
1816
{
1817
specified_blend_mode
1818
} else {
1819
BlendMode::None
1820
};
1821
1822
let prim_header = PrimitiveHeader {
1823
local_rect: prim_rect,
1824
local_clip_rect: prim_info.combined_local_clip_rect,
1825
specific_prim_address: prim_cache_address,
1826
transform_id,
1827
};
1828
1829
let batch_params = BrushBatchParameters::shared(
1830
BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
1831
textures,
1832
ImageBrushData {
1833
color_mode: ShaderColorMode::Image,
1834
alpha_type: AlphaType::PremultipliedAlpha,
1835
raster_space: RasterizationSpace::Local,
1836
opacity: 1.0,
1837
}.encode(),
1838
cache_item.uv_rect_handle.as_int(gpu_cache),
1839
);
1840
1841
let prim_header_index = prim_headers.push(
1842
&prim_header,
1843
z_id,
1844
batch_params.prim_user_data,
1845
);
1846
1847
self.add_segmented_prim_to_batch(
1848
Some(border_data.brush_segments.as_slice()),
1849
common_data.opacity,
1850
&batch_params,
1851
specified_blend_mode,
1852
non_segmented_blend_mode,
1853
batch_features,
1854
prim_header_index,
1855
bounding_rect,
1856
transform_kind,
1857
render_tasks,
1858
z_id,
1859
prim_info.clip_task_index,
1860
prim_vis_mask,
1861
ctx,
1862
);
1863
}
1864
PrimitiveInstanceKind::Rectangle { data_handle, segment_instance_index, opacity_binding_index, .. } => {
1865
let prim_data = &ctx.data_stores.prim[data_handle];
1866
let specified_blend_mode = BlendMode::PremultipliedAlpha;
1867
let opacity_binding = ctx.prim_store.get_opacity_binding(opacity_binding_index);
1868
1869
let opacity = PrimitiveOpacity::from_alpha(opacity_binding);
1870
let opacity = opacity.combine(prim_data.opacity);