/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use api::BorderRadius;
use api::units::*;
use euclid::{Point2D, Rect, Size2D, Vector2D};
use euclid::{default, Transform2D, Transform3D, Scale};
use malloc_size_of::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps};
use plane_split::{Clipper, Polygon};
use std::{i32, f32, fmt, ptr};
use std::borrow::Cow;
use std::num::NonZeroUsize;
use std::os::raw::c_void;
use std::sync::Arc;
use std::mem::replace;

// Matches the definition of SK_ScalarNearlyZero in Skia.
const NEARLY_ZERO: f32 = 1.0 / 4096.0;

/// A typesafe helper that separates new value construction from
/// vector growing, allowing LLVM to ideally construct the element in place.
pub struct Allocation<'a, T: 'a> {
    vec: &'a mut Vec<T>,
    index: usize,
}

impl<'a, T> Allocation<'a, T> {
    // Writing is safe because alloc() ensured enough capacity
    // and `Allocation` holds a mutable borrow to prevent anyone else
    // from breaking this invariant.
    #[inline(always)]
    pub fn init(self, value: T) -> usize {
        unsafe {
            ptr::write(self.vec.as_mut_ptr().add(self.index), value);
            self.vec.set_len(self.index + 1);
        }
        self.index
    }
}

/// An entry into a vector, similar to `std::collections::hash_map::Entry`.
pub enum VecEntry<'a, T: 'a> {
    Vacant(Allocation<'a, T>),
    Occupied(&'a mut T),
}

impl<'a, T> VecEntry<'a, T> {
    #[inline(always)]
    pub fn set(self, value: T) {
        match self {
            VecEntry::Vacant(alloc) => { alloc.init(value); }
            VecEntry::Occupied(slot) => { *slot = value; }
        }
    }
}

pub trait VecHelper<T> {
    /// Grows the vector by a single entry, returning the allocation.
    fn alloc(&mut self) -> Allocation<T>;
    /// Either returns an existing element, or grows the vector by one.
    /// Doesn't expect indices to be higher than the current length.
    fn entry(&mut self, index: usize) -> VecEntry<T>;

    /// Equivalent to `mem::replace(&mut vec, Vec::new())`
    fn take(&mut self) -> Self;

    /// Functionally equivalent to `mem::replace(&mut vec, Vec::new())` but tries
    /// to keep the allocation in the caller if it is empty or replace it with a
    /// pre-allocated vector.
    fn take_and_preallocate(&mut self) -> Self;
}

impl<T> VecHelper<T> for Vec<T> {
    fn alloc(&mut self) -> Allocation<T> {
        let index = self.len();
        if self.capacity() == index {
            self.reserve(1);
        }
        Allocation {
            vec: self,
            index,
        }
    }

    fn entry(&mut self, index: usize) -> VecEntry<T> {
        if index < self.len() {
            VecEntry::Occupied(unsafe {
                self.get_unchecked_mut(index)
            })
        } else {
            assert_eq!(index, self.len());
            VecEntry::Vacant(self.alloc())
        }
    }

    fn take(&mut self) -> Self {
        replace(self, Vec::new())
    }

    fn take_and_preallocate(&mut self) -> Self {
        let len = self.len();
        if len == 0 {
            self.clear();
            return Vec::new();
        }
        replace(self, Vec::with_capacity(len + 8))
    }
}
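
// A minimal usage sketch (added for illustration; not part of the upstream file):
// `alloc().init(..)` appends by writing the value directly into the reserved slot
// and returns the new index, while `entry(..).set(..)` either overwrites an
// existing slot or grows the vector by exactly one element.
#[cfg(test)]
mod vec_helper_example {
    use super::*;

    #[test]
    fn alloc_and_entry() {
        let mut v: Vec<u32> = Vec::new();

        // Append via an Allocation; the returned value is the new index.
        let index = v.alloc().init(7);
        assert_eq!(index, 0);
        assert_eq!(v, vec![7]);

        // Occupied entry: overwrite in place.
        v.entry(0).set(8);
        // Vacant entry at the current length: grow by one.
        v.entry(1).set(9);
        assert_eq!(v, vec![8, 9]);
    }
}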

// Represents an optimized transform where there is only
// a scale and translation (which are guaranteed to maintain
// an axis-aligned rectangle under transformation). The
// scaling is applied first, followed by the translation.
// TODO(gw): We should try and incorporate F <-> T units here,
//           but it's a bit tricky to do that now with the
//           way the current spatial tree works.
#[derive(Debug, Clone, Copy, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct ScaleOffset {
    pub scale: default::Vector2D<f32>,
    pub offset: default::Vector2D<f32>,
}

impl ScaleOffset {
    pub fn identity() -> Self {
        ScaleOffset {
            scale: Vector2D::new(1.0, 1.0),
            offset: Vector2D::zero(),
        }
    }

    // Construct a ScaleOffset from a transform. Returns
    // None if the matrix is not a pure scale / translation.
    pub fn from_transform<F, T>(
        m: &Transform3D<f32, F, T>,
    ) -> Option<ScaleOffset> {
        // To check that we have a pure scale / translation:
        // Every field must match an identity matrix, except:
        // - Any value present in tx,ty
        // - Any non-neg value present in sx,sy (avoid negative for reflection/rotation)

        if m.m11 < 0.0 ||
           m.m12.abs() > NEARLY_ZERO ||
           m.m13.abs() > NEARLY_ZERO ||
           m.m14.abs() > NEARLY_ZERO ||
           m.m21.abs() > NEARLY_ZERO ||
           m.m22 < 0.0 ||
           m.m23.abs() > NEARLY_ZERO ||
           m.m24.abs() > NEARLY_ZERO ||
           m.m31.abs() > NEARLY_ZERO ||
           m.m32.abs() > NEARLY_ZERO ||
           (m.m33 - 1.0).abs() > NEARLY_ZERO ||
           m.m34.abs() > NEARLY_ZERO ||
           m.m43.abs() > NEARLY_ZERO ||
           (m.m44 - 1.0).abs() > NEARLY_ZERO {
            return None;
        }

        Some(ScaleOffset {
            scale: Vector2D::new(m.m11, m.m22),
            offset: Vector2D::new(m.m41, m.m42),
        })
    }

    pub fn from_offset(offset: default::Vector2D<f32>) -> Self {
        ScaleOffset {
            scale: Vector2D::new(1.0, 1.0),
            offset,
        }
    }

    pub fn inverse(&self) -> Self {
        ScaleOffset {
            scale: Vector2D::new(
                1.0 / self.scale.x,
                1.0 / self.scale.y,
            ),
            offset: Vector2D::new(
                -self.offset.x / self.scale.x,
                -self.offset.y / self.scale.y,
            ),
        }
    }

    pub fn offset(&self, offset: default::Vector2D<f32>) -> Self {
        self.accumulate(
            &ScaleOffset {
                scale: Vector2D::new(1.0, 1.0),
                offset,
            }
        )
    }

    pub fn scale(&self, scale: f32) -> Self {
        self.accumulate(
            &ScaleOffset {
                scale: Vector2D::new(scale, scale),
                offset: Vector2D::zero(),
            }
        )
    }

    /// Produce a ScaleOffset that includes both self and other.
    /// The 'self' ScaleOffset is applied after other.
    /// This is equivalent to `Transform3D::pre_transform`.
    pub fn accumulate(&self, other: &ScaleOffset) -> Self {
        ScaleOffset {
            scale: Vector2D::new(
                self.scale.x * other.scale.x,
                self.scale.y * other.scale.y,
            ),
            offset: Vector2D::new(
                self.offset.x + self.scale.x * other.offset.x,
                self.offset.y + self.scale.y * other.offset.y,
            ),
        }
    }

    pub fn map_rect<F, T>(&self, rect: &Rect<f32, F>) -> Rect<f32, T> {
        Rect::new(
            Point2D::new(
                rect.origin.x * self.scale.x + self.offset.x,
                rect.origin.y * self.scale.y + self.offset.y,
            ),
            Size2D::new(
                rect.size.width * self.scale.x,
                rect.size.height * self.scale.y,
            )
        )
    }

    pub fn unmap_rect<F, T>(&self, rect: &Rect<f32, F>) -> Rect<f32, T> {
        Rect::new(
            Point2D::new(
                (rect.origin.x - self.offset.x) / self.scale.x,
                (rect.origin.y - self.offset.y) / self.scale.y,
            ),
            Size2D::new(
                rect.size.width / self.scale.x,
                rect.size.height / self.scale.y,
            )
        )
    }

    pub fn map_vector<F, T>(&self, vector: &Vector2D<f32, F>) -> Vector2D<f32, T> {
        Vector2D::new(
            vector.x * self.scale.x + self.offset.x,
            vector.y * self.scale.y + self.offset.y,
        )
    }

    pub fn unmap_vector<F, T>(&self, vector: &Vector2D<f32, F>) -> Vector2D<f32, T> {
        Vector2D::new(
            (vector.x - self.offset.x) / self.scale.x,
            (vector.y - self.offset.y) / self.scale.y,
        )
    }

    pub fn to_transform<F, T>(&self) -> Transform3D<f32, F, T> {
        Transform3D::row_major(
            self.scale.x,
            0.0,
            0.0,
            0.0,

            0.0,
            self.scale.y,
            0.0,
            0.0,

            0.0,
            0.0,
            1.0,
            0.0,

            self.offset.x,
            self.offset.y,
            0.0,
            1.0,
        )
    }
}
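
// An illustrative sketch (added here, not part of the upstream file): mapping a
// rect through a `ScaleOffset` applies the scale first, then the translation, and
// `unmap_rect` undoes it. `LayoutRect`/`WorldRect` from `api::units` are used only
// as convenient typed units; the concrete numbers are arbitrary and chosen so the
// arithmetic is exact in f32.
#[cfg(test)]
mod scale_offset_example {
    use super::*;

    #[test]
    fn map_and_unmap_rect_round_trip() {
        let so = ScaleOffset {
            scale: Vector2D::new(2.0, 3.0),
            offset: Vector2D::new(10.0, 20.0),
        };
        let local: LayoutRect = Rect::new(Point2D::new(1.0, 1.0), Size2D::new(4.0, 2.0));

        // Scale is applied first, followed by the translation.
        let mapped: WorldRect = so.map_rect(&local);
        assert_eq!(mapped, Rect::new(Point2D::new(12.0, 23.0), Size2D::new(8.0, 6.0)));

        // Unmapping recovers the original rect exactly for these values.
        let unmapped: LayoutRect = so.unmap_rect(&mapped);
        assert_eq!(unmapped, local);
    }
}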

// TODO: Implement these in euclid!
pub trait MatrixHelpers<Src, Dst> {
    /// A port of the preserves2dAxisAlignment function in Skia.
    /// Defined in the SkMatrix44 class.
    fn preserves_2d_axis_alignment(&self) -> bool;
    fn has_perspective_component(&self) -> bool;
    fn has_2d_inverse(&self) -> bool;
    /// Check if the matrix post-scaling on either the X or Y axes could cause geometry
    /// transformed by this matrix to have scaling exceeding the supplied limit.
    fn exceeds_2d_scale(&self, limit: f64) -> bool;
    fn inverse_project(&self, target: &Point2D<f32, Dst>) -> Option<Point2D<f32, Src>>;
    fn inverse_rect_footprint(&self, rect: &Rect<f32, Dst>) -> Option<Rect<f32, Src>>;
    fn transform_kind(&self) -> TransformedRectKind;
    fn is_simple_translation(&self) -> bool;
    fn is_simple_2d_translation(&self) -> bool;
    /// Return the determinant of the 2D part of the matrix.
    fn determinant_2d(&self) -> f32;
    /// This function returns a point in the `Src` space that projects into zero XY.
    /// It ignores the Z coordinate and is usable for "flattened" transformations,
    /// since they are not generally invertible.
    fn inverse_project_2d_origin(&self) -> Option<Point2D<f32, Src>>;
    /// Turn Z transformation into identity. This is useful when crossing "flat"
    /// transform styled stacking contexts upon traversing the coordinate systems.
    fn flatten_z_output(&mut self);
}

impl<Src, Dst> MatrixHelpers<Src, Dst> for Transform3D<f32, Src, Dst> {
    fn preserves_2d_axis_alignment(&self) -> bool {
        if self.m14 != 0.0 || self.m24 != 0.0 {
            return false;
        }

        let mut col0 = 0;
        let mut col1 = 0;
        let mut row0 = 0;
        let mut row1 = 0;

        if self.m11.abs() > NEARLY_ZERO {
            col0 += 1;
            row0 += 1;
        }
        if self.m12.abs() > NEARLY_ZERO {
            col1 += 1;
            row0 += 1;
        }
        if self.m21.abs() > NEARLY_ZERO {
            col0 += 1;
            row1 += 1;
        }
        if self.m22.abs() > NEARLY_ZERO {
            col1 += 1;
            row1 += 1;
        }

        col0 < 2 && col1 < 2 && row0 < 2 && row1 < 2
    }

    fn has_perspective_component(&self) -> bool {
        self.m14.abs() > NEARLY_ZERO ||
            self.m24.abs() > NEARLY_ZERO ||
            self.m34.abs() > NEARLY_ZERO ||
            (self.m44 - 1.0).abs() > NEARLY_ZERO
    }

    fn has_2d_inverse(&self) -> bool {
        self.determinant_2d() != 0.0
    }

    fn exceeds_2d_scale(&self, limit: f64) -> bool {
        let limit2 = (limit * limit) as f32;
        self.m11 * self.m11 + self.m12 * self.m12 > limit2 ||
            self.m21 * self.m21 + self.m22 * self.m22 > limit2
    }

    fn inverse_project(&self, target: &Point2D<f32, Dst>) -> Option<Point2D<f32, Src>> {
        let m: Transform2D<f32, Src, Dst> = Transform2D::column_major(
            self.m11 - target.x * self.m14,
            self.m21 - target.x * self.m24,
            self.m41 - target.x * self.m44,
            self.m12 - target.y * self.m14,
            self.m22 - target.y * self.m24,
            self.m42 - target.y * self.m44,
        );
        m.inverse().map(|inv| Point2D::new(inv.m31, inv.m32))
    }

    fn inverse_rect_footprint(&self, rect: &Rect<f32, Dst>) -> Option<Rect<f32, Src>> {
        Some(Rect::from_points(&[
            self.inverse_project(&rect.origin)?,
            self.inverse_project(&rect.top_right())?,
            self.inverse_project(&rect.bottom_left())?,
            self.inverse_project(&rect.bottom_right())?,
        ]))
    }

    fn transform_kind(&self) -> TransformedRectKind {
        if self.preserves_2d_axis_alignment() {
            TransformedRectKind::AxisAligned
        } else {
            TransformedRectKind::Complex
        }
    }

    fn is_simple_translation(&self) -> bool {
        if (self.m11 - 1.0).abs() > NEARLY_ZERO ||
            (self.m22 - 1.0).abs() > NEARLY_ZERO ||
            (self.m33 - 1.0).abs() > NEARLY_ZERO ||
            (self.m44 - 1.0).abs() > NEARLY_ZERO {
            return false;
        }

        self.m12.abs() < NEARLY_ZERO && self.m13.abs() < NEARLY_ZERO &&
            self.m14.abs() < NEARLY_ZERO && self.m21.abs() < NEARLY_ZERO &&
            self.m23.abs() < NEARLY_ZERO && self.m24.abs() < NEARLY_ZERO &&
            self.m31.abs() < NEARLY_ZERO && self.m32.abs() < NEARLY_ZERO &&
            self.m34.abs() < NEARLY_ZERO
    }

    fn is_simple_2d_translation(&self) -> bool {
        if !self.is_simple_translation() {
            return false;
        }

        self.m43.abs() < NEARLY_ZERO
    }

    fn determinant_2d(&self) -> f32 {
        self.m11 * self.m22 - self.m12 * self.m21
    }

    fn inverse_project_2d_origin(&self) -> Option<Point2D<f32, Src>> {
        let det = self.determinant_2d();
        if det != 0.0 {
            let x = (self.m21 * self.m42 - self.m41 * self.m22) / det;
            let y = (self.m12 * self.m41 - self.m11 * self.m42) / det;
            Some(Point2D::new(x, y))
        } else {
            None
        }
    }

    fn flatten_z_output(&mut self) {
        self.m13 = 0.0;
        self.m23 = 0.0;
        self.m33 = 1.0;
        self.m43 = 0.0;
    }
}
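
// A small illustrative check (added for this write-up; not in the upstream file):
// a quarter-turn rotation keeps rectangles axis-aligned (its cosine terms fall
// below NEARLY_ZERO), while a 45 degree rotation does not. It only relies on the
// euclid constructors already used by the test module below.
#[cfg(test)]
mod axis_alignment_example {
    use super::*;
    use euclid::Angle;
    use euclid::default::Transform3D;
    use std::f32::consts::FRAC_PI_2;

    #[test]
    fn rotations_and_axis_alignment() {
        let quarter_turn = Transform3D::create_rotation(0.0, 0.0, 1.0, Angle::radians(FRAC_PI_2));
        assert!(quarter_turn.preserves_2d_axis_alignment());
        assert_eq!(quarter_turn.transform_kind(), TransformedRectKind::AxisAligned);

        let eighth_turn = Transform3D::create_rotation(0.0, 0.0, 1.0, Angle::radians(FRAC_PI_2 / 2.0));
        assert!(!eighth_turn.preserves_2d_axis_alignment());
        assert_eq!(eighth_turn.transform_kind(), TransformedRectKind::Complex);
    }
}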

pub trait PointHelpers<U>
where
    Self: Sized,
{
    fn snap(&self) -> Self;
}

impl<U> PointHelpers<U> for Point2D<f32, U> {
    fn snap(&self) -> Self {
        Point2D::new(
            (self.x + 0.5).floor(),
            (self.y + 0.5).floor(),
        )
    }
}

pub trait RectHelpers<U>
where
    Self: Sized,
{
    fn from_floats(x0: f32, y0: f32, x1: f32, y1: f32) -> Self;
    fn is_well_formed_and_nonempty(&self) -> bool;
    fn snap(&self) -> Self;
}

impl<U> RectHelpers<U> for Rect<f32, U> {
    fn from_floats(x0: f32, y0: f32, x1: f32, y1: f32) -> Self {
        Rect::new(
            Point2D::new(x0, y0),
            Size2D::new(x1 - x0, y1 - y0),
        )
    }

    fn is_well_formed_and_nonempty(&self) -> bool {
        self.size.width > 0.0 && self.size.height > 0.0
    }

    fn snap(&self) -> Self {
        let origin = Point2D::new(
            (self.origin.x + 0.5).floor(),
            (self.origin.y + 0.5).floor(),
        );
        Rect::new(
            origin,
            Size2D::new(
                (self.origin.x + self.size.width + 0.5).floor() - origin.x,
                (self.origin.y + self.size.height + 0.5).floor() - origin.y,
            ),
        )
    }
}

pub trait VectorHelpers<U>
where
    Self: Sized,
{
    fn snap(&self) -> Self;
}

impl<U> VectorHelpers<U> for Vector2D<f32, U> {
    fn snap(&self) -> Self {
        Vector2D::new(
            (self.x + 0.5).floor(),
            (self.y + 0.5).floor(),
        )
    }
}

pub fn lerp(a: f32, b: f32, t: f32) -> f32 {
    (b - a) * t + a
}

#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum TransformedRectKind {
    AxisAligned = 0,
    Complex = 1,
}

#[inline(always)]
pub fn pack_as_float(value: u32) -> f32 {
    value as f32 + 0.5
}

#[inline]
fn extract_inner_rect_impl<U>(
    rect: &Rect<f32, U>,
    radii: &BorderRadius,
    k: f32,
) -> Option<Rect<f32, U>> {
    // `k` defines how much of the border is taken into account.
    // We enforce the offsets to be rounded to pixel boundaries
    // by `ceil`-ing and `floor`-ing them.

    let xl = (k * radii.top_left.width.max(radii.bottom_left.width)).ceil();
    let xr = (rect.size.width - k * radii.top_right.width.max(radii.bottom_right.width)).floor();
    let yt = (k * radii.top_left.height.max(radii.top_right.height)).ceil();
    let yb =
        (rect.size.height - k * radii.bottom_left.height.max(radii.bottom_right.height)).floor();

    if xl <= xr && yt <= yb {
        Some(Rect::new(
            Point2D::new(rect.origin.x + xl, rect.origin.y + yt),
            Size2D::new(xr - xl, yb - yt),
        ))
    } else {
        None
    }
}

/// Return an aligned rectangle that is inside the clip region and doesn't intersect
/// any of the bounding rectangles of the rounded corners.
pub fn extract_inner_rect_safe<U>(
    rect: &Rect<f32, U>,
    radii: &BorderRadius,
) -> Option<Rect<f32, U>> {
    // A value of `k == 1.0` is used for extraction of the corner rectangles;
    // see `SEGMENT_CORNER_*` in `clip_shared.glsl`.
    extract_inner_rect_impl(rect, radii, 1.0)
}
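
// A worked example (added for illustration; not part of the upstream file): for a
// 100x100 rect with uniform 10px corner radii, the returned inner rect avoids all
// four corner bounding boxes and is inset by 10px on each side. The `BorderRadius`
// field names match the accessors used above; using `LayoutSize` for them is an
// assumption about the `api` types.
#[cfg(test)]
mod inner_rect_example {
    use super::*;

    #[test]
    fn inner_rect_of_rounded_rect() {
        let radii = BorderRadius {
            top_left: LayoutSize::new(10.0, 10.0),
            top_right: LayoutSize::new(10.0, 10.0),
            bottom_left: LayoutSize::new(10.0, 10.0),
            bottom_right: LayoutSize::new(10.0, 10.0),
        };
        let rect: LayoutRect = Rect::new(Point2D::new(0.0, 0.0), Size2D::new(100.0, 100.0));

        assert_eq!(
            extract_inner_rect_safe(&rect, &radii),
            Some(Rect::new(Point2D::new(10.0, 10.0), Size2D::new(80.0, 80.0)))
        );
    }
}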

#[cfg(test)]
pub mod test {
    use super::*;
    use euclid::default::{Point2D, Transform3D};
    use euclid::Angle;
    use std::f32::consts::PI;

    #[test]
    fn inverse_project() {
        let m0 = Transform3D::identity();
        let p0 = Point2D::new(1.0, 2.0);
        // An identity transform doesn't need any inverse projection.
        assert_eq!(m0.inverse_project(&p0), Some(p0));
        let m1 = Transform3D::create_rotation(0.0, 1.0, 0.0, Angle::radians(PI / 3.0));
        // Rotation by 60 degrees would imply scaling of the X component by a factor of 2.
        assert_eq!(m1.inverse_project(&p0), Some(Point2D::new(2.0, 2.0)));
    }

    fn validate_convert(xref: &LayoutTransform) {
        let so = ScaleOffset::from_transform(xref).unwrap();
        let xf = so.to_transform();
        assert!(xref.approx_eq(&xf));
    }

    #[test]
    fn scale_offset_convert() {
        let xref = LayoutTransform::create_translation(130.0, 200.0, 0.0);
        validate_convert(&xref);

        let xref = LayoutTransform::create_scale(13.0, 8.0, 1.0);
        validate_convert(&xref);

        let xref = LayoutTransform::create_scale(0.5, 0.5, 1.0)
            .pre_translate(LayoutVector3D::new(124.0, 38.0, 0.0));
        validate_convert(&xref);

        let xref = LayoutTransform::create_translation(50.0, 240.0, 0.0)
            .pre_transform(&LayoutTransform::create_scale(30.0, 11.0, 1.0));
        validate_convert(&xref);
    }

    fn validate_inverse(xref: &LayoutTransform) {
        let s0 = ScaleOffset::from_transform(xref).unwrap();
        let s1 = s0.inverse().accumulate(&s0);
        assert!((s1.scale.x - 1.0).abs() < NEARLY_ZERO &&
                (s1.scale.y - 1.0).abs() < NEARLY_ZERO &&
                s1.offset.x.abs() < NEARLY_ZERO &&
                s1.offset.y.abs() < NEARLY_ZERO,
                "{:?}",
                s1);
    }

    #[test]
    fn scale_offset_inverse() {
        let xref = LayoutTransform::create_translation(130.0, 200.0, 0.0);
        validate_inverse(&xref);

        let xref = LayoutTransform::create_scale(13.0, 8.0, 1.0);
        validate_inverse(&xref);

        let xref = LayoutTransform::create_scale(0.5, 0.5, 1.0)
            .pre_translate(LayoutVector3D::new(124.0, 38.0, 0.0));
        validate_inverse(&xref);

        let xref = LayoutTransform::create_translation(50.0, 240.0, 0.0)
            .pre_transform(&LayoutTransform::create_scale(30.0, 11.0, 1.0));
        validate_inverse(&xref);
    }

    fn validate_accumulate(x0: &LayoutTransform, x1: &LayoutTransform) {
        let x = x0.pre_transform(x1);

        let s0 = ScaleOffset::from_transform(x0).unwrap();
        let s1 = ScaleOffset::from_transform(x1).unwrap();

        let s = s0.accumulate(&s1).to_transform();

        assert!(x.approx_eq(&s), "{:?}\n{:?}", x, s);
    }

    #[test]
    fn scale_offset_accumulate() {
        let x0 = LayoutTransform::create_translation(130.0, 200.0, 0.0);
        let x1 = LayoutTransform::create_scale(7.0, 3.0, 1.0);

        validate_accumulate(&x0, &x1);
    }

    #[test]
    fn inverse_project_2d_origin() {
        let mut m = Transform3D::identity();
        assert_eq!(m.inverse_project_2d_origin(), Some(Point2D::zero()));
        m.m11 = 0.0;
        assert_eq!(m.inverse_project_2d_origin(), None);
        m.m21 = -2.0;
        m.m22 = 0.0;
        m.m12 = -0.5;
        m.m41 = 1.0;
        m.m42 = 0.5;
        let origin = m.inverse_project_2d_origin().unwrap();
        assert_eq!(origin, Point2D::new(1.0, 0.5));
        assert_eq!(m.transform_point2d(origin), Some(Point2D::zero()));
    }
}

pub trait MaxRect {
    fn max_rect() -> Self;
}

impl MaxRect for DeviceIntRect {
    fn max_rect() -> Self {
        DeviceIntRect::new(
            DeviceIntPoint::new(i32::MIN / 2, i32::MIN / 2),
            DeviceIntSize::new(i32::MAX, i32::MAX),
        )
    }
}

impl<U> MaxRect for Rect<f32, U> {
    fn max_rect() -> Self {
        // Having an unlimited bounding box is fine up until we try
        // to cast it to `i32`, where we get `-2147483648` for any
        // values larger than or equal to 2^31.
        //
        // Note: clamping to i32::MIN and i32::MAX is not a solution,
        // with explanation left as an exercise for the reader.
        const MAX_COORD: f32 = 1.0e9;

        Rect::new(
            Point2D::new(-MAX_COORD, -MAX_COORD),
            Size2D::new(2.0 * MAX_COORD, 2.0 * MAX_COORD),
        )
    }
}

/// An enum that tries to avoid expensive transformation matrix calculations
/// where possible, when dealing with non-perspective axis-aligned transformations.
#[derive(Debug, MallocSizeOf)]
pub enum FastTransform<Src, Dst> {
    /// A simple offset, which can be used without doing any matrix math.
    Offset(Vector2D<f32, Src>),

    /// A 2D transformation with an inverse.
    Transform {
        transform: Transform3D<f32, Src, Dst>,
        inverse: Option<Transform3D<f32, Dst, Src>>,
        is_2d: bool,
    },
}

impl<Src, Dst> Clone for FastTransform<Src, Dst> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<Src, Dst> Copy for FastTransform<Src, Dst> { }

impl<Src, Dst> FastTransform<Src, Dst> {
    pub fn identity() -> Self {
        FastTransform::Offset(Vector2D::zero())
    }

    pub fn with_vector(offset: Vector2D<f32, Src>) -> Self {
        FastTransform::Offset(offset)
    }

    pub fn with_scale_offset(scale_offset: ScaleOffset) -> Self {
        if scale_offset.scale == Vector2D::new(1.0, 1.0) {
            FastTransform::Offset(Vector2D::from_untyped(scale_offset.offset))
        } else {
            FastTransform::Transform {
                transform: scale_offset.to_transform(),
                inverse: Some(scale_offset.inverse().to_transform()),
                is_2d: true,
            }
        }
    }

    #[inline(always)]
    pub fn with_transform(transform: Transform3D<f32, Src, Dst>) -> Self {
        if transform.is_simple_2d_translation() {
            return FastTransform::Offset(Vector2D::new(transform.m41, transform.m42));
        }
        let inverse = transform.inverse();
        let is_2d = transform.is_2d();
        FastTransform::Transform { transform, inverse, is_2d }
    }

    pub fn to_transform(&self) -> Cow<Transform3D<f32, Src, Dst>> {
        match *self {
            FastTransform::Offset(offset) => Cow::Owned(
                Transform3D::create_translation(offset.x, offset.y, 0.0)
            ),
            FastTransform::Transform { ref transform, .. } => Cow::Borrowed(transform),
        }
    }

    /// Return true if this is an identity transform
    #[allow(unused)]
    pub fn is_identity(&self) -> bool {
        match *self {
            FastTransform::Offset(offset) => {
                offset == Vector2D::zero()
            }
            FastTransform::Transform { ref transform, .. } => {
                *transform == Transform3D::identity()
            }
        }
    }

    pub fn post_transform<NewDst>(&self, other: &FastTransform<Dst, NewDst>) -> FastTransform<Src, NewDst> {
        match *self {
            FastTransform::Offset(offset) => match *other {
                FastTransform::Offset(other_offset) => {
                    FastTransform::Offset(offset + other_offset * Scale::<_, _, Src>::new(1.0))
                }
                FastTransform::Transform { transform: ref other_transform, .. } => {
                    FastTransform::with_transform(
                        other_transform
                            .with_source::<Src>()
                            .pre_translate(offset.to_3d())
                    )
                }
            }
            FastTransform::Transform { ref transform, ref inverse, is_2d } => match *other {
                FastTransform::Offset(other_offset) => {
                    FastTransform::with_transform(
                        transform
                            .post_translate(other_offset.to_3d())
                            .with_destination::<NewDst>()
                    )
                }
                FastTransform::Transform { transform: ref other_transform, inverse: ref other_inverse, is_2d: other_is_2d } => {
                    FastTransform::Transform {
                        transform: transform.post_transform(other_transform),
                        inverse: inverse.as_ref().and_then(|self_inv|
                            other_inverse.as_ref().map(|other_inv| self_inv.pre_transform(other_inv))
                        ),
                        is_2d: is_2d & other_is_2d,
                    }
                }
            }
        }
    }

    pub fn pre_transform<NewSrc>(
        &self,
        other: &FastTransform<NewSrc, Src>
    ) -> FastTransform<NewSrc, Dst> {
        other.post_transform(self)
    }

    pub fn pre_translate(&self, other_offset: Vector2D<f32, Src>) -> Self {
        match *self {
            FastTransform::Offset(offset) =>
                FastTransform::Offset(offset + other_offset),
            FastTransform::Transform { transform, .. } =>
                FastTransform::with_transform(transform.pre_translate(other_offset.to_3d()))
        }
    }

    pub fn post_translate(&self, other_offset: Vector2D<f32, Dst>) -> Self {
        match *self {
            FastTransform::Offset(offset) => {
                FastTransform::Offset(offset + other_offset * Scale::<_, _, Src>::new(1.0))
            }
            FastTransform::Transform { ref transform, .. } => {
                let transform = transform.post_translate(other_offset.to_3d());
                FastTransform::with_transform(transform)
            }
        }
    }

    #[inline(always)]
    pub fn is_backface_visible(&self) -> bool {
        match *self {
            FastTransform::Offset(..) => false,
            FastTransform::Transform { inverse: None, .. } => false,
            //TODO: fix this properly by taking "det|M33| * det|M34| > 0"
            FastTransform::Transform { inverse: Some(ref inverse), .. } => inverse.m33 < 0.0,
        }
    }

    #[inline(always)]
    pub fn transform_point2d(&self, point: Point2D<f32, Src>) -> Option<Point2D<f32, Dst>> {
        match *self {
            FastTransform::Offset(offset) => {
                let new_point = point + offset;
                Some(Point2D::from_untyped(new_point.to_untyped()))
            }
            FastTransform::Transform { ref transform, .. } => transform.transform_point2d(point),
        }
    }

    #[inline(always)]
    pub fn inverse(&self) -> Option<FastTransform<Dst, Src>> {
        match *self {
            FastTransform::Offset(offset) =>
                Some(FastTransform::Offset(Vector2D::new(-offset.x, -offset.y))),
            FastTransform::Transform { transform, inverse: Some(inverse), is_2d, } =>
                Some(FastTransform::Transform {
                    transform: inverse,
                    inverse: Some(transform),
                    is_2d
                }),
            FastTransform::Transform { inverse: None, .. } => None,
        }
    }
}
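
// A small usage sketch (added for illustration; not part of the upstream file):
// a pure 2D translation collapses into the cheap `Offset` variant, and points are
// still transformed correctly through it. The values are arbitrary.
#[cfg(test)]
mod fast_transform_example {
    use super::*;

    #[test]
    fn translation_uses_offset_variant() {
        let t: LayoutToWorldFastTransform = FastTransform::with_transform(
            Transform3D::create_translation(10.0, 20.0, 0.0),
        );

        // No matrix is stored for a simple translation.
        match t {
            FastTransform::Offset(offset) => assert_eq!(offset, Vector2D::new(10.0, 20.0)),
            FastTransform::Transform { .. } => panic!("expected the Offset fast path"),
        }

        assert_eq!(
            t.transform_point2d(Point2D::new(1.0, 2.0)),
            Some(Point2D::new(11.0, 22.0))
        );
    }
}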

impl<Src, Dst> From<Transform3D<f32, Src, Dst>> for FastTransform<Src, Dst> {
    fn from(transform: Transform3D<f32, Src, Dst>) -> Self {
        FastTransform::with_transform(transform)
    }
}

impl<Src, Dst> From<Vector2D<f32, Src>> for FastTransform<Src, Dst> {
    fn from(vector: Vector2D<f32, Src>) -> Self {
        FastTransform::with_vector(vector)
    }
}

pub type LayoutFastTransform = FastTransform<LayoutPixel, LayoutPixel>;
pub type LayoutToWorldFastTransform = FastTransform<LayoutPixel, WorldPixel>;

pub fn project_rect<F, T>(
    transform: &Transform3D<f32, F, T>,
    rect: &Rect<f32, F>,
    bounds: &Rect<f32, T>,
) -> Option<Rect<f32, T>>
where
    F: fmt::Debug,
{
    let homogens = [
        transform.transform_point2d_homogeneous(rect.origin),
        transform.transform_point2d_homogeneous(rect.top_right()),
        transform.transform_point2d_homogeneous(rect.bottom_left()),
        transform.transform_point2d_homogeneous(rect.bottom_right()),
    ];

    // Note: we only do the full frustum collision when the polygon approaches the camera plane.
    // Otherwise, it will be clamped to the screen bounds anyway.
    if homogens.iter().any(|h| h.w <= 0.0 || h.w.is_nan()) {
        let mut clipper = Clipper::new();
        let polygon = Polygon::from_rect(*rect, 1);

        let planes = match Clipper::<_, _, usize>::frustum_planes(
            transform,
            Some(*bounds),
        ) {
            Ok(planes) => planes,
            Err(..) => return None,
        };

        for plane in planes {
            clipper.add(plane);
        }

        let results = clipper.clip(polygon);
        if results.is_empty() {
            return None
        }

        Some(Rect::from_points(results
            .into_iter()
            // filter out parts behind the view plane
            .flat_map(|poly| &poly.points)
            .map(|p| {
                let mut homo = transform.transform_point2d_homogeneous(p.to_2d());
                homo.w = homo.w.max(0.00000001); // avoid infinite values
                homo.to_point2d().unwrap()
            })
        ))
    } else {
        // we just checked for all the points to be in positive hemisphere, so `unwrap` is valid
        Some(Rect::from_points(&[
            homogens[0].to_point2d().unwrap(),
            homogens[1].to_point2d().unwrap(),
            homogens[2].to_point2d().unwrap(),
            homogens[3].to_point2d().unwrap(),
        ]))
    }
}
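
// A minimal sketch (added for illustration; not part of the upstream file): with
// no perspective involved, every projected corner stays in front of the camera,
// so `project_rect` simply transforms the four corners and rebuilds the rect.
#[cfg(test)]
mod project_rect_example {
    use super::*;

    #[test]
    fn identity_projection_returns_the_same_rect() {
        let transform = Transform3D::<f32, LayoutPixel, WorldPixel>::identity();
        let rect: LayoutRect = Rect::new(Point2D::new(10.0, 10.0), Size2D::new(20.0, 30.0));
        let bounds: WorldRect = Rect::new(Point2D::new(0.0, 0.0), Size2D::new(100.0, 100.0));

        assert_eq!(
            project_rect(&transform, &rect, &bounds),
            Some(Rect::new(Point2D::new(10.0, 10.0), Size2D::new(20.0, 30.0)))
        );
    }
}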

pub fn raster_rect_to_device_pixels(
    rect: RasterRect,
    device_pixel_scale: DevicePixelScale,
) -> DeviceRect {
    let world_rect = rect * Scale::new(1.0);
    let device_rect = world_rect * device_pixel_scale;
    device_rect.round_out()
}

/// Run the first callback over all elements in the array. If the callback returns true,
/// the element is removed from the array and moved to a second callback.
///
/// This is a simple implementation waiting for Vec::drain_filter to be stable.
/// When that happens, code like:
///
/// let filter = |op| {
///     match *op {
///         Enum::Foo | Enum::Bar => true,
///         Enum::Baz => false,
///     }
/// };
/// drain_filter(
///     &mut ops,
///     filter,
///     |op| {
///         match op {
///             Enum::Foo => { foo(); }
///             Enum::Bar => { bar(); }
///             Enum::Baz => { unreachable!(); }
///         }
///     },
/// );
///
/// Can be rewritten as:
///
/// let filter = |op| {
///     match *op {
///         Enum::Foo | Enum::Bar => true,
///         Enum::Baz => false,
///     }
/// };
/// for op in ops.drain_filter(filter) {
///     match op {
///         Enum::Foo => { foo(); }
///         Enum::Bar => { bar(); }
///         Enum::Baz => { unreachable!(); }
///     }
/// }
///
pub fn drain_filter<T, Filter, Action>(
    vec: &mut Vec<T>,
    mut filter: Filter,
    mut action: Action,
)
where
    Filter: FnMut(&mut T) -> bool,
    Action: FnMut(T)
{
    let mut i = 0;
    while i != vec.len() {
        if filter(&mut vec[i]) {
            action(vec.remove(i));
        } else {
            i += 1;
        }
    }
}
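
// A concrete usage sketch (added for illustration; not part of the upstream file):
// elements matched by the filter are removed and handed to the action callback,
// while the rest stay in the vector in their original order.
#[cfg(test)]
mod drain_filter_example {
    use super::*;

    #[test]
    fn splits_elements_between_vec_and_action() {
        let mut values = vec![1, 2, 3, 4, 5];
        let mut removed = Vec::new();

        drain_filter(
            &mut values,
            |v| *v % 2 == 0,
            |v| removed.push(v),
        );

        assert_eq!(values, vec![1, 3, 5]);
        assert_eq!(removed, vec![2, 4]);
    }
}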

#[derive(Debug)]
pub struct Recycler {
    pub num_allocations: usize,
}

impl Recycler {
    /// Maximum extra capacity that a recycled vector is allowed to have. If the actual capacity
    /// is larger, we re-allocate the vector storage with lower capacity.
    const MAX_EXTRA_CAPACITY_PERCENT: usize = 200;
    /// Minimum extra capacity to keep when re-allocating the vector storage.
    const MIN_EXTRA_CAPACITY_PERCENT: usize = 20;
    /// Minimum sensible vector length to consider for re-allocation.
    const MIN_VECTOR_LENGTH: usize = 16;

    pub fn new() -> Self {
        Recycler {
            num_allocations: 0,
        }
    }

    /// Clear a vector for re-use, while retaining the backing memory buffer. May shrink the buffer
    /// if it's currently much larger than was actually used.
    pub fn recycle_vec<T>(&mut self, vec: &mut Vec<T>) {
        let extra_capacity = (vec.capacity() - vec.len()) * 100 / vec.len().max(Self::MIN_VECTOR_LENGTH);

        if extra_capacity > Self::MAX_EXTRA_CAPACITY_PERCENT {
            // Reduce the capacity of the buffer if it is a lot larger than it needs to be.
            // This prevents a frame with exceptionally large allocations from causing
            // subsequent frames to retain more memory than they need.
            //TODO: use `shrink_to` when it's stable
            *vec = Vec::with_capacity(vec.len() + vec.len() * Self::MIN_EXTRA_CAPACITY_PERCENT / 100);
            self.num_allocations += 1;
        } else {
            vec.clear();
        }
    }
}
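
// A behavioural sketch (added for illustration; not part of the upstream file):
// a vector whose capacity vastly exceeds what was used is re-allocated with only
// a small amount of slack, while a reasonably sized one would merely be cleared.
#[cfg(test)]
mod recycler_example {
    use super::*;

    #[test]
    fn oversized_buffers_are_shrunk() {
        let mut recycler = Recycler::new();

        let mut vec: Vec<u8> = Vec::with_capacity(4096);
        vec.extend_from_slice(&[1, 2, 3, 4]);

        recycler.recycle_vec(&mut vec);

        // The vector is always left empty, and the oversized buffer was replaced.
        assert!(vec.is_empty());
        assert!(vec.capacity() < 4096);
        assert_eq!(recycler.num_allocations, 1);
    }
}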

/// Arc wrapper to support measurement via MallocSizeOf.
///
/// Memory reporting for Arcs is tricky because of the risk of double-counting.
/// One way to measure them is to keep a table of pointers that have already been
/// traversed. The other way is to use knowledge of the program structure to
/// identify which Arc instances should be measured and which should be skipped to
/// avoid double-counting.
///
/// This struct implements the second approach. It identifies the "main" pointer
/// to the Arc-ed resource, and measures the buffer as if it were an owned pointer.
/// The programmer should ensure that there is at most one PrimaryArc for a given
/// underlying ArcInner.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct PrimaryArc<T>(pub Arc<T>);

impl<T> ::std::ops::Deref for PrimaryArc<T> {
    type Target = Arc<T>;

    #[inline]
    fn deref(&self) -> &Arc<T> {
        &self.0
    }
}

impl<T> MallocShallowSizeOf for PrimaryArc<T> {
    fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        unsafe {
            // This is a bit sketchy, but std::sync::Arc doesn't expose the
            // base pointer.
            let raw_arc_ptr: *const Arc<T> = &self.0;
            let raw_ptr_ptr: *const *const c_void = raw_arc_ptr as _;
            let raw_ptr = *raw_ptr_ptr;
            (ops.size_of_op)(raw_ptr)
        }
    }
}

impl<T: MallocSizeOf> MallocSizeOf for PrimaryArc<T> {
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        self.shallow_size_of(ops) + (**self).size_of(ops)
    }
}

/// Computes the scale factors of this matrix; that is,
/// the amounts each basis vector is scaled by.
///
/// This code comes from gecko gfx/2d/Matrix.h with the following
/// modifications:
///
/// * Removed `xMajor` parameter.
pub fn scale_factors<Src, Dst>(
    mat: &Transform3D<f32, Src, Dst>
) -> (f32, f32) {
    // Determinant is just of the 2D component.
    let det = mat.m11 * mat.m22 - mat.m12 * mat.m21;
    if det == 0.0 {
        return (0.0, 0.0);
    }

    // ignore mirroring
    let det = det.abs();

    let major = (mat.m11 * mat.m11 + mat.m12 * mat.m12).sqrt();
    let minor = if major != 0.0 { det / major } else { 0.0 };

    (major, minor)
}
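
// A worked example (added for illustration; not part of the upstream file): for a
// pure scale of (2, 3) the function returns (2.0, 3.0): the first factor comes
// from the X basis vector and the second from the determinant divided by it.
#[cfg(test)]
mod scale_factors_example {
    use super::*;

    #[test]
    fn pure_scale() {
        let m = LayoutTransform::create_scale(2.0, 3.0, 1.0);
        assert_eq!(scale_factors(&m), (2.0, 3.0));
    }
}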

/// Clamp scaling factor to a power of two.
///
/// This code comes from gecko gfx/thebes/gfxUtils.cpp with the following
/// modification:
///
/// * logs are taken in base 2 instead of base e.
pub fn clamp_to_scale_factor(val: f32, round_down: bool) -> f32 {
    // Arbitrary scale factor limitation. We can increase this
    // for better scaling performance at the cost of worse
    // quality.
    const SCALE_RESOLUTION: f32 = 2.0;

    // Negative scaling is just a flip and irrelevant to
    // our resolution calculation.
    let val = val.abs();

    let (val, inverse) = if val < 1.0 {
        (1.0 / val, true)
    } else {
        (val, false)
    };

    let power = val.log2() / SCALE_RESOLUTION.log2();

    // If power is within 1e-5 of an integer, round to nearest to
    // prevent floating point errors, otherwise round up to the
    // next integer value.
    let power = if (power - power.round()).abs() < 1e-5 {
        power.round()
    } else if inverse != round_down {
        // Use floor when we are either inverted or rounding down, but
        // not both.
        power.floor()
    } else {
        // Otherwise, ceil when we are not inverted and not rounding
        // down, or we are inverted and rounding down.
        power.ceil()
    };

    let scale = SCALE_RESOLUTION.powf(power);

    if inverse {
        1.0 / scale
    } else {
        scale
    }
}
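
// A couple of worked values (added for illustration; not part of the upstream
// file): with SCALE_RESOLUTION = 2 the function snaps scales to powers of two,
// and `round_down` selects which neighbouring power is chosen.
#[cfg(test)]
mod clamp_to_scale_factor_example {
    use super::*;

    #[test]
    fn snaps_to_powers_of_two() {
        // 3.0 is between 2 and 4: round up by default, down when asked.
        assert!((clamp_to_scale_factor(3.0, false) - 4.0).abs() < 1e-6);
        assert!((clamp_to_scale_factor(3.0, true) - 2.0).abs() < 1e-6);

        // Values below 1.0 are handled through their reciprocal.
        assert!((clamp_to_scale_factor(0.3, false) - 0.5).abs() < 1e-6);
    }
}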

/// Rounds a value up to the nearest multiple of mul
pub fn round_up_to_multiple(val: usize, mul: NonZeroUsize) -> usize {
    match val % mul.get() {
        0 => val,
        rem => val - rem + mul.get(),
    }
}
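
// A quick check of the rounding behaviour (added for illustration; not part of
// the upstream file): values already on a multiple are left unchanged.
#[cfg(test)]
mod round_up_to_multiple_example {
    use super::*;

    #[test]
    fn rounds_up() {
        let eight = NonZeroUsize::new(8).unwrap();
        assert_eq!(round_up_to_multiple(13, eight), 16);
        assert_eq!(round_up_to_multiple(16, eight), 16);
    }
}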

#[macro_export]
macro_rules! c_str {
    ($lit:expr) => {
        unsafe {
            std::ffi::CStr::from_ptr(concat!($lit, "\0").as_ptr()
                as *const std::os::raw::c_char)
        }
    }
}