/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* A type/length-parametrized vector class. */

#ifndef mozilla_Vector_h
#define mozilla_Vector_h

#include "mozilla/Alignment.h"
#include "mozilla/AllocPolicy.h"
#include "mozilla/ArrayUtils.h"  // for PointerRangeSize
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Move.h"
#include "mozilla/OperatorNewExtensions.h"
#include "mozilla/ReentrancyGuard.h"
#include "mozilla/Span.h"
#include "mozilla/TemplateLib.h"
#include "mozilla/TypeTraits.h"

#include <algorithm>  // for std::remove_if (used by eraseIf below)
#include <new>        // for placement new

namespace mozilla {

template <typename T, size_t N, class AllocPolicy>
class Vector;

namespace detail {

/*
 * Check that the given capacity wastes the minimal amount of space if
 * allocated on the heap. This means that aCapacity*sizeof(T) is as close to a
 * power-of-two as possible. growStorageBy() is responsible for ensuring this.
 */
template <typename T>
static bool CapacityHasExcessSpace(size_t aCapacity) {
  size_t size = aCapacity * sizeof(T);
  return RoundUpPow2(size) - size >= sizeof(T);
}
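
/*
 * Illustrative example (not in the original source): with sizeof(T) == 12 and
 * aCapacity == 4, size is 48 bytes and RoundUpPow2(48) == 64, so 16 bytes are
 * wasted -- enough for another element -- and the check returns true. A
 * capacity of 5 (60 bytes, only 4 wasted) returns false.
 */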

/*
 * This template class provides a default implementation for vector operations
 * when the element type is not known to be a POD, as judged by IsPod.
 */
template <typename T, size_t N, class AP, bool IsPod>
struct VectorImpl {
  /*
   * Constructs an object in the uninitialized memory at *aDst with aArgs.
   */
  template <typename... Args>
  MOZ_NONNULL(1)
  static inline void new_(T* aDst, Args&&... aArgs) {
    new (KnownNotNull, aDst) T(std::forward<Args>(aArgs)...);
  }

  /* Destroys constructed objects in the range [aBegin, aEnd). */
  static inline void destroy(T* aBegin, T* aEnd) {
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      p->~T();
    }
  }

  /* Constructs objects in the uninitialized range [aBegin, aEnd). */
  static inline void initialize(T* aBegin, T* aEnd) {
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      new_(p);
    }
  }

  /*
   * Copy-constructs objects in the uninitialized range
   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
   */
  template <typename U>
  static inline void copyConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, *p);
    }
  }

  /*
   * Move-constructs objects in the uninitialized range
   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
   */
  template <typename U>
  static inline void moveConstruct(T* aDst, U* aSrcStart, U* aSrcEnd) {
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, std::move(*p));
    }
  }

  /*
   * Copy-constructs objects in the uninitialized range [aDst, aDst+aN) from
   * the same object aU.
   */
  template <typename U>
  static inline void copyConstructN(T* aDst, size_t aN, const U& aU) {
    for (T* end = aDst + aN; aDst < end; ++aDst) {
      new_(aDst, aU);
    }
  }

  /*
   * Grows the given buffer to have capacity aNewCap, preserving the objects
   * constructed in the range [begin, end) and updating aV. Assumes that (1)
   * aNewCap has not overflowed, and (2) multiplying aNewCap by sizeof(T) will
   * not overflow.
   */
  static inline MOZ_MUST_USE bool growTo(Vector<T, N, AP>& aV, size_t aNewCap) {
    MOZ_ASSERT(!aV.usingInlineStorage());
    MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
    T* newbuf = aV.template pod_malloc<T>(aNewCap);
    if (MOZ_UNLIKELY(!newbuf)) {
      return false;
    }
    T* dst = newbuf;
    T* src = aV.beginNoCheck();
    for (; src < aV.endNoCheck(); ++dst, ++src) {
      new_(dst, std::move(*src));
    }
    VectorImpl::destroy(aV.beginNoCheck(), aV.endNoCheck());
    aV.free_(aV.mBegin, aV.mTail.mCapacity);
    aV.mBegin = newbuf;
    /* aV.mLength is unchanged. */
    aV.mTail.mCapacity = aNewCap;
    return true;
  }
};

/*
 * This partial template specialization provides a default implementation for
 * vector operations when the element type is known to be a POD, as judged by
 * IsPod.
 */
template <typename T, size_t N, class AP>
struct VectorImpl<T, N, AP, true> {
  template <typename... Args>
  MOZ_NONNULL(1)
  static inline void new_(T* aDst, Args&&... aArgs) {
    // Explicitly construct a local object instead of using a temporary since
    // T(args...) will be treated like a C-style cast in the unary case and
    // allow unsafe conversions. Both forms should be equivalent to an
    // optimizing compiler.
    T temp(std::forward<Args>(aArgs)...);
    *aDst = temp;
  }

  static inline void destroy(T*, T*) {}

  static inline void initialize(T* aBegin, T* aEnd) {
    /*
     * You would think that memset would be a big win (or even break even)
     * when we know T is a POD. But currently it's not. This is probably
     * because |append| tends to be given small ranges and memset requires
     * a function call that doesn't get inlined.
     *
     * memset(aBegin, 0, sizeof(T) * (aEnd - aBegin));
     */
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      new_(p);
    }
  }

  template <typename U>
  static inline void copyConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    /*
     * See above memset comment. Also, notice that copyConstruct is
     * currently templated (T != U), so memcpy won't work without
     * requiring T == U.
     *
     * memcpy(aDst, aSrcStart, sizeof(T) * (aSrcEnd - aSrcStart));
     */
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, *p);
    }
  }

  template <typename U>
  static inline void moveConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    copyConstruct(aDst, aSrcStart, aSrcEnd);
  }

  static inline void copyConstructN(T* aDst, size_t aN, const T& aT) {
    for (T* end = aDst + aN; aDst < end; ++aDst) {
      new_(aDst, aT);
    }
  }

  static inline MOZ_MUST_USE bool growTo(Vector<T, N, AP>& aV, size_t aNewCap) {
    MOZ_ASSERT(!aV.usingInlineStorage());
    MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
    T* newbuf =
        aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aNewCap);
    if (MOZ_UNLIKELY(!newbuf)) {
      return false;
    }
    aV.mBegin = newbuf;
    /* aV.mLength is unchanged. */
    aV.mTail.mCapacity = aNewCap;
    return true;
  }

  static inline void podResizeToFit(Vector<T, N, AP>& aV) {
    if (aV.usingInlineStorage() || aV.mLength == aV.mTail.mCapacity) {
      return;
    }
    if (!aV.mLength) {
      aV.free_(aV.mBegin, aV.mTail.mCapacity);
      aV.mBegin = aV.inlineStorage();
      aV.mTail.mCapacity = aV.kInlineCapacity;
#ifdef DEBUG
      aV.mTail.mReserved = 0;
#endif
      return;
    }
    T* newbuf =
        aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aV.mLength);
    if (MOZ_UNLIKELY(!newbuf)) {
      return;
    }
    aV.mBegin = newbuf;
    aV.mTail.mCapacity = aV.mLength;
#ifdef DEBUG
    aV.mTail.mReserved = aV.mLength;
#endif
  }
};

// A struct for TestVector.cpp to access private internal fields.
// DO NOT DEFINE IN YOUR OWN CODE.
struct VectorTesting;

}  // namespace detail

/*
 * STL-like container providing a short-lived, dynamic buffer. Vector calls the
 * constructors/destructors of all elements stored in its internal buffer, so
 * non-PODs may be safely used. Additionally, Vector will store the first N
 * elements in-place before resorting to dynamic allocation.
 *
 * T requirements:
 *  - default and copy constructible, assignable, destructible
 *  - operations do not throw
 * MinInlineCapacity requirements:
 *  - any value, however, MinInlineCapacity is clamped to min/max values
 * AllocPolicy:
 *  - see "Allocation policies" in AllocPolicy.h (defaults to
 *    mozilla::MallocAllocPolicy)
 *
 * Vector is not reentrant: T member functions called during Vector member
 * functions must not call back into the same object!
 */
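/*
 * Example usage (an illustrative sketch, not part of the original header):
 *
 *   mozilla::Vector<int, 8> v;  // stores up to 8 ints inline
 *   for (int i = 0; i < 100; i++) {
 *     if (!v.append(i)) {
 *       return false;  // every fallible operation must be checked
 *     }
 *   }
 *   MOZ_ASSERT(v.length() == 100);
 */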
template <typename T, size_t MinInlineCapacity = 0,
          class AllocPolicy = MallocAllocPolicy>
class MOZ_NON_PARAM Vector final : private AllocPolicy {
  /* utilities */

  static const bool kElemIsPod = IsPod<T>::value;
  typedef detail::VectorImpl<T, MinInlineCapacity, AllocPolicy, kElemIsPod>
      Impl;
  friend struct detail::VectorImpl<T, MinInlineCapacity, AllocPolicy,
                                   kElemIsPod>;

  friend struct detail::VectorTesting;

  MOZ_MUST_USE bool growStorageBy(size_t aIncr);
  MOZ_MUST_USE bool convertToHeapStorage(size_t aNewCap);
  MOZ_MUST_USE bool maybeCheckSimulatedOOM(size_t aRequestedSize);

  /* magic constants */

  /**
   * The maximum space allocated for inline element storage.
   *
   * We reduce space by what the AllocPolicy base class and prior Vector member
   * fields likely consume to attempt to play well with binary size classes.
   */
  static constexpr size_t kMaxInlineBytes =
      1024 -
      (sizeof(AllocPolicy) + sizeof(T*) + sizeof(size_t) + sizeof(size_t));

  /**
   * The number of T elements of inline capacity built into this Vector. This
   * is usually |MinInlineCapacity|, but it may be less (or zero!) for large T.
   *
   * We use a partially-specialized template (not explicit specialization, which
   * is only allowed at namespace scope) to compute this value. The benefit is
   * that |sizeof(T)| need not be computed, and |T| doesn't have to be fully
   * defined at the time |Vector<T>| appears, if no inline storage is requested.
   */
  template <size_t MinimumInlineCapacity, size_t Dummy>
  struct ComputeCapacity {
    static constexpr size_t value =
        tl::Min<MinimumInlineCapacity, kMaxInlineBytes / sizeof(T)>::value;
  };

  template <size_t Dummy>
  struct ComputeCapacity<0, Dummy> {
    static constexpr size_t value = 0;
  };

  /** The actual inline capacity in number of elements T. This may be zero! */
  static constexpr size_t kInlineCapacity =
      ComputeCapacity<MinInlineCapacity, 0>::value;
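
  // Illustrative note (not in the original source): a hypothetical
  // Vector<double, 256> would request 2048 bytes of inline storage, which
  // exceeds kMaxInlineBytes, so kInlineCapacity is clamped to roughly
  // (1024 - bookkeeping) / sizeof(double) elements rather than 256.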

  /* member data */

  /*
   * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
   * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
   * mLength, mBegin + mCapacity) holds uninitialized memory. The range
   * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
   * previously allocated by a call to reserve().
   */
  T* mBegin;

  /* Number of elements in the vector. */
  size_t mLength;

  /*
   * Memory used to store capacity, reserved element count (debug builds only),
   * and inline storage. The simple "answer" is:
   *
   *   size_t mCapacity;
   *   #ifdef DEBUG
   *   size_t mReserved;
   *   #endif
   *   alignas(T) unsigned char mBytes[kInlineCapacity * sizeof(T)];
   *
   * but there are complications. First, C++ forbids zero-sized arrays that
   * might result. Second, we don't want zero capacity to affect Vector's size
   * (even empty classes take up a byte, unless they're base classes).
   *
   * Yet again, we eliminate the zero-sized array using partial specialization.
   * And we eliminate potential size hit by putting capacity/reserved in one
   * struct, then putting the array (if any) in a derived struct. If no array
   * is needed, the derived struct won't consume extra space.
   */
  struct CapacityAndReserved {
    explicit CapacityAndReserved(size_t aCapacity, size_t aReserved)
        : mCapacity(aCapacity)
#ifdef DEBUG
          ,
          mReserved(aReserved)
#endif
    {
    }
    CapacityAndReserved() = default;

    /* Max number of elements storable in the vector without resizing. */
    size_t mCapacity;

#ifdef DEBUG
    /* Max elements of reserved or used space in this vector. */
    size_t mReserved;
#endif
  };

// Silence warnings about this struct possibly being padded due to the
// alignas() in it -- there's nothing we can do to avoid it.
#ifdef _MSC_VER
#  pragma warning(push)
#  pragma warning(disable : 4324)
#endif  // _MSC_VER

  template <size_t Capacity, size_t Dummy>
  struct CRAndStorage : CapacityAndReserved {
    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
        : CapacityAndReserved(aCapacity, aReserved) {}
    CRAndStorage() = default;

    alignas(T) unsigned char mBytes[Capacity * sizeof(T)];

    // GCC fails due to -Werror=strict-aliasing if |mBytes| is directly cast to
    // T*. Indirecting through this function addresses the problem.
    void* data() { return mBytes; }

    T* storage() { return static_cast<T*>(data()); }
  };

  template <size_t Dummy>
  struct CRAndStorage<0, Dummy> : CapacityAndReserved {
    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
        : CapacityAndReserved(aCapacity, aReserved) {}
    CRAndStorage() = default;

    T* storage() {
      // If this returns |nullptr|, functions like |Vector::begin()| would too,
      // breaking callers that pass a vector's elements as pointer/length to
      // code that bounds its operation by length but (even just as a sanity
      // check) always wants a non-null pointer. Fake up an aligned, non-null
      // pointer to support these callers.
      return reinterpret_cast<T*>(sizeof(T));
    }
  };

  CRAndStorage<kInlineCapacity, 0> mTail;

#ifdef _MSC_VER
#  pragma warning(pop)
#endif  // _MSC_VER

#ifdef DEBUG
  friend class ReentrancyGuard;
  bool mEntered;
#endif

  /* private accessors */

  bool usingInlineStorage() const {
    return mBegin == const_cast<Vector*>(this)->inlineStorage();
  }

  T* inlineStorage() { return mTail.storage(); }

  T* beginNoCheck() const { return mBegin; }

  T* endNoCheck() { return mBegin + mLength; }

  const T* endNoCheck() const { return mBegin + mLength; }

#ifdef DEBUG
  /**
   * The amount of explicitly allocated space in this vector that is
   * immediately available to be filled by appending additional elements.
   * This value is always greater than or equal to |length()| -- the vector's
   * actual elements are implicitly reserved. This value is always less than
   * or equal to |capacity()|. It may be explicitly increased using the
   * |reserve()| method.
   */
  size_t reserved() const {
    MOZ_ASSERT(mLength <= mTail.mReserved);
    MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
    return mTail.mReserved;
  }
#endif

  /* Append operations guaranteed to succeed due to pre-reserved space. */
  template <typename U>
  void internalAppend(U&& aU);
  template <typename U, size_t O, class BP>
  void internalAppendAll(const Vector<U, O, BP>& aU);
  void internalAppendN(const T& aT, size_t aN);
  template <typename U>
  void internalAppend(const U* aBegin, size_t aLength);

 public:
  static const size_t sMaxInlineStorage = MinInlineCapacity;

  typedef T ElementType;

  explicit Vector(AllocPolicy = AllocPolicy());
  Vector(Vector&&);            /* Move constructor. */
  Vector& operator=(Vector&&); /* Move assignment. */
  ~Vector();

  /* accessors */

  const AllocPolicy& allocPolicy() const { return *this; }

  AllocPolicy& allocPolicy() { return *this; }

  enum { InlineLength = MinInlineCapacity };

  size_t length() const { return mLength; }

  bool empty() const { return mLength == 0; }

  size_t capacity() const { return mTail.mCapacity; }

  T* begin() {
    MOZ_ASSERT(!mEntered);
    return mBegin;
  }

  const T* begin() const {
    MOZ_ASSERT(!mEntered);
    return mBegin;
  }

  T* end() {
    MOZ_ASSERT(!mEntered);
    return mBegin + mLength;
  }

  const T* end() const {
    MOZ_ASSERT(!mEntered);
    return mBegin + mLength;
  }

  T& operator[](size_t aIndex) {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(aIndex < mLength);
    return begin()[aIndex];
  }

  const T& operator[](size_t aIndex) const {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(aIndex < mLength);
    return begin()[aIndex];
  }

  T& back() {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(!empty());
    return *(end() - 1);
  }

  const T& back() const {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(!empty());
    return *(end() - 1);
  }

  operator mozilla::Span<const T>() const {
    return mozilla::MakeSpan(mBegin, mLength);
  }

  operator mozilla::Span<T>() { return mozilla::MakeSpan(mBegin, mLength); }
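
  // Illustrative note (not in the original source): these conversions let a
  // Vector be passed wherever a Span is expected; e.g. a hypothetical
  // |void Process(mozilla::Span<const int>)| could be called as |Process(v)|
  // for a Vector<int> v.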

  class Range {
    friend class Vector;
    T* mCur;
    T* mEnd;
    Range(T* aCur, T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    bool empty() const { return mCur == mEnd; }
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };

  class ConstRange {
    friend class Vector;
    const T* mCur;
    const T* mEnd;
    ConstRange(const T* aCur, const T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    bool empty() const { return mCur == mEnd; }
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    const T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };

  Range all() { return Range(begin(), end()); }
  ConstRange all() const { return ConstRange(begin(), end()); }

  /* mutators */

  /**
   * Reverse the order of the elements in the vector in place.
   */
  void reverse();

  /**
   * Given that the vector is empty, grow the internal capacity to |aRequest|,
   * keeping the length 0.
   */
  MOZ_MUST_USE bool initCapacity(size_t aRequest);

  /**
   * Given that the vector is empty, grow the internal capacity and length to
   * |aRequest| leaving the elements' memory completely uninitialized (with all
   * the associated hazards and caveats). This avoids the usual allocation-size
   * rounding that happens in resize and the overhead of initialization for
   * elements that are about to be overwritten.
   */
  MOZ_MUST_USE bool initLengthUninitialized(size_t aRequest);

  /**
   * If reserve(aRequest) succeeds and |aRequest >= length()|, then appending
   * |aRequest - length()| elements, in any sequence of append/appendAll calls,
   * is guaranteed to succeed.
   *
   * A request to reserve an amount less than the current length does not
   * affect reserved space.
   */
  MOZ_MUST_USE bool reserve(size_t aRequest);
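
  // Illustrative sketch (not in the original source): pairing reserve() with
  // infallibleAppend(), assuming |v| is a Vector<int> and |n| is a size_t.
  //
  //   if (!v.reserve(v.length() + n)) {
  //     return false;               // OOM: nothing was appended
  //   }
  //   for (size_t i = 0; i < n; i++) {
  //     v.infallibleAppend(int(i)); // guaranteed to succeed after reserve()
  //   }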

  /**
   * Destroy elements in the range [end() - aIncr, end()). Does not deallocate
   * or unreserve storage for those elements.
   */
  void shrinkBy(size_t aIncr);

  /**
   * Destroy elements in the range [aNewLength, end()). Does not deallocate
   * or unreserve storage for those elements.
   */
  void shrinkTo(size_t aNewLength);

  /** Grow the vector by aIncr elements. */
  MOZ_MUST_USE bool growBy(size_t aIncr);

  /** Call shrinkBy or growBy based on whether newSize > length(). */
  MOZ_MUST_USE bool resize(size_t aNewLength);

  /**
   * Increase the length of the vector, but don't initialize the new elements
   * -- leave them as uninitialized memory.
   */
  MOZ_MUST_USE bool growByUninitialized(size_t aIncr);
  void infallibleGrowByUninitialized(size_t aIncr);
  MOZ_MUST_USE bool resizeUninitialized(size_t aNewLength);

  /** Shorthand for shrinkBy(length()). */
  void clear();

  /** Clears and releases any heap-allocated storage. */
  void clearAndFree();

  /**
   * Calls the AllocPolicy's pod_realloc to release excess capacity. Since
   * realloc is only safe on PODs, this method fails to compile if IsPod<T>
   * is false.
   */
  void podResizeToFit();

  /**
   * If true, appending |aNeeded| elements won't reallocate elements storage.
   * This *doesn't* mean that infallibleAppend may be used! You still must
   * reserve the extra space, even if this method indicates that appends won't
   * need to reallocate elements storage.
   */
  bool canAppendWithoutRealloc(size_t aNeeded) const;

  /** Potentially fallible append operations. */

  /**
   * This can take either a T& or a T&&. Given a T&&, it moves |aU| into the
   * vector, instead of copying it. If it fails, |aU| is left unmoved. ("We are
   * not amused.")
   */
  template <typename U>
  MOZ_MUST_USE bool append(U&& aU);

  /**
   * Construct a T in-place as a new entry at the end of this vector.
   */
  template <typename... Args>
  MOZ_MUST_USE bool emplaceBack(Args&&... aArgs) {
    if (!growByUninitialized(1)) return false;
    Impl::new_(&back(), std::forward<Args>(aArgs)...);
    return true;
  }
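
  // Illustrative sketch (not in the original source): for a hypothetical
  // |struct Point { Point(int aX, int aY); }|, |v.emplaceBack(3, 4)| builds
  // the Point directly in the vector's storage, avoiding the temporary that
  // |v.append(Point(3, 4))| would construct and then move.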

  template <typename U, size_t O, class BP>
  MOZ_MUST_USE bool appendAll(const Vector<U, O, BP>& aU);
  MOZ_MUST_USE bool appendN(const T& aT, size_t aN);
  template <typename U>
  MOZ_MUST_USE bool append(const U* aBegin, const U* aEnd);
  template <typename U>
  MOZ_MUST_USE bool append(const U* aBegin, size_t aLength);

  /*
   * Guaranteed-infallible append operations for use upon vectors whose
   * memory has been pre-reserved. Don't use this if you haven't reserved the
   * memory!
   */
  template <typename U>
  void infallibleAppend(U&& aU) {
    internalAppend(std::forward<U>(aU));
  }
  void infallibleAppendN(const T& aT, size_t aN) { internalAppendN(aT, aN); }
  template <typename U>
  void infallibleAppend(const U* aBegin, const U* aEnd) {
    internalAppend(aBegin, PointerRangeSize(aBegin, aEnd));
  }
  template <typename U>
  void infallibleAppend(const U* aBegin, size_t aLength) {
    internalAppend(aBegin, aLength);
  }
  template <typename... Args>
  void infallibleEmplaceBack(Args&&... aArgs) {
    infallibleGrowByUninitialized(1);
    Impl::new_(&back(), std::forward<Args>(aArgs)...);
  }

  void popBack();

  T popCopy();

  /**
   * If elements are stored in-place, return nullptr and leave this vector
   * unmodified.
   *
   * Otherwise return this vector's elements buffer, and clear this vector as
   * if by clearAndFree(). The caller now owns the buffer and is responsible
   * for deallocating it consistent with this vector's AllocPolicy.
   *
   * N.B. Although a T*, only the range [0, length()) is constructed.
   */
  MOZ_MUST_USE T* extractRawBuffer();
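
  // Illustrative sketch (not in the original source), assuming the default
  // MallocAllocPolicy and POD elements:
  //
  //   size_t len = v.length();
  //   if (T* buf = v.extractRawBuffer()) {
  //     Consume(buf, len);  // hypothetical consumer of the elements
  //     free(buf);          // deallocate consistent with MallocAllocPolicy
  //   }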

  /**
   * If elements are stored in-place, allocate a new buffer, move this
   * vector's elements into it, and return that buffer.
   *
   * Otherwise return this vector's elements buffer. The caller now owns the
   * buffer and is responsible for deallocating it consistent with this
   * vector's AllocPolicy.
   *
   * This vector is cleared, as if by clearAndFree(), when this method
   * succeeds. This method fails and returns nullptr only if new elements
   * buffer allocation fails.
   *
   * N.B. Only the range [0, length()) of the returned buffer is constructed.
   * If any of these elements are uninitialized (as growByUninitialized
   * enables), behavior is undefined.
   */
  MOZ_MUST_USE T* extractOrCopyRawBuffer();

  /**
   * Transfer ownership of an array of objects into the vector. The caller
   * must have allocated the array in accordance with this vector's
   * AllocPolicy.
   *
   * N.B. This call assumes that there are no uninitialized elements in the
   * passed range [aP, aP + aLength). The range [aP + aLength, aP +
   * aCapacity) must be allocated uninitialized memory.
   */
  void replaceRawBuffer(T* aP, size_t aLength, size_t aCapacity);

  /**
   * Transfer ownership of an array of objects into the vector. The caller
   * must have allocated the array in accordance with this vector's
   * AllocPolicy.
   *
   * N.B. This call assumes that there are no uninitialized elements in the
   * passed array.
   */
  void replaceRawBuffer(T* aP, size_t aLength);

  /**
   * Places |aVal| at position |aP|, shifting existing elements from |aP| onward
   * one position higher. On success, |aP| should not be reused because it'll
   * be a dangling pointer if reallocation of the vector storage occurred; the
   * return value should be used instead. On failure, nullptr is returned.
   *
   * Example usage:
   *
   *   if (!(p = vec.insert(p, val))) {
   *     <handle failure>
   *   }
   *   <keep working with p>
   *
   * This is inherently a linear-time operation. Be careful!
   */
  template <typename U>
  MOZ_MUST_USE T* insert(T* aP, U&& aVal);

  /**
   * Removes the element |aT|, which must fall in the bounds [begin, end),
   * shifting existing elements from |aT + 1| onward one position lower.
   */
  void erase(T* aT);

  /**
   * Removes the elements [|aBegin|, |aEnd|), which must fall in the bounds
   * [begin, end), shifting existing elements from |aEnd| onward to aBegin's
   * old position.
   */
  void erase(T* aBegin, T* aEnd);

  /**
   * Removes all elements that satisfy the predicate, shifting existing
   * elements lower to fill erased gaps.
   */
  template <typename Pred>
  void eraseIf(Pred aPred);

  /**
   * Removes all elements that compare equal to |aU|, shifting existing
   * elements lower to fill erased gaps.
   */
  template <typename U>
  void eraseIfEqual(const U& aU);
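
  // Illustrative sketch (not in the original source): remove every negative
  // value from a Vector<int> v in a single linear pass.
  //
  //   v.eraseIf([](int aValue) { return aValue < 0; });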

  /**
   * Measure the size of the vector's heap-allocated storage.
   */
  size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;

  /**
   * Like sizeOfExcludingThis, but also measures the size of the vector
   * object (which must be heap-allocated) itself.
   */
  size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;

  void swap(Vector& aOther);

 private:
  Vector(const Vector&) = delete;
  void operator=(const Vector&) = delete;
};

/* This does the re-entrancy check plus several other sanity checks. */
#define MOZ_REENTRANCY_GUARD_ET_AL                                         \
  ReentrancyGuard g(*this);                                                \
  MOZ_ASSERT_IF(usingInlineStorage(), mTail.mCapacity == kInlineCapacity); \
  MOZ_ASSERT(reserved() <= mTail.mCapacity);                               \
  MOZ_ASSERT(mLength <= reserved());                                       \
  MOZ_ASSERT(mLength <= mTail.mCapacity)

/* Vector Implementation */

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::Vector(AP aAP)
    : AP(std::move(aAP)),
      mLength(0),
      mTail(kInlineCapacity, 0)
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mBegin = inlineStorage();
}

/* Move constructor. */
template <typename T, size_t N, class AllocPolicy>
MOZ_ALWAYS_INLINE Vector<T, N, AllocPolicy>::Vector(Vector&& aRhs)
    : AllocPolicy(std::move(aRhs))
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mLength = aRhs.mLength;
  mTail.mCapacity = aRhs.mTail.mCapacity;
#ifdef DEBUG
  mTail.mReserved = aRhs.mTail.mReserved;
#endif

  if (aRhs.usingInlineStorage()) {
    /* We can't move the buffer over in this case, so copy elements. */
    mBegin = inlineStorage();
    Impl::moveConstruct(mBegin, aRhs.beginNoCheck(), aRhs.endNoCheck());
    /*
     * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
     * The elements in its in-line storage still need to be destroyed.
     */
  } else {
    /*
     * Take src's buffer, and turn src into an empty vector using
     * in-line storage.
     */
    mBegin = aRhs.mBegin;
    aRhs.mBegin = aRhs.inlineStorage();
    aRhs.mTail.mCapacity = kInlineCapacity;
    aRhs.mLength = 0;
#ifdef DEBUG
    aRhs.mTail.mReserved = 0;
#endif
  }
}

/* Move assignment. */
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>& Vector<T, N, AP>::operator=(Vector&& aRhs) {
  MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
  this->~Vector();
  new (KnownNotNull, this) Vector(std::move(aRhs));
  return *this;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::~Vector() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::reverse() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  T* elems = mBegin;
  size_t len = mLength;
  size_t mid = len / 2;
  for (size_t i = 0; i < mid; i++) {
    Swap(elems[i], elems[len - i - 1]);
  }
}

/*
 * This function will create a new heap buffer with capacity aNewCap,
 * move all elements in the inline buffer to this new buffer,
 * and fail on OOM.
 */
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::convertToHeapStorage(size_t aNewCap) {
  MOZ_ASSERT(usingInlineStorage());

  /* Allocate buffer. */
  MOZ_ASSERT(!detail::CapacityHasExcessSpace<T>(aNewCap));
  T* newBuf = this->template pod_malloc<T>(aNewCap);
  if (MOZ_UNLIKELY(!newBuf)) {
    return false;
  }

  /* Copy inline elements into heap buffer. */
  Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());

  /* Switch in heap buffer. */
  mBegin = newBuf;
  /* mLength is unchanged. */
  mTail.mCapacity = aNewCap;
  return true;
}

template <typename T, size_t N, class AP>
MOZ_NEVER_INLINE bool Vector<T, N, AP>::growStorageBy(size_t aIncr) {
  MOZ_ASSERT(mLength + aIncr > mTail.mCapacity);

  /*
   * When choosing a new capacity, its size should be as close to 2**N bytes
   * as possible. 2**N-sized requests are best because they are unlikely to
   * be rounded up by the allocator. Asking for a 2**N number of elements
   * isn't as good, because if sizeof(T) is not a power-of-two that would
   * result in a non-2**N request size.
   */
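
  /*
   * Illustrative example (not in the original source), assuming
   * sizeof(T) == 12: doubling a full capacity of 10 gives newCap == 20, i.e.
   * 240 bytes. A 240-byte request would be rounded up to 256 bytes, wasting
   * 16 bytes -- room for one more element -- so the CapacityHasExcessSpace
   * check below bumps newCap to 21 (252 bytes, only 4 wasted).
   */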

  size_t newCap;

  if (aIncr == 1) {
    if (usingInlineStorage()) {
      /* This case occurs in ~70--80% of the calls to this function. */
      size_t newSize =
          tl::RoundUpPow2<(kInlineCapacity + 1) * sizeof(T)>::value;
      newCap = newSize / sizeof(T);
      goto convert;
    }

    if (mLength == 0) {
      /* This case occurs in ~0--10% of the calls to this function. */
      newCap = 1;
      goto grow;
    }

    /* This case occurs in ~15--20% of the calls to this function. */

    /*
     * Will mLength * 4 * sizeof(T) overflow? This condition limits a vector
     * to 1GB of memory on a 32-bit system, which is a reasonable limit. It
     * also ensures that
     *
     *   static_cast<char*>(end()) - static_cast<char*>(begin())
     *
     * doesn't overflow ptrdiff_t (see bug 510319).
     */
    if (MOZ_UNLIKELY(mLength & tl::MulOverflowMask<4 * sizeof(T)>::value)) {
      this->reportAllocOverflow();
      return false;
    }

    /*
     * If we reach here, the existing capacity will have a size that is already
     * as close to 2^N as sizeof(T) will allow. Just double the capacity, and
     * then there might be space for one more element.
     */
    newCap = mLength * 2;
    if (detail::CapacityHasExcessSpace<T>(newCap)) {
      newCap += 1;
    }
  } else {
    /* This case occurs in ~2% of the calls to this function. */
    size_t newMinCap = mLength + aIncr;

    /* Did mLength + aIncr overflow? Will newCap * sizeof(T) overflow? */
    if (MOZ_UNLIKELY(newMinCap < mLength ||
                     newMinCap & tl::MulOverflowMask<2 * sizeof(T)>::value)) {
      this->reportAllocOverflow();
      return false;
    }

    size_t newMinSize = newMinCap * sizeof(T);
    size_t newSize = RoundUpPow2(newMinSize);
    newCap = newSize / sizeof(T);
  }

  if (usingInlineStorage()) {
  convert:
    return convertToHeapStorage(newCap);
  }

grow:
  return Impl::growTo(*this, newCap);
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::initCapacity(size_t aRequest) {
  MOZ_ASSERT(empty());
  MOZ_ASSERT(usingInlineStorage());
  if (aRequest == 0) {
    return true;
  }
  T* newbuf = this->template pod_malloc<T>(aRequest);
  if (MOZ_UNLIKELY(!newbuf)) {
    return false;
  }
  mBegin = newbuf;
  mTail.mCapacity = aRequest;
#ifdef DEBUG
  mTail.mReserved = aRequest;
#endif
  return true;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::initLengthUninitialized(size_t aRequest) {
  if (!initCapacity(aRequest)) {
    return false;
  }
  infallibleGrowByUninitialized(aRequest);
  return true;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::maybeCheckSimulatedOOM(size_t aRequestedSize) {
  if (aRequestedSize <= N) {
    return true;
  }

#ifdef DEBUG
  if (aRequestedSize <= mTail.mReserved) {
    return true;
  }
#endif

  return allocPolicy().checkSimulatedOOM();
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::reserve(size_t aRequest) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aRequest > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aRequest - mLength))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(aRequest)) {
    return false;
  }
#ifdef DEBUG
  if (aRequest > mTail.mReserved) {
    mTail.mReserved = aRequest;
  }
  MOZ_ASSERT(mLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
#endif
  return true;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::shrinkBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  MOZ_ASSERT(aIncr <= mLength);
  Impl::destroy(endNoCheck() - aIncr, endNoCheck());
  mLength -= aIncr;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::shrinkTo(size_t aNewLength) {
  MOZ_ASSERT(aNewLength <= mLength);
  shrinkBy(mLength - aNewLength);
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
  MOZ_ASSERT(mLength + aIncr <= mTail.mCapacity);
  T* newend = endNoCheck() + aIncr;
  Impl::initialize(endNoCheck(), newend);
  mLength += aIncr;
#ifdef DEBUG
  if (mLength > mTail.mReserved) {
    mTail.mReserved = mLength;
  }
#endif
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growByUninitialized(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aIncr > mTail.mReserved) {
    mTail.mReserved = mLength + aIncr;
  }
#endif
  infallibleGrowByUninitialized(aIncr);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::infallibleGrowByUninitialized(
    size_t aIncr) {
  MOZ_ASSERT(mLength + aIncr <= reserved());
  mLength += aIncr;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::resize(size_t aNewLength) {
  size_t curLength = mLength;
  if (aNewLength > curLength) {
    return growBy(aNewLength - curLength);
  }
  shrinkBy(curLength - aNewLength);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::resizeUninitialized(
    size_t aNewLength) {
  size_t curLength = mLength;
  if (aNewLength > curLength) {
    return growByUninitialized(aNewLength - curLength);
  }
  shrinkBy(curLength - aNewLength);
  return true;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clear() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  Impl::destroy(beginNoCheck(), endNoCheck());
  mLength = 0;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clearAndFree() {
  clear();

  if (usingInlineStorage()) {
    return;
  }
  this->free_(beginNoCheck(), mTail.mCapacity);
  mBegin = inlineStorage();
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::podResizeToFit() {
  // This function is only defined if IsPod is true and will fail to compile
  // otherwise.
  Impl::podResizeToFit(*this);
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::canAppendWithoutRealloc(size_t aNeeded) const {
  return mLength + aNeeded <= mTail.mCapacity;
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendAll(
    const Vector<U, O, BP>& aOther) {
  internalAppend(aOther.begin(), aOther.length());
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(U&& aU) {
  MOZ_ASSERT(mLength + 1 <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::new_(endNoCheck(), std::forward<U>(aU));
  ++mLength;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendN(const T& aT, size_t aNeeded) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (mLength + aNeeded > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aNeeded > mTail.mReserved) {
    mTail.mReserved = mLength + aNeeded;
  }
#endif
  internalAppendN(aT, aNeeded);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendN(const T& aT,
                                                         size_t aNeeded) {
  MOZ_ASSERT(mLength + aNeeded <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstructN(endNoCheck(), aNeeded, aT);
  mLength += aNeeded;
}

template <typename T, size_t N, class AP>
template <typename U>
inline T* Vector<T, N, AP>::insert(T* aP, U&& aVal) {
  MOZ_ASSERT(begin() <= aP);
  MOZ_ASSERT(aP <= end());
  size_t pos = aP - begin();
  MOZ_ASSERT(pos <= mLength);
  size_t oldLength = mLength;
  if (pos == oldLength) {
    if (!append(std::forward<U>(aVal))) {
      return nullptr;
    }
  } else {
    T oldBack = std::move(back());
    if (!append(std::move(oldBack))) {
      return nullptr;
    }
    for (size_t i = oldLength - 1; i > pos; --i) {
      (*this)[i] = std::move((*this)[i - 1]);
    }
    (*this)[pos] = std::forward<U>(aVal);
  }
  return begin() + pos;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aIt) {
  MOZ_ASSERT(begin() <= aIt);
  MOZ_ASSERT(aIt < end());
  while (aIt + 1 < end()) {
    *aIt = std::move(*(aIt + 1));
    ++aIt;
  }
  popBack();
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aBegin, T* aEnd) {
  MOZ_ASSERT(begin() <= aBegin);
  MOZ_ASSERT(aBegin <= aEnd);
  MOZ_ASSERT(aEnd <= end());
  while (aEnd < end()) {
    *aBegin++ = std::move(*aEnd++);
  }
  shrinkBy(aEnd - aBegin);
}

template <typename T, size_t N, class AP>
template <typename Pred>
void Vector<T, N, AP>::eraseIf(Pred aPred) {
  // remove_if finds the first element to be erased, and then efficiently
  // move-assigns elements to effectively overwrite elements that satisfy the
  // predicate. It returns the new end pointer, after which there are only
  // moved-from elements ready to be destroyed, so we just need to shrink the
  // vector accordingly.
  T* newEnd = std::remove_if(begin(), end(),
                             [&aPred](const T& aT) { return aPred(aT); });
  MOZ_ASSERT(newEnd <= end());
  shrinkBy(end() - newEnd);
}

template <typename T, size_t N, class AP>
template <typename U>
void Vector<T, N, AP>::eraseIfEqual(const U& aU) {
  return eraseIf([&aU](const T& aT) { return aT == aU; });
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                const U* aInsEnd) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  size_t aNeeded = PointerRangeSize(aInsBegin, aInsEnd);
  if (mLength + aNeeded > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aNeeded > mTail.mReserved) {
    mTail.mReserved = mLength + aNeeded;
  }
#endif
  internalAppend(aInsBegin, aNeeded);
  return true;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(const U* aInsBegin,
                                                        size_t aInsLength) {
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(U&& aU) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (mLength == mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(1))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + 1)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + 1 > mTail.mReserved) {
    mTail.mReserved = mLength + 1;
  }
#endif
  internalAppend(std::forward<U>(aU));
  return true;
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(
    const Vector<U, O, BP>& aOther) {
  return append(aOther.begin(), aOther.length());
}

template <typename T, size_t N, class AP>
template <class U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                size_t aInsLength) {
  return append(aInsBegin, aInsBegin + aInsLength);
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::popBack() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  MOZ_ASSERT(!empty());
  --mLength;
  endNoCheck()->~T();
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE T Vector<T, N, AP>::popCopy() {
  T ret = back();
  popBack();
  return ret;
}

template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractRawBuffer() {
  MOZ_REENTRANCY_GUARD_ET_AL;

  if (usingInlineStorage()) {
    return nullptr;
  }

  T* ret = mBegin;
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return ret;
}

template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractOrCopyRawBuffer() {
  if (T* ret = extractRawBuffer()) {
    return ret;
  }

  MOZ_REENTRANCY_GUARD_ET_AL;

  T* copy = this->template pod_malloc<T>(mLength);
  if (!copy) {
    return nullptr;
  }

  Impl::moveConstruct(copy, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return copy;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength,
                                               size_t aCapacity) {
  MOZ_REENTRANCY_GUARD_ET_AL;

  /* Destroy what we have. */
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }

  /* Take in the new buffer. */
  if (aCapacity <= kInlineCapacity) {
    /*
     * We convert to inline storage if possible, even though aP might
     * otherwise be acceptable. Maybe this behaviour should be
     * specifiable with an argument to this function.
     */
    mBegin = inlineStorage();
    mLength = aLength;
    mTail.mCapacity = kInlineCapacity;
    Impl::moveConstruct(mBegin, aP, aP + aLength);
    Impl::destroy(aP, aP + aLength);
    this->free_(aP, aCapacity);
  } else {
    mBegin = aP;
    mLength = aLength;
    mTail.mCapacity = aCapacity;
  }
#ifdef DEBUG
  mTail.mReserved = aCapacity;
#endif
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength) {
  replaceRawBuffer(aP, aLength, aLength);
}

template <typename T, size_t N, class AP>
inline size_t Vector<T, N, AP>::sizeOfExcludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
}

template <typename T, size_t N, class AP>
inline size_t Vector<T, N, AP>::sizeOfIncludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::swap(Vector& aOther) {
  static_assert(N == 0, "still need to implement this for N != 0");

  // This only works when inline storage is always empty.
  if (!usingInlineStorage() && aOther.usingInlineStorage()) {
    aOther.mBegin = mBegin;
    mBegin = inlineStorage();
  } else if (usingInlineStorage() && !aOther.usingInlineStorage()) {
    mBegin = aOther.mBegin;
    aOther.mBegin = aOther.inlineStorage();
  } else if (!usingInlineStorage() && !aOther.usingInlineStorage()) {
    Swap(mBegin, aOther.mBegin);
  } else {
    // This case is a no-op, since we'd set both to use their inline storage.
  }

  Swap(mLength, aOther.mLength);
  Swap(mTail.mCapacity, aOther.mTail.mCapacity);
#ifdef DEBUG
  Swap(mTail.mReserved, aOther.mTail.mReserved);
#endif
}

}  // namespace mozilla

#endif /* mozilla_Vector_h */