Source code

Revision control

Other Tools

1
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2
* vim: set ts=8 sts=2 et sw=2 tw=80:
3
* This Source Code Form is subject to the terms of the Mozilla Public
4
* License, v. 2.0. If a copy of the MPL was not distributed with this
5
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7
#ifndef ds_OrderedHashTable_h
8
#define ds_OrderedHashTable_h
9
10
/*
11
* Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
12
* They are like js::HashMap and js::HashSet except that:
13
*
14
* - Iterating over an Ordered hash table visits the entries in the order in
15
* which they were inserted. This means that unlike a HashMap, the behavior
16
* of an OrderedHashMap is deterministic (as long as the HashPolicy methods
17
* are effect-free and consistent); the hashing is a pure performance
18
* optimization.
19
*
20
* - Range objects over Ordered tables remain valid even when entries are
21
* added or removed or the table is resized. (However in the case of
22
* removing entries, note the warning on class Range below.)
23
*
24
* - The API is a little different, so it's not a drop-in replacement.
25
* In particular, the hash policy is a little different.
26
* Also, the Ordered templates lack the Ptr and AddPtr types.
27
*
28
* Hash policies
29
*
30
* See the comment about "Hash policy" in HashTable.h for general features that
31
* hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
32
* differ in that the hash() method takes an extra argument:
33
* static js::HashNumber hash(Lookup, const HashCodeScrambler&);
34
* They must additionally provide a distinguished "empty" key value and the
35
* following static member functions:
36
* bool isEmpty(const Key&);
37
* void makeEmpty(Key*);
38
*/
39
40
#include "mozilla/HashFunctions.h"
41
42
#include <utility>
43
44
#include "js/HashTable.h"
45
46
namespace js {
47
48
namespace detail {
49
50
/*
51
* detail::OrderedHashTable is the underlying data structure used to implement
52
* both OrderedHashMap and OrderedHashSet. Programs should use one of those two
53
* templates rather than OrderedHashTable.
54
*/
55
template <class T, class Ops, class AllocPolicy>
class OrderedHashTable {
 public:
  using Key = typename Ops::KeyType;
  using Lookup = typename Ops::Lookup;

  // A table entry: the stored element plus a link to the next entry in the
  // same hash bucket's chain.
  struct Data {
    T element;
    Data* chain;

    Data(const T& e, Data* c) : element(e), chain(c) {}
    Data(T&& e, Data* c) : element(std::move(e)), chain(c) {}
  };

  class Range;
  friend class Range;

 private:
  Data** hashTable;       // hash table (has hashBuckets() elements)
  Data* data;             // data vector, an array of Data objects
                          // data[0:dataLength] are constructed
  uint32_t dataLength;    // number of constructed elements in data
  uint32_t dataCapacity;  // size of data, in elements
  uint32_t liveCount;     // dataLength less empty (removed) entries
  uint32_t hashShift;     // multiplicative hash shift
  Range* ranges;  // list of all live Ranges on this table in malloc memory
  Range*
      nurseryRanges;  // list of all live Ranges on this table in the GC nursery
  AllocPolicy alloc;
  mozilla::HashCodeScrambler hcs;  // don't reveal pointer hash codes

  // Call f(range, arg) on every live Range, in both the malloc list and the
  // nursery list.
  //
  // TODO: This should be templated on a functor type and receive lambda
  // arguments but this causes problems for the hazard analysis builds. See
  // bug 1398213.
  template <void (*f)(Range* range, uint32_t arg)>
  void forEachRange(uint32_t arg = 0) {
    Range* next;
    for (Range* r = ranges; r; r = next) {
      next = r->next;
      f(r, arg);
    }
    for (Range* r = nurseryRanges; r; r = next) {
      next = r->next;
      f(r, arg);
    }
  }

 public:
  // Constructs an empty, uninitialized table; init() must be called (and
  // succeed) before the table is used.
  OrderedHashTable(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
      : hashTable(nullptr),
        data(nullptr),
        dataLength(0),
        dataCapacity(0),
        liveCount(0),
        hashShift(0),
        ranges(nullptr),
        nurseryRanges(nullptr),
        alloc(std::move(ap)),
        hcs(hcs) {}

  // Allocate the initial hash table and data vector.
  // Returns false on allocation failure (leaving the table unmodified).
  MOZ_MUST_USE bool init() {
    MOZ_ASSERT(!hashTable, "init must be called at most once");

    uint32_t buckets = initialBuckets();
    Data** tableAlloc = alloc.template pod_malloc<Data*>(buckets);
    if (!tableAlloc) {
      return false;
    }
    for (uint32_t i = 0; i < buckets; i++) {
      tableAlloc[i] = nullptr;
    }

    uint32_t capacity = uint32_t(buckets * fillFactor());
    Data* dataAlloc = alloc.template pod_malloc<Data>(capacity);
    if (!dataAlloc) {
      alloc.free_(tableAlloc, buckets);
      return false;
    }

    // clear() requires that members are assigned only after all allocation
    // has succeeded, and that this->ranges is left untouched.
    hashTable = tableAlloc;
    data = dataAlloc;
    dataLength = 0;
    dataCapacity = capacity;
    liveCount = 0;
    hashShift = js::kHashNumberBits - initialBucketsLog2();
    MOZ_ASSERT(hashBuckets() == buckets);
    return true;
  }

  ~OrderedHashTable() {
    // Detach any Ranges that outlive the table; they remain destructible.
    forEachRange<Range::onTableDestroyed>();
    if (hashTable) {
      // |hashBuckets()| isn't valid when |hashTable| hasn't been created.
      alloc.free_(hashTable, hashBuckets());
    }
    freeData(data, dataLength, dataCapacity);
  }

  /* Return the number of elements in the table. */
  uint32_t count() const { return liveCount; }

  /* True if any element matches l. */
  bool has(const Lookup& l) const { return lookup(l) != nullptr; }

  /* Return a pointer to the element, if any, that matches l, or nullptr. */
  T* get(const Lookup& l) {
    Data* e = lookup(l, prepareHash(l));
    return e ? &e->element : nullptr;
  }

  /* Return a pointer to the element, if any, that matches l, or nullptr. */
  const T* get(const Lookup& l) const {
    return const_cast<OrderedHashTable*>(this)->get(l);
  }

  /*
   * If the table already contains an entry that matches |element|,
   * replace that entry with |element|. Otherwise add a new entry.
   *
   * On success, return true, whether there was already a matching element or
   * not. On allocation failure, return false. If this returns false, it
   * means the element was not added to the table.
   */
  template <typename ElementInput>
  MOZ_MUST_USE bool put(ElementInput&& element) {
    HashNumber h = prepareHash(Ops::getKey(element));
    if (Data* e = lookup(Ops::getKey(element), h)) {
      e->element = std::forward<ElementInput>(element);
      return true;
    }

    if (dataLength == dataCapacity) {
      // If the hashTable is more than 1/4 deleted data, simply rehash in
      // place to free up some space. Otherwise, grow the table.
      uint32_t newHashShift =
          liveCount >= dataCapacity * 0.75 ? hashShift - 1 : hashShift;
      if (!rehash(newHashShift)) {
        return false;
      }
    }

    // Note: rehash() may have changed hashShift, so shift the full hash
    // only now to pick the bucket.
    h >>= hashShift;
    liveCount++;
    Data* e = &data[dataLength++];
    new (e) Data(std::forward<ElementInput>(element), hashTable[h]);
    hashTable[h] = e;
    return true;
  }

  /*
   * If the table contains an element matching l, remove it and set *foundp
   * to true. Otherwise set *foundp to false.
   *
   * Return true on success, false if we tried to shrink the table and hit an
   * allocation failure. Even if this returns false, *foundp is set correctly
   * and the matching element was removed. Shrinking is an optimization and
   * it's OK for it to fail.
   */
  bool remove(const Lookup& l, bool* foundp) {
    // Note: This could be optimized so that removing the last entry,
    // data[dataLength - 1], decrements dataLength. LIFO use cases would
    // benefit.

    // If a matching entry exists, empty it.
    Data* e = lookup(l, prepareHash(l));
    if (e == nullptr) {
      *foundp = false;
      return true;
    }

    *foundp = true;
    liveCount--;
    Ops::makeEmpty(&e->element);

    // Update active Ranges.
    uint32_t pos = e - data;
    forEachRange<&Range::onRemove>(pos);

    // If many entries have been removed, try to shrink the table.
    if (hashBuckets() > initialBuckets() &&
        liveCount < dataLength * minDataFill()) {
      if (!rehash(hashShift + 1)) {
        return false;
      }
    }
    return true;
  }

  /*
   * Remove all entries.
   *
   * Returns false on OOM, leaving the OrderedHashTable and any live Ranges
   * in the old state.
   *
   * The effect on live Ranges is the same as removing all entries; in
   * particular, those Ranges are still live and will see any entries added
   * after a successful clear().
   */
  MOZ_MUST_USE bool clear() {
    if (dataLength != 0) {
      Data** oldHashTable = hashTable;
      Data* oldData = data;
      uint32_t oldHashBuckets = hashBuckets();
      uint32_t oldDataLength = dataLength;
      uint32_t oldDataCapacity = dataCapacity;

      hashTable = nullptr;
      if (!init()) {
        // init() only mutates members on success; see comment above.
        hashTable = oldHashTable;
        return false;
      }

      alloc.free_(oldHashTable, oldHashBuckets);
      freeData(oldData, oldDataLength, oldDataCapacity);
      forEachRange<&Range::onClear>();
    }

    MOZ_ASSERT(hashTable);
    MOZ_ASSERT(data);
    MOZ_ASSERT(dataLength == 0);
    MOZ_ASSERT(liveCount == 0);
    return true;
  }

  /*
   * Ranges are used to iterate over OrderedHashTables.
   *
   * Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
   * Then you can walk all the key-value pairs like this:
   *
   *     for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
   *       Map::Entry& pair = r.front();
   *       ... do something with pair ...
   *     }
   *
   * Ranges remain valid for the lifetime of the OrderedHashTable, even if
   * entries are added or removed or the table is resized. Don't do anything
   * to a Range, except destroy it, after the OrderedHashTable has been
   * destroyed. (We support destroying the two objects in either order to
   * humor the GC, bless its nondeterministic heart.)
   *
   * Warning: The behavior when the current front() entry is removed from the
   * table is subtly different from js::HashTable<>::Enum::removeFront()!
   * HashTable::Enum doesn't skip any entries when you removeFront() and then
   * popFront(). OrderedHashTable::Range does! (This is useful for using a
   * Range to implement JS Map.prototype.iterator.)
   *
   * The workaround is to call popFront() as soon as possible,
   * before there's any possibility of modifying the table:
   *
   *     for (Map::Range r = map.all(); !r.empty(); ) {
   *       Key key = r.front().key;         // this won't modify map
   *       Value val = r.front().value;     // this won't modify map
   *       r.popFront();
   *       // ...do things that might modify map...
   *     }
   */
  class Range {
    friend class OrderedHashTable;

    // Cannot be a reference since we need to be able to do
    // |offsetof(Range, ht)|.
    OrderedHashTable* ht;

    /* The index of front() within ht->data. */
    uint32_t i;

    /*
     * The number of nonempty entries in ht->data to the left of front().
     * This is used when the table is resized or compacted.
     */
    uint32_t count;

    /*
     * Links in the doubly-linked list of active Ranges on ht.
     *
     * prevp points to the previous Range's .next field;
     *     or to ht->ranges if this is the first Range in the list.
     * next points to the next Range;
     *     or nullptr if this is the last Range in the list.
     *
     * Invariant: *prevp == this.
     */
    Range** prevp;
    Range* next;

    /*
     * Create a Range over all the entries in ht.
     * (This is private on purpose. End users must use ht->all().)
     */
    Range(OrderedHashTable* ht, Range** listp)
        : ht(ht), i(0), count(0), prevp(listp), next(*listp) {
      *prevp = this;
      if (next) {
        next->prevp = &next;
      }
      seek();
    }

   public:
    // Copying a Range links the copy into ht->ranges (the malloc list).
    Range(const Range& other)
        : ht(other.ht),
          i(other.i),
          count(other.count),
          prevp(&ht->ranges),
          next(ht->ranges) {
      *prevp = this;
      if (next) {
        next->prevp = &next;
      }
    }

    // Unlink this Range from the table's list of live Ranges.
    ~Range() {
      *prevp = next;
      if (next) {
        next->prevp = prevp;
      }
    }

   private:
    // Prohibit copy assignment.
    Range& operator=(const Range& other) = delete;

    // Advance i past any empty (removed) entries, up to ht->dataLength.
    void seek() {
      while (i < ht->dataLength &&
             Ops::isEmpty(Ops::getKey(ht->data[i].element))) {
        i++;
      }
    }

    /*
     * The hash table calls this when an entry is removed.
     * j is the index of the removed entry.
     */
    void onRemove(uint32_t j) {
      MOZ_ASSERT(valid());
      if (j < i) {
        count--;
      }
      if (j == i) {
        seek();
      }
    }

    /*
     * The hash table calls this when the table is resized or compacted.
     * Since |count| is the number of nonempty entries to the left of
     * front(), discarding the empty entries will not affect count, and it
     * will make i and count equal.
     */
    void onCompact() {
      MOZ_ASSERT(valid());
      i = count;
    }

    /* The hash table calls this when cleared. */
    void onClear() {
      MOZ_ASSERT(valid());
      i = count = 0;
    }

    // onTableDestroyed() marks a Range invalid by making |next == this|;
    // no other operation produces that state.
    bool valid() const { return next != this; }

    void onTableDestroyed() {
      MOZ_ASSERT(valid());
      prevp = &next;
      next = this;
    }

   public:
    bool empty() const {
      MOZ_ASSERT(valid());
      return i >= ht->dataLength;
    }

    /*
     * Return the first element in the range. This must not be called if
     * this->empty().
     *
     * Warning: Removing an entry from the table also removes it from any
     * live Ranges, and a Range can become empty that way, rendering
     * front() invalid. If in doubt, check empty() before calling front().
     */
    T& front() {
      MOZ_ASSERT(valid());
      MOZ_ASSERT(!empty());
      return ht->data[i].element;
    }

    /*
     * Remove the first element from this range.
     * This must not be called if this->empty().
     *
     * Warning: Removing an entry from the table also removes it from any
     * live Ranges, and a Range can become empty that way, rendering
     * popFront() invalid. If in doubt, check empty() before calling
     * popFront().
     */
    void popFront() {
      MOZ_ASSERT(valid());
      MOZ_ASSERT(!empty());
      MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht->data[i].element)));
      count++;
      i++;
      seek();
    }

    /*
     * Change the key of the front entry.
     *
     * This calls Ops::hash on both the current key and the new key.
     * Ops::hash on the current key must return the same hash code as
     * when the entry was added to the table.
     */
    void rekeyFront(const Key& k) {
      MOZ_ASSERT(valid());
      Data& entry = ht->data[i];
      HashNumber oldHash =
          ht->prepareHash(Ops::getKey(entry.element)) >> ht->hashShift;
      HashNumber newHash = ht->prepareHash(k) >> ht->hashShift;
      Ops::setKey(entry.element, k);
      if (newHash != oldHash) {
        // Remove this entry from its old hash chain. (If this crashes
        // reading nullptr, it would mean we did not find this entry on
        // the hash chain where we expected it. That probably means the
        // key's hash code changed since it was inserted, breaking the
        // hash code invariant.)
        Data** ep = &ht->hashTable[oldHash];
        while (*ep != &entry) {
          ep = &(*ep)->chain;
        }
        *ep = entry.chain;

        // Add it to the new hash chain. We could just insert it at the
        // beginning of the chain. Instead, we do a bit of work to
        // preserve the invariant that hash chains always go in reverse
        // insertion order (descending memory order). No code currently
        // depends on this invariant, so it's fine to kill it if
        // needed.
        ep = &ht->hashTable[newHash];
        while (*ep && *ep > &entry) {
          ep = &(*ep)->chain;
        }
        entry.chain = *ep;
        *ep = &entry;
      }
    }

    static size_t offsetOfHashTable() { return offsetof(Range, ht); }
    static size_t offsetOfI() { return offsetof(Range, i); }
    static size_t offsetOfCount() { return offsetof(Range, count); }
    static size_t offsetOfPrevP() { return offsetof(Range, prevp); }
    static size_t offsetOfNext() { return offsetof(Range, next); }

    // Static trampolines matching forEachRange's function-pointer signature,
    // void (*)(Range*, uint32_t).
    static void onTableDestroyed(Range* range, uint32_t arg) {
      range->onTableDestroyed();
    }
    static void onRemove(Range* range, uint32_t arg) { range->onRemove(arg); }
    static void onClear(Range* range, uint32_t arg) { range->onClear(); }
    static void onCompact(Range* range, uint32_t arg) { range->onCompact(); }
  };

  Range all() { return Range(this, &ranges); }

  /*
   * Allocate a new Range, possibly in nursery memory. The buffer must be
   * large enough to hold a Range object.
   *
   * All nursery-allocated ranges can be freed in one go by calling
   * destroyNurseryRanges().
   */
  Range* createRange(void* buffer, bool inNursery) {
    auto range = static_cast<Range*>(buffer);
    new (range) Range(this, inNursery ? &nurseryRanges : &ranges);
    return range;
  }

  void destroyNurseryRanges() { nurseryRanges = nullptr; }

  /*
   * Change the value of the given key.
   *
   * This calls Ops::hash on both the current key and the new key.
   * Ops::hash on the current key must return the same hash code as
   * when the entry was added to the table.
   */
  void rekeyOneEntry(const Key& current, const Key& newKey, const T& element) {
    if (current == newKey) {
      return;
    }

    Data* entry = lookup(current, prepareHash(current));
    if (!entry) {
      return;
    }

    HashNumber oldHash = prepareHash(current) >> hashShift;
    HashNumber newHash = prepareHash(newKey) >> hashShift;

    entry->element = element;

    // Remove this entry from its old hash chain. (If this crashes
    // reading nullptr, it would mean we did not find this entry on
    // the hash chain where we expected it. That probably means the
    // key's hash code changed since it was inserted, breaking the
    // hash code invariant.)
    Data** ep = &hashTable[oldHash];
    while (*ep != entry) {
      ep = &(*ep)->chain;
    }
    *ep = entry->chain;

    // Add it to the new hash chain. We could just insert it at the
    // beginning of the chain. Instead, we do a bit of work to
    // preserve the invariant that hash chains always go in reverse
    // insertion order (descending memory order). No code currently
    // depends on this invariant, so it's fine to kill it if
    // needed.
    ep = &hashTable[newHash];
    while (*ep && *ep > entry) {
      ep = &(*ep)->chain;
    }
    entry->chain = *ep;
    *ep = entry;
  }

  static size_t offsetOfDataLength() {
    return offsetof(OrderedHashTable, dataLength);
  }
  static size_t offsetOfData() { return offsetof(OrderedHashTable, data); }
  static constexpr size_t offsetOfDataElement() {
    static_assert(offsetof(Data, element) == 0,
                  "RangeFront and RangePopFront depend on offsetof(Data, "
                  "element) being 0");
    return offsetof(Data, element);
  }
  static constexpr size_t sizeofData() { return sizeof(Data); }

 private:
  /* Logarithm base 2 of the number of buckets in the hash table initially. */
  static uint32_t initialBucketsLog2() { return 1; }
  static uint32_t initialBuckets() { return 1 << initialBucketsLog2(); }

  /*
   * The maximum load factor (mean number of entries per bucket).
   * It is an invariant that
   *     dataCapacity == floor(hashBuckets() * fillFactor()).
   *
   * The fill factor should be between 2 and 4, and it should be chosen so that
   * the fill factor times sizeof(Data) is close to but <= a power of 2.
   * This fixed fill factor was chosen to make the size of the data
   * array, in bytes, close to a power of two when sizeof(T) is 16.
   */
  static double fillFactor() { return 8.0 / 3.0; }

  /*
   * The minimum permitted value of (liveCount / dataLength).
   * If that ratio drops below this value, we shrink the table.
   */
  static double minDataFill() { return 0.25; }

 public:
  HashNumber prepareHash(const Lookup& l) const {
    return mozilla::ScrambleHashCode(Ops::hash(l, hcs));
  }

 private:
  /* The size of hashTable, in elements. Always a power of two. */
  uint32_t hashBuckets() const {
    return 1 << (js::kHashNumberBits - hashShift);
  }

  // Destroy data[0:length], in reverse order of construction.
  static void destroyData(Data* data, uint32_t length) {
    for (Data* p = data + length; p != data;) {
      (--p)->~Data();
    }
  }

  void freeData(Data* data, uint32_t length, uint32_t capacity) {
    destroyData(data, length);
    alloc.free_(data, capacity);
  }

  // Return the entry matching |l|, or nullptr. |h| must be prepareHash(l).
  Data* lookup(const Lookup& l, HashNumber h) {
    for (Data* e = hashTable[h >> hashShift]; e; e = e->chain) {
      if (Ops::match(Ops::getKey(e->element), l)) {
        return e;
      }
    }
    return nullptr;
  }

  const Data* lookup(const Lookup& l) const {
    return const_cast<OrderedHashTable*>(this)->lookup(l, prepareHash(l));
  }

  /* This is called after rehashing the table. */
  void compacted() {
    // If we had any empty entries, compacting may have moved live entries
    // to the left within |data|. Notify all live Ranges of the change.
    forEachRange<&Range::onCompact>();
  }

  /* Compact the entries in |data| and rehash them. */
  void rehashInPlace() {
    for (uint32_t i = 0, N = hashBuckets(); i < N; i++) {
      hashTable[i] = nullptr;
    }
    Data* wp = data;
    Data* end = data + dataLength;
    for (Data* rp = data; rp != end; rp++) {
      if (!Ops::isEmpty(Ops::getKey(rp->element))) {
        HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
        if (rp != wp) {
          wp->element = std::move(rp->element);
        }
        wp->chain = hashTable[h];
        hashTable[h] = wp;
        wp++;
      }
    }
    MOZ_ASSERT(wp == data + liveCount);

    // Destroy the tail of moved-from / empty entries.
    while (wp != end) {
      (--end)->~Data();
    }
    dataLength = liveCount;
    compacted();
  }

  /*
   * Grow, shrink, or compact both |hashTable| and |data|.
   *
   * On success, this returns true, dataLength == liveCount, and there are no
   * empty elements in data[0:dataLength]. On allocation failure, this
   * leaves everything as it was and returns false.
   */
  MOZ_MUST_USE bool rehash(uint32_t newHashShift) {
    // If the size of the table is not changing, rehash in place to avoid
    // allocating memory.
    if (newHashShift == hashShift) {
      rehashInPlace();
      return true;
    }

    size_t newHashBuckets = size_t(1) << (js::kHashNumberBits - newHashShift);
    Data** newHashTable = alloc.template pod_malloc<Data*>(newHashBuckets);
    if (!newHashTable) {
      return false;
    }
    for (uint32_t i = 0; i < newHashBuckets; i++) {
      newHashTable[i] = nullptr;
    }

    uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
    Data* newData = alloc.template pod_malloc<Data>(newCapacity);
    if (!newData) {
      alloc.free_(newHashTable, newHashBuckets);
      return false;
    }

    // Move live entries into the new data vector, compacting as we go.
    Data* wp = newData;
    Data* end = data + dataLength;
    for (Data* p = data; p != end; p++) {
      if (!Ops::isEmpty(Ops::getKey(p->element))) {
        HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
        new (wp) Data(std::move(p->element), newHashTable[h]);
        newHashTable[h] = wp;
        wp++;
      }
    }
    MOZ_ASSERT(wp == newData + liveCount);

    alloc.free_(hashTable, hashBuckets());
    freeData(data, dataLength, dataCapacity);

    hashTable = newHashTable;
    data = newData;
    dataLength = liveCount;
    dataCapacity = newCapacity;
    hashShift = newHashShift;
    MOZ_ASSERT(hashBuckets() == newHashBuckets);

    compacted();
    return true;
  }

  // Not copyable.
  OrderedHashTable& operator=(const OrderedHashTable&) = delete;
  OrderedHashTable(const OrderedHashTable&) = delete;
};
749
750
} // namespace detail
751
752
template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
753
class OrderedHashMap {
754
public:
755
class Entry {
756
template <class, class, class>
757
friend class detail::OrderedHashTable;
758
void operator=(const Entry& rhs) {
759
const_cast<Key&>(key) = rhs.key;
760
value = rhs.value;
761
}
762
763
void operator=(Entry&& rhs) {
764
MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
765
const_cast<Key&>(key) = std::move(rhs.key);
766
value = std::move(rhs.value);
767
}
768
769
public:
770
Entry() : key(), value() {}
771
template <typename V>
772
Entry(const Key& k, V&& v) : key(k), value(std::forward<V>(v)) {}
773
Entry(Entry&& rhs) : key(std::move(rhs.key)), value(std::move(rhs.value)) {}
774
775
const Key key;
776
Value value;
777
778
static size_t offsetOfKey() { return offsetof(Entry, key); }
779
static size_t offsetOfValue() { return offsetof(Entry, value); }
780
};
781
782
private:
783
struct MapOps : OrderedHashPolicy {
784
using KeyType = Key;
785
static void makeEmpty(Entry* e) {
786
OrderedHashPolicy::makeEmpty(const_cast<Key*>(&e->key));
787
788
// Clear the value. Destroying it is another possibility, but that
789
// would complicate class Entry considerably.
790
e->value = Value();
791
}
792
static const Key& getKey(const Entry& e) { return e.key; }
793
static void setKey(Entry& e, const Key& k) { const_cast<Key&>(e.key) = k; }
794
};
795
796
typedef detail::OrderedHashTable<Entry, MapOps, AllocPolicy> Impl;
797
Impl impl;
798
799
public:
800
using Range = typename Impl::Range;
801
802
OrderedHashMap(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
803
: impl(std::move(ap), hcs) {}
804
MOZ_MUST_USE bool init() { return impl.init(); }
805
uint32_t count() const { return impl.count(); }
806
bool has(const Key& key) const { return impl.has(key); }
807
Range all() { return impl.all(); }
808
const Entry* get(const Key& key) const { return impl.get(key); }
809
Entry* get(const Key& key) { return impl.get(key); }
810
bool remove(const Key& key, bool* foundp) { return impl.remove(key, foundp); }
811
MOZ_MUST_USE bool clear() { return impl.clear(); }
812
813
template <typename V>
814
MOZ_MUST_USE bool put(const Key& key, V&& value) {
815
return impl.put(Entry(key, std::forward<V>(value)));
816
}
817
818
HashNumber hash(const Key& key) const { return impl.prepareHash(key); }
819
820
void rekeyOneEntry(const Key& current, const Key& newKey) {
821
const Entry* e = get(current);
822
if (!e) {
823
return;
824
}
825
return impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
826
}
827
828
Range* createRange(void* buffer, bool inNursery) {
829
return impl.createRange(buffer, inNursery);
830
}
831
832
void destroyNurseryRanges() { impl.destroyNurseryRanges(); }
833
834
static size_t offsetOfEntryKey() { return Entry::offsetOfKey(); }
835
static size_t offsetOfImplDataLength() { return Impl::offsetOfDataLength(); }
836
static size_t offsetOfImplData() { return Impl::offsetOfData(); }
837
static constexpr size_t offsetOfImplDataElement() {
838
return Impl::offsetOfDataElement();
839
}
840
static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }
841
};
842
843
template <class T, class OrderedHashPolicy, class AllocPolicy>
844
class OrderedHashSet {
845
private:
846
struct SetOps : OrderedHashPolicy {
847
using KeyType = const T;
848
static const T& getKey(const T& v) { return v; }
849
static void setKey(const T& e, const T& v) { const_cast<T&>(e) = v; }
850
};
851
852
typedef detail::OrderedHashTable<T, SetOps, AllocPolicy> Impl;
853
Impl impl;
854
855
public:
856
using Range = typename Impl::Range;
857
858
explicit OrderedHashSet(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
859
: impl(std::move(ap), hcs) {}
860
MOZ_MUST_USE bool init() { return impl.init(); }
861
uint32_t count() const { return impl.count(); }
862
bool has(const T& value) const { return impl.has(value); }
863
Range all() { return impl.all(); }
864
MOZ_MUST_USE bool put(const T& value) { return impl.put(value); }
865
bool remove(const T& value, bool* foundp) {
866
return impl.remove(value, foundp);
867
}
868
MOZ_MUST_USE bool clear() { return impl.clear(); }
869
870
HashNumber hash(const T& value) const { return impl.prepareHash(value); }
871
872
void rekeyOneEntry(const T& current, const T& newKey) {
873
return impl.rekeyOneEntry(current, newKey, newKey);
874
}
875
876
Range* createRange(void* buffer, bool inNursery) {
877
return impl.createRange(buffer, inNursery);
878
}
879
880
void destroyNurseryRanges() { impl.destroyNurseryRanges(); }
881
882
static size_t offsetOfEntryKey() { return 0; }
883
static size_t offsetOfImplDataLength() { return Impl::offsetOfDataLength(); }
884
static size_t offsetOfImplData() { return Impl::offsetOfData(); }
885
static constexpr size_t offsetOfImplDataElement() {
886
return Impl::offsetOfDataElement();
887
}
888
static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }
889
};
890
891
} // namespace js
892
893
#endif /* ds_OrderedHashTable_h */