/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_OrderedHashTable_h
#define ds_OrderedHashTable_h

/*
 * Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
 * They are like js::HashMap and js::HashSet except that:
 *
 *   - Iterating over an Ordered hash table visits the entries in the order in
 *     which they were inserted. This means that unlike a HashMap, the behavior
 *     of an OrderedHashMap is deterministic (as long as the HashPolicy methods
 *     are effect-free and consistent); the hashing is a pure performance
 *     optimization.
 *
 *   - Range objects over Ordered tables remain valid even when entries are
 *     added or removed or the table is resized. (However in the case of
 *     removing entries, note the warning on class Range below.)
 *
 *   - The API is a little different, so it's not a drop-in replacement.
 *     In particular, the hash policy is a little different.
 *     Also, the Ordered templates lack the Ptr and AddPtr types.
 *
 * Hash policies
 *
 * See the comment about "Hash policy" in HashTable.h for general features that
 * hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
 * differ in that the hash() method takes an extra argument:
 *     static js::HashNumber hash(Lookup, const HashCodeScrambler&);
 * They must additionally provide a distinguished "empty" key value and the
 * following static member functions:
 *     bool isEmpty(const Key&);
 *     void makeEmpty(Key*);
 */
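
/*
 * Purely illustrative sketch (not part of this header): a minimal policy
 * satisfying the requirements above, for uint32_t keys, reserving UINT32_MAX
 * as the distinguished "empty" value. The name Uint32OrderedHashPolicy is
 * hypothetical; real policies live with the code that instantiates these
 * templates.
 *
 *   struct Uint32OrderedHashPolicy {
 *     using Lookup = uint32_t;
 *     static js::HashNumber hash(const Lookup& l,
 *                                const mozilla::HashCodeScrambler& hcs) {
 *       return hcs.scramble(l);
 *     }
 *     static bool match(const uint32_t& k, const Lookup& l) { return k == l; }
 *     static bool isEmpty(const uint32_t& k) { return k == UINT32_MAX; }
 *     static void makeEmpty(uint32_t* k) { *k = UINT32_MAX; }
 *   };
 */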

#include "mozilla/HashFunctions.h"
#include "mozilla/Move.h"

#include "js/HashTable.h"

namespace js {

namespace detail {

/*
 * detail::OrderedHashTable is the underlying data structure used to implement
 * both OrderedHashMap and OrderedHashSet. Programs should use one of those two
 * templates rather than OrderedHashTable.
 */
template <class T, class Ops, class AllocPolicy>
class OrderedHashTable {
 public:
  typedef typename Ops::KeyType Key;
  typedef typename Ops::Lookup Lookup;

  struct Data {
    T element;
    Data* chain;

    Data(const T& e, Data* c) : element(e), chain(c) {}
    Data(T&& e, Data* c) : element(std::move(e)), chain(c) {}
  };

  class Range;
  friend class Range;

 private:
  Data** hashTable;       // hash table (has hashBuckets() elements)
  Data* data;             // data vector, an array of Data objects
                          // data[0:dataLength] are constructed
  uint32_t dataLength;    // number of constructed elements in data
  uint32_t dataCapacity;  // size of data, in elements
  uint32_t liveCount;     // dataLength less empty (removed) entries
  uint32_t hashShift;     // multiplicative hash shift
  Range* ranges;  // list of all live Ranges on this table in malloc memory
  Range*
      nurseryRanges;  // list of all live Ranges on this table in the GC nursery
  AllocPolicy alloc;
  mozilla::HashCodeScrambler hcs;  // don't reveal pointer hash codes

  // TODO: This should be templated on a functor type and receive lambda
  // arguments but this causes problems for the hazard analysis builds. See
  // bug 1398213.
  template <void (*f)(Range* range, uint32_t arg)>
  void forEachRange(uint32_t arg = 0) {
    Range* next;
    for (Range* r = ranges; r; r = next) {
      next = r->next;
      f(r, arg);
    }
    for (Range* r = nurseryRanges; r; r = next) {
      next = r->next;
      f(r, arg);
    }
  }

 public:
  OrderedHashTable(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
      : hashTable(nullptr),
        data(nullptr),
        dataLength(0),
        dataCapacity(0),
        liveCount(0),
        hashShift(0),
        ranges(nullptr),
        nurseryRanges(nullptr),
        alloc(std::move(ap)),
        hcs(hcs) {}

  MOZ_MUST_USE bool init() {
    MOZ_ASSERT(!hashTable, "init must be called at most once");

    uint32_t buckets = initialBuckets();
    Data** tableAlloc = alloc.template pod_malloc<Data*>(buckets);
    if (!tableAlloc) {
      return false;
    }
    for (uint32_t i = 0; i < buckets; i++) {
      tableAlloc[i] = nullptr;
    }

    uint32_t capacity = uint32_t(buckets * fillFactor());
    Data* dataAlloc = alloc.template pod_malloc<Data>(capacity);
    if (!dataAlloc) {
      alloc.free_(tableAlloc, buckets);
      return false;
    }

    // clear() requires that members are assigned only after all allocation
    // has succeeded, and that this->ranges is left untouched.
    hashTable = tableAlloc;
    data = dataAlloc;
    dataLength = 0;
    dataCapacity = capacity;
    liveCount = 0;
    hashShift = js::kHashNumberBits - initialBucketsLog2();
    MOZ_ASSERT(hashBuckets() == buckets);
    return true;
  }

  ~OrderedHashTable() {
    forEachRange<Range::onTableDestroyed>();
    if (hashTable) {
      // |hashBuckets()| isn't valid when |hashTable| hasn't been created.
      alloc.free_(hashTable, hashBuckets());
    }
    freeData(data, dataLength, dataCapacity);
  }

  /* Return the number of elements in the table. */
  uint32_t count() const { return liveCount; }

  /* True if any element matches l. */
  bool has(const Lookup& l) const { return lookup(l) != nullptr; }

  /* Return a pointer to the element, if any, that matches l, or nullptr. */
  T* get(const Lookup& l) {
    Data* e = lookup(l, prepareHash(l));
    return e ? &e->element : nullptr;
  }

  /* Return a pointer to the element, if any, that matches l, or nullptr. */
  const T* get(const Lookup& l) const {
    return const_cast<OrderedHashTable*>(this)->get(l);
  }

  /*
   * If the table already contains an entry that matches |element|,
   * replace that entry with |element|. Otherwise add a new entry.
   *
   * On success, return true, whether there was already a matching element or
   * not. On allocation failure, return false. If this returns false, it
   * means the element was not added to the table.
   */
  template <typename ElementInput>
  MOZ_MUST_USE bool put(ElementInput&& element) {
    HashNumber h = prepareHash(Ops::getKey(element));
    if (Data* e = lookup(Ops::getKey(element), h)) {
      e->element = std::forward<ElementInput>(element);
      return true;
    }

    if (dataLength == dataCapacity) {
      // If the hashTable is more than 1/4 deleted data, simply rehash in
      // place to free up some space. Otherwise, grow the table.
      uint32_t newHashShift =
          liveCount >= dataCapacity * 0.75 ? hashShift - 1 : hashShift;
      if (!rehash(newHashShift)) {
        return false;
      }
    }

    h >>= hashShift;
    liveCount++;
    Data* e = &data[dataLength++];
    new (e) Data(std::forward<ElementInput>(element), hashTable[h]);
    hashTable[h] = e;
    return true;
  }
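
  // Illustrative arithmetic for the growth step in put() above: buckets are
  // 1 << (kHashNumberBits - hashShift), so decrementing hashShift doubles
  // hashBuckets(), and with it dataCapacity (capacity = buckets * 8/3; see
  // fillFactor() below). For example, growing from the initial 2 buckets
  // (capacity 5) yields 4 buckets (capacity 10).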

  /*
   * If the table contains an element matching l, remove it and set *foundp
   * to true. Otherwise set *foundp to false.
   *
   * Return true on success, false if we tried to shrink the table and hit an
   * allocation failure. Even if this returns false, *foundp is set correctly
   * and the matching element was removed. Shrinking is an optimization and
   * it's OK for it to fail.
   */
  bool remove(const Lookup& l, bool* foundp) {
    // Note: This could be optimized so that removing the last entry,
    // data[dataLength - 1], decrements dataLength. LIFO use cases would
    // benefit.

    // If a matching entry exists, empty it.
    Data* e = lookup(l, prepareHash(l));
    if (e == nullptr) {
      *foundp = false;
      return true;
    }

    *foundp = true;
    liveCount--;
    Ops::makeEmpty(&e->element);

    // Update active Ranges.
    uint32_t pos = e - data;
    forEachRange<&Range::onRemove>(pos);

    // If many entries have been removed, try to shrink the table.
    if (hashBuckets() > initialBuckets() &&
        liveCount < dataLength * minDataFill()) {
      if (!rehash(hashShift + 1)) {
        return false;
      }
    }
    return true;
  }
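
  // Illustrative note on the contract above: a false return from remove()
  // reports only a failed shrink, never a failed removal.
  //
  //   bool found;
  //   if (!table.remove(key, &found)) {
  //     // OOM while shrinking; if |found| is true, the entry was removed.
  //   }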

  /*
   * Remove all entries.
   *
   * Returns false on OOM, leaving the OrderedHashTable and any live Ranges
   * in the old state.
   *
   * The effect on live Ranges is the same as removing all entries; in
   * particular, those Ranges are still live and will see any entries added
   * after a successful clear().
   */
  MOZ_MUST_USE bool clear() {
    if (dataLength != 0) {
      Data** oldHashTable = hashTable;
      Data* oldData = data;
      uint32_t oldHashBuckets = hashBuckets();
      uint32_t oldDataLength = dataLength;
      uint32_t oldDataCapacity = dataCapacity;

      hashTable = nullptr;
      if (!init()) {
        // init() only mutates members on success; see comment above.
        hashTable = oldHashTable;
        return false;
      }

      alloc.free_(oldHashTable, oldHashBuckets);
      freeData(oldData, oldDataLength, oldDataCapacity);
      forEachRange<&Range::onClear>();
    }

    MOZ_ASSERT(hashTable);
    MOZ_ASSERT(data);
    MOZ_ASSERT(dataLength == 0);
    MOZ_ASSERT(liveCount == 0);
    return true;
  }

  /*
   * Ranges are used to iterate over OrderedHashTables.
   *
   * Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
   * Then you can walk all the key-value pairs like this:
   *
   *     for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
   *       Map::Entry& pair = r.front();
   *       ... do something with pair ...
   *     }
   *
   * Ranges remain valid for the lifetime of the OrderedHashTable, even if
   * entries are added or removed or the table is resized. Don't do anything
   * to a Range, except destroy it, after the OrderedHashTable has been
   * destroyed. (We support destroying the two objects in either order to
   * humor the GC, bless its nondeterministic heart.)
   *
   * Warning: The behavior when the current front() entry is removed from the
   * table is subtly different from js::HashTable<>::Enum::removeFront()!
   * HashTable::Enum doesn't skip any entries when you removeFront() and then
   * popFront(). OrderedHashTable::Range does! (This is useful for using a
   * Range to implement JS Map.prototype.iterator.)
   *
   * The workaround is to call popFront() as soon as possible,
   * before there's any possibility of modifying the table:
   *
   *     for (Map::Range r = map.all(); !r.empty(); ) {
   *       Key key = r.front().key;      // this won't modify map
   *       Value val = r.front().value;  // this won't modify map
   *       r.popFront();
   *       // ...do things that might modify map...
   *     }
   */
  class Range {
    friend class OrderedHashTable;

    // Cannot be a reference since we need to be able to do
    // |offsetof(Range, ht)|.
    OrderedHashTable* ht;

    /* The index of front() within ht->data. */
    uint32_t i;

    /*
     * The number of nonempty entries in ht->data to the left of front().
     * This is used when the table is resized or compacted.
     */
    uint32_t count;

    /*
     * Links in the doubly-linked list of active Ranges on ht.
     *
     * prevp points to the previous Range's .next field;
     *   or to ht->ranges if this is the first Range in the list.
     * next points to the next Range;
     *   or nullptr if this is the last Range in the list.
     *
     * Invariant: *prevp == this.
     */
    Range** prevp;
    Range* next;

    /*
     * Create a Range over all the entries in ht.
     * (This is private on purpose. End users must use ht->all().)
     */
    Range(OrderedHashTable* ht, Range** listp)
        : ht(ht), i(0), count(0), prevp(listp), next(*listp) {
      *prevp = this;
      if (next) {
        next->prevp = &next;
      }
      seek();
    }

   public:
    Range(const Range& other)
        : ht(other.ht),
          i(other.i),
          count(other.count),
          prevp(&ht->ranges),
          next(ht->ranges) {
      *prevp = this;
      if (next) {
        next->prevp = &next;
      }
    }

    ~Range() {
      *prevp = next;
      if (next) {
        next->prevp = prevp;
      }
    }

   private:
    // Prohibit copy assignment.
    Range& operator=(const Range& other) = delete;

    void seek() {
      while (i < ht->dataLength &&
             Ops::isEmpty(Ops::getKey(ht->data[i].element))) {
        i++;
      }
    }

    /*
     * The hash table calls this when an entry is removed.
     * j is the index of the removed entry.
     */
    void onRemove(uint32_t j) {
      MOZ_ASSERT(valid());
      if (j < i) {
        count--;
      }
      if (j == i) {
        seek();
      }
    }

    /*
     * The hash table calls this when the table is resized or compacted.
     * Since |count| is the number of nonempty entries to the left of
     * front(), discarding the empty entries will not affect count, and it
     * will make i and count equal.
     */
    void onCompact() {
      MOZ_ASSERT(valid());
      i = count;
    }

    /* The hash table calls this when cleared. */
    void onClear() {
      MOZ_ASSERT(valid());
      i = count = 0;
    }

    bool valid() const { return next != this; }

    void onTableDestroyed() {
      MOZ_ASSERT(valid());
      prevp = &next;
      next = this;
    }

   public:
    bool empty() const {
      MOZ_ASSERT(valid());
      return i >= ht->dataLength;
    }

    /*
     * Return the first element in the range. This must not be called if
     * this->empty().
     *
     * Warning: Removing an entry from the table also removes it from any
     * live Ranges, and a Range can become empty that way, rendering
     * front() invalid. If in doubt, check empty() before calling front().
     */
    T& front() {
      MOZ_ASSERT(valid());
      MOZ_ASSERT(!empty());
      return ht->data[i].element;
    }

    /*
     * Remove the first element from this range.
     * This must not be called if this->empty().
     *
     * Warning: Removing an entry from the table also removes it from any
     * live Ranges, and a Range can become empty that way, rendering
     * popFront() invalid. If in doubt, check empty() before calling
     * popFront().
     */
    void popFront() {
      MOZ_ASSERT(valid());
      MOZ_ASSERT(!empty());
      MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht->data[i].element)));
      count++;
      i++;
      seek();
    }

    /*
     * Change the key of the front entry.
     *
     * This calls Ops::hash on both the current key and the new key.
     * Ops::hash on the current key must return the same hash code as
     * when the entry was added to the table.
     */
    void rekeyFront(const Key& k) {
      MOZ_ASSERT(valid());
      Data& entry = ht->data[i];
      HashNumber oldHash =
          ht->prepareHash(Ops::getKey(entry.element)) >> ht->hashShift;
      HashNumber newHash = ht->prepareHash(k) >> ht->hashShift;
      Ops::setKey(entry.element, k);
      if (newHash != oldHash) {
        // Remove this entry from its old hash chain. (If this crashes
        // reading nullptr, it would mean we did not find this entry on
        // the hash chain where we expected it. That probably means the
        // key's hash code changed since it was inserted, breaking the
        // hash code invariant.)
        Data** ep = &ht->hashTable[oldHash];
        while (*ep != &entry) {
          ep = &(*ep)->chain;
        }
        *ep = entry.chain;

        // Add it to the new hash chain. We could just insert it at the
        // beginning of the chain. Instead, we do a bit of work to
        // preserve the invariant that hash chains always go in reverse
        // insertion order (descending memory order). No code currently
        // depends on this invariant, so it's fine to kill it if
        // needed.
        ep = &ht->hashTable[newHash];
        while (*ep && *ep > &entry) {
          ep = &(*ep)->chain;
        }
        entry.chain = *ep;
        *ep = &entry;
      }
    }

    static size_t offsetOfHashTable() { return offsetof(Range, ht); }
    static size_t offsetOfI() { return offsetof(Range, i); }
    static size_t offsetOfCount() { return offsetof(Range, count); }
    static size_t offsetOfPrevP() { return offsetof(Range, prevp); }
    static size_t offsetOfNext() { return offsetof(Range, next); }

    static void onTableDestroyed(Range* range, uint32_t arg) {
      range->onTableDestroyed();
    }
    static void onRemove(Range* range, uint32_t arg) { range->onRemove(arg); }
    static void onClear(Range* range, uint32_t arg) { range->onClear(); }
    static void onCompact(Range* range, uint32_t arg) { range->onCompact(); }
  };

  Range all() { return Range(this, &ranges); }

  /*
   * Allocate a new Range, possibly in nursery memory. The buffer must be
   * large enough to hold a Range object.
   *
   * All nursery-allocated ranges can be freed in one go by calling
   * destroyNurseryRanges().
   */
  Range* createRange(void* buffer, bool inNursery) {
    auto range = static_cast<Range*>(buffer);
    new (range) Range(this, inNursery ? &nurseryRanges : &ranges);
    return range;
  }

  void destroyNurseryRanges() { nurseryRanges = nullptr; }

  /*
   * Change the value of the given key.
   *
   * This calls Ops::hash on both the current key and the new key.
   * Ops::hash on the current key must return the same hash code as
   * when the entry was added to the table.
   */
  void rekeyOneEntry(const Key& current, const Key& newKey, const T& element) {
    if (current == newKey) {
      return;
    }

    Data* entry = lookup(current, prepareHash(current));
    if (!entry) {
      return;
    }

    HashNumber oldHash = prepareHash(current) >> hashShift;
    HashNumber newHash = prepareHash(newKey) >> hashShift;

    entry->element = element;

    // Remove this entry from its old hash chain. (If this crashes
    // reading nullptr, it would mean we did not find this entry on
    // the hash chain where we expected it. That probably means the
    // key's hash code changed since it was inserted, breaking the
    // hash code invariant.)
    Data** ep = &hashTable[oldHash];
    while (*ep != entry) {
      ep = &(*ep)->chain;
    }
    *ep = entry->chain;

    // Add it to the new hash chain. We could just insert it at the
    // beginning of the chain. Instead, we do a bit of work to
    // preserve the invariant that hash chains always go in reverse
    // insertion order (descending memory order). No code currently
    // depends on this invariant, so it's fine to kill it if
    // needed.
    ep = &hashTable[newHash];
    while (*ep && *ep > entry) {
      ep = &(*ep)->chain;
    }
    entry->chain = *ep;
    *ep = entry;
  }

  static size_t offsetOfDataLength() {
    return offsetof(OrderedHashTable, dataLength);
  }
  static size_t offsetOfData() { return offsetof(OrderedHashTable, data); }
  static constexpr size_t offsetOfDataElement() {
    static_assert(offsetof(Data, element) == 0,
                  "RangeFront and RangePopFront depend on offsetof(Data, "
                  "element) being 0");
    return offsetof(Data, element);
  }
  static constexpr size_t sizeofData() { return sizeof(Data); }

 private:
  /* Logarithm base 2 of the number of buckets in the hash table initially. */
  static uint32_t initialBucketsLog2() { return 1; }
  static uint32_t initialBuckets() { return 1 << initialBucketsLog2(); }

  /*
   * The maximum load factor (mean number of entries per bucket).
   * It is an invariant that
   *     dataCapacity == floor(hashBuckets() * fillFactor()).
   *
   * The fill factor should be between 2 and 4, and it should be chosen so that
   * the fill factor times sizeof(Data) is close to but <= a power of 2.
   * This fixed fill factor was chosen to make the size of the data
   * array, in bytes, close to a power of two when sizeof(T) is 16.
   */
  static double fillFactor() { return 8.0 / 3.0; }
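
  // Worked example of the invariant above (illustrative): with the initial
  // 2 buckets, dataCapacity = uint32_t(2 * 8.0 / 3.0) = 5; doubling to
  // 4 buckets gives uint32_t(4 * 8.0 / 3.0) = 10. When sizeof(T) is 16,
  // sizeof(Data) is typically 24 on a 64-bit target (element plus chain
  // pointer), and 24 * 8/3 = 64, a power of two, as the comment requires.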

  /*
   * The minimum permitted value of (liveCount / dataLength).
   * If that ratio drops below this value, we shrink the table.
   */
  static double minDataFill() { return 0.25; }

 public:
  HashNumber prepareHash(const Lookup& l) const {
    return mozilla::ScrambleHashCode(Ops::hash(l, hcs));
  }

 private:
  /* The size of hashTable, in elements. Always a power of two. */
  uint32_t hashBuckets() const {
    return 1 << (js::kHashNumberBits - hashShift);
  }

  static void destroyData(Data* data, uint32_t length) {
    for (Data* p = data + length; p != data;) {
      (--p)->~Data();
    }
  }

  void freeData(Data* data, uint32_t length, uint32_t capacity) {
    destroyData(data, length);
    alloc.free_(data, capacity);
  }

  Data* lookup(const Lookup& l, HashNumber h) {
    for (Data* e = hashTable[h >> hashShift]; e; e = e->chain) {
      if (Ops::match(Ops::getKey(e->element), l)) {
        return e;
      }
    }
    return nullptr;
  }

  const Data* lookup(const Lookup& l) const {
    return const_cast<OrderedHashTable*>(this)->lookup(l, prepareHash(l));
  }

  /* This is called after rehashing the table. */
  void compacted() {
    // If we had any empty entries, compacting may have moved live entries
    // to the left within |data|. Notify all live Ranges of the change.
    forEachRange<&Range::onCompact>();
  }

  /* Compact the entries in |data| and rehash them. */
  void rehashInPlace() {
    for (uint32_t i = 0, N = hashBuckets(); i < N; i++) {
      hashTable[i] = nullptr;
    }
    Data* wp = data;
    Data* end = data + dataLength;
    for (Data* rp = data; rp != end; rp++) {
      if (!Ops::isEmpty(Ops::getKey(rp->element))) {
        HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
        if (rp != wp) {
          wp->element = std::move(rp->element);
        }
        wp->chain = hashTable[h];
        hashTable[h] = wp;
        wp++;
      }
    }
    MOZ_ASSERT(wp == data + liveCount);

    while (wp != end) {
      (--end)->~Data();
    }
    dataLength = liveCount;
    compacted();
  }

  /*
   * Grow, shrink, or compact both |hashTable| and |data|.
   *
   * On success, this returns true, dataLength == liveCount, and there are no
   * empty elements in data[0:dataLength]. On allocation failure, this
   * leaves everything as it was and returns false.
   */
  MOZ_MUST_USE bool rehash(uint32_t newHashShift) {
    // If the size of the table is not changing, rehash in place to avoid
    // allocating memory.
    if (newHashShift == hashShift) {
      rehashInPlace();
      return true;
    }

    size_t newHashBuckets = size_t(1) << (js::kHashNumberBits - newHashShift);
    Data** newHashTable = alloc.template pod_malloc<Data*>(newHashBuckets);
    if (!newHashTable) {
      return false;
    }
    for (uint32_t i = 0; i < newHashBuckets; i++) {
      newHashTable[i] = nullptr;
    }

    uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
    Data* newData = alloc.template pod_malloc<Data>(newCapacity);
    if (!newData) {
      alloc.free_(newHashTable, newHashBuckets);
      return false;
    }

    Data* wp = newData;
    Data* end = data + dataLength;
    for (Data* p = data; p != end; p++) {
      if (!Ops::isEmpty(Ops::getKey(p->element))) {
        HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
        new (wp) Data(std::move(p->element), newHashTable[h]);
        newHashTable[h] = wp;
        wp++;
      }
    }
    MOZ_ASSERT(wp == newData + liveCount);

    alloc.free_(hashTable, hashBuckets());
    freeData(data, dataLength, dataCapacity);

    hashTable = newHashTable;
    data = newData;
    dataLength = liveCount;
    dataCapacity = newCapacity;
    hashShift = newHashShift;
    MOZ_ASSERT(hashBuckets() == newHashBuckets);

    compacted();
    return true;
  }

  // Not copyable.
  OrderedHashTable& operator=(const OrderedHashTable&) = delete;
  OrderedHashTable(const OrderedHashTable&) = delete;
};

}  // namespace detail

template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashMap {
 public:
  class Entry {
    template <class, class, class>
    friend class detail::OrderedHashTable;
    void operator=(const Entry& rhs) {
      const_cast<Key&>(key) = rhs.key;
      value = rhs.value;
    }

    void operator=(Entry&& rhs) {
      MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
      const_cast<Key&>(key) = std::move(rhs.key);
      value = std::move(rhs.value);
    }

   public:
    Entry() : key(), value() {}
    template <typename V>
    Entry(const Key& k, V&& v) : key(k), value(std::forward<V>(v)) {}
    Entry(Entry&& rhs) : key(std::move(rhs.key)), value(std::move(rhs.value)) {}

    const Key key;
    Value value;

    static size_t offsetOfKey() { return offsetof(Entry, key); }
    static size_t offsetOfValue() { return offsetof(Entry, value); }
  };

 private:
  struct MapOps : OrderedHashPolicy {
    typedef Key KeyType;
    static void makeEmpty(Entry* e) {
      OrderedHashPolicy::makeEmpty(const_cast<Key*>(&e->key));

      // Clear the value. Destroying it is another possibility, but that
      // would complicate class Entry considerably.
      e->value = Value();
    }
    static const Key& getKey(const Entry& e) { return e.key; }
    static void setKey(Entry& e, const Key& k) { const_cast<Key&>(e.key) = k; }
  };

  typedef detail::OrderedHashTable<Entry, MapOps, AllocPolicy> Impl;
  Impl impl;

 public:
  typedef typename Impl::Range Range;

  OrderedHashMap(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
      : impl(std::move(ap), hcs) {}
  MOZ_MUST_USE bool init() { return impl.init(); }
  uint32_t count() const { return impl.count(); }
  bool has(const Key& key) const { return impl.has(key); }
  Range all() { return impl.all(); }
  const Entry* get(const Key& key) const { return impl.get(key); }
  Entry* get(const Key& key) { return impl.get(key); }
  bool remove(const Key& key, bool* foundp) { return impl.remove(key, foundp); }
  MOZ_MUST_USE bool clear() { return impl.clear(); }

  template <typename V>
  MOZ_MUST_USE bool put(const Key& key, V&& value) {
    return impl.put(Entry(key, std::forward<V>(value)));
  }

  HashNumber hash(const Key& key) const { return impl.prepareHash(key); }

  void rekeyOneEntry(const Key& current, const Key& newKey) {
    const Entry* e = get(current);
    if (!e) {
      return;
    }
    return impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
  }

  Range* createRange(void* buffer, bool inNursery) {
    return impl.createRange(buffer, inNursery);
  }

  void destroyNurseryRanges() { impl.destroyNurseryRanges(); }

  static size_t offsetOfEntryKey() { return Entry::offsetOfKey(); }
  static size_t offsetOfImplDataLength() { return Impl::offsetOfDataLength(); }
  static size_t offsetOfImplData() { return Impl::offsetOfData(); }
  static constexpr size_t offsetOfImplDataElement() {
    return Impl::offsetOfDataElement();
  }
  static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }
};
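
/*
 * Usage sketch (illustrative only; Uint32OrderedHashPolicy is the
 * hypothetical policy sketched near the top of this file, and the alloc
 * policy, |alloc|, and |hcs| arguments are placeholders, not part of this
 * header):
 *
 *   using MyMap = OrderedHashMap<uint32_t, int, Uint32OrderedHashPolicy,
 *                                TempAllocPolicy>;
 *   MyMap map(alloc, hcs);
 *   if (!map.init() || !map.put(1, 10) || !map.put(2, 20)) {
 *     return false;  // OOM
 *   }
 *   for (MyMap::Range r = map.all(); !r.empty(); r.popFront()) {
 *     MyMap::Entry& e = r.front();  // e.key, e.value, in insertion order
 *   }
 */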

template <class T, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashSet {
 private:
  struct SetOps : OrderedHashPolicy {
    typedef const T KeyType;
    static const T& getKey(const T& v) { return v; }
    static void setKey(const T& e, const T& v) { const_cast<T&>(e) = v; }
  };

  typedef detail::OrderedHashTable<T, SetOps, AllocPolicy> Impl;
  Impl impl;

 public:
  typedef typename Impl::Range Range;

  explicit OrderedHashSet(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
      : impl(std::move(ap), hcs) {}
  MOZ_MUST_USE bool init() { return impl.init(); }
  uint32_t count() const { return impl.count(); }
  bool has(const T& value) const { return impl.has(value); }
  Range all() { return impl.all(); }
  MOZ_MUST_USE bool put(const T& value) { return impl.put(value); }
  bool remove(const T& value, bool* foundp) {
    return impl.remove(value, foundp);
  }
  MOZ_MUST_USE bool clear() { return impl.clear(); }

  HashNumber hash(const T& value) const { return impl.prepareHash(value); }

  void rekeyOneEntry(const T& current, const T& newKey) {
    return impl.rekeyOneEntry(current, newKey, newKey);
  }

  Range* createRange(void* buffer, bool inNursery) {
    return impl.createRange(buffer, inNursery);
  }

  void destroyNurseryRanges() { impl.destroyNurseryRanges(); }

  static size_t offsetOfEntryKey() { return 0; }
  static size_t offsetOfImplDataLength() { return Impl::offsetOfDataLength(); }
  static size_t offsetOfImplData() { return Impl::offsetOfData(); }
  static constexpr size_t offsetOfImplDataElement() {
    return Impl::offsetOfDataElement();
  }
  static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }
};
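
/*
 * Usage sketch for the set (illustrative only; the policy and alloc types are
 * the same placeholders as in the map example above):
 *
 *   using MySet = OrderedHashSet<uint32_t, Uint32OrderedHashPolicy,
 *                                TempAllocPolicy>;
 *   MySet set(alloc, hcs);
 *   if (!set.init() || !set.put(42)) {
 *     return false;  // OOM
 *   }
 *   bool found;
 *   if (!set.remove(42, &found)) {
 *     // Shrinking failed (OOM), but the entry was still removed.
 *   }
 *   MOZ_ASSERT(found && set.count() == 0);
 */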

}  // namespace js

#endif /* ds_OrderedHashTable_h */