/*
 * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "ExceptionHelpers.h"
#include "JSObject.h"

namespace JSC {

JS_EXPORT_PRIVATE const ClassInfo* getHashMapBucketKeyClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapBucketKeyValueClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapImplKeyClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapImplKeyValueClassInfo();

enum class HashTableType {
    Key,
    KeyValue
};

struct HashMapBucketDataKey {
    static const HashTableType Type = HashTableType::Key;
    WriteBarrier<Unknown> key;
};

struct HashMapBucketDataKeyValue {
    static const HashTableType Type = HashTableType::KeyValue;
    WriteBarrier<Unknown> key;
    WriteBarrier<Unknown> value;
};

template <typename Data>
class HashMapBucket : public JSCell {
    using Base = JSCell;

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value, Structure*>::type selectStructure(VM& vm)
    {
        return vm.hashMapBucketSetStructure.get();
    }

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, Structure*>::type selectStructure(VM& vm)
    {
        return vm.hashMapBucketMapStructure.get();
    }

public:
    static const HashTableType Type = Data::Type;
    static const ClassInfo s_info; // This is never accessed directly, since that would break linkage on some compilers.

    static const ClassInfo* info()
    {
        switch (Type) {
        case HashTableType::Key:
            return getHashMapBucketKeyClassInfo();
        case HashTableType::KeyValue:
            return getHashMapBucketKeyValueClassInfo();
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
    {
        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
    }

    static HashMapBucket* create(VM& vm)
    {
        HashMapBucket* bucket = new (NotNull, allocateCell<HashMapBucket<Data>>(vm.heap)) HashMapBucket(vm, selectStructure(vm));
        bucket->finishCreation(vm);
        ASSERT(!bucket->next());
        ASSERT(!bucket->prev());
        return bucket;
    }

    static HashMapBucket* createSentinel(VM& vm)
    {
        auto* bucket = create(vm);
        bucket->setKey(vm, jsUndefined());
        bucket->setValue(vm, jsUndefined());
        ASSERT(!bucket->deleted());
        return bucket;
    }

    HashMapBucket(VM& vm, Structure* structure)
        : Base(vm, structure)
    {
    }

    ALWAYS_INLINE void setNext(VM& vm, HashMapBucket* bucket)
    {
        m_next.set(vm, this, bucket);
    }
    ALWAYS_INLINE void setPrev(VM& vm, HashMapBucket* bucket)
    {
        m_prev.set(vm, this, bucket);
    }

    ALWAYS_INLINE void setKey(VM& vm, JSValue key)
    {
        m_data.key.set(vm, this, key);
    }

    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value>::type setValue(VM& vm, JSValue value)
    {
        m_data.value.set(vm, this, value);
    }
    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value>::type setValue(VM&, JSValue) { }

    ALWAYS_INLINE JSValue key() const { return m_data.key.get(); }

    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, JSValue>::type value() const
    {
        return m_data.value.get();
    }

    static void visitChildren(JSCell*, SlotVisitor&);

    ALWAYS_INLINE HashMapBucket* next() const { return m_next.get(); }
    ALWAYS_INLINE HashMapBucket* prev() const { return m_prev.get(); }

    ALWAYS_INLINE bool deleted() const { return !key(); }
    ALWAYS_INLINE void makeDeleted(VM& vm)
    {
        setKey(vm, JSValue());
        setValue(vm, JSValue());
    }

    static ptrdiff_t offsetOfKey()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_data) + OBJECT_OFFSETOF(Data, key);
    }

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, ptrdiff_t>::type offsetOfValue()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_data) + OBJECT_OFFSETOF(Data, value);
    }

    static ptrdiff_t offsetOfNext()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_next);
    }

    template <typename T = Data>
    ALWAYS_INLINE static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, JSValue>::type extractValue(const HashMapBucket& bucket)
    {
        return bucket.value();
    }

    template <typename T = Data>
    ALWAYS_INLINE static typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value, JSValue>::type extractValue(const HashMapBucket&)
    {
        return JSValue();
    }

private:
    WriteBarrier<HashMapBucket> m_next;
    WriteBarrier<HashMapBucket> m_prev;
    Data m_data;
};

template <typename BucketType>
class HashMapBuffer {
public:
    HashMapBuffer() = delete;

    static size_t allocationSize(Checked<size_t> capacity)
    {
        return (capacity * sizeof(BucketType*)).unsafeGet();
    }
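
    // The buffer is a bare array of BucketType* slots with no header, which is
    // why buffer() below can simply reinterpret `this`. Callers keep the capacity
    // a power of two, so lookups can use `hash & (capacity - 1)` instead of a
    // modulo.
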
    ALWAYS_INLINE BucketType** buffer() const
    {
        return bitwise_cast<BucketType**>(this);
    }

    static HashMapBuffer* create(ExecState* exec, VM& vm, JSCell*, uint32_t capacity)
    {
        auto scope = DECLARE_THROW_SCOPE(vm);
        size_t allocationSize = HashMapBuffer::allocationSize(capacity);
        void* data = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(allocationSize, nullptr, AllocationFailureMode::ReturnNull);
        if (!data) {
            throwOutOfMemoryError(exec, scope);
            return nullptr;
        }

        HashMapBuffer* buffer = static_cast<HashMapBuffer*>(data);
        buffer->reset(capacity);
        return buffer;
    }

    ALWAYS_INLINE void reset(uint32_t capacity)
    {
        // Fill every slot with the all-ones empty-bucket sentinel (see
        // HashMapImpl::emptyValue()).
        memset(this, -1, allocationSize(capacity));
    }
};

ALWAYS_INLINE static bool areKeysEqual(ExecState* exec, JSValue a, JSValue b)
{
    // We want +0 and -0 to compare equal here. sameValue() itself doesn't
    // guarantee that; however, we normalize all keys before comparing and storing
    // them in the map, and the normalization converts -0.0 and 0.0 to the integer
    // representation of 0.
    return sameValue(exec, a, b);
}

// Note that normalization is inlined in the DFG's NormalizeMapKey node.
// Keep this in sync with the DFG and FTL implementations of normalization.
ALWAYS_INLINE JSValue normalizeMapKey(JSValue key)
{
    if (!key.isNumber())
        return key;

    if (key.isInt32())
        return key;

    double d = key.asDouble();
    if (std::isnan(d))
        return key;

    int i = static_cast<int>(d);
    if (i == d) {
        // When a key is -0, we convert it to positive zero.
        // When a key is the double representation of an integer, we convert it to that integer.
        return jsNumber(i);
    }

    // At this point the key is definitely not negative zero, and it's definitely not a double representation of an integer.
    return key;
}
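
// For example, the double -0.0 and the int32 0 both normalize to the int32 0,
// and the double 42.0 normalizes to the int32 42, so Map and Set treat each pair
// as the same key. (Illustrative values; the rules are exactly the branches above.)
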
// Thomas Wang's 64-bit hash, folded down to 32 bits.
static ALWAYS_INLINE uint32_t wangsInt64Hash(uint64_t key)
{
    key += ~(key << 32);
    key ^= (key >> 22);
    key += ~(key << 13);
    key ^= (key >> 8);
    key += (key << 3);
    key ^= (key >> 15);
    key += ~(key << 27);
    key ^= (key >> 31);
    return static_cast<unsigned>(key);
}
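
// jsMapHash() below feeds the raw EncodedJSValue bits into this function. That is
// only sound because keys are normalized first: after normalizeMapKey(), values
// that compare equal under sameValue() share a single bit pattern (strings are the
// exception and are hashed by their characters instead).
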
ALWAYS_INLINE uint32_t jsMapHash(ExecState* exec, VM& vm, JSValue value)
{
    ASSERT_WITH_MESSAGE(normalizeMapKey(value) == value, "We expect normalized values flowing into this function.");

    if (value.isString()) {
        auto scope = DECLARE_THROW_SCOPE(vm);
        const String& wtfString = asString(value)->value(exec);
        RETURN_IF_EXCEPTION(scope, UINT_MAX);
        return wtfString.impl()->hash();
    }

    return wangsInt64Hash(JSValue::encode(value));
}

ALWAYS_INLINE std::optional<uint32_t> concurrentJSMapHash(JSValue key)
{
    key = normalizeMapKey(key);
    if (key.isString()) {
        JSString* string = asString(key);
        if (string->length() > 10 * 1024)
            return std::nullopt;
        const StringImpl* impl = string->tryGetValueImpl();
        if (!impl)
            return std::nullopt;
        return impl->concurrentHash();
    }

    uint64_t rawValue = JSValue::encode(key);
    return wangsInt64Hash(rawValue);
}

ALWAYS_INLINE uint32_t shouldShrink(uint32_t capacity, uint32_t keyCount)
{
    return 8 * keyCount <= capacity && capacity > 4;
}

ALWAYS_INLINE uint32_t shouldRehashAfterAdd(uint32_t capacity, uint32_t keyCount, uint32_t deleteCount)
{
    return 2 * (keyCount + deleteCount) >= capacity;
}
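
// Worked example of the thresholds above: at capacity 16, the table wants a rehash
// once keyCount + deleteCount reaches 8 (a 1/2 load including tombstones), and it
// becomes eligible to shrink once 8 * keyCount <= 16, i.e. at 2 live keys.
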
ALWAYS_INLINE uint32_t nextCapacity(uint32_t capacity, uint32_t keyCount)
{
    if (shouldShrink(capacity, keyCount)) {
        ASSERT((capacity / 2) >= 4);
        return capacity / 2;
    }

    if (3 * keyCount <= capacity && capacity > 64) {
        // We stay at the same size if rehashing would cause us to be no more than
        // 1/3rd full. This comes up for programs like this:
        // Say the hash table grew to a key count of 64, causing it to grow to a capacity of 256.
        // Then, the table added 63 items. The load is now 127. Then, 63 items are deleted.
        // The load is still 127. Then, another item is added. The load is now 128, and we
        // decide that we need to rehash. The key count is 65, almost exactly what it was
        // when we grew to a capacity of 256. We don't really need to grow to a capacity
        // of 512 in this situation. Instead, we choose to rehash at the same size. This
        // will bring the load down to 65. We rehash into the same size when we determine
        // that the new load ratio will be under 1/3rd. (We also pick a minimum capacity
        // at which this rule kicks in, because otherwise we would be too sensitive to
        // rehashing at the same capacity.)
        return capacity;
    }

    return (Checked<uint32_t>(capacity) * 2).unsafeGet();
}

template <typename HashMapBucketType>
class HashMapImpl : public JSNonFinalObject {
    using Base = JSNonFinalObject;
    using HashMapBufferType = HashMapBuffer<HashMapBucketType>;

public:
    using BucketType = HashMapBucketType;

    static void visitChildren(JSCell*, SlotVisitor&);

    static size_t estimatedSize(JSCell*);

    HashMapImpl(VM& vm, Structure* structure)
        : Base(vm, structure)
        , m_keyCount(0)
        , m_deleteCount(0)
        , m_capacity(4)
    {
    }

    HashMapImpl(VM& vm, Structure* structure, uint32_t sizeHint)
        : Base(vm, structure)
        , m_keyCount(0)
        , m_deleteCount(0)
    {
        uint32_t capacity = ((Checked<uint32_t>(sizeHint) * 2) + 1).unsafeGet();
        capacity = std::max<uint32_t>(WTF::roundUpToPowerOfTwo(capacity), 4U);
        m_capacity = capacity;
    }
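
    // Example of the sizing above: a sizeHint of 24 gives 24 * 2 + 1 = 49, which
    // rounds up to an initial capacity of 64, so adding exactly sizeHint entries
    // keeps the table below the 1/2 load that would force a rehash.
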
    ALWAYS_INLINE HashMapBucketType** buffer() const
    {
        return m_buffer->buffer();
    }

    void finishCreation(ExecState* exec, VM& vm)
    {
        ASSERT_WITH_MESSAGE(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey(), "We assume this to be true in the DFG and FTL JIT.");

        auto scope = DECLARE_THROW_SCOPE(vm);
        Base::finishCreation(vm);

        makeAndSetNewBuffer(exec, vm);
        RETURN_IF_EXCEPTION(scope, void());

        setUpHeadAndTail(exec, vm);
    }

    void finishCreation(ExecState* exec, VM& vm, HashMapImpl* base)
    {
        auto scope = DECLARE_THROW_SCOPE(vm);
        Base::finishCreation(vm);

        // This size should be the same as the one you would get by cloning the map
        // via repeated add() calls.
        uint32_t capacity = ((Checked<uint32_t>(base->m_keyCount) * 2) + 1).unsafeGet();
        RELEASE_ASSERT(capacity <= (1U << 31));
        capacity = std::max<uint32_t>(WTF::roundUpToPowerOfTwo(capacity), 4U);
        m_capacity = capacity;
        makeAndSetNewBuffer(exec, vm);
        RETURN_IF_EXCEPTION(scope, void());

        setUpHeadAndTail(exec, vm);

        HashMapBucketType* bucket = base->m_head.get()->next();
        while (bucket != base->m_tail.get()) {
            if (!bucket->deleted()) {
                addNormalizedNonExistingForCloning(exec, bucket->key(), HashMapBucketType::extractValue(*bucket));
                RETURN_IF_EXCEPTION(scope, void());
            }
            bucket = bucket->next();
        }
        checkConsistency();
    }

    static HashMapBucketType* emptyValue()
    {
        return bitwise_cast<HashMapBucketType*>(static_cast<uintptr_t>(-1));
    }

    static ALWAYS_INLINE bool isEmpty(HashMapBucketType* bucket)
    {
        return bucket == emptyValue();
    }

    static HashMapBucketType* deletedValue()
    {
        return bitwise_cast<HashMapBucketType*>(static_cast<uintptr_t>(-3));
    }

    static ALWAYS_INLINE bool isDeleted(HashMapBucketType* bucket)
    {
        return bucket == deletedValue();
    }
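
    // Both sentinels above are bit patterns that can never be valid, aligned
    // JSCell pointers, so they can share the buffer slots with genuine bucket
    // pointers. emptyValue() in particular is chosen so that a single memset of
    // -1 (see HashMapBuffer::reset()) empties the whole buffer.
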
    ALWAYS_INLINE HashMapBucketType** findBucket(ExecState* exec, JSValue key)
    {
        VM& vm = exec->vm();
        auto scope = DECLARE_THROW_SCOPE(vm);
        key = normalizeMapKey(key);
        uint32_t hash = jsMapHash(exec, vm, key);
        RETURN_IF_EXCEPTION(scope, nullptr);
        return findBucket(exec, key, hash);
    }

    ALWAYS_INLINE HashMapBucketType** findBucket(ExecState* exec, JSValue key, uint32_t hash)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");
        return findBucketAlreadyHashedAndNormalized(exec, key, hash);
    }

    template <typename T = HashMapBucketType>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucket<HashMapBucketDataKeyValue>>::value, JSValue>::type get(ExecState* exec, JSValue key)
    {
        if (HashMapBucketType** bucket = findBucket(exec, key))
            return (*bucket)->value();
        return jsUndefined();
    }

    ALWAYS_INLINE bool has(ExecState* exec, JSValue key)
    {
        return !!findBucket(exec, key);
    }

    ALWAYS_INLINE void add(ExecState* exec, JSValue key, JSValue value = JSValue())
    {
        key = normalizeMapKey(key);
        addNormalizedInternal(exec, key, value, [&] (HashMapBucketType* bucket) {
            return !isDeleted(bucket) && areKeysEqual(exec, key, bucket->key());
        });
        if (shouldRehashAfterAdd())
            rehash(exec);
    }

    ALWAYS_INLINE HashMapBucketType* addNormalized(ExecState* exec, JSValue key, JSValue value, uint32_t hash)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");
        ASSERT_WITH_MESSAGE(jsMapHash(exec, exec->vm(), key) == hash, "We expect the hash value to match the key's hash.");

        auto* bucket = addNormalizedInternal(exec->vm(), key, value, hash, [&] (HashMapBucketType* bucket) {
            return !isDeleted(bucket) && areKeysEqual(exec, key, bucket->key());
        });
        if (shouldRehashAfterAdd())
            rehash(exec);
        return bucket;
    }

    ALWAYS_INLINE bool remove(ExecState* exec, JSValue key)
    {
        HashMapBucketType** bucket = findBucket(exec, key);
        if (!bucket)
            return false;

        VM& vm = exec->vm();
        HashMapBucketType* impl = *bucket;
        impl->next()->setPrev(vm, impl->prev());
        impl->prev()->setNext(vm, impl->next());
        impl->makeDeleted(vm);

        *bucket = deletedValue();

        ++m_deleteCount;
        ASSERT(m_keyCount > 0);
        --m_keyCount;

        if (shouldShrink())
            rehash(exec);

        return true;
    }

    ALWAYS_INLINE uint32_t size() const
    {
        return m_keyCount;
    }

    ALWAYS_INLINE void clear(ExecState* exec)
    {
        VM& vm = exec->vm();
        m_keyCount = 0;
        m_deleteCount = 0;
        HashMapBucketType* head = m_head.get();
        HashMapBucketType* bucket = m_head->next();
        HashMapBucketType* tail = m_tail.get();
        while (bucket != tail) {
            HashMapBucketType* next = bucket->next();
            // We restart each iterator by pointing it to the head of the list.
            bucket->setNext(vm, head);
            bucket->makeDeleted(vm);
            bucket = next;
        }
        m_head->setNext(vm, m_tail.get());
        m_tail->setPrev(vm, m_head.get());
        m_capacity = 4;
        makeAndSetNewBuffer(exec, vm);
        checkConsistency();
    }

    ALWAYS_INLINE size_t bufferSizeInBytes() const
    {
        return m_capacity * sizeof(HashMapBucketType*);
    }

    static ptrdiff_t offsetOfHead()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_head);
    }

    static ptrdiff_t offsetOfBuffer()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_buffer);
    }

    static ptrdiff_t offsetOfCapacity()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_capacity);
    }

    HashMapBucketType* head() { return m_head.get(); }
    HashMapBucketType* tail() { return m_tail.get(); }

    size_t approximateSize() const
    {
        size_t size = sizeof(HashMapImpl);
        size += bufferSizeInBytes();
        size += 2 * sizeof(HashMapBucketType); // Head and tail sentinels.
        size += m_keyCount * sizeof(HashMapBucketType); // Buckets that are on the list.
        return size;
    }

private:
    ALWAYS_INLINE uint32_t shouldRehashAfterAdd() const
    {
        return JSC::shouldRehashAfterAdd(m_capacity, m_keyCount, m_deleteCount);
    }

    ALWAYS_INLINE uint32_t shouldShrink() const
    {
        return JSC::shouldShrink(m_capacity, m_keyCount);
    }

    ALWAYS_INLINE void setUpHeadAndTail(ExecState*, VM& vm)
    {
        m_head.set(vm, this, HashMapBucketType::create(vm));
        m_tail.set(vm, this, HashMapBucketType::create(vm));

        m_head->setNext(vm, m_tail.get());
        m_tail->setPrev(vm, m_head.get());
        ASSERT(m_head->deleted());
        ASSERT(m_tail->deleted());
    }

    ALWAYS_INLINE void addNormalizedNonExistingForCloning(ExecState* exec, JSValue key, JSValue value = JSValue())
    {
        addNormalizedInternal(exec, key, value, [&] (HashMapBucketType*) {
            return false;
        });
    }

    template<typename CanUseBucket>
    ALWAYS_INLINE void addNormalizedInternal(ExecState* exec, JSValue key, JSValue value, const CanUseBucket& canUseBucket)
    {
        VM& vm = exec->vm();
        auto scope = DECLARE_THROW_SCOPE(vm);

        uint32_t hash = jsMapHash(exec, vm, key);
        RETURN_IF_EXCEPTION(scope, void());

        addNormalizedInternal(vm, key, value, hash, canUseBucket);
    }

    template<typename CanUseBucket>
    ALWAYS_INLINE HashMapBucketType* addNormalizedInternal(VM& vm, JSValue key, JSValue value, uint32_t hash, const CanUseBucket& canUseBucket)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");

        const uint32_t mask = m_capacity - 1;
        uint32_t index = hash & mask;
        HashMapBucketType** buffer = this->buffer();
        HashMapBucketType* bucket = buffer[index];
        while (!isEmpty(bucket)) {
            if (canUseBucket(bucket)) {
                bucket->setValue(vm, value);
                return bucket;
            }
            index = (index + 1) & mask;
            bucket = buffer[index];
        }

        HashMapBucketType* newEntry = m_tail.get();
        buffer[index] = newEntry;
        newEntry->setKey(vm, key);
        newEntry->setValue(vm, value);
        ASSERT(!newEntry->deleted());
        HashMapBucketType* newTail = HashMapBucketType::create(vm);
        m_tail.set(vm, this, newTail);
        newTail->setPrev(vm, newEntry);
        ASSERT(newTail->deleted());
        newEntry->setNext(vm, newTail);

        ++m_keyCount;
        return newEntry;
    }
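
    // Design note for the insertion path above: the current tail sentinel is
    // turned into the new entry in place, and a freshly created (still deleted)
    // bucket becomes the new tail. That appends the entry to the iteration list
    // without splicing pointers in the middle of the list.
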
    ALWAYS_INLINE HashMapBucketType** findBucketAlreadyHashedAndNormalized(ExecState* exec, JSValue key, uint32_t hash)
    {
        const uint32_t mask = m_capacity - 1;
        uint32_t index = hash & mask;
        HashMapBucketType** buffer = this->buffer();
        HashMapBucketType* bucket = buffer[index];

        while (!isEmpty(bucket)) {
            if (!isDeleted(bucket) && areKeysEqual(exec, key, bucket->key()))
                return buffer + index;
            index = (index + 1) & mask;
            bucket = buffer[index];
        }
        return nullptr;
    }
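
    // A deleted bucket must not stop the probe above: isEmpty() is the only exit
    // condition, so tombstones (deletedValue()) keep entries further along the
    // probe chain reachable until the next rehash compacts them away.
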
    void rehash(ExecState* exec)
    {
        VM& vm = exec->vm();
        auto scope = DECLARE_THROW_SCOPE(vm);

        uint32_t oldCapacity = m_capacity;
        m_capacity = nextCapacity(m_capacity, m_keyCount);

        if (m_capacity != oldCapacity) {
            makeAndSetNewBuffer(exec, vm);
            RETURN_IF_EXCEPTION(scope, void());
        } else {
            m_buffer->reset(m_capacity);
            assertBufferIsEmpty();
        }

        HashMapBucketType* iter = m_head->next();
        HashMapBucketType* end = m_tail.get();
        const uint32_t mask = m_capacity - 1;
        RELEASE_ASSERT(!(m_capacity & (m_capacity - 1)));
        HashMapBucketType** buffer = this->buffer();
        while (iter != end) {
            uint32_t index = jsMapHash(exec, vm, iter->key()) & mask;
            EXCEPTION_ASSERT_WITH_MESSAGE(!scope.exception(), "All keys should have been hashed already, so this should not throw because it won't resolve ropes.");
            {
                HashMapBucketType* bucket = buffer[index];
                while (!isEmpty(bucket)) {
                    index = (index + 1) & mask;
                    bucket = buffer[index];
                }
            }
            buffer[index] = iter;
            iter = iter->next();
        }

        m_deleteCount = 0;

        checkConsistency();
    }

    ALWAYS_INLINE void checkConsistency() const
    {
        if (!ASSERT_DISABLED) {
            HashMapBucketType* iter = m_head->next();
            HashMapBucketType* end = m_tail.get();
            uint32_t size = 0;
            while (iter != end) {
                ++size;
                iter = iter->next();
            }
            ASSERT(size == m_keyCount);
        }
    }

    void makeAndSetNewBuffer(ExecState* exec, VM& vm)
    {
        ASSERT(!(m_capacity & (m_capacity - 1)));

        HashMapBufferType* buffer = HashMapBufferType::create(exec, vm, this, m_capacity);
        if (UNLIKELY(!buffer))
            return;

        m_buffer.set(vm, this, buffer);
        assertBufferIsEmpty();
    }

    ALWAYS_INLINE void assertBufferIsEmpty() const
    {
        if (!ASSERT_DISABLED) {
            for (unsigned i = 0; i < m_capacity; i++)
                ASSERT(isEmpty(buffer()[i]));
        }
    }

    WriteBarrier<HashMapBucketType> m_head;
    WriteBarrier<HashMapBucketType> m_tail;
    AuxiliaryBarrier<HashMapBufferType*> m_buffer;
    uint32_t m_keyCount;
    uint32_t m_deleteCount;
    uint32_t m_capacity;
};

} // namespace JSC