/*
 * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "MarkedBlock.h"
#include "WeakSet.h"

namespace JSC {

class SlotVisitor;

// WebKit has a good malloc that already knows what to do for large allocations. The GC shouldn't
// have to think about such things. That's where LargeAllocation comes in. We will allocate large
// objects directly using malloc, and put the LargeAllocation header just before them. We can detect
// when a HeapCell* is a LargeAllocation because it will have the MarkedBlock::atomSize / 2 bit set.
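//
// A sketch of the layout this implies (illustrative, not a normative statement of the allocator's
// behavior):
//
//     <------- headerSize() ------->
//     [ LargeAllocation header, pad ][ the cell's payload, cellSize() bytes ... ]
//     ^                              ^
//     this                           cell(), whose address has the atomSize / 2 bit set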

class LargeAllocation : public PackedRawSentinelNode<LargeAllocation> {
public:
    friend class LLIntOffsetsExtractor;
    friend class IsoSubspace;

    static LargeAllocation* tryCreate(Heap&, size_t, Subspace*, unsigned indexInSpace);

    static LargeAllocation* createForLowerTier(Heap&, size_t, Subspace*, uint8_t lowerTierIndex);
    LargeAllocation* reuseForLowerTier();

    LargeAllocation* tryReallocate(size_t, Subspace*);

    static LargeAllocation* fromCell(const void* cell)
    {
        return bitwise_cast<LargeAllocation*>(bitwise_cast<char*>(cell) - headerSize());
    }

    HeapCell* cell() const
    {
        return bitwise_cast<HeapCell*>(bitwise_cast<char*>(this) + headerSize());
    }
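
    // MarkedBlock cells are always atomSize-aligned, while a LargeAllocation's cell deliberately
    // has the atomSize / 2 bit set (see headerSize() below), so that single bit distinguishes the
    // two cases.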
    static bool isLargeAllocation(HeapCell* cell)
    {
        return bitwise_cast<uintptr_t>(cell) & halfAlignment;
    }

    Subspace* subspace() const { return m_subspace; }

    void lastChanceToFinalize();

    Heap* heap() const { return m_weakSet.heap(); }
    VM& vm() const { return m_weakSet.vm(); }
    WeakSet& weakSet() { return m_weakSet; }

    unsigned indexInSpace() { return m_indexInSpace; }
    void setIndexInSpace(unsigned indexInSpace) { m_indexInSpace = indexInSpace; }

    void visitWeakSet(SlotVisitor&);

    void clearNewlyAllocated() { m_isNewlyAllocated = false; }

    bool isNewlyAllocated() const { return m_isNewlyAllocated; }
    ALWAYS_INLINE bool isMarked() { return m_isMarked.load(std::memory_order_relaxed); }
    ALWAYS_INLINE bool isMarked(HeapCell*) { return isMarked(); }
    ALWAYS_INLINE bool isMarked(HeapCell*, Dependency) { return isMarked(); }
    ALWAYS_INLINE bool isMarked(HeapVersion, HeapCell*) { return isMarked(); }
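    // There is exactly one cell per LargeAllocation, so liveness is just the mark bit plus the
    // newly-allocated flag.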
    bool isLive() { return isMarked() || isNewlyAllocated(); }

    bool hasValidCell() const { return m_hasValidCell; }

    size_t cellSize() const { return m_cellSize; }

    uint8_t lowerTierIndex() const { return m_lowerTierIndex; }
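
    // These helpers answer whether an arbitrary pointer could point into this allocation's cell,
    // allowing a little slack past the end for the 8-byte IndexingHeader (see belowUpperBound()).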
    bool aboveLowerBound(const void* rawPtr)
    {
        char* ptr = bitwise_cast<char*>(rawPtr);
        char* begin = bitwise_cast<char*>(cell());
        return ptr >= begin;
    }

    bool belowUpperBound(const void* rawPtr)
    {
        char* ptr = bitwise_cast<char*>(rawPtr);
        char* begin = bitwise_cast<char*>(cell());
        char* end = begin + cellSize();
        // We cannot #include IndexingHeader.h because reasons. The fact that IndexingHeader is 8
        // bytes is wired deep into our engine, so this isn't so bad.
        size_t sizeOfIndexingHeader = 8;
        return ptr <= end + sizeOfIndexingHeader;
    }

    bool contains(const void* rawPtr)
    {
        return aboveLowerBound(rawPtr) && belowUpperBound(rawPtr);
    }

    const CellAttributes& attributes() const { return m_attributes; }

    Dependency aboutToMark(HeapVersion) { return Dependency(); }

    ALWAYS_INLINE bool testAndSetMarked()
    {
        // This method is usually called when the object is already marked. This avoids us
        // having to CAS in that case. It's profitable to reduce the total amount of CAS
        // traffic.
        if (isMarked())
            return true;
        return m_isMarked.compareExchangeStrong(false, true);
    }
    ALWAYS_INLINE bool testAndSetMarked(HeapCell*, Dependency) { return testAndSetMarked(); }
    void clearMarked() { m_isMarked.store(false); }

    void noteMarked() { }

#if ASSERT_DISABLED
    void assertValidCell(VM&, HeapCell*) const { }
#else
    void assertValidCell(VM&, HeapCell*) const;
#endif

    void dump(PrintStream&) const;

    bool isLowerTier() const { return m_lowerTierIndex != UINT8_MAX; }

    static constexpr unsigned alignment = MarkedBlock::atomSize;
    static constexpr unsigned halfAlignment = alignment / 2;
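    // headerSize() rounds sizeof(LargeAllocation) up to a multiple of halfAlignment and then forces
    // the halfAlignment bit on. The result is an odd multiple of halfAlignment, so for an
    // alignment-aligned header, cell() ends up with the atomSize / 2 bit set, which is the bit that
    // isLargeAllocation() tests.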
    static constexpr unsigned headerSize() { return ((sizeof(LargeAllocation) + halfAlignment - 1) & ~(halfAlignment - 1)) | halfAlignment; }

private:
    LargeAllocation(Heap&, size_t, Subspace*, unsigned indexInSpace, bool adjustedAlignment);

    void* basePointer() const;

    unsigned m_indexInSpace { 0 };
    size_t m_cellSize;
    bool m_isNewlyAllocated : 1;
    bool m_hasValidCell : 1;
    bool m_adjustedAlignment : 1;
    Atomic<bool> m_isMarked;
    CellAttributes m_attributes;
    uint8_t m_lowerTierIndex { UINT8_MAX };
    Subspace* m_subspace;
    WeakSet m_weakSet;
};
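
// A note on basePointer(): when m_adjustedAlignment is set, the header was placed halfAlignment
// bytes past the start of the underlying allocation (presumably to give the cell the alignment it
// needs), so the original allocation base sits halfAlignment bytes before |this|.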
inline void* LargeAllocation::basePointer() const
{
    if (m_adjustedAlignment)
        return bitwise_cast<char*>(this) - halfAlignment;
    return bitwise_cast<void*>(this);
}

} // namespace JSC