/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef HeapInlines_h
#define HeapInlines_h

#include "Heap.h"
#include "HeapCellInlines.h"
#include "IndexingHeader.h"
#include "JSCallee.h"
#include "JSCell.h"
#include "Structure.h"
#include <type_traits>
#include <wtf/Assertions.h>
#include <wtf/MainThread.h>
#include <wtf/RandomNumber.h>

namespace JSC {

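// Collection is triggered off the number of bytes allocated in the current collection cycle,
// but only when the heap is not deferred, is safe to collect, and has no operation in flight.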
inline bool Heap::shouldCollect()
{
    if (isDeferred())
        return false;
    if (!m_isSafeToCollect)
        return false;
    if (m_operationInProgress != NoOperation)
        return false;
    if (Options::gcMaxHeapSize())
        return m_bytesAllocatedThisCycle > Options::gcMaxHeapSize();
    return m_bytesAllocatedThisCycle > m_maxEdenSize;
}

inline bool Heap::isBusy()
{
    return m_operationInProgress != NoOperation;
}

inline bool Heap::isCollecting()
{
    return m_operationInProgress == FullCollection || m_operationInProgress == EdenCollection;
}

ALWAYS_INLINE Heap* Heap::heap(const HeapCell* cell)
{
    return cell->heap();
}

inline Heap* Heap::heap(const JSValue v)
{
    if (!v.isCell())
        return nullptr;
    return heap(v.asCell());
}

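// The mark queries below treat a MarkedBlock whose mark version still needs a flip for the
// current heap version as entirely unmarked, since its mark bits are left over from an
// earlier collection cycle.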
ALWAYS_INLINE bool Heap::isMarked(const void* rawCell)
{
    ASSERT(!mayBeGCThread());
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation())
        return cell->largeAllocation().isMarked();
    MarkedBlock& block = cell->markedBlock();
    if (block.needsFlip(block.vm()->heap.objectSpace().version()))
        return false;
    return block.isMarked(cell);
}

ALWAYS_INLINE bool Heap::isMarkedConcurrently(const void* rawCell)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation())
        return cell->largeAllocation().isMarked();
    MarkedBlock& block = cell->markedBlock();
    if (block.needsFlip(block.vm()->heap.objectSpace().version()))
        return false;
    WTF::loadLoadFence();
    return block.isMarked(cell);
}

ALWAYS_INLINE bool Heap::testAndSetMarked(HeapVersion version, const void* rawCell)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation())
        return cell->largeAllocation().testAndSetMarked();
    MarkedBlock& block = cell->markedBlock();
    block.flipIfNecessaryDuringMarking(version);
    return block.testAndSetMarked(cell);
}

inline void Heap::setMarked(const void* rawCell)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation()) {
        cell->largeAllocation().setMarked();
        return;
    }
    MarkedBlock& block = cell->markedBlock();
    block.flipIfNecessary(block.vm()->heap.objectSpace().version());
    block.setMarked(cell);
}

ALWAYS_INLINE size_t Heap::cellSize(const void* rawCell)
{
    return bitwise_cast<HeapCell*>(rawCell)->cellSize();
}

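// Generational write barrier: when an already-scanned old object (OldBlack) is made to point
// at an unscanned new object (NewWhite), the old object is added to the remembered set so that
// a subsequent eden collection revisits it.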
inline void Heap::writeBarrier(const JSCell* from, JSValue to)
{
#if ENABLE(WRITE_BARRIER_PROFILING)
    WriteBarrierCounters::countWriteBarrier();
#endif
    if (!to.isCell())
        return;
    writeBarrier(from, to.asCell());
}

inline void Heap::writeBarrier(const JSCell* from, JSCell* to)
{
#if ENABLE(WRITE_BARRIER_PROFILING)
    WriteBarrierCounters::countWriteBarrier();
#endif
    if (!from || from->cellState() != CellState::OldBlack)
        return;
    if (!to || to->cellState() != CellState::NewWhite)
        return;
    addToRememberedSet(from);
}

inline void Heap::writeBarrier(const JSCell* from)
{
    ASSERT_GC_OBJECT_LOOKS_VALID(const_cast<JSCell*>(from));
    if (!from || from->cellState() != CellState::OldBlack)
        return;
    addToRememberedSet(from);
}

inline void Heap::reportExtraMemoryAllocated(size_t size)
{
    if (size > minExtraMemory)
        reportExtraMemoryAllocatedSlowCase(size);
}

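// The counter is updated with a compare-and-swap retry loop rather than a plain +=, presumably
// because visited extra memory can be reported from more than one visitor at the same time.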
inline void Heap::reportExtraMemoryVisited(CellState dataBeforeVisiting, size_t size)
{
    // We don't want to double-count the extra memory that was reported in previous collections.
    if (operationInProgress() == EdenCollection && dataBeforeVisiting == CellState::OldGrey)
        return;

    size_t* counter = &m_extraMemorySize;

    for (;;) {
        size_t oldSize = *counter;
        if (WTF::weakCompareAndSwap(counter, oldSize, oldSize + size))
            return;
    }
}

#if ENABLE(RESOURCE_USAGE)
inline void Heap::reportExternalMemoryVisited(CellState dataBeforeVisiting, size_t size)
{
    // We don't want to double-count the external memory that was reported in previous collections.
    if (operationInProgress() == EdenCollection && dataBeforeVisiting == CellState::OldGrey)
        return;

    size_t* counter = &m_externalMemorySize;

    for (;;) {
        size_t oldSize = *counter;
        if (WTF::weakCompareAndSwap(counter, oldSize, oldSize + size))
            return;
    }
}
#endif

inline void Heap::deprecatedReportExtraMemory(size_t size)
{
    if (size > minExtraMemory)
        deprecatedReportExtraMemorySlowCase(size);
}

template<typename Functor> inline void Heap::forEachCodeBlock(const Functor& func)
{
    forEachCodeBlockImpl(scopedLambdaRef<bool(CodeBlock*)>(func));
}

template<typename Functor> inline void Heap::forEachProtectedCell(const Functor& functor)
{
    for (auto& pair : m_protectedValues)
        functor(pair.key);
    m_handleSet.forEachStrongHandle(functor, m_protectedValues);
}

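// Cell allocation is split into two paths: cells whose class needs its destructor run when the
// cell dies go to the destructor space; everything else goes to the non-destructor space.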
inline void* Heap::allocateWithDestructor(size_t bytes)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC allocating %lu bytes with normal destructor.\n", bytes);
#endif
    ASSERT(isValidAllocation(bytes));
    return m_objectSpace.allocateWithDestructor(bytes);
}

inline void* Heap::allocateWithoutDestructor(size_t bytes)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC allocating %lu bytes without destructor.\n", bytes);
#endif
    ASSERT(isValidAllocation(bytes));
    return m_objectSpace.allocateWithoutDestructor(bytes);
}

template<typename ClassType>
inline void* Heap::allocateObjectOfType(size_t bytes)
{
    // JSCell::classInfo() expects objects allocated with normal destructor to derive from JSDestructibleObject.
    ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));

    if (ClassType::needsDestruction)
        return allocateWithDestructor(bytes);
    return allocateWithoutDestructor(bytes);
}

template<typename ClassType>
inline MarkedSpace::Subspace& Heap::subspaceForObjectOfType()
{
    // JSCell::classInfo() expects objects allocated with normal destructor to derive from JSDestructibleObject.
    ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));

    if (ClassType::needsDestruction)
        return subspaceForObjectDestructor();
    return subspaceForObjectWithoutDestructor();
}

template<typename ClassType>
inline MarkedAllocator* Heap::allocatorForObjectOfType(size_t bytes)
{
    // JSCell::classInfo() expects objects allocated with normal destructor to derive from JSDestructibleObject.
    ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));

    MarkedAllocator* result;
    if (ClassType::needsDestruction)
        result = allocatorForObjectWithDestructor(bytes);
    else
        result = allocatorForObjectWithoutDestructor(bytes);

    ASSERT(result || !ClassType::info()->isSubClassOf(JSCallee::info()));
    return result;
}

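// Auxiliary allocations are out-of-line backing stores owned by a cell. The intendedOwner is
// used only for allocation logging; it has no effect on where or how the memory is allocated.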
inline void* Heap::allocateAuxiliary(JSCell* intendedOwner, size_t bytes)
{
    void* result = m_objectSpace.allocateAuxiliary(bytes);
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC allocating %lu bytes of auxiliary for %p: %p.\n", bytes, intendedOwner, result);
#else
    UNUSED_PARAM(intendedOwner);
#endif
    return result;
}

inline void* Heap::tryAllocateAuxiliary(JSCell* intendedOwner, size_t bytes)
{
    void* result = m_objectSpace.tryAllocateAuxiliary(bytes);
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC allocating %lu bytes of auxiliary for %p: %p.\n", bytes, intendedOwner, result);
#else
    UNUSED_PARAM(intendedOwner);
#endif
    return result;
}

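// Reallocation is copy-on-grow: a new auxiliary buffer is allocated, the old contents are
// copied into it, and the old buffer is simply abandoned for the collector to reclaim.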
inline void* Heap::tryReallocateAuxiliary(JSCell* intendedOwner, void* oldBase, size_t oldSize, size_t newSize)
{
    void* newBase = tryAllocateAuxiliary(intendedOwner, newSize);
    if (!newBase)
        return nullptr;
    memcpy(newBase, oldBase, oldSize);
    return newBase;
}

inline void Heap::ascribeOwner(JSCell* intendedOwner, void* storage)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC ascribing %p as owner of storage %p.\n", intendedOwner, storage);
#else
    UNUSED_PARAM(intendedOwner);
    UNUSED_PARAM(storage);
#endif
}

#if USE(FOUNDATION)
template <typename T>
inline void Heap::releaseSoon(RetainPtr<T>&& object)
{
    m_delayedReleaseObjects.append(WTFMove(object));
}
#endif

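// The deferral depth is a nesting counter for scopes that must not trigger a collection. While
// it is non-zero the heap is considered deferred, so shouldCollect() declines to collect; any
// collection that was held back is picked up by decrementDeferralDepthAndGCIfNeeded().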
inline void Heap::incrementDeferralDepth()
{
    RELEASE_ASSERT(m_deferralDepth < 100); // Sanity check to make sure this doesn't get ridiculous.
    m_deferralDepth++;
}

inline void Heap::decrementDeferralDepth()
{
    RELEASE_ASSERT(m_deferralDepth >= 1);
    m_deferralDepth--;
}

inline bool Heap::collectIfNecessaryOrDefer()
{
    if (!shouldCollect())
        return false;

    collect();
    return true;
}

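// When Options::deferGCShouldCollectWithProbability() is set, leaving a deferral scope flips a
// weighted coin (Options::deferGCProbability()) and may collect even when the normal heuristics
// would not; otherwise it falls back to collectIfNecessaryOrDefer(). Presumably this exists as
// a stress-testing aid for code that relies on GC deferral.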
inline void Heap::collectAccordingToDeferGCProbability()
{
    if (isDeferred() || !m_isSafeToCollect || m_operationInProgress != NoOperation)
        return;

    if (randomNumber() < Options::deferGCProbability()) {
        collect();
        return;
    }

    // If our coin flip told us not to GC, we still might GC,
    // but we GC according to our memory pressure markers.
    collectIfNecessaryOrDefer();
}

inline void Heap::decrementDeferralDepthAndGCIfNeeded()
{
    decrementDeferralDepth();
    if (UNLIKELY(Options::deferGCShouldCollectWithProbability()))
        collectAccordingToDeferGCProbability();
    else
        collectIfNecessaryOrDefer();
}

inline HashSet<MarkedArgumentBuffer*>& Heap::markListSet()
{
    if (!m_markListSet)
        m_markListSet = std::make_unique<HashSet<MarkedArgumentBuffer*>>();
    return *m_markListSet;
}

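// WeakGCMaps register themselves here along with a pruning callback, which the heap uses to
// discard entries whose keys have died.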
inline void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
{
    m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
}

inline void Heap::unregisterWeakGCMap(void* weakGCMap)
{
    m_weakGCMaps.remove(weakGCMap);
}

inline void Heap::didAllocateBlock(size_t capacity)
{
#if ENABLE(RESOURCE_USAGE)
    m_blockBytesAllocated += capacity;
#else
    UNUSED_PARAM(capacity);
#endif
}

inline void Heap::didFreeBlock(size_t capacity)
{
#if ENABLE(RESOURCE_USAGE)
    m_blockBytesAllocated -= capacity;
#else
    UNUSED_PARAM(capacity);
#endif
}

} // namespace JSC

#endif // HeapInlines_h