/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef HeapInlines_h
#define HeapInlines_h

#include "Heap.h"
#include "HeapCellInlines.h"
#include "IndexingHeader.h"
#include "JSCallee.h"
#include "JSCell.h"
#include "Structure.h"
#include <type_traits>
#include <wtf/Assertions.h>
#include <wtf/MainThread.h>
#include <wtf/RandomNumber.h>

namespace JSC {

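// Collection-trigger policy: a collection is considered only when GC is not
// deferred, the heap says it is safe to collect, and no collection is already
// in progress; the byte budget is then checked against Options::gcMaxHeapSize()
// if set, or the current eden budget otherwise. A hypothetical allocation-heavy
// call site could poll this policy roughly as follows (schematic sketch only;
// real call sites are spread throughout the engine):
//
//     void didAllocateALot(Heap& heap)
//     {
//         heap.collectIfNecessaryOrDefer(); // collects only when shouldCollect() is true
//     }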
inline bool Heap::shouldCollect()
{
    if (isDeferred())
        return false;
    if (!m_isSafeToCollect)
        return false;
    if (m_operationInProgress != NoOperation)
        return false;
    if (Options::gcMaxHeapSize())
        return m_bytesAllocatedThisCycle > Options::gcMaxHeapSize();
    return m_bytesAllocatedThisCycle > m_maxEdenSize;
}

inline bool Heap::isBusy()
{
    return m_operationInProgress != NoOperation;
}

inline bool Heap::isCollecting()
{
    return m_operationInProgress == FullCollection || m_operationInProgress == EdenCollection;
}

ALWAYS_INLINE Heap* Heap::heap(const HeapCell* cell)
{
    return cell->heap();
}

inline Heap* Heap::heap(const JSValue v)
{
    if (!v.isCell())
        return 0;
    return heap(v.asCell());
}

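// The queries below distinguish "live" (still allocated as far as the
// collector knows) from "marked" (visited during the current marking cycle).
// Cells either sit in a MarkedBlock or are LargeAllocations; block-backed
// cells must account for the heap version so that stale mark bits from a
// previous cycle are not misread. isMarkedConcurrently() is the variant that
// is safe to call from marking threads, hence the load-load fence.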
inline bool Heap::isLive(const void* rawCell)
{
    ASSERT(!mayBeGCThread());
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation())
        return cell->largeAllocation().isLive();
    MarkedBlock& block = cell->markedBlock();
    block.flipIfNecessary(block.vm()->heap.objectSpace().version());
    return block.handle().isLiveCell(cell);
}

ALWAYS_INLINE bool Heap::isMarked(const void* rawCell)
{
    ASSERT(!mayBeGCThread());
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation())
        return cell->largeAllocation().isMarked();
    MarkedBlock& block = cell->markedBlock();
    if (block.needsFlip(block.vm()->heap.objectSpace().version()))
        return false;
    return block.isMarked(cell);
}

ALWAYS_INLINE bool Heap::isMarkedConcurrently(const void* rawCell)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation())
        return cell->largeAllocation().isMarked();
    MarkedBlock& block = cell->markedBlock();
    if (block.needsFlip(block.vm()->heap.objectSpace().version()))
        return false;
    WTF::loadLoadFence();
    return block.isMarked(cell);
}

ALWAYS_INLINE bool Heap::testAndSetMarked(HeapVersion version, const void* rawCell)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation())
        return cell->largeAllocation().testAndSetMarked();
    MarkedBlock& block = cell->markedBlock();
    block.flipIfNecessaryDuringMarking(version);
    return block.testAndSetMarked(cell);
}

inline void Heap::setMarked(const void* rawCell)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
    if (cell->isLargeAllocation()) {
        cell->largeAllocation().setMarked();
        return;
    }
    MarkedBlock& block = cell->markedBlock();
    block.flipIfNecessary(block.vm()->heap.objectSpace().version());
    block.setMarked(cell);
}

ALWAYS_INLINE size_t Heap::cellSize(const void* rawCell)
{
    return bitwise_cast<HeapCell*>(rawCell)->cellSize();
}

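// Generational write barriers. A barrier matters whenever a pointer to a
// possibly-new (NewWhite) cell is stored into a cell the collector has
// already blackened (OldBlack); such owners are added to the remembered set
// so the next eden collection rescans them. A typical store site looks
// roughly like this (schematic; "owner" and "someSlot" are hypothetical):
//
//     owner->someSlot = newValue;            // the actual field write
//     vm.heap.writeBarrier(owner, newValue); // record the old-to-new edge, if any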
inline void Heap::writeBarrier(const JSCell* from, JSValue to)
{
#if ENABLE(WRITE_BARRIER_PROFILING)
    WriteBarrierCounters::countWriteBarrier();
#endif
    if (!to.isCell())
        return;
    writeBarrier(from, to.asCell());
}

inline void Heap::writeBarrier(const JSCell* from, JSCell* to)
{
#if ENABLE(WRITE_BARRIER_PROFILING)
    WriteBarrierCounters::countWriteBarrier();
#endif
    if (!from || from->cellState() != CellState::OldBlack)
        return;
    if (!to || to->cellState() != CellState::NewWhite)
        return;
    addToRememberedSet(from);
}

inline void Heap::writeBarrier(const JSCell* from)
{
    ASSERT_GC_OBJECT_LOOKS_VALID(const_cast<JSCell*>(from));
    if (!from || from->cellState() != CellState::OldBlack)
        return;
    addToRememberedSet(from);
}

inline void Heap::reportExtraMemoryAllocated(size_t size)
{
    if (size > minExtraMemory)
        reportExtraMemoryAllocatedSlowCase(size);
}

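// Extra/external memory visited during marking is accumulated with a CAS loop
// because multiple marking threads may report sizes concurrently; the
// weakCompareAndSwap retry keeps the counters lock-free. Eden collections skip
// cells that were OldGrey before visiting, so memory already reported by a
// previous collection is not double-counted.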
inline void Heap::reportExtraMemoryVisited(CellState dataBeforeVisiting, size_t size)
{
    // We don't want to double-count the extra memory that was reported in previous collections.
    if (operationInProgress() == EdenCollection && dataBeforeVisiting == CellState::OldGrey)
        return;

    size_t* counter = &m_extraMemorySize;

    for (;;) {
        size_t oldSize = *counter;
        if (WTF::weakCompareAndSwap(counter, oldSize, oldSize + size))
            return;
    }
}

#if ENABLE(RESOURCE_USAGE)
inline void Heap::reportExternalMemoryVisited(CellState dataBeforeVisiting, size_t size)
{
    // We don't want to double-count the external memory that was reported in previous collections.
    if (operationInProgress() == EdenCollection && dataBeforeVisiting == CellState::OldGrey)
        return;

    size_t* counter = &m_externalMemorySize;

    for (;;) {
        size_t oldSize = *counter;
        if (WTF::weakCompareAndSwap(counter, oldSize, oldSize + size))
            return;
    }
}
#endif

inline void Heap::deprecatedReportExtraMemory(size_t size)
{
    if (size > minExtraMemory)
        deprecatedReportExtraMemorySlowCase(size);
}

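// Iteration helpers. forEachCodeBlock() wraps the functor in a scoped lambda
// with a bool(CodeBlock*) signature and forwards it to forEachCodeBlockImpl();
// forEachProtectedCell() invokes the functor on every explicitly protected
// cell and on every strong handle. A hypothetical caller might count the
// protected cells like this (schematic only):
//
//     unsigned protectedCount = 0;
//     heap.forEachProtectedCell([&] (JSCell*) {
//         protectedCount++;
//     });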
template<typename Functor> inline void Heap::forEachCodeBlock(const Functor& func)
{
    forEachCodeBlockImpl(scopedLambdaRef<bool(CodeBlock*)>(func));
}

template<typename Functor> inline void Heap::forEachProtectedCell(const Functor& functor)
{
    for (auto& pair : m_protectedValues)
        functor(pair.key);
    m_handleSet.forEachStrongHandle(functor, m_protectedValues);
}

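// Cell allocation entry points. Cells are segregated by whether their type
// needs its destructor run when the cell dies; allocateObjectOfType<T>()
// picks the right path from ClassType::needsDestruction, and the matching
// subspace/allocator accessors exist for callers that cache that decision.
// A hypothetical use, roughly what cell-creation helpers boil down to
// ("SomeCellType" and the constructor arguments are illustrative):
//
//     void* storage = heap.allocateObjectOfType<SomeCellType>(allocationSize);
//     SomeCellType* cell = new (NotNull, storage) SomeCellType(vm, structure);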
inline void* Heap::allocateWithDestructor(size_t bytes)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC allocating %lu bytes with normal destructor.\n", bytes);
#endif
    ASSERT(isValidAllocation(bytes));
    return m_objectSpace.allocateWithDestructor(bytes);
}

inline void* Heap::allocateWithoutDestructor(size_t bytes)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC allocating %lu bytes without destructor.\n", bytes);
#endif
    ASSERT(isValidAllocation(bytes));
    return m_objectSpace.allocateWithoutDestructor(bytes);
}

template<typename ClassType>
inline void* Heap::allocateObjectOfType(size_t bytes)
{
    // JSCell::classInfo() expects objects allocated with normal destructor to derive from JSDestructibleObject.
    ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));

    if (ClassType::needsDestruction)
        return allocateWithDestructor(bytes);
    return allocateWithoutDestructor(bytes);
}

template<typename ClassType>
inline MarkedSpace::Subspace& Heap::subspaceForObjectOfType()
{
    // JSCell::classInfo() expects objects allocated with normal destructor to derive from JSDestructibleObject.
    ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));

    if (ClassType::needsDestruction)
        return subspaceForObjectDestructor();
    return subspaceForObjectWithoutDestructor();
}

template<typename ClassType>
inline MarkedAllocator* Heap::allocatorForObjectOfType(size_t bytes)
{
    // JSCell::classInfo() expects objects allocated with normal destructor to derive from JSDestructibleObject.
    ASSERT((!ClassType::needsDestruction || (ClassType::StructureFlags & StructureIsImmortal) || std::is_convertible<ClassType, JSDestructibleObject>::value));

    MarkedAllocator* result;
    if (ClassType::needsDestruction)
        result = allocatorForObjectWithDestructor(bytes);
    else
        result = allocatorForObjectWithoutDestructor(bytes);

    ASSERT(result || !ClassType::info()->isSubClassOf(JSCallee::info()));
    return result;
}

inline void* Heap::allocateAuxiliary(JSCell* intendedOwner, size_t bytes)
{
    void* result = m_objectSpace.allocateAuxiliary(bytes);
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC allocating %lu bytes of auxiliary for %p: %p.\n", bytes, intendedOwner, result);
#else
    UNUSED_PARAM(intendedOwner);
#endif
    return result;
}

inline void* Heap::tryAllocateAuxiliary(JSCell* intendedOwner, size_t bytes)
{
    void* result = m_objectSpace.tryAllocateAuxiliary(bytes);
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC allocating %lu bytes of auxiliary for %p: %p.\n", bytes, intendedOwner, result);
#else
    UNUSED_PARAM(intendedOwner);
#endif
    return result;
}

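// "Reallocation" of auxiliary storage is allocate-and-copy: a fresh chunk of
// newSize bytes is obtained and the first oldSize bytes are memcpy'd over.
// The old storage is not freed here; it simply becomes garbage for the
// collector to reclaim. Returns null if the new allocation fails.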
inline void* Heap::tryReallocateAuxiliary(JSCell* intendedOwner, void* oldBase, size_t oldSize, size_t newSize)
{
    void* newBase = tryAllocateAuxiliary(intendedOwner, newSize);
    if (!newBase)
        return nullptr;
    memcpy(newBase, oldBase, oldSize);
    return newBase;
}

inline void Heap::ascribeOwner(JSCell* intendedOwner, void* storage)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC ascribing %p as owner of storage %p.\n", intendedOwner, storage);
#else
    UNUSED_PARAM(intendedOwner);
    UNUSED_PARAM(storage);
#endif
}

#if USE(FOUNDATION)
template <typename T>
inline void Heap::releaseSoon(RetainPtr<T>&& object)
{
    m_delayedReleaseObjects.append(WTFMove(object));
}
#endif

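// GC deferral. While the deferral depth is non-zero, shouldCollect() reports
// false, so collections requested in that window are postponed. Callers
// normally do not touch these counters directly; the RAII helpers in
// DeferGC.h pair the increment and decrement automatically, roughly like
// this (schematic sketch):
//
//     {
//         DeferGC deferGC(vm.heap);      // incrementDeferralDepth()
//         // ... allocate several cells that must not be split by a GC ...
//     }                                  // decrementDeferralDepthAndGCIfNeeded()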
inline void Heap::incrementDeferralDepth()
{
    RELEASE_ASSERT(m_deferralDepth < 100); // Sanity check to make sure this doesn't get ridiculous.
    m_deferralDepth++;
}

inline void Heap::decrementDeferralDepth()
{
    RELEASE_ASSERT(m_deferralDepth >= 1);
    m_deferralDepth--;
}

inline bool Heap::collectIfNecessaryOrDefer()
{
    if (!shouldCollect())
        return false;

    collect();
    return true;
}

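// Testing knob: when Options::deferGCShouldCollectWithProbability() is set,
// leaving a deferral scope flips a coin and collects with probability
// Options::deferGCProbability(); when the coin flip says no, the normal
// memory-pressure policy below still applies.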
inline void Heap::collectAccordingToDeferGCProbability()
{
    if (isDeferred() || !m_isSafeToCollect || m_operationInProgress != NoOperation)
        return;

    if (randomNumber() < Options::deferGCProbability()) {
        collect();
        return;
    }

    // If our coin flip told us not to GC, we still might GC,
    // but we GC according to our memory pressure markers.
    collectIfNecessaryOrDefer();
}

inline void Heap::decrementDeferralDepthAndGCIfNeeded()
{
    decrementDeferralDepth();
    if (UNLIKELY(Options::deferGCShouldCollectWithProbability()))
        collectAccordingToDeferGCProbability();
    else
        collectIfNecessaryOrDefer();
}

inline HashSet<MarkedArgumentBuffer*>& Heap::markListSet()
{
    if (!m_markListSet)
        m_markListSet = std::make_unique<HashSet<MarkedArgumentBuffer*>>();
    return *m_markListSet;
}

inline void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
{
    m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
}

inline void Heap::unregisterWeakGCMap(void* weakGCMap)
{
    m_weakGCMaps.remove(weakGCMap);
}

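// Block-level accounting: when ENABLE(RESOURCE_USAGE) is on, the heap tracks
// the total capacity of the blocks it has allocated so resource-usage
// reporting can attribute memory to the GC; otherwise these are no-ops.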
inline void Heap::didAllocateBlock(size_t capacity)
{
#if ENABLE(RESOURCE_USAGE)
    m_blockBytesAllocated += capacity;
#else
    UNUSED_PARAM(capacity);
#endif
}

inline void Heap::didFreeBlock(size_t capacity)
{
#if ENABLE(RESOURCE_USAGE)
    m_blockBytesAllocated -= capacity;
#else
    UNUSED_PARAM(capacity);
#endif
}

} // namespace JSC

#endif // HeapInlines_h