Refactored heap tear-down to use normal value semantics (i.e., destructors)
[WebKit-https.git] / Source / JavaScriptCore / heap / CopiedSpace.cpp
1 /*
2  * Copyright (C) 2011 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "CopiedSpace.h"
28
29 #include "CopiedSpaceInlineMethods.h"
30 #include "GCActivityCallback.h"
31
32 namespace JSC {
33
// Constructs a CopiedSpace bound to the given Heap. m_toSpace and
// m_fromSpace stay null until init() points them at m_blocks1/m_blocks2,
// so init() must run before the space is used for allocation.
CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_toSpace(0)
    , m_fromSpace(0)
    , m_inCopyingPhase(false)
    , m_numberOfLoanedBlocks(0) // Decremented in doneFillingBlock() as blocks are returned.
{
}
42
43 CopiedSpace::~CopiedSpace()
44 {
45     while (!m_toSpace->isEmpty())
46         m_heap->blockAllocator().deallocate(CopiedBlock::destroy(static_cast<CopiedBlock*>(m_toSpace->removeHead())));
47
48     while (!m_fromSpace->isEmpty())
49         m_heap->blockAllocator().deallocate(CopiedBlock::destroy(static_cast<CopiedBlock*>(m_fromSpace->removeHead())));
50
51     while (!m_oversizeBlocks.isEmpty())
52         CopiedBlock::destroy(static_cast<CopiedBlock*>(m_oversizeBlocks.removeHead())).deallocate();
53 }
54
55 void CopiedSpace::init()
56 {
57     m_toSpace = &m_blocks1;
58     m_fromSpace = &m_blocks2;
59     
60     if (!addNewBlock())
61         CRASH();
62 }   
63
64 CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
65 {
66     if (isOversize(bytes))
67         return tryAllocateOversize(bytes, outPtr);
68     
69     m_heap->didAllocate(m_allocator.currentCapacity());
70
71     if (!addNewBlock()) {
72         *outPtr = 0;
73         return false;
74     }
75     *outPtr = m_allocator.allocate(bytes);
76     ASSERT(*outPtr);
77     return true;
78 }
79
80 CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
81 {
82     ASSERT(isOversize(bytes));
83     
84     size_t blockSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(CopiedBlock) + bytes);
85
86     PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, WTF::pageSize(), OSAllocator::JSGCHeapPages);
87     if (!static_cast<bool>(allocation)) {
88         *outPtr = 0;
89         return false;
90     }
91
92     CopiedBlock* block = CopiedBlock::create(allocation);
93     m_oversizeBlocks.push(block);
94     m_blockFilter.add(reinterpret_cast<Bits>(block));
95     m_blockSet.add(block);
96     
97     *outPtr = allocateFromBlock(block, bytes);
98
99     m_heap->didAllocate(blockSize);
100
101     return true;
102 }
103
104 CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
105 {
106     if (oldSize >= newSize)
107         return true;
108     
109     void* oldPtr = *ptr;
110     ASSERT(!m_heap->globalData()->isInitializingObject());
111
112     if (isOversize(oldSize) || isOversize(newSize))
113         return tryReallocateOversize(ptr, oldSize, newSize);
114
115     if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
116         size_t delta = newSize - oldSize;
117         if (m_allocator.fitsInCurrentBlock(delta)) {
118             (void)m_allocator.allocate(delta);
119             return true;
120         }
121     }
122
123     void* result = 0;
124     if (!tryAllocate(newSize, &result)) {
125         *ptr = 0;
126         return false;
127     }
128     memcpy(result, oldPtr, oldSize);
129     *ptr = result;
130     return true;
131 }
132
133 CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
134 {
135     ASSERT(isOversize(oldSize) || isOversize(newSize));
136     ASSERT(newSize > oldSize);
137
138     void* oldPtr = *ptr;
139     
140     void* newPtr = 0;
141     if (!tryAllocateOversize(newSize, &newPtr)) {
142         *ptr = 0;
143         return false;
144     }
145
146     memcpy(newPtr, oldPtr, oldSize);
147
148     if (isOversize(oldSize)) {
149         CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
150         m_oversizeBlocks.remove(oldBlock);
151         m_blockSet.remove(oldBlock);
152         CopiedBlock::destroy(oldBlock).deallocate();
153     }
154     
155     *ptr = newPtr;
156     return true;
157 }
158
// Called during the copying phase when a borrowed block has been filled.
// Untouched blocks are recycled; blocks with copied data are published
// into to-space. The loaned-block count is then decremented, and the last
// return signals m_loanedBlocksCondition, which doneCopying() waits on.
void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
    ASSERT(block);
    ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
    ASSERT(m_inCopyingPhase);

    // An offset still at the payload start means nothing was copied into
    // this block; hand it back for reuse instead of publishing it.
    if (block->m_offset == block->payload()) {
        recycleBlock(block);
        return;
    }

    {
        // Publish under the to-space lock: multiple copying threads may
        // finish blocks concurrently.
        MutexLocker locker(m_toSpaceLock);
        m_toSpace->push(block);
        m_blockSet.add(block);
        m_blockFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        m_numberOfLoanedBlocks--;
        // Only the final returned block needs to wake the waiter.
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}
185
// Ends the copying phase: waits for all loaned blocks to come back, then
// reclaims from-space. Pinned from-space blocks survive into to-space
// (unpinned for the next cycle); unpinned ones return to the block
// allocator. Oversize blocks are likewise destroyed unless pinned.
// Finally the bump allocator is pointed at a usable to-space block.
void CopiedSpace::doneCopying()
{
    {
        // Block until every borrowed block has been returned via
        // doneFillingBlock().
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        if (block->m_isPinned) {
            block->m_isPinned = false;
            // We don't add the block to the blockSet because it was never removed.
            ASSERT(m_blockSet.contains(block));
            m_blockFilter.add(reinterpret_cast<Bits>(block));
            m_toSpace->push(block);
            continue;
        }

        m_blockSet.remove(block);
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
    }

    // Sweep oversize blocks: destroy the unpinned, keep (and unpin) the rest.
    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            m_blockSet.remove(curr);
            CopiedBlock::destroy(curr).deallocate();
        } else {
            m_blockFilter.add(reinterpret_cast<Bits>(curr));
            curr->m_isPinned = false;
        }
        curr = next;
    }

    // Ensure the allocator has a current block to allocate from.
    if (!m_toSpace->head()) {
        if (!addNewBlock())
            CRASH();
    } else
        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
231
232 CheckedBoolean CopiedSpace::getFreshBlock(AllocationEffort allocationEffort, CopiedBlock** outBlock)
233 {
234     CopiedBlock* block = 0;
235     if (allocationEffort == AllocationMustSucceed)
236         block = CopiedBlock::create(m_heap->blockAllocator().allocate());
237     else {
238         ASSERT(allocationEffort == AllocationCanFail);
239         if (m_heap->shouldCollect())
240             m_heap->collect(Heap::DoNotSweep);
241         
242         if (!getFreshBlock(AllocationMustSucceed, &block)) {
243             *outBlock = 0;
244             ASSERT_NOT_REACHED();
245             return false;
246         }
247     }
248     ASSERT(block);
249     ASSERT(is8ByteAligned(block->m_offset));
250     *outBlock = block;
251     return true;
252 }
253
254 size_t CopiedSpace::size()
255 {
256     size_t calculatedSize = 0;
257
258     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
259         calculatedSize += block->size();
260
261     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
262         calculatedSize += block->size();
263
264     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
265         calculatedSize += block->size();
266
267     return calculatedSize;
268 }
269
270 size_t CopiedSpace::capacity()
271 {
272     size_t calculatedCapacity = 0;
273
274     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
275         calculatedCapacity += block->capacity();
276
277     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
278         calculatedCapacity += block->capacity();
279
280     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
281         calculatedCapacity += block->capacity();
282
283     return calculatedCapacity;
284 }
285
286 static bool isBlockListPagedOut(double deadline, DoublyLinkedList<HeapBlock>* list)
287 {
288     unsigned itersSinceLastTimeCheck = 0;
289     HeapBlock* current = list->head();
290     while (current) {
291         current = current->next();
292         ++itersSinceLastTimeCheck;
293         if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
294             double currentTime = WTF::monotonicallyIncreasingTime();
295             if (currentTime > deadline)
296                 return true;
297             itersSinceLastTimeCheck = 0;
298         }
299     }
300
301     return false;
302 }
303
304 bool CopiedSpace::isPagedOut(double deadline)
305 {
306     return isBlockListPagedOut(deadline, m_toSpace) 
307         || isBlockListPagedOut(deadline, m_fromSpace) 
308         || isBlockListPagedOut(deadline, &m_oversizeBlocks);
309 }
310
311 } // namespace JSC