Heap::destroy leaks CopiedSpace
[WebKit-https.git] / Source / JavaScriptCore / heap / CopiedSpace.cpp
1 /*
2  * Copyright (C) 2011 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "CopiedSpace.h"
28
29 #include "CopiedSpaceInlineMethods.h"
30
31 namespace JSC {
32
// Constructs a CopiedSpace bound to its owning Heap. Both semispace
// pointers start null and all byte counters start at zero; init() must
// be called before the space can satisfy any allocation.
CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_toSpace(0)
    , m_fromSpace(0)
    , m_totalMemoryAllocated(0)
    , m_totalMemoryUtilized(0)
    , m_inCopyingPhase(false)
    , m_numberOfLoanedBlocks(0)
{
}
43
// One-time setup: designates m_blocks1/m_blocks2 as the initial to-space
// and from-space and eagerly installs the first allocation block.
// Failure to obtain that first block is fatal.
void CopiedSpace::init()
{
    m_toSpace = &m_blocks1;
    m_fromSpace = &m_blocks2;
    
    // NOTE(review): the allocated-bytes counter is credited for
    // s_initialBlockNum blocks up front, yet only a single addNewBlock()
    // call follows — confirm addNewBlock() does not also bump the counter,
    // otherwise this double-counts (or pre-counts) block memory.
    m_totalMemoryAllocated += HeapBlock::s_blockSize * s_initialBlockNum;

    if (!addNewBlock())
        CRASH();
}
54
55 CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
56 {
57     if (isOversize(bytes))
58         return tryAllocateOversize(bytes, outPtr);
59     
60     m_totalMemoryUtilized += m_allocator.currentUtilization();
61     if (!addNewBlock()) {
62         *outPtr = 0;
63         return false;
64     }
65     *outPtr = m_allocator.allocate(bytes);
66     ASSERT(*outPtr);
67     return true;
68 }
69
70 CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
71 {
72     ASSERT(isOversize(bytes));
73     
74     size_t blockSize = WTF::roundUpToMultipleOf<s_pageSize>(sizeof(CopiedBlock) + bytes);
75     PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, s_pageSize, OSAllocator::JSGCHeapPages);
76     if (!static_cast<bool>(allocation)) {
77         *outPtr = 0;
78         return false;
79     }
80     CopiedBlock* block = new (NotNull, allocation.base()) CopiedBlock(allocation);
81     m_oversizeBlocks.push(block);
82     ASSERT(is8ByteAligned(block->m_offset));
83
84     m_oversizeFilter.add(reinterpret_cast<Bits>(block));
85     
86     m_totalMemoryAllocated += blockSize;
87     m_totalMemoryUtilized += bytes;
88
89     *outPtr = block->m_offset;
90     return true;
91 }
92
// Grows an allocation in place when possible, otherwise moves it.
// *ptr is updated to the (possibly relocated) payload; on allocation
// failure *ptr is zeroed and false is returned. Shrinking is a no-op.
CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
    if (oldSize >= newSize)
        return true;
    
    void* oldPtr = *ptr;
    ASSERT(!m_heap->globalData()->isInitializingObject());

    if (isOversize(oldSize) || isOversize(newSize))
        return tryReallocateOversize(ptr, oldSize, newSize);

    // Fast path: if oldPtr was the most recent bump allocation, rewind
    // the bump pointer to it and re-extend in the same block. allocate()
    // then hands back oldPtr itself, so *ptr is already correct; the
    // non-null pointer converts to a true CheckedBoolean.
    if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
        m_allocator.resetLastAllocation(oldPtr);
        if (m_allocator.fitsInCurrentBlock(newSize)) {
            m_totalMemoryUtilized += newSize - oldSize;
            return m_allocator.allocate(newSize);
        }
    }
    // Slow path: the old bytes are abandoned in place (reclaimed at the
    // next collection), so drop them from the utilization counter before
    // tryAllocate re-adds the full new size.
    m_totalMemoryUtilized -= oldSize;

    void* result = 0;
    if (!tryAllocate(newSize, &result)) {
        *ptr = 0;
        return false;
    }
    memcpy(result, oldPtr, oldSize);
    *ptr = result;
    return true;
}
122
123 CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
124 {
125     ASSERT(isOversize(oldSize) || isOversize(newSize));
126     ASSERT(newSize > oldSize);
127
128     void* oldPtr = *ptr;
129     
130     void* newPtr = 0;
131     if (!tryAllocateOversize(newSize, &newPtr)) {
132         *ptr = 0;
133         return false;
134     }
135     memcpy(newPtr, oldPtr, oldSize);
136
137     if (isOversize(oldSize)) {
138         CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
139         m_oversizeBlocks.remove(oldBlock);
140         oldBlock->m_allocation.deallocate();
141         m_totalMemoryAllocated -= oldSize + sizeof(CopiedBlock);
142     }
143     
144     m_totalMemoryUtilized -= oldSize;
145
146     *ptr = newPtr;
147     return true;
148 }
149
// Called (during the copying phase, possibly from a copying thread) when
// a loaned block has been filled. Empty blocks are recycled outright;
// otherwise the block is published into to-space, its used bytes are
// added to the utilization counter, and the loan count is decremented —
// signaling doneCopying() once the last loan is returned. Each piece of
// shared state is guarded by its own lock; the lock ordering here
// (to-space, then stats, then loan count) must not be rearranged.
void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
    ASSERT(block);
    ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
    ASSERT(m_inCopyingPhase);

    // Nothing was copied into this block; return it to the free pool.
    if (block->m_offset == block->payload()) {
        recycleBlock(block);
        return;
    }

    {
        MutexLocker locker(m_toSpaceLock);
        m_toSpace->push(block);
        m_toSpaceSet.add(block);
        m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        MutexLocker locker(m_memoryStatsLock);
        // Bytes actually written: distance from payload start to the bump offset.
        m_totalMemoryUtilized += static_cast<size_t>(static_cast<char*>(block->m_offset) - block->payload());
    }

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        m_numberOfLoanedBlocks--;
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}
181
// Finishes a copying cycle: waits for all loaned blocks to be returned,
// then sweeps from-space (pinned blocks survive into to-space, unpinned
// ones go back to the Heap's free pool) and the oversize list (unpinned
// oversize blocks are deallocated outright; pinned ones are unpinned for
// the next cycle). Finally re-arms the allocator on to-space's head.
void CopiedSpace::doneCopying()
{
    // Block until every borrower has called doneFillingBlock/recycleBlock.
    {
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        if (block->m_isPinned) {
            // Pinned blocks couldn't be evacuated; carry them forward.
            block->m_isPinned = false;
            m_toSpace->push(block);
            continue;
        }

        m_toSpaceSet.remove(block);
        {
            MutexLocker locker(m_heap->m_freeBlockLock);
            m_heap->m_freeBlocks.push(block);
            m_heap->m_numberOfFreeBlocks++;
        }
    }

    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        // Grab the successor first: removing curr unlinks it from the list.
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            m_totalMemoryAllocated -= curr->m_allocation.size();
            m_totalMemoryUtilized -= curr->m_allocation.size() - sizeof(CopiedBlock);
            curr->m_allocation.deallocate();
        } else
            curr->m_isPinned = false;
        curr = next;
    }

    // The allocator must always have a current block to bump out of.
    if (!m_toSpace->head()) {
        if (!addNewBlock())
            CRASH();
    } else
        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
227
// Obtains an empty CopiedBlock, preferring the Heap's free pool. With
// AllocationMustSucceed a pool miss falls through to a fresh OS
// allocation (failure there is fatal); with AllocationCanFail a pool
// miss may first trigger a collection, then retries with
// AllocationMustSucceed — so despite the name, this path never actually
// returns failure either (both failure returns sit behind
// ASSERT_NOT_REACHED).
CheckedBoolean CopiedSpace::getFreshBlock(AllocationEffort allocationEffort, CopiedBlock** outBlock)
{
    HeapBlock* heapBlock = 0;
    CopiedBlock* block = 0;
    {
        MutexLocker locker(m_heap->m_freeBlockLock);
        if (!m_heap->m_freeBlocks.isEmpty()) {
            heapBlock = m_heap->m_freeBlocks.removeHead();
            m_heap->m_numberOfFreeBlocks--;
        }
    }
    if (heapBlock)
        // Re-construct a CopiedBlock header in place over the recycled block.
        block = new (NotNull, heapBlock) CopiedBlock(heapBlock->m_allocation);
    else if (allocationEffort == AllocationMustSucceed) {
        if (!allocateNewBlock(&block)) {
            *outBlock = 0;
            ASSERT_NOT_REACHED();
            return false;
        }
    } else {
        ASSERT(allocationEffort == AllocationCanFail);
        // Opportunistically collect before committing new memory.
        if (m_heap->waterMark() >= m_heap->highWaterMark() && m_heap->m_isSafeToCollect)
            m_heap->collect(Heap::DoNotSweep);
        
        if (!getFreshBlock(AllocationMustSucceed, &block)) {
            *outBlock = 0;
            ASSERT_NOT_REACHED();
            return false;
        }
    }
    ASSERT(block);
    ASSERT(is8ByteAligned(block->m_offset));
    *outBlock = block;
    return true;
}
263
264 void CopiedSpace::destroy()
265 {
266     while (!m_toSpace->isEmpty()) {
267         CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->removeHead());
268         MutexLocker locker(m_heap->m_freeBlockLock);
269         m_heap->m_freeBlocks.append(block);
270         m_heap->m_numberOfFreeBlocks++;
271     }
272
273     while (!m_fromSpace->isEmpty()) {
274         CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
275         MutexLocker locker(m_heap->m_freeBlockLock);
276         m_heap->m_freeBlocks.append(block);
277         m_heap->m_numberOfFreeBlocks++;
278     }
279
280     while (!m_oversizeBlocks.isEmpty()) {
281         CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.removeHead());
282         block->m_allocation.deallocate();
283     }
284 }
285
286 } // namespace JSC