Commit: "tryReallocate could break the zero-ed memory invariant of CopiedBlocks"
File: Source/JavaScriptCore/heap/CopiedSpace.cpp (WebKit, mirrored from WebKit-https.git)
1 /*
2  * Copyright (C) 2011 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "CopiedSpace.h"
28
29 #include "CopiedSpaceInlineMethods.h"
30
31 namespace JSC {
32
// Constructs a copied (bump-allocated, copying-collected) space bound to
// the given Heap. Both semispace pointers start null; init() wires them to
// the two block lists and allocates the first block before the space is used.
CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_toSpace(0)
    , m_fromSpace(0)
    , m_inCopyingPhase(false)
    , m_numberOfLoanedBlocks(0)
    , m_waterMark(0)
{
}
42
43 void CopiedSpace::init()
44 {
45     m_toSpace = &m_blocks1;
46     m_fromSpace = &m_blocks2;
47     
48     if (!addNewBlock())
49         CRASH();
50 }   
51
52 CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
53 {
54     if (isOversize(bytes))
55         return tryAllocateOversize(bytes, outPtr);
56     
57     m_waterMark += m_allocator.currentCapacity();
58
59     if (!addNewBlock()) {
60         *outPtr = 0;
61         return false;
62     }
63     *outPtr = m_allocator.allocate(bytes);
64     ASSERT(*outPtr);
65     return true;
66 }
67
68 CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
69 {
70     ASSERT(isOversize(bytes));
71     
72     size_t blockSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(CopiedBlock) + bytes);
73
74     PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, WTF::pageSize(), OSAllocator::JSGCHeapPages);
75     if (!static_cast<bool>(allocation)) {
76         *outPtr = 0;
77         return false;
78     }
79
80     CopiedBlock* block = new (NotNull, allocation.base()) CopiedBlock(allocation);
81     m_oversizeBlocks.push(block);
82     m_oversizeFilter.add(reinterpret_cast<Bits>(block));
83     
84     *outPtr = allocateFromBlock(block, bytes);
85
86     m_waterMark += block->capacity();
87
88     return true;
89 }
90
// Grows an allocation in place when possible, otherwise moves it to fresh
// storage. *ptr is updated to the (possibly new) location; on allocation
// failure *ptr is zeroed and false is returned. Shrinking is a no-op.
CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
    // Never shrink: the existing allocation already covers newSize.
    if (oldSize >= newSize)
        return true;
    
    void* oldPtr = *ptr;
    // Reallocating mid-initialization could expose a half-built object to
    // the collector.
    ASSERT(!m_heap->globalData()->isInitializingObject());

    // If either size is oversize, the storage lives (or will live) in a
    // dedicated page-allocated block; that path is handled separately.
    if (isOversize(oldSize) || isOversize(newSize))
        return tryReallocateOversize(ptr, oldSize, newSize);

    // Fast path: oldPtr was the most recent bump allocation and the extra
    // bytes fit in the current block, so just advance the bump pointer.
    // NOTE(review): the extension bytes come from the block's unallocated
    // tail, which CopiedBlocks are expected to keep zeroed (per the commit
    // this file belongs to) — confirm against CopiedBlock's invariant.
    if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
        size_t delta = newSize - oldSize;
        if (m_allocator.fitsInCurrentBlock(delta)) {
            (void)m_allocator.allocate(delta);
            return true;
        }
    }

    // Slow path: allocate fresh storage and copy the old contents over; the
    // abandoned old storage stays inside its block until collection.
    void* result = 0;
    if (!tryAllocate(newSize, &result)) {
        *ptr = 0;
        return false;
    }
    memcpy(result, oldPtr, oldSize);
    *ptr = result;
    return true;
}
119
// Moves a growing allocation when the old or new size is oversize. Always
// allocates a fresh oversize block and copies; if the old allocation had its
// own oversize block, that block is unlinked, subtracted from the watermark,
// and returned to the OS immediately. On failure *ptr is zeroed and false
// is returned.
CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
{
    ASSERT(isOversize(oldSize) || isOversize(newSize));
    ASSERT(newSize > oldSize);

    void* oldPtr = *ptr;
    
    void* newPtr = 0;
    if (!tryAllocateOversize(newSize, &newPtr)) {
        *ptr = 0;
        return false;
    }

    memcpy(newPtr, oldPtr, oldSize);

    // Only an oversize old allocation owns a whole block we can free now.
    // (A small old allocation stays in its shared block — presumably
    // reclaimed when that block is evacuated; verify against the collector.)
    if (isOversize(oldSize)) {
        CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
        m_oversizeBlocks.remove(oldBlock);
        m_waterMark -= oldBlock->capacity();
        oldBlock->m_allocation.deallocate();
    }
    
    *ptr = newPtr;
    return true;
}
145
// Called during the copying phase when a borrowed block has been filled.
// Empty blocks are recycled untouched; partially filled blocks are published
// into to-space. Returning the last outstanding loan wakes doneCopying().
void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
    ASSERT(block);
    ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
    ASSERT(m_inCopyingPhase);

    // Nothing was copied into this block; hand it straight back.
    if (block->m_offset == block->payload()) {
        recycleBlock(block);
        return;
    }

    // Copying runs concurrently, so each shared structure is updated under
    // its own lock, in separate scopes to keep the critical sections short.
    {
        MutexLocker locker(m_toSpaceLock);
        m_toSpace->push(block);
        m_toSpaceSet.add(block);
        m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        MutexLocker locker(m_memoryStatsLock);
        m_waterMark += block->capacity();
    }

    // Repay the loan; signal once all outstanding blocks are back so that
    // doneCopying() can stop waiting.
    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        m_numberOfLoanedBlocks--;
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}
177
// Ends the copying phase. Waits until every loaned block has been returned,
// then retires from-space: unpinned blocks go back to the heap's free list,
// pinned ones survive into to-space. Unpinned oversize blocks are released
// to the OS. Finally ensures the allocator has a current block.
void CopiedSpace::doneCopying()
{
    // Rendezvous: block until doneFillingBlock()/recycleBlock() has repaid
    // every outstanding loan.
    {
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        // Pinned blocks hold objects that were not moved; keep them alive in
        // to-space, count their capacity, and clear the pin for next cycle.
        if (block->m_isPinned) {
            block->m_isPinned = false;
            m_toSpace->push(block);
            m_waterMark += block->capacity();
            continue;
        }

        // Fully evacuated: drop it from the to-space set and return the
        // storage to the heap's shared free list under its lock.
        m_toSpaceSet.remove(block);
        {
            MutexLocker locker(m_heap->m_freeBlockLock);
            m_heap->m_freeBlocks.push(block);
            m_heap->m_numberOfFreeBlocks++;
        }
    }

    // Sweep oversize blocks: free the unpinned ones, unpin the survivors.
    // next is grabbed first because remove() unlinks curr from the list.
    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            curr->m_allocation.deallocate();
        } else {
            curr->m_isPinned = false;
            m_waterMark += curr->capacity();
        }
        curr = next;
    }

    // Guarantee the bump allocator has a block to continue from.
    if (!m_toSpace->head()) {
        if (!addNewBlock())
            CRASH();
    } else
        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
224
// Obtains an empty block for copying, preferring the heap's shared free
// list. With AllocationMustSucceed, a failed OS allocation is a fatal bug
// (ASSERT_NOT_REACHED). With AllocationCanFail, memory pressure is used as
// an opportunity to collect first, then the request is retried with
// must-succeed semantics.
CheckedBoolean CopiedSpace::getFreshBlock(AllocationEffort allocationEffort, CopiedBlock** outBlock)
{
    HeapBlock* heapBlock = 0;
    CopiedBlock* block = 0;
    {
        // The free list is shared heap-wide; pop under the lock if possible.
        MutexLocker locker(m_heap->m_freeBlockLock);
        if (!m_heap->m_freeBlocks.isEmpty()) {
            heapBlock = m_heap->m_freeBlocks.removeHead();
            m_heap->m_numberOfFreeBlocks--;
        }
    }
    if (heapBlock)
        // Re-purpose the recycled storage as a CopiedBlock, in place.
        block = new (NotNull, heapBlock) CopiedBlock(heapBlock->m_allocation);
    else if (allocationEffort == AllocationMustSucceed) {
        if (!allocateNewBlock(&block)) {
            *outBlock = 0;
            ASSERT_NOT_REACHED();
            return false;
        }
    } else {
        ASSERT(allocationEffort == AllocationCanFail);
        if (m_heap->shouldCollect())
            m_heap->collect(Heap::DoNotSweep);
        
        // Retry with must-succeed semantics (recursion depth is one, since
        // the must-succeed branch never recurses).
        if (!getFreshBlock(AllocationMustSucceed, &block)) {
            *outBlock = 0;
            ASSERT_NOT_REACHED();
            return false;
        }
    }
    ASSERT(block);
    ASSERT(is8ByteAligned(block->m_offset));
    *outBlock = block;
    return true;
}
260
261 void CopiedSpace::destroy()
262 {
263     while (!m_toSpace->isEmpty()) {
264         CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->removeHead());
265         MutexLocker locker(m_heap->m_freeBlockLock);
266         m_heap->m_freeBlocks.append(block);
267         m_heap->m_numberOfFreeBlocks++;
268     }
269
270     while (!m_fromSpace->isEmpty()) {
271         CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
272         MutexLocker locker(m_heap->m_freeBlockLock);
273         m_heap->m_freeBlocks.append(block);
274         m_heap->m_numberOfFreeBlocks++;
275     }
276
277     while (!m_oversizeBlocks.isEmpty()) {
278         CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.removeHead());
279         block->m_allocation.deallocate();
280     }
281
282     m_waterMark = 0;
283 }
284
285 size_t CopiedSpace::size()
286 {
287     size_t calculatedSize = 0;
288
289     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
290         calculatedSize += block->size();
291
292     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
293         calculatedSize += block->size();
294
295     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
296         calculatedSize += block->size();
297
298     return calculatedSize;
299 }
300
301 size_t CopiedSpace::capacity()
302 {
303     size_t calculatedCapacity = 0;
304
305     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
306         calculatedCapacity += block->capacity();
307
308     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
309         calculatedCapacity += block->capacity();
310
311     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
312         calculatedCapacity += block->capacity();
313
314     return calculatedCapacity;
315 }
316
317 } // namespace JSC