147dfa4b3208dab19bba94e5425914e1b2d71fb5
[WebKit-https.git] / Source / JavaScriptCore / heap / CopiedSpace.cpp
1 /*
2  * Copyright (C) 2011 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "CopiedSpace.h"
28
29 #include "CopiedSpaceInlineMethods.h"
30 #include "GCActivityCallback.h"
31
32 namespace JSC {
33
// Constructs a copied (bump-allocated, semispace) space owned by the given
// Heap. Both semispace pointers start out null; init() must be called before
// use to point them at m_blocks1/m_blocks2 and allocate the first block.
CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_toSpace(0)
    , m_fromSpace(0)
    , m_inCopyingPhase(false)
    , m_numberOfLoanedBlocks(0)
{
    // The spin lock is a POD with no constructor; it requires explicit init.
    m_toSpaceLock.Init();
}
43
44 CopiedSpace::~CopiedSpace()
45 {
46     while (!m_toSpace->isEmpty())
47         m_heap->blockAllocator().deallocate(CopiedBlock::destroy(static_cast<CopiedBlock*>(m_toSpace->removeHead())));
48
49     while (!m_fromSpace->isEmpty())
50         m_heap->blockAllocator().deallocate(CopiedBlock::destroy(static_cast<CopiedBlock*>(m_fromSpace->removeHead())));
51
52     while (!m_oversizeBlocks.isEmpty())
53         CopiedBlock::destroy(static_cast<CopiedBlock*>(m_oversizeBlocks.removeHead())).deallocate();
54 }
55
56 void CopiedSpace::init()
57 {
58     m_toSpace = &m_blocks1;
59     m_fromSpace = &m_blocks2;
60     
61     allocateBlock();
62 }   
63
// Slow path taken when the inline allocator's current block cannot satisfy
// the request: either divert to the dedicated oversize path, or retire the
// current block and start a fresh one.
CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
{
    if (isOversize(bytes))
        return tryAllocateOversize(bytes, outPtr);
    
    ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock());
    // Credit the heap with the capacity of the block we are about to retire,
    // before allocateBlock() repoints the allocator at a new one.
    m_heap->didAllocate(m_allocator.currentCapacity());

    allocateBlock();

    // The freshly allocated block is empty, so this bump allocation is
    // guaranteed to succeed (hence forceAllocate).
    *outPtr = m_allocator.forceAllocate(bytes);
    return true;
}
77
78 CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
79 {
80     ASSERT(isOversize(bytes));
81     
82     size_t blockSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(CopiedBlock) + bytes);
83
84     PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, WTF::pageSize(), OSAllocator::JSGCHeapPages);
85     if (!static_cast<bool>(allocation)) {
86         *outPtr = 0;
87         return false;
88     }
89
90     CopiedBlock* block = CopiedBlock::create(allocation);
91     m_oversizeBlocks.push(block);
92     m_blockFilter.add(reinterpret_cast<Bits>(block));
93     m_blockSet.add(block);
94     
95     CopiedAllocator allocator;
96     allocator.setCurrentBlock(block);
97     *outPtr = allocator.forceAllocate(bytes);
98     allocator.resetCurrentBlock();
99
100     m_heap->didAllocate(blockSize);
101
102     return true;
103 }
104
105 CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
106 {
107     if (oldSize >= newSize)
108         return true;
109     
110     void* oldPtr = *ptr;
111     ASSERT(!m_heap->globalData()->isInitializingObject());
112     
113     if (isOversize(oldSize) || isOversize(newSize))
114         return tryReallocateOversize(ptr, oldSize, newSize);
115     
116     if (m_allocator.tryReallocate(oldPtr, oldSize, newSize))
117         return true;
118
119     void* result = 0;
120     if (!tryAllocate(newSize, &result)) {
121         *ptr = 0;
122         return false;
123     }
124     memcpy(result, oldPtr, oldSize);
125     *ptr = result;
126     return true;
127 }
128
129 CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
130 {
131     ASSERT(isOversize(oldSize) || isOversize(newSize));
132     ASSERT(newSize > oldSize);
133
134     void* oldPtr = *ptr;
135     
136     void* newPtr = 0;
137     if (!tryAllocateOversize(newSize, &newPtr)) {
138         *ptr = 0;
139         return false;
140     }
141
142     memcpy(newPtr, oldPtr, oldSize);
143
144     if (isOversize(oldSize)) {
145         CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
146         m_oversizeBlocks.remove(oldBlock);
147         m_blockSet.remove(oldBlock);
148         CopiedBlock::destroy(oldBlock).deallocate();
149     }
150     
151     *ptr = newPtr;
152     return true;
153 }
154
// Called by a copying thread when it has finished filling a block it
// borrowed ("loaned") from this space. Publishes the block into to-space
// and repays the loan, waking doneCopying() when the last loan is repaid.
void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
    ASSERT(m_inCopyingPhase);
    
    if (!block)
        return;

    // A block that received no data can simply be recycled; it never needs
    // to enter to-space.
    if (!block->dataSize()) {
        recycleBlock(block);
        return;
    }

    // Zero-fill the block's unused tail (its "wilderness").
    block->zeroFillWilderness();

    {
        // to-space is shared with other copying threads, hence the spin lock.
        SpinLockHolder locker(&m_toSpaceLock);
        m_toSpace->push(block);
        m_blockSet.add(block);
        m_blockFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        m_numberOfLoanedBlocks--;
        // doneCopying() blocks on this condition until every loan is repaid.
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}
184
// Ends a copying phase: waits until every loaned block has been returned,
// then reclaims from-space and unpinned oversize blocks. Pinned blocks
// survive — from-space ones are moved into to-space, oversize ones stay put;
// both have their pin bit cleared for the next cycle.
void CopiedSpace::doneCopying()
{
    {
        // Block until all borrowed blocks have been handed back via
        // doneFillingBlock()/recycleBlock().
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        if (block->m_isPinned) {
            block->m_isPinned = false;
            // We don't add the block to the blockSet because it was never removed.
            ASSERT(m_blockSet.contains(block));
            m_blockFilter.add(reinterpret_cast<Bits>(block));
            m_toSpace->push(block);
            continue;
        }

        // Unpinned from-space blocks are garbage once evacuation is done.
        m_blockSet.remove(block);
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
    }

    // Sweep the oversize list: free unpinned blocks, unpin survivors.
    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        // Capture the successor first; removing curr invalidates its links.
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            m_blockSet.remove(curr);
            CopiedBlock::destroy(curr).deallocate();
        } else {
            m_blockFilter.add(reinterpret_cast<Bits>(curr));
            curr->m_isPinned = false;
        }
        curr = next;
    }

    // Leave the allocator with a current block to bump-allocate from.
    if (!m_toSpace->head())
        allocateBlock();
    else
        m_allocator.setCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
229
230 size_t CopiedSpace::size()
231 {
232     size_t calculatedSize = 0;
233
234     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
235         calculatedSize += block->size();
236
237     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
238         calculatedSize += block->size();
239
240     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
241         calculatedSize += block->size();
242
243     return calculatedSize;
244 }
245
246 size_t CopiedSpace::capacity()
247 {
248     size_t calculatedCapacity = 0;
249
250     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
251         calculatedCapacity += block->capacity();
252
253     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
254         calculatedCapacity += block->capacity();
255
256     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
257         calculatedCapacity += block->capacity();
258
259     return calculatedCapacity;
260 }
261
262 static bool isBlockListPagedOut(double deadline, DoublyLinkedList<HeapBlock>* list)
263 {
264     unsigned itersSinceLastTimeCheck = 0;
265     HeapBlock* current = list->head();
266     while (current) {
267         current = current->next();
268         ++itersSinceLastTimeCheck;
269         if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
270             double currentTime = WTF::monotonicallyIncreasingTime();
271             if (currentTime > deadline)
272                 return true;
273             itersSinceLastTimeCheck = 0;
274         }
275     }
276
277     return false;
278 }
279
280 bool CopiedSpace::isPagedOut(double deadline)
281 {
282     return isBlockListPagedOut(deadline, m_toSpace) 
283         || isBlockListPagedOut(deadline, m_fromSpace) 
284         || isBlockListPagedOut(deadline, &m_oversizeBlocks);
285 }
286
287 } // namespace JSC