JSLock should be per-JSGlobalData
[WebKit-https.git] / Source / JavaScriptCore / heap / CopiedSpace.cpp
1 /*
2  * Copyright (C) 2011 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "CopiedSpace.h"
28
29 #include "CopiedSpaceInlineMethods.h"
30 #include "GCActivityCallback.h"
31
32 namespace JSC {
33
// Constructs the copied (bump-allocated) space for the given Heap. The two
// semispace list pointers start out null; init() must be called before the
// space can service allocations.
CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_toSpace(0)
    , m_fromSpace(0)
    , m_inCopyingPhase(false)
    , m_numberOfLoanedBlocks(0)
{
    // The spin lock has no constructor; it must be explicitly initialized
    // before first use.
    m_toSpaceLock.Init();
}
43
// Tears down every remaining block. Normal-sized blocks in to-space and
// from-space are returned to the heap's shared block allocator, while
// oversize blocks own their page allocations directly and release them here
// via PageAllocationAligned::deallocate().
CopiedSpace::~CopiedSpace()
{
    while (!m_toSpace->isEmpty())
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(static_cast<CopiedBlock*>(m_toSpace->removeHead())));

    while (!m_fromSpace->isEmpty())
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(static_cast<CopiedBlock*>(m_fromSpace->removeHead())));

    while (!m_oversizeBlocks.isEmpty())
        CopiedBlock::destroy(static_cast<CopiedBlock*>(m_oversizeBlocks.removeHead())).deallocate();
}
55
56 void CopiedSpace::init()
57 {
58     m_toSpace = &m_blocks1;
59     m_fromSpace = &m_blocks2;
60     
61     allocateBlock();
62 }   
63
// Slow path for allocation, taken when the current block cannot satisfy the
// request. Oversize requests are diverted to the dedicated oversize path;
// otherwise a fresh block is installed and the request retried, which must
// succeed for a non-oversize size.
CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
{
    if (isOversize(bytes))
        return tryAllocateOversize(bytes, outPtr);
    
    // Mutating the copied space requires holding the per-global-data API lock.
    ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock());
    // Credit the capacity of the block being retired so the GC's allocation
    // accounting sees progress.
    m_heap->didAllocate(m_allocator.currentCapacity());

    allocateBlock();

    // A fresh block always has room for a non-oversize request.
    *outPtr = m_allocator.allocate(bytes);
    ASSERT(*outPtr);
    return true;
}
78
79 CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
80 {
81     ASSERT(isOversize(bytes));
82     
83     size_t blockSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(CopiedBlock) + bytes);
84
85     PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, WTF::pageSize(), OSAllocator::JSGCHeapPages);
86     if (!static_cast<bool>(allocation)) {
87         *outPtr = 0;
88         return false;
89     }
90
91     CopiedBlock* block = CopiedBlock::create(allocation);
92     m_oversizeBlocks.push(block);
93     m_blockFilter.add(reinterpret_cast<Bits>(block));
94     m_blockSet.add(block);
95     
96     *outPtr = allocateFromBlock(block, bytes);
97
98     m_heap->didAllocate(blockSize);
99
100     return true;
101 }
102
103 CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
104 {
105     if (oldSize >= newSize)
106         return true;
107     
108     void* oldPtr = *ptr;
109     ASSERT(!m_heap->globalData()->isInitializingObject());
110
111     if (isOversize(oldSize) || isOversize(newSize))
112         return tryReallocateOversize(ptr, oldSize, newSize);
113
114     if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
115         size_t delta = newSize - oldSize;
116         if (m_allocator.fitsInCurrentBlock(delta)) {
117             (void)m_allocator.allocate(delta);
118             return true;
119         }
120     }
121
122     void* result = 0;
123     if (!tryAllocate(newSize, &result)) {
124         *ptr = 0;
125         return false;
126     }
127     memcpy(result, oldPtr, oldSize);
128     *ptr = result;
129     return true;
130 }
131
132 CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
133 {
134     ASSERT(isOversize(oldSize) || isOversize(newSize));
135     ASSERT(newSize > oldSize);
136
137     void* oldPtr = *ptr;
138     
139     void* newPtr = 0;
140     if (!tryAllocateOversize(newSize, &newPtr)) {
141         *ptr = 0;
142         return false;
143     }
144
145     memcpy(newPtr, oldPtr, oldSize);
146
147     if (isOversize(oldSize)) {
148         CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
149         m_oversizeBlocks.remove(oldBlock);
150         m_blockSet.remove(oldBlock);
151         CopiedBlock::destroy(oldBlock).deallocate();
152     }
153     
154     *ptr = newPtr;
155     return true;
156 }
157
// Called when a loaned-out block has been filled during the copying phase.
// Empty blocks are recycled; partially filled blocks are zero-padded to the
// end and published into to-space. Finally the loan count is decremented,
// waking doneCopying() when the last loan is returned.
void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
    ASSERT(block);
    ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
    ASSERT(m_inCopyingPhase);

    // Nothing was copied into this block; hand it straight back.
    if (block->m_offset == block->payload()) {
        recycleBlock(block);
        return;
    }

    // Zero the unused tail so later scans never see stale data.
    block->zeroFillToEnd();

    {
        // Blocks can be finished concurrently, so publication into to-space
        // (list, set, and filter) is guarded by the spin lock.
        SpinLockHolder locker(&m_toSpaceLock);
        m_toSpace->push(block);
        m_blockSet.add(block);
        m_blockFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        // Account for the returned loan; signal only when the count reaches
        // zero, since that is the condition doneCopying() waits on.
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        m_numberOfLoanedBlocks--;
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}
186
// Finishes a copying cycle: waits until every loaned block has been returned,
// then reclaims from-space. Pinned blocks survive (normal blocks move back
// into to-space; oversize blocks stay on their list); everything else is
// freed. Ends by ensuring the allocator has a current block.
void CopiedSpace::doneCopying()
{
    {
        // Block until all outstanding loans have come back through
        // doneFillingBlock()/recycleBlock().
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        if (block->m_isPinned) {
            block->m_isPinned = false;
            // We don't add the block to the blockSet because it was never removed.
            ASSERT(m_blockSet.contains(block));
            m_blockFilter.add(reinterpret_cast<Bits>(block));
            m_toSpace->push(block);
            continue;
        }

        // Unpinned from-space blocks are dead after evacuation; return them
        // to the shared block allocator.
        m_blockSet.remove(block);
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
    }

    // Oversize blocks are never evacuated: pinned ones survive in place
    // (and are re-added to the filter), unpinned ones release their pages.
    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        // Grab next before potentially unlinking curr from the list.
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            m_blockSet.remove(curr);
            CopiedBlock::destroy(curr).deallocate();
        } else {
            m_blockFilter.add(reinterpret_cast<Bits>(curr));
            curr->m_isPinned = false;
        }
        curr = next;
    }

    // Ensure the bump allocator has a block to allocate from next cycle.
    if (!m_toSpace->head())
        allocateBlock();
    else
        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
231
232 size_t CopiedSpace::size()
233 {
234     size_t calculatedSize = 0;
235
236     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
237         calculatedSize += block->size();
238
239     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
240         calculatedSize += block->size();
241
242     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
243         calculatedSize += block->size();
244
245     return calculatedSize;
246 }
247
248 size_t CopiedSpace::capacity()
249 {
250     size_t calculatedCapacity = 0;
251
252     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
253         calculatedCapacity += block->capacity();
254
255     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
256         calculatedCapacity += block->capacity();
257
258     for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
259         calculatedCapacity += block->capacity();
260
261     return calculatedCapacity;
262 }
263
264 static bool isBlockListPagedOut(double deadline, DoublyLinkedList<HeapBlock>* list)
265 {
266     unsigned itersSinceLastTimeCheck = 0;
267     HeapBlock* current = list->head();
268     while (current) {
269         current = current->next();
270         ++itersSinceLastTimeCheck;
271         if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
272             double currentTime = WTF::monotonicallyIncreasingTime();
273             if (currentTime > deadline)
274                 return true;
275             itersSinceLastTimeCheck = 0;
276         }
277     }
278
279     return false;
280 }
281
282 bool CopiedSpace::isPagedOut(double deadline)
283 {
284     return isBlockListPagedOut(deadline, m_toSpace) 
285         || isBlockListPagedOut(deadline, m_fromSpace) 
286         || isBlockListPagedOut(deadline, &m_oversizeBlocks);
287 }
288
289 } // namespace JSC