Stop using ThreadCondition in BlockAllocator
[WebKit-https.git] / Source / JavaScriptCore / heap / BlockAllocator.h
1 /*
2  * Copyright (C) 2012 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23  * THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #ifndef BlockAllocator_h
27 #define BlockAllocator_h
28
29 #include "GCActivityCallback.h"
30 #include "HeapBlock.h"
31 #include "Region.h"
32 #include <mutex>
33 #include <wtf/DoublyLinkedList.h>
34 #include <wtf/Forward.h>
35 #include <wtf/PageAllocationAligned.h>
36 #include <wtf/TCSpinLock.h>
37 #include <wtf/Threading.h>
38
39 namespace JSC {
40
41 class BlockAllocator;
42 class CopiedBlock;
43 class CopyWorkListSegment;
44 class HandleBlock;
45 class VM;
46 class MarkStackSegment;
47 class MarkedBlock;
48 class WeakBlock;
49
50 // Simple allocator to reduce VM cost by holding onto blocks of memory for
51 // short periods of time and then freeing them on a secondary thread.
52
class BlockAllocator {
public:
    BlockAllocator();
    ~BlockAllocator();

    // Returns a free block suitable for use as a T (block size taken from T::blockSize).
    template <typename T> DeadBlock* allocate();
    // Allocates a one-off block from a dedicated custom-size region.
    DeadBlock* allocateCustomSize(size_t blockSize, size_t blockAlignment);
    // Returns a block to its region; may wake the background block-freeing thread.
    template <typename T> void deallocate(T*);
    // Destroys the block together with its dedicated custom-size region.
    template <typename T> void deallocateCustomSize(T*);

private:
    void waitForDuration(std::chrono::milliseconds);

    friend ThreadIdentifier createBlockFreeingThread(BlockAllocator*);
    void blockFreeingThreadMain();
    static void blockFreeingThreadStartFunc(void* heap);

    // Bookkeeping for all regions that carve out blocks of one fixed size.
    struct RegionSet {
        RegionSet(size_t blockSize)
            : m_numberOfPartialRegions(0)
            , m_blockSize(blockSize)
        {
        }

        bool isEmpty() const
        {
            return m_fullRegions.isEmpty() && m_partialRegions.isEmpty();
        }

        DoublyLinkedList<Region> m_fullRegions;    // regions with no free blocks left
        DoublyLinkedList<Region> m_partialRegions; // regions with some blocks in use, some free
        size_t m_numberOfPartialRegions;
        size_t m_blockSize;
    };

    DeadBlock* tryAllocateFromRegion(RegionSet&, DoublyLinkedList<Region>&, size_t&);

    bool allRegionSetsAreEmpty() const;
    void releaseFreeRegions();

    // Maps a block type to the RegionSet that manages blocks of its size.
    template <typename T> RegionSet& regionSetFor();

    SuperRegion m_superRegion;
    RegionSet m_copiedRegionSet;
    RegionSet m_markedRegionSet;
    // WeakBlocks and MarkStackSegments use the same RegionSet since they're the same size.
    RegionSet m_fourKBBlockRegionSet;
    RegionSet m_workListRegionSet;

    // Fully free regions, shared across all sets.
    DoublyLinkedList<Region> m_emptyRegions;
    size_t m_numberOfEmptyRegions;

    bool m_isCurrentlyAllocating;
    bool m_blockFreeingThreadShouldQuit;
    SpinLock m_regionLock; // guards the region lists and counters above
    std::mutex m_emptyRegionConditionMutex;
    std::condition_variable m_emptyRegionCondition; // signaled on the 0 -> 1 empty-region transition (see deallocate())
    ThreadIdentifier m_blockFreeingThread;
};
112
113 inline DeadBlock* BlockAllocator::tryAllocateFromRegion(RegionSet& set, DoublyLinkedList<Region>& regions, size_t& numberOfRegions)
114 {
115     if (numberOfRegions) {
116         ASSERT(!regions.isEmpty());
117         Region* region = regions.head();
118         ASSERT(!region->isFull());
119
120         if (region->isEmpty()) {
121             ASSERT(region == m_emptyRegions.head());
122             m_numberOfEmptyRegions--;
123             set.m_numberOfPartialRegions++;
124             region = m_emptyRegions.removeHead()->reset(set.m_blockSize);
125             set.m_partialRegions.push(region);
126         }
127
128         DeadBlock* block = region->allocate();
129
130         if (region->isFull()) {
131             set.m_numberOfPartialRegions--;
132             set.m_fullRegions.push(set.m_partialRegions.removeHead());
133         }
134
135         return block;
136     }
137     return 0;
138 }
139
// Returns a free block for a T, preferring existing partial regions, then empty
// regions, and only then creating a brand-new region.
template<typename T>
inline DeadBlock* BlockAllocator::allocate()
{
    RegionSet& set = regionSetFor<T>();
    DeadBlock* block;
    // Written outside m_regionLock — presumably a best-effort activity hint read
    // by the block-freeing thread; TODO confirm against BlockAllocator.cpp.
    m_isCurrentlyAllocating = true;
    {
        // Fast path: reuse a block from a partial region, else adopt an empty one.
        SpinLockHolder locker(&m_regionLock);
        if ((block = tryAllocateFromRegion(set, set.m_partialRegions, set.m_numberOfPartialRegions)))
            return block;
        if ((block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions)))
            return block;
    }

    // Slow path: create a fresh region. Deliberately done with the lock released,
    // since Region::create allocates memory and may be slow.
    Region* newRegion = Region::create(&m_superRegion, T::blockSize);

    SpinLockHolder locker(&m_regionLock);
    m_emptyRegions.push(newRegion);
    m_numberOfEmptyRegions++;
    // Must succeed: we just pushed an empty region and still hold the lock.
    block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions);
    ASSERT(block);
    return block;
}
163
164 inline DeadBlock* BlockAllocator::allocateCustomSize(size_t blockSize, size_t blockAlignment)
165 {
166     size_t realSize = WTF::roundUpToMultipleOf(blockAlignment, blockSize);
167     Region* newRegion = Region::createCustomSize(&m_superRegion, realSize, blockAlignment);
168     DeadBlock* block = newRegion->allocate();
169     ASSERT(block);
170     return block;
171 }
172
// Returns a block to its region, migrating the region between the full /
// partial / empty lists as its occupancy changes.
template<typename T>
inline void BlockAllocator::deallocate(T* block)
{
    RegionSet& set = regionSetFor<T>();
    bool shouldWakeBlockFreeingThread = false;
    {
        SpinLockHolder locker(&m_regionLock);
        Region* region = block->region();
        ASSERT(!region->isEmpty());
        // Detach the region from whichever list currently tracks it.
        if (region->isFull())
            set.m_fullRegions.remove(region);
        else {
            set.m_partialRegions.remove(region);
            set.m_numberOfPartialRegions--;
        }

        region->deallocate(block);

        if (region->isEmpty()) {
            m_emptyRegions.push(region);
            // Only the 0 -> 1 transition needs a wakeup; the freeing thread
            // drains the whole list once notified.
            shouldWakeBlockFreeingThread = !m_numberOfEmptyRegions;
            m_numberOfEmptyRegions++;
        } else {
            set.m_partialRegions.push(region);
            set.m_numberOfPartialRegions++;
        }
    }

    if (shouldWakeBlockFreeingThread) {
        // Notify while holding the condition mutex so the wakeup cannot be
        // lost between the freeing thread's predicate check and its wait.
        std::lock_guard<std::mutex> lock(m_emptyRegionConditionMutex);
        m_emptyRegionCondition.notify_one();
    }

    // NOTE(review): m_blockFreeingThread is read without synchronization here —
    // presumably it is only written during startup/shutdown; confirm.
    if (!m_blockFreeingThread)
        releaseFreeRegions();
}
209
210 template<typename T>
211 inline void BlockAllocator::deallocateCustomSize(T* block)
212 {
213     Region* region = block->region();
214     ASSERT(region->isCustomSize());
215     region->deallocate(block);
216     region->destroy();
217 }
218
// Explicit specializations routing each block type (and its HeapBlock form)
// to the RegionSet whose fixed block size matches it.

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<CopiedBlock>()
{
    return m_copiedRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<MarkedBlock>()
{
    return m_markedRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<WeakBlock>()
{
    return m_fourKBBlockRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<MarkStackSegment>()
{
    return m_fourKBBlockRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<CopyWorkListSegment>()
{
    return m_workListRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HandleBlock>()
{
    return m_fourKBBlockRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<CopiedBlock>>()
{
    return m_copiedRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<MarkedBlock>>()
{
    return m_markedRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<WeakBlock>>()
{
    return m_fourKBBlockRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<MarkStackSegment>>()
{
    return m_fourKBBlockRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<CopyWorkListSegment>>()
{
    return m_workListRegionSet;
}

template <>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<HandleBlock>>()
{
    return m_fourKBBlockRegionSet;
}

// Catch-all: every supported type must match one of the specializations above.
template <typename T>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor()
{
    RELEASE_ASSERT_NOT_REACHED();
    // Unreachable — the bogus return only silences the missing-return warning.
    return *(RegionSet*)0;
}
297
298 } // namespace JSC
299
300 #endif // BlockAllocator_h