GC constraint solving should be parallel
Source/JavaScriptCore/heap/MarkingConstraintSolver.cpp
/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkingConstraintSolver.h"

#include "JSCInlines.h"

namespace JSC {

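// The solver captures a VisitCounter for every slot visitor so that didVisitSomething() can
// detect whether constraint execution has produced any marking work.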
MarkingConstraintSolver::MarkingConstraintSolver(MarkingConstraintSet& set)
    : m_heap(set.m_heap)
    , m_mainVisitor(m_heap.collectorSlotVisitor())
    , m_set(set)
{
    m_heap.forEachSlotVisitor(
        [&] (SlotVisitor& visitor) {
            m_visitCounters.append(VisitCounter(visitor));
        });
}

MarkingConstraintSolver::~MarkingConstraintSolver()
{
}

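// Returns true if any slot visitor has visited something since the solver was constructed.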
bool MarkingConstraintSolver::didVisitSomething() const
{
    for (const VisitCounter& visitCounter : m_visitCounters) {
        if (visitCounter.visitCount())
            return true;
    }
    return false;
}

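// Core scheduling entry point: runs the constraints handed out by pickNext(), in parallel when
// Options::useParallelMarkingConstraintSolver() is set, then finishes any deferred parallel work
// and runs the constraints that must execute sequentially on the main visitor.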
void MarkingConstraintSolver::execute(SchedulerPreference preference, ScopedLambda<std::optional<unsigned>()> pickNext)
{
    m_pickNextIsStillActive = true;
    RELEASE_ASSERT(!m_numThreadsThatMayProduceWork);

    if (Options::useParallelMarkingConstraintSolver()) {
        if (Options::logGC())
            dataLog(preference == ParallelWorkFirst ? "P" : "N", "<");

        m_heap.runFunctionInParallel(
            [&] (SlotVisitor& visitor) { runExecutionThread(visitor, preference, pickNext); });

        if (Options::logGC())
            dataLog(">");
    } else
        runExecutionThread(m_mainVisitor, preference, pickNext);

    RELEASE_ASSERT(!m_pickNextIsStillActive);
    RELEASE_ASSERT(!m_numThreadsThatMayProduceWork);

    for (unsigned indexToRun : m_didExecuteInParallel)
        m_set.m_set[indexToRun]->finishParallelWork(m_mainVisitor);
    m_didExecuteInParallel.clear();

    if (!m_toExecuteSequentially.isEmpty()) {
        for (unsigned indexToRun : m_toExecuteSequentially)
            execute(*m_set.m_set[indexToRun]);
        m_toExecuteSequentially.clear();
    }

    RELEASE_ASSERT(m_toExecuteInParallel.isEmpty());
    RELEASE_ASSERT(!m_toExecuteInParallelSet.bitCount());
}

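// Executes every constraint whose index is set in the given bit vector, preferring to start the
// next constraint before picking up pending parallel work, then clears the bit vector.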
void MarkingConstraintSolver::drain(BitVector& unexecuted)
{
    auto iter = unexecuted.begin();
    auto end = unexecuted.end();
    if (iter == end)
        return;
    auto pickNext = scopedLambda<std::optional<unsigned>()>(
        [&] () -> std::optional<unsigned> {
            if (iter == end)
                return std::nullopt;
            return *iter++;
        });
    execute(NextConstraintFirst, pickNext);
    unexecuted.clearAll();
}

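// Runs constraints in the given order, stopping as soon as some constraint generates marking
// work, since at that point it is better to return to draining (see the comment below).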
void MarkingConstraintSolver::converge(const Vector<MarkingConstraint*>& order)
{
    if (didVisitSomething())
        return;

    if (order.isEmpty())
        return;

    size_t index = 0;

    // We want to execute the first constraint sequentially if we think it will quickly give us a
    // result. If we ran it in parallel to other constraints, then we might end up having to wait for
    // those other constraints to finish, which would be a waste of time since during convergence it's
    // empirically most optimal to return to draining as soon as a constraint generates work. Most
    // constraints don't generate any work most of the time, and when they do generate work, they tend
    // to generate enough of it to feed a decent draining cycle. Therefore, pause times are lowest if
    // we get the heck out of here as soon as a constraint generates work. I think that part of what
    // makes this optimal is that we also never abort running a constraint early, so when we do run
    // one, it has an opportunity to generate as much work as it possibly can.
    if (order[index]->quickWorkEstimate(m_mainVisitor) > 0.) {
        execute(*order[index++]);

        if (m_toExecuteInParallel.isEmpty()
            && (order.isEmpty() || didVisitSomething()))
            return;
    }

    auto pickNext = scopedLambda<std::optional<unsigned>()>(
        [&] () -> std::optional<unsigned> {
            if (didVisitSomething())
                return std::nullopt;

            if (index >= order.size())
                return std::nullopt;

            MarkingConstraint& constraint = *order[index++];
            return constraint.index();
        });

    execute(ParallelWorkFirst, pickNext);
}

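// Runs a single constraint on the main visitor, unless it has already executed, and records
// whether it produced parallel work.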
void MarkingConstraintSolver::execute(MarkingConstraint& constraint)
{
    if (m_executed.get(constraint.index()))
        return;

    constraint.prepareToExecute(NoLockingNecessary, m_mainVisitor);
    ConstraintParallelism parallelism = constraint.execute(m_mainVisitor);
    didExecute(parallelism, constraint.index());
}

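// Per-thread scheduling loop. Under the lock, each thread picks either pending parallel work or
// the next constraint from pickNext(), in the order given by the scheduler preference, then runs
// the chosen work outside the lock and publishes the result. A thread waits only while some other
// thread is still running a constraint that may produce parallel work.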
void MarkingConstraintSolver::runExecutionThread(SlotVisitor& visitor, SchedulerPreference preference, ScopedLambda<std::optional<unsigned>()> pickNext)
{
    for (;;) {
        bool doParallelWorkMode;
        unsigned indexToRun;
        {
            auto locker = holdLock(m_lock);

            for (;;) {
                auto tryParallelWork = [&] () -> bool {
                    if (m_toExecuteInParallel.isEmpty())
                        return false;

                    indexToRun = m_toExecuteInParallel.first();
                    doParallelWorkMode = true;
                    return true;
                };

                auto tryNextConstraint = [&] () -> bool {
                    if (!m_pickNextIsStillActive)
                        return false;

                    for (;;) {
                        std::optional<unsigned> pickResult = pickNext();
                        if (!pickResult) {
                            m_pickNextIsStillActive = false;
                            return false;
                        }

                        if (m_executed.get(*pickResult))
                            continue;

                        MarkingConstraint& constraint = *m_set.m_set[*pickResult];
                        if (constraint.concurrency() == ConstraintConcurrency::Sequential) {
                            m_toExecuteSequentially.append(*pickResult);
                            continue;
                        }
                        if (constraint.parallelism() == ConstraintParallelism::Parallel)
                            m_numThreadsThatMayProduceWork++;
                        indexToRun = *pickResult;
                        doParallelWorkMode = false;
                        constraint.prepareToExecute(locker, visitor);
                        return true;
                    }
                };

                if (preference == ParallelWorkFirst) {
                    if (tryParallelWork() || tryNextConstraint())
                        break;
                } else {
                    if (tryNextConstraint() || tryParallelWork())
                        break;
                }

                // This means that we have nothing left to run. The only way for us to have more work is
                // if someone is running a constraint that may produce parallel work.

                if (!m_numThreadsThatMayProduceWork)
                    return;

                // FIXME: Any waiting could be replaced with just running the SlotVisitor.
                // I wonder if that would be profitable.
                m_condition.wait(m_lock);
            }
        }

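        // Run the chosen constraint or parallel work outside the lock.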
        ConstraintParallelism parallelism = ConstraintParallelism::Sequential;

        MarkingConstraint& constraint = *m_set.m_set[indexToRun];

        if (doParallelWorkMode)
            constraint.doParallelWork(visitor);
        else
            parallelism = constraint.execute(visitor);

        {
            auto locker = holdLock(m_lock);

            if (doParallelWorkMode) {
                if (m_toExecuteInParallelSet.get(indexToRun)) {
                    m_didExecuteInParallel.append(indexToRun);

                    m_toExecuteInParallel.takeFirst(
                        [&] (unsigned value) { return value == indexToRun; });
                    m_toExecuteInParallelSet.clear(indexToRun);
                }
            } else {
                if (constraint.parallelism() == ConstraintParallelism::Parallel)
                    m_numThreadsThatMayProduceWork--;
                m_executed.set(indexToRun);
                if (parallelism == ConstraintParallelism::Parallel) {
                    m_toExecuteInParallel.append(indexToRun);
                    m_toExecuteInParallelSet.set(indexToRun);
                }
            }

            m_condition.notifyAll();
        }
    }
}

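// Records that the constraint at the given index has executed and, if it produced parallel work,
// queues that work so other threads can help finish it.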
void MarkingConstraintSolver::didExecute(ConstraintParallelism parallelism, unsigned index)
{
    m_executed.set(index);
    if (parallelism == ConstraintParallelism::Parallel) {
        m_toExecuteInParallel.append(index);
        m_toExecuteInParallelSet.set(index);
    }
}

} // namespace JSC
