/*
 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "DFGWorklist.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGLongLivedState.h"
#include "DFGSafepoint.h"
#include "DeferGC.h"
#include "JSCInlines.h"
38 namespace JSC { namespace DFG {
40 Worklist::Worklist(CString worklistName)
41 : m_threadName(toCString(worklistName, " Worker Thread"))
42 , m_numberOfActiveThreads(0)
49 LockHolder locker(m_lock);
50 for (unsigned i = m_threads.size(); i--;)
51 m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate.
52 m_planEnqueued.notifyAll();
54 for (unsigned i = m_threads.size(); i--;)
55 waitForThreadCompletion(m_threads[i]->m_identifier);
56 ASSERT(!m_numberOfActiveThreads);
59 void Worklist::finishCreation(unsigned numberOfThreads, int relativePriority)
61 RELEASE_ASSERT(numberOfThreads);
62 for (unsigned i = numberOfThreads; i--;) {
63 std::unique_ptr<ThreadData> data = std::make_unique<ThreadData>(this);
64 data->m_identifier = createThread(threadFunction, data.get(), m_threadName.data());
66 changeThreadPriority(data->m_identifier, relativePriority);
67 m_threads.append(WTFMove(data));
71 Ref<Worklist> Worklist::create(CString worklistName, unsigned numberOfThreads, int relativePriority)
73 Ref<Worklist> result = adoptRef(*new Worklist(worklistName));
74 result->finishCreation(numberOfThreads, relativePriority);
78 bool Worklist::isActiveForVM(VM& vm) const
80 LockHolder locker(m_lock);
81 PlanMap::const_iterator end = m_plans.end();
82 for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
83 if (&iter->value->vm == &vm)
89 void Worklist::enqueue(PassRefPtr<Plan> passedPlan)
91 RefPtr<Plan> plan = passedPlan;
92 LockHolder locker(m_lock);
93 if (Options::verboseCompilationQueue()) {
94 dump(locker, WTF::dataFile());
95 dataLog(": Enqueueing plan to optimize ", plan->key(), "\n");
97 ASSERT(m_plans.find(plan->key()) == m_plans.end());
98 m_plans.add(plan->key(), plan);
100 m_planEnqueued.notifyOne();
103 Worklist::State Worklist::compilationState(CompilationKey key)
105 LockHolder locker(m_lock);
106 PlanMap::iterator iter = m_plans.find(key);
107 if (iter == m_plans.end())
109 return iter->value->stage == Plan::Ready ? Compiled : Compiling;
112 void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
114 DeferGC deferGC(vm.heap);
115 // Wait for all of the plans for the given VM to complete. The idea here
116 // is that we want all of the caller VM's plans to be done. We don't care
117 // about any other VM's plans, and we won't attempt to wait on those.
118 // After we release this lock, we know that although other VMs may still
119 // be adding plans, our VM will not be.
121 LockHolder locker(m_lock);
123 if (Options::verboseCompilationQueue()) {
124 dump(locker, WTF::dataFile());
125 dataLog(": Waiting for all in VM to complete.\n");
129 bool allAreCompiled = true;
130 PlanMap::iterator end = m_plans.end();
131 for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
132 if (&iter->value->vm != &vm)
134 if (iter->value->stage != Plan::Ready) {
135 allAreCompiled = false;
143 m_planCompiled.wait(m_lock);
147 void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans)
149 DeferGC deferGC(vm.heap);
150 LockHolder locker(m_lock);
151 for (size_t i = 0; i < m_readyPlans.size(); ++i) {
152 RefPtr<Plan> plan = m_readyPlans[i];
153 if (&plan->vm != &vm)
155 if (plan->stage != Plan::Ready)
157 myReadyPlans.append(plan);
158 m_readyPlans[i--] = m_readyPlans.last();
159 m_readyPlans.removeLast();
160 m_plans.remove(plan->key());
164 void Worklist::removeAllReadyPlansForVM(VM& vm)
166 Vector<RefPtr<Plan>, 8> myReadyPlans;
167 removeAllReadyPlansForVM(vm, myReadyPlans);
170 Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requestedKey)
172 DeferGC deferGC(vm.heap);
173 Vector<RefPtr<Plan>, 8> myReadyPlans;
175 removeAllReadyPlansForVM(vm, myReadyPlans);
177 State resultingState = NotKnown;
179 while (!myReadyPlans.isEmpty()) {
180 RefPtr<Plan> plan = myReadyPlans.takeLast();
181 CompilationKey currentKey = plan->key();
183 if (Options::verboseCompilationQueue())
184 dataLog(*this, ": Completing ", currentKey, "\n");
186 RELEASE_ASSERT(plan->stage == Plan::Ready);
188 plan->finalizeAndNotifyCallback();
190 if (currentKey == requestedKey)
191 resultingState = Compiled;
194 if (!!requestedKey && resultingState == NotKnown) {
195 LockHolder locker(m_lock);
196 if (m_plans.contains(requestedKey))
197 resultingState = Compiling;
200 return resultingState;
203 void Worklist::completeAllPlansForVM(VM& vm)
205 DeferGC deferGC(vm.heap);
206 waitUntilAllPlansForVMAreReady(vm);
207 completeAllReadyPlansForVM(vm);
210 void Worklist::rememberCodeBlocks(VM& vm)
212 LockHolder locker(m_lock);
213 for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
214 Plan* plan = iter->value.get();
215 if (&plan->vm != &vm)
217 plan->rememberCodeBlocks();
221 void Worklist::suspendAllThreads()
223 m_suspensionLock.lock();
224 for (unsigned i = m_threads.size(); i--;)
225 m_threads[i]->m_rightToRun.lock();
228 void Worklist::resumeAllThreads()
230 for (unsigned i = m_threads.size(); i--;)
231 m_threads[i]->m_rightToRun.unlock();
232 m_suspensionLock.unlock();
235 void Worklist::visitWeakReferences(SlotVisitor& visitor)
237 VM* vm = visitor.heap()->vm();
239 LockHolder locker(m_lock);
240 for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
241 Plan* plan = iter->value.get();
244 plan->checkLivenessAndVisitChildren(visitor);
247 // This loop doesn't need locking because:
248 // (1) no new threads can be added to m_threads. Hence, it is immutable and needs no locks.
249 // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be
250 // holding here because of a prior call to suspendAllThreads().
251 for (unsigned i = m_threads.size(); i--;) {
252 ThreadData* data = m_threads[i].get();
253 Safepoint* safepoint = data->m_safepoint;
254 if (safepoint && &safepoint->vm() == vm)
255 safepoint->checkLivenessAndVisitChildren(visitor);
259 void Worklist::removeDeadPlans(VM& vm)
262 LockHolder locker(m_lock);
263 HashSet<CompilationKey> deadPlanKeys;
264 for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
265 Plan* plan = iter->value.get();
266 if (&plan->vm != &vm)
268 if (plan->isKnownToBeLiveDuringGC())
270 RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet.
271 ASSERT(!deadPlanKeys.contains(plan->key()));
272 deadPlanKeys.add(plan->key());
274 if (!deadPlanKeys.isEmpty()) {
275 for (HashSet<CompilationKey>::iterator iter = deadPlanKeys.begin(); iter != deadPlanKeys.end(); ++iter)
276 m_plans.take(*iter)->cancel();
277 Deque<RefPtr<Plan>> newQueue;
278 while (!m_queue.isEmpty()) {
279 RefPtr<Plan> plan = m_queue.takeFirst();
280 if (plan->stage != Plan::Cancelled)
281 newQueue.append(plan);
283 m_queue.swap(newQueue);
284 for (unsigned i = 0; i < m_readyPlans.size(); ++i) {
285 if (m_readyPlans[i]->stage != Plan::Cancelled)
287 m_readyPlans[i] = m_readyPlans.last();
288 m_readyPlans.removeLast();
293 // No locking needed for this part, see comment in visitWeakReferences().
294 for (unsigned i = m_threads.size(); i--;) {
295 ThreadData* data = m_threads[i].get();
296 Safepoint* safepoint = data->m_safepoint;
299 if (&safepoint->vm() != &vm)
301 if (safepoint->isKnownToBeLiveDuringGC())
307 size_t Worklist::queueLength()
309 LockHolder locker(m_lock);
310 return m_queue.size();
313 void Worklist::dump(PrintStream& out) const
315 LockHolder locker(m_lock);
319 void Worklist::dump(const LockHolder&, PrintStream& out) const
322 "Worklist(", RawPointer(this), ")[Queue Length = ", m_queue.size(),
323 ", Map Size = ", m_plans.size(), ", Num Ready = ", m_readyPlans.size(),
324 ", Num Active Threads = ", m_numberOfActiveThreads, "/", m_threads.size(), "]");
327 void Worklist::runThread(ThreadData* data)
329 CompilationScope compilationScope;
331 if (Options::verboseCompilationQueue())
332 dataLog(*this, ": Thread started\n");
334 LongLivedState longLivedState;
339 LockHolder locker(m_lock);
340 while (m_queue.isEmpty())
341 m_planEnqueued.wait(m_lock);
343 plan = m_queue.takeFirst();
345 m_numberOfActiveThreads++;
349 if (Options::verboseCompilationQueue())
350 dataLog(*this, ": Thread shutting down\n");
355 LockHolder locker(data->m_rightToRun);
357 LockHolder locker(m_lock);
358 if (plan->stage == Plan::Cancelled) {
359 m_numberOfActiveThreads--;
362 plan->notifyCompiling();
365 if (Options::verboseCompilationQueue())
366 dataLog(*this, ": Compiling ", plan->key(), " asynchronously\n");
368 RELEASE_ASSERT(!plan->vm.heap.isCollecting());
369 plan->compileInThread(longLivedState, data);
370 RELEASE_ASSERT(plan->stage == Plan::Cancelled || !plan->vm.heap.isCollecting());
373 LockHolder locker(m_lock);
374 if (plan->stage == Plan::Cancelled) {
375 m_numberOfActiveThreads--;
378 plan->notifyCompiled();
380 RELEASE_ASSERT(!plan->vm.heap.isCollecting());
384 LockHolder locker(m_lock);
386 // We could have been cancelled between releasing rightToRun and acquiring m_lock.
387 // This would mean that we might be in the middle of GC right now.
388 if (plan->stage == Plan::Cancelled) {
389 m_numberOfActiveThreads--;
395 if (Options::verboseCompilationQueue()) {
396 dump(locker, WTF::dataFile());
397 dataLog(": Compiled ", plan->key(), " asynchronously\n");
400 m_readyPlans.append(plan);
402 m_planCompiled.notifyAll();
403 m_numberOfActiveThreads--;
408 void Worklist::threadFunction(void* argument)
410 ThreadData* data = static_cast<ThreadData*>(argument);
411 data->m_worklist->runThread(data);
414 static Worklist* theGlobalDFGWorklist;
416 Worklist* ensureGlobalDFGWorklist()
418 static std::once_flag initializeGlobalWorklistOnceFlag;
419 std::call_once(initializeGlobalWorklistOnceFlag, [] {
420 theGlobalDFGWorklist = &Worklist::create("DFG Worklist", Options::numberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef();
422 return theGlobalDFGWorklist;
425 Worklist* existingGlobalDFGWorklistOrNull()
427 return theGlobalDFGWorklist;
430 static Worklist* theGlobalFTLWorklist;
432 Worklist* ensureGlobalFTLWorklist()
434 static std::once_flag initializeGlobalWorklistOnceFlag;
435 std::call_once(initializeGlobalWorklistOnceFlag, [] {
436 theGlobalFTLWorklist = &Worklist::create("FTL Worklist", Options::numberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef();
438 return theGlobalFTLWorklist;
441 Worklist* existingGlobalFTLWorklistOrNull()
443 return theGlobalFTLWorklist;
446 Worklist* ensureGlobalWorklistFor(CompilationMode mode)
449 case InvalidCompilationMode:
450 RELEASE_ASSERT_NOT_REACHED();
453 return ensureGlobalDFGWorklist();
455 case FTLForOSREntryMode:
456 return ensureGlobalFTLWorklist();
458 RELEASE_ASSERT_NOT_REACHED();
462 void completeAllPlansForVM(VM& vm)
464 for (unsigned i = DFG::numberOfWorklists(); i--;) {
465 if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i))
466 worklist->completeAllPlansForVM(vm);
470 void rememberCodeBlocks(VM& vm)
472 for (unsigned i = DFG::numberOfWorklists(); i--;) {
473 if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i))
474 worklist->rememberCodeBlocks(vm);
478 } } // namespace JSC::DFG
480 #endif // ENABLE(DFG_JIT)