Something tiny left out of the last patch.
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGWorklist.cpp
1 /*
2  * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGWorklist.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "CodeBlock.h"
32 #include "DeferGC.h"
33 #include "DFGLongLivedState.h"
34 #include "DFGSafepoint.h"
35 #include "JSCInlines.h"
36 #include <mutex>
37
38 namespace JSC { namespace DFG {
39
// Constructs a worklist with the given human-readable name. No worker
// threads are spawned here; that happens later in finishCreation(), after
// the object is fully constructed (so threads never see a partial object).
Worklist::Worklist(CString worklistName)
    : m_threadName(toCString(worklistName, " Worker Thread"))
    , m_numberOfActiveThreads(0)
{
}
45
46 Worklist::~Worklist()
47 {
48     {
49         LockHolder locker(m_lock);
50         for (unsigned i = m_threads.size(); i--;)
51             m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate.
52         m_planEnqueued.notifyAll();
53     }
54     for (unsigned i = m_threads.size(); i--;)
55         waitForThreadCompletion(m_threads[i]->m_identifier);
56     ASSERT(!m_numberOfActiveThreads);
57 }
58
59 void Worklist::finishCreation(unsigned numberOfThreads, int relativePriority)
60 {
61     RELEASE_ASSERT(numberOfThreads);
62     for (unsigned i = numberOfThreads; i--;) {
63         std::unique_ptr<ThreadData> data = std::make_unique<ThreadData>(this);
64         data->m_identifier = createThread(threadFunction, data.get(), m_threadName.data());
65         if (relativePriority)
66             changeThreadPriority(data->m_identifier, relativePriority);
67         m_threads.append(WTFMove(data));
68     }
69 }
70
71 Ref<Worklist> Worklist::create(CString worklistName, unsigned numberOfThreads, int relativePriority)
72 {
73     Ref<Worklist> result = adoptRef(*new Worklist(worklistName));
74     result->finishCreation(numberOfThreads, relativePriority);
75     return result;
76 }
77
78 bool Worklist::isActiveForVM(VM& vm) const
79 {
80     LockHolder locker(m_lock);
81     PlanMap::const_iterator end = m_plans.end();
82     for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
83         if (&iter->value->vm == &vm)
84             return true;
85     }
86     return false;
87 }
88
89 void Worklist::enqueue(PassRefPtr<Plan> passedPlan)
90 {
91     RefPtr<Plan> plan = passedPlan;
92     LockHolder locker(m_lock);
93     if (Options::verboseCompilationQueue()) {
94         dump(locker, WTF::dataFile());
95         dataLog(": Enqueueing plan to optimize ", plan->key(), "\n");
96     }
97     ASSERT(m_plans.find(plan->key()) == m_plans.end());
98     m_plans.add(plan->key(), plan);
99     m_queue.append(plan);
100     m_planEnqueued.notifyOne();
101 }
102
103 Worklist::State Worklist::compilationState(CompilationKey key)
104 {
105     LockHolder locker(m_lock);
106     PlanMap::iterator iter = m_plans.find(key);
107     if (iter == m_plans.end())
108         return NotKnown;
109     return iter->value->stage == Plan::Ready ? Compiled : Compiling;
110 }
111
112 void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
113 {
114     DeferGC deferGC(vm.heap);
115     // Wait for all of the plans for the given VM to complete. The idea here
116     // is that we want all of the caller VM's plans to be done. We don't care
117     // about any other VM's plans, and we won't attempt to wait on those.
118     // After we release this lock, we know that although other VMs may still
119     // be adding plans, our VM will not be.
120     
121     LockHolder locker(m_lock);
122     
123     if (Options::verboseCompilationQueue()) {
124         dump(locker, WTF::dataFile());
125         dataLog(": Waiting for all in VM to complete.\n");
126     }
127     
128     for (;;) {
129         bool allAreCompiled = true;
130         PlanMap::iterator end = m_plans.end();
131         for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
132             if (&iter->value->vm != &vm)
133                 continue;
134             if (iter->value->stage != Plan::Ready) {
135                 allAreCompiled = false;
136                 break;
137             }
138         }
139         
140         if (allAreCompiled)
141             break;
142         
143         m_planCompiled.wait(m_lock);
144     }
145 }
146
// Moves every Ready plan belonging to the given VM out of m_readyPlans (and
// out of m_plans) into myReadyPlans. Erasure uses the swap-with-last idiom
// for O(1) removal from the middle of the vector; the "i--" makes the next
// iteration revisit the element that was just swapped into slot i.
void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans)
{
    DeferGC deferGC(vm.heap);
    LockHolder locker(m_lock);
    for (size_t i = 0; i < m_readyPlans.size(); ++i) {
        RefPtr<Plan> plan = m_readyPlans[i];
        if (&plan->vm != &vm)
            continue;
        if (plan->stage != Plan::Ready)
            continue;
        myReadyPlans.append(plan);
        m_readyPlans[i--] = m_readyPlans.last();
        m_readyPlans.removeLast();
        m_plans.remove(plan->key());
    }
}
163
164 void Worklist::removeAllReadyPlansForVM(VM& vm)
165 {
166     Vector<RefPtr<Plan>, 8> myReadyPlans;
167     removeAllReadyPlansForVM(vm, myReadyPlans);
168 }
169
170 Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requestedKey)
171 {
172     DeferGC deferGC(vm.heap);
173     Vector<RefPtr<Plan>, 8> myReadyPlans;
174     
175     removeAllReadyPlansForVM(vm, myReadyPlans);
176     
177     State resultingState = NotKnown;
178
179     while (!myReadyPlans.isEmpty()) {
180         RefPtr<Plan> plan = myReadyPlans.takeLast();
181         CompilationKey currentKey = plan->key();
182         
183         if (Options::verboseCompilationQueue())
184             dataLog(*this, ": Completing ", currentKey, "\n");
185         
186         RELEASE_ASSERT(plan->stage == Plan::Ready);
187         
188         plan->finalizeAndNotifyCallback();
189         
190         if (currentKey == requestedKey)
191             resultingState = Compiled;
192     }
193     
194     if (!!requestedKey && resultingState == NotKnown) {
195         LockHolder locker(m_lock);
196         if (m_plans.contains(requestedKey))
197             resultingState = Compiling;
198     }
199     
200     return resultingState;
201 }
202
// Synchronously drains all of this VM's plans: waits for outstanding
// compilations to reach the Ready stage, then finalizes everything ready.
void Worklist::completeAllPlansForVM(VM& vm)
{
    DeferGC deferGC(vm.heap);
    waitUntilAllPlansForVMAreReady(vm);
    completeAllReadyPlansForVM(vm);
}
209
210 void Worklist::rememberCodeBlocks(VM& vm)
211 {
212     LockHolder locker(m_lock);
213     for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
214         Plan* plan = iter->value.get();
215         if (&plan->vm != &vm)
216             continue;
217         plan->rememberCodeBlocks();
218     }
219 }
220
221 void Worklist::suspendAllThreads()
222 {
223     m_suspensionLock.lock();
224     for (unsigned i = m_threads.size(); i--;)
225         m_threads[i]->m_rightToRun.lock();
226 }
227
228 void Worklist::resumeAllThreads()
229 {
230     for (unsigned i = m_threads.size(); i--;)
231         m_threads[i]->m_rightToRun.unlock();
232     m_suspensionLock.unlock();
233 }
234
235 void Worklist::visitWeakReferences(SlotVisitor& visitor)
236 {
237     VM* vm = visitor.heap()->vm();
238     {
239         LockHolder locker(m_lock);
240         for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
241             Plan* plan = iter->value.get();
242             if (&plan->vm != vm)
243                 continue;
244             plan->checkLivenessAndVisitChildren(visitor);
245         }
246     }
247     // This loop doesn't need locking because:
248     // (1) no new threads can be added to m_threads. Hence, it is immutable and needs no locks.
249     // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be
250     //     holding here because of a prior call to suspendAllThreads().
251     for (unsigned i = m_threads.size(); i--;) {
252         ThreadData* data = m_threads[i].get();
253         Safepoint* safepoint = data->m_safepoint;
254         if (safepoint && &safepoint->vm() == vm)
255             safepoint->checkLivenessAndVisitChildren(visitor);
256     }
257 }
258
259 void Worklist::removeDeadPlans(VM& vm)
260 {
261     {
262         LockHolder locker(m_lock);
263         HashSet<CompilationKey> deadPlanKeys;
264         for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
265             Plan* plan = iter->value.get();
266             if (&plan->vm != &vm)
267                 continue;
268             if (plan->isKnownToBeLiveDuringGC())
269                 continue;
270             RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet.
271             ASSERT(!deadPlanKeys.contains(plan->key()));
272             deadPlanKeys.add(plan->key());
273         }
274         if (!deadPlanKeys.isEmpty()) {
275             for (HashSet<CompilationKey>::iterator iter = deadPlanKeys.begin(); iter != deadPlanKeys.end(); ++iter)
276                 m_plans.take(*iter)->cancel();
277             Deque<RefPtr<Plan>> newQueue;
278             while (!m_queue.isEmpty()) {
279                 RefPtr<Plan> plan = m_queue.takeFirst();
280                 if (plan->stage != Plan::Cancelled)
281                     newQueue.append(plan);
282             }
283             m_queue.swap(newQueue);
284             for (unsigned i = 0; i < m_readyPlans.size(); ++i) {
285                 if (m_readyPlans[i]->stage != Plan::Cancelled)
286                     continue;
287                 m_readyPlans[i] = m_readyPlans.last();
288                 m_readyPlans.removeLast();
289             }
290         }
291     }
292     
293     // No locking needed for this part, see comment in visitWeakReferences().
294     for (unsigned i = m_threads.size(); i--;) {
295         ThreadData* data = m_threads[i].get();
296         Safepoint* safepoint = data->m_safepoint;
297         if (!safepoint)
298             continue;
299         if (&safepoint->vm() != &vm)
300             continue;
301         if (safepoint->isKnownToBeLiveDuringGC())
302             continue;
303         safepoint->cancel();
304     }
305 }
306
// Thread-safe snapshot of how many plans are waiting to be picked up by a
// worker thread (does not count plans currently compiling or ready).
size_t Worklist::queueLength()
{
    LockHolder locker(m_lock);
    return m_queue.size();
}
312
// Public dump entry point: acquires m_lock, then delegates to the variant
// that requires the lock to be held.
void Worklist::dump(PrintStream& out) const
{
    LockHolder locker(m_lock);
    dump(locker, out);
}
318
319 void Worklist::dump(const LockHolder&, PrintStream& out) const
320 {
321     out.print(
322         "Worklist(", RawPointer(this), ")[Queue Length = ", m_queue.size(),
323         ", Map Size = ", m_plans.size(), ", Num Ready = ", m_readyPlans.size(),
324         ", Num Active Threads = ", m_numberOfActiveThreads, "/", m_threads.size(), "]");
325 }
326
// Main loop for one worker thread: repeatedly dequeues a plan, compiles it,
// and publishes the result. A null plan dequeued from m_queue is the
// shutdown sentinel appended by ~Worklist().
void Worklist::runThread(ThreadData* data)
{
    CompilationScope compilationScope;
    
    if (Options::verboseCompilationQueue())
        dataLog(*this, ": Thread started\n");
    
    LongLivedState longLivedState;
    
    for (;;) {
        RefPtr<Plan> plan;
        {
            LockHolder locker(m_lock);
            while (m_queue.isEmpty())
                m_planEnqueued.wait(m_lock);
            
            plan = m_queue.takeFirst();
            if (plan)
                m_numberOfActiveThreads++;
        }
        
        if (!plan) {
            if (Options::verboseCompilationQueue())
                dataLog(*this, ": Thread shutting down\n");
            return;
        }
        
        {
            // Holding m_rightToRun lets suspendAllThreads() stop this thread
            // while the GC scans or cancels in-flight work.
            LockHolder locker(data->m_rightToRun);
            {
                LockHolder locker(m_lock);
                // The plan may have been cancelled (e.g. by removeDeadPlans())
                // between dequeueing it and reacquiring m_lock.
                if (plan->stage == Plan::Cancelled) {
                    m_numberOfActiveThreads--;
                    continue;
                }
                plan->notifyCompiling();
            }
        
            if (Options::verboseCompilationQueue())
                dataLog(*this, ": Compiling ", plan->key(), " asynchronously\n");
        
            RELEASE_ASSERT(!plan->vm.heap.isCollecting());
            plan->compileInThread(longLivedState, data);
            RELEASE_ASSERT(plan->stage == Plan::Cancelled || !plan->vm.heap.isCollecting());
            
            {
                LockHolder locker(m_lock);
                // Compilation may have been cancelled at a safepoint inside
                // compileInThread().
                if (plan->stage == Plan::Cancelled) {
                    m_numberOfActiveThreads--;
                    continue;
                }
                plan->notifyCompiled();
            }
            RELEASE_ASSERT(!plan->vm.heap.isCollecting());
        }

        {
            LockHolder locker(m_lock);
            
            // We could have been cancelled between releasing rightToRun and acquiring m_lock.
            // This would mean that we might be in the middle of GC right now.
            if (plan->stage == Plan::Cancelled) {
                m_numberOfActiveThreads--;
                continue;
            }
            
            plan->notifyReady();
            
            if (Options::verboseCompilationQueue()) {
                dump(locker, WTF::dataFile());
                dataLog(": Compiled ", plan->key(), " asynchronously\n");
            }
            
            m_readyPlans.append(plan);
            
            // Wake anyone blocked in waitUntilAllPlansForVMAreReady().
            m_planCompiled.notifyAll();
            m_numberOfActiveThreads--;
        }
    }
}
407
408 void Worklist::threadFunction(void* argument)
409 {
410     ThreadData* data = static_cast<ThreadData*>(argument);
411     data->m_worklist->runThread(data);
412 }
413
// Lazily-created process-wide DFG worklist; written exactly once under the
// std::call_once in ensureGlobalDFGWorklist(), never destroyed.
static Worklist* theGlobalDFGWorklist;
415
416 Worklist* ensureGlobalDFGWorklist()
417 {
418     static std::once_flag initializeGlobalWorklistOnceFlag;
419     std::call_once(initializeGlobalWorklistOnceFlag, [] {
420         theGlobalDFGWorklist = &Worklist::create("DFG Worklist", Options::numberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef();
421     });
422     return theGlobalDFGWorklist;
423 }
424
// Returns the global DFG worklist if it has already been created, or null.
// Never triggers creation.
Worklist* existingGlobalDFGWorklistOrNull()
{
    return theGlobalDFGWorklist;
}
429
// Lazily-created process-wide FTL worklist; written exactly once under the
// std::call_once in ensureGlobalFTLWorklist(), never destroyed.
static Worklist* theGlobalFTLWorklist;
431
432 Worklist* ensureGlobalFTLWorklist()
433 {
434     static std::once_flag initializeGlobalWorklistOnceFlag;
435     std::call_once(initializeGlobalWorklistOnceFlag, [] {
436         theGlobalFTLWorklist = &Worklist::create("FTL Worklist", Options::numberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef();
437     });
438     return theGlobalFTLWorklist;
439 }
440
// Returns the global FTL worklist if it has already been created, or null.
// Never triggers creation.
Worklist* existingGlobalFTLWorklistOrNull()
{
    return theGlobalFTLWorklist;
}
445
446 Worklist* ensureGlobalWorklistFor(CompilationMode mode)
447 {
448     switch (mode) {
449     case InvalidCompilationMode:
450         RELEASE_ASSERT_NOT_REACHED();
451         return 0;
452     case DFGMode:
453         return ensureGlobalDFGWorklist();
454     case FTLMode:
455     case FTLForOSREntryMode:
456         return ensureGlobalFTLWorklist();
457     }
458     RELEASE_ASSERT_NOT_REACHED();
459     return 0;
460 }
461
462 void completeAllPlansForVM(VM& vm)
463 {
464     for (unsigned i = DFG::numberOfWorklists(); i--;) {
465         if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i))
466             worklist->completeAllPlansForVM(vm);
467     }
468 }
469
470 void rememberCodeBlocks(VM& vm)
471 {
472     for (unsigned i = DFG::numberOfWorklists(); i--;) {
473         if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i))
474             worklist->rememberCodeBlocks(vm);
475     }
476 }
477
478 } } // namespace JSC::DFG
479
480 #endif // ENABLE(DFG_JIT)
481