GC constraint solving should be parallel
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGWorklist.cpp
1 /*
2  * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGWorklist.h"
28
29 #include "CodeBlock.h"
30 #include "DFGSafepoint.h"
31 #include "DeferGC.h"
32 #include "JSCInlines.h"
33 #include "ReleaseHeapAccessScope.h"
34 #include <mutex>
35
36 namespace JSC { namespace DFG {
37
38 #if ENABLE(DFG_JIT)
39
40 class Worklist::ThreadBody : public AutomaticThread {
41 public:
42     ThreadBody(const AbstractLocker& locker, Worklist& worklist, ThreadData& data, Box<Lock> lock, RefPtr<AutomaticThreadCondition> condition, int relativePriority)
43         : AutomaticThread(locker, lock, condition)
44         , m_worklist(worklist)
45         , m_data(data)
46         , m_relativePriority(relativePriority)
47     {
48     }
49     
50 protected:
51     PollResult poll(const AbstractLocker& locker) override
52     {
53         if (m_worklist.m_queue.isEmpty())
54             return PollResult::Wait;
55         
56         m_plan = m_worklist.m_queue.takeFirst();
57         if (!m_plan) {
58             if (Options::verboseCompilationQueue()) {
59                 m_worklist.dump(locker, WTF::dataFile());
60                 dataLog(": Thread shutting down\n");
61             }
62             return PollResult::Stop;
63         }
64         RELEASE_ASSERT(m_plan->stage == Plan::Preparing);
65         m_worklist.m_numberOfActiveThreads++;
66         return PollResult::Work;
67     }
68     
69     class WorkScope;
70     friend class WorkScope;
71     class WorkScope {
72     public:
73         WorkScope(ThreadBody& thread)
74             : m_thread(thread)
75         {
76             RELEASE_ASSERT(m_thread.m_plan);
77             RELEASE_ASSERT(m_thread.m_worklist.m_numberOfActiveThreads);
78         }
79         
80         ~WorkScope()
81         {
82             LockHolder locker(*m_thread.m_worklist.m_lock);
83             m_thread.m_plan = nullptr;
84             m_thread.m_worklist.m_numberOfActiveThreads--;
85         }
86         
87     private:
88         ThreadBody& m_thread;
89     };
90     
91     WorkResult work() override
92     {
93         WorkScope workScope(*this);
94         
95         LockHolder locker(m_data.m_rightToRun);
96         {
97             LockHolder locker(*m_worklist.m_lock);
98             if (m_plan->stage == Plan::Cancelled)
99                 return WorkResult::Continue;
100             m_plan->notifyCompiling();
101         }
102         
103         if (Options::verboseCompilationQueue())
104             dataLog(m_worklist, ": Compiling ", m_plan->key(), " asynchronously\n");
105         
106         // There's no way for the GC to be safepointing since we own rightToRun.
107         if (m_plan->vm->heap.worldIsStopped()) {
108             dataLog("Heap is stoped but here we are! (1)\n");
109             RELEASE_ASSERT_NOT_REACHED();
110         }
111         m_plan->compileInThread(&m_data);
112         if (m_plan->stage != Plan::Cancelled) {
113             if (m_plan->vm->heap.worldIsStopped()) {
114                 dataLog("Heap is stopped but here we are! (2)\n");
115                 RELEASE_ASSERT_NOT_REACHED();
116             }
117         }
118         
119         {
120             LockHolder locker(*m_worklist.m_lock);
121             if (m_plan->stage == Plan::Cancelled)
122                 return WorkResult::Continue;
123             
124             m_plan->notifyReady();
125             
126             if (Options::verboseCompilationQueue()) {
127                 m_worklist.dump(locker, WTF::dataFile());
128                 dataLog(": Compiled ", m_plan->key(), " asynchronously\n");
129             }
130             
131             m_worklist.m_readyPlans.append(m_plan);
132             
133             RELEASE_ASSERT(!m_plan->vm->heap.worldIsStopped());
134             m_worklist.m_planCompiled.notifyAll();
135         }
136         
137         return WorkResult::Continue;
138     }
139     
140     void threadDidStart() override
141     {
142         if (Options::verboseCompilationQueue())
143             dataLog(m_worklist, ": Thread started\n");
144         
145         if (m_relativePriority)
146             Thread::current().changePriority(m_relativePriority);
147         
148         m_compilationScope = std::make_unique<CompilationScope>();
149     }
150     
151     void threadIsStopping(const AbstractLocker&) override
152     {
153         // We're holding the Worklist::m_lock, so we should be careful not to deadlock.
154         
155         if (Options::verboseCompilationQueue())
156             dataLog(m_worklist, ": Thread will stop\n");
157         
158         ASSERT(!m_plan);
159         
160         m_compilationScope = nullptr;
161         m_plan = nullptr;
162     }
163     
164 private:
165     Worklist& m_worklist;
166     ThreadData& m_data;
167     int m_relativePriority;
168     std::unique_ptr<CompilationScope> m_compilationScope;
169     RefPtr<Plan> m_plan;
170 };
171
// Constructs an idle worklist; finishCreation() must be called to spawn the
// worker threads before plans are enqueued (see Worklist::create()).
Worklist::Worklist(CString worklistName)
    : m_threadName(toCString(worklistName, " Worker Thread"))
    , m_lock(Box<Lock>::create())
    , m_planEnqueued(AutomaticThreadCondition::create())
    , m_numberOfActiveThreads(0)
{
}
179
180 Worklist::~Worklist()
181 {
182     {
183         LockHolder locker(*m_lock);
184         for (unsigned i = m_threads.size(); i--;)
185             m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate.
186         m_planEnqueued->notifyAll(locker);
187     }
188     for (unsigned i = m_threads.size(); i--;)
189         m_threads[i]->m_thread->join();
190     ASSERT(!m_numberOfActiveThreads);
191 }
192
193 void Worklist::finishCreation(unsigned numberOfThreads, int relativePriority)
194 {
195     RELEASE_ASSERT(numberOfThreads);
196     LockHolder locker(*m_lock);
197     for (unsigned i = numberOfThreads; i--;) {
198         std::unique_ptr<ThreadData> data = std::make_unique<ThreadData>(this);
199         data->m_thread = adoptRef(new ThreadBody(locker, *this, *data, m_lock, m_planEnqueued, relativePriority));
200         m_threads.append(WTFMove(data));
201     }
202 }
203
204 Ref<Worklist> Worklist::create(CString worklistName, unsigned numberOfThreads, int relativePriority)
205 {
206     Ref<Worklist> result = adoptRef(*new Worklist(worklistName));
207     result->finishCreation(numberOfThreads, relativePriority);
208     return result;
209 }
210
211 bool Worklist::isActiveForVM(VM& vm) const
212 {
213     LockHolder locker(*m_lock);
214     PlanMap::const_iterator end = m_plans.end();
215     for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
216         if (iter->value->vm == &vm)
217             return true;
218     }
219     return false;
220 }
221
222 void Worklist::enqueue(Ref<Plan>&& plan)
223 {
224     LockHolder locker(*m_lock);
225     if (Options::verboseCompilationQueue()) {
226         dump(locker, WTF::dataFile());
227         dataLog(": Enqueueing plan to optimize ", plan->key(), "\n");
228     }
229     ASSERT(m_plans.find(plan->key()) == m_plans.end());
230     m_plans.add(plan->key(), plan.copyRef());
231     m_queue.append(WTFMove(plan));
232     m_planEnqueued->notifyOne(locker);
233 }
234
235 Worklist::State Worklist::compilationState(CompilationKey key)
236 {
237     LockHolder locker(*m_lock);
238     PlanMap::iterator iter = m_plans.find(key);
239     if (iter == m_plans.end())
240         return NotKnown;
241     return iter->value->stage == Plan::Ready ? Compiled : Compiling;
242 }
243
// Blocks the calling (mutator) thread until every plan belonging to `vm` has
// reached the Ready stage. Plans for other VMs are ignored.
void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
{
    DeferGC deferGC(vm.heap);
    
    // While we are waiting for the compiler to finish, the collector might have already suspended
    // the compiler and then it will be waiting for us to stop. That's a deadlock. We avoid that
    // deadlock by relinquishing our heap access, so that the collector pretends that we are stopped
    // even if we aren't.
    ReleaseHeapAccessScope releaseHeapAccessScope(vm.heap);
    
    // Wait for all of the plans for the given VM to complete. The idea here
    // is that we want all of the caller VM's plans to be done. We don't care
    // about any other VM's plans, and we won't attempt to wait on those.
    // After we release this lock, we know that although other VMs may still
    // be adding plans, our VM will not be.
    
    LockHolder locker(*m_lock);
    
    if (Options::verboseCompilationQueue()) {
        dump(locker, WTF::dataFile());
        dataLog(": Waiting for all in VM to complete.\n");
    }
    
    for (;;) {
        // Scan all plans; if any plan for this VM is not yet Ready, we must
        // keep waiting.
        bool allAreCompiled = true;
        PlanMap::iterator end = m_plans.end();
        for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
            if (iter->value->vm != &vm)
                continue;
            if (iter->value->stage != Plan::Ready) {
                allAreCompiled = false;
                break;
            }
        }
        
        if (allAreCompiled)
            break;
        
        // Releases m_lock while waiting; a compiler thread signals this
        // condition each time it appends a ready plan (see ThreadBody::work()).
        m_planCompiled.wait(*m_lock);
    }
}
285
// Moves every Ready plan belonging to `vm` out of the worklist and into
// `myReadyPlans`. The plans are also removed from m_plans, so after this call
// the worklist no longer tracks them; the caller owns finalization.
void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans)
{
    DeferGC deferGC(vm.heap);
    LockHolder locker(*m_lock);
    for (size_t i = 0; i < m_readyPlans.size(); ++i) {
        RefPtr<Plan> plan = m_readyPlans[i];
        if (plan->vm != &vm)
            continue;
        if (plan->stage != Plan::Ready)
            continue;
        myReadyPlans.append(plan);
        // Unordered removal: overwrite slot i with the last element and shrink.
        // The `i--` compensates so the element just moved into slot i is also
        // examined on the next iteration.
        m_readyPlans[i--] = m_readyPlans.last();
        m_readyPlans.removeLast();
        m_plans.remove(plan->key());
    }
}
302
303 void Worklist::removeAllReadyPlansForVM(VM& vm)
304 {
305     Vector<RefPtr<Plan>, 8> myReadyPlans;
306     removeAllReadyPlansForVM(vm, myReadyPlans);
307 }
308
309 Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requestedKey)
310 {
311     DeferGC deferGC(vm.heap);
312     Vector<RefPtr<Plan>, 8> myReadyPlans;
313     
314     removeAllReadyPlansForVM(vm, myReadyPlans);
315     
316     State resultingState = NotKnown;
317
318     while (!myReadyPlans.isEmpty()) {
319         RefPtr<Plan> plan = myReadyPlans.takeLast();
320         CompilationKey currentKey = plan->key();
321         
322         if (Options::verboseCompilationQueue())
323             dataLog(*this, ": Completing ", currentKey, "\n");
324         
325         RELEASE_ASSERT(plan->stage == Plan::Ready);
326         
327         plan->finalizeAndNotifyCallback();
328         
329         if (currentKey == requestedKey)
330             resultingState = Compiled;
331     }
332     
333     if (!!requestedKey && resultingState == NotKnown) {
334         LockHolder locker(*m_lock);
335         if (m_plans.contains(requestedKey))
336             resultingState = Compiling;
337     }
338     
339     return resultingState;
340 }
341
// Synchronously drains all of `vm`'s plans: waits for the compiler threads to
// finish them, then finalizes each one on this thread.
void Worklist::completeAllPlansForVM(VM& vm)
{
    DeferGC deferGC(vm.heap);
    waitUntilAllPlansForVMAreReady(vm);
    completeAllReadyPlansForVM(vm);
}
348
// Suspends every compiler thread by acquiring each thread's m_rightToRun lock
// (a compiling thread holds its own m_rightToRun, so this blocks until it
// reaches a safepoint). m_suspensionLock serializes concurrent suspenders.
// Must be paired with resumeAllThreads().
void Worklist::suspendAllThreads()
{
    m_suspensionLock.lock();
    for (unsigned i = m_threads.size(); i--;)
        m_threads[i]->m_rightToRun.lock();
}
355
// Releases the per-thread m_rightToRun locks acquired by suspendAllThreads(),
// letting the compiler threads run again, then releases the suspension lock.
void Worklist::resumeAllThreads()
{
    for (unsigned i = m_threads.size(); i--;)
        m_threads[i]->m_rightToRun.unlock();
    m_suspensionLock.unlock();
}
362
// Visits the heap references held by this worklist's plans and in-flight
// safepoints, restricted to the visitor's VM. Assumes the compiler threads
// were suspended beforehand (see comment on the second loop).
void Worklist::visitWeakReferences(SlotVisitor& visitor)
{
    VM* vm = visitor.heap()->vm();
    {
        LockHolder locker(*m_lock);
        for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
            Plan* plan = iter->value.get();
            if (plan->vm != vm)
                continue;
            plan->checkLivenessAndVisitChildren(visitor);
        }
    }
    // This loop doesn't need locking because:
    // (1) no new threads can be added to m_threads. Hence, it is immutable and needs no locks.
    // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be
    //     holding here because of a prior call to suspendAllThreads().
    for (unsigned i = m_threads.size(); i--;) {
        ThreadData* data = m_threads[i].get();
        Safepoint* safepoint = data->m_safepoint;
        if (safepoint && safepoint->vm() == vm)
            safepoint->checkLivenessAndVisitChildren(visitor);
    }
}
386
// Cancels plans for `vm` that are no longer live (per isKnownToBeLiveDuringGC).
// Dead plans are purged from m_plans, m_queue, and m_readyPlans; in-flight
// compilations are cancelled via their safepoints. Like visitWeakReferences(),
// this expects a prior suspendAllThreads() for the lock-free second loop.
void Worklist::removeDeadPlans(VM& vm)
{
    {
        LockHolder locker(*m_lock);
        // First pass: collect the keys of dead plans without mutating m_plans.
        HashSet<CompilationKey> deadPlanKeys;
        for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
            Plan* plan = iter->value.get();
            if (plan->vm != &vm)
                continue;
            if (plan->isKnownToBeLiveDuringGC())
                continue;
            RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet.
            ASSERT(!deadPlanKeys.contains(plan->key()));
            deadPlanKeys.add(plan->key());
        }
        if (!deadPlanKeys.isEmpty()) {
            // Cancel and drop the dead plans from the map.
            for (HashSet<CompilationKey>::iterator iter = deadPlanKeys.begin(); iter != deadPlanKeys.end(); ++iter)
                m_plans.take(*iter)->cancel();
            // Rebuild the queue without the plans that were just cancelled.
            Deque<RefPtr<Plan>> newQueue;
            while (!m_queue.isEmpty()) {
                RefPtr<Plan> plan = m_queue.takeFirst();
                if (plan->stage != Plan::Cancelled)
                    newQueue.append(plan);
            }
            m_queue.swap(newQueue);
            // Unordered removal from m_readyPlans: replace a cancelled entry
            // with the last element and shrink; `i--` re-examines that slot.
            for (unsigned i = 0; i < m_readyPlans.size(); ++i) {
                if (m_readyPlans[i]->stage != Plan::Cancelled)
                    continue;
                m_readyPlans[i--] = m_readyPlans.last();
                m_readyPlans.removeLast();
            }
        }
    }
    
    // No locking needed for this part, see comment in visitWeakReferences().
    for (unsigned i = m_threads.size(); i--;) {
        ThreadData* data = m_threads[i].get();
        Safepoint* safepoint = data->m_safepoint;
        if (!safepoint)
            continue;
        if (safepoint->vm() != &vm)
            continue;
        if (safepoint->isKnownToBeLiveDuringGC())
            continue;
        safepoint->cancel();
    }
}
434
// Removes and cancels every plan for `vm` that is not actively being compiled
// right now (i.e. still queued, or already ready). Plans in the Compiling
// stage are left alone because a compiler thread currently owns them.
void Worklist::removeNonCompilingPlansForVM(VM& vm)
{
    LockHolder locker(*m_lock);
    HashSet<CompilationKey> deadPlanKeys;
    Vector<RefPtr<Plan>> deadPlans;
    for (auto& entry : m_plans) {
        Plan* plan = entry.value.get();
        if (plan->vm != &vm)
            continue;
        if (plan->stage == Plan::Compiling)
            continue;
        deadPlanKeys.add(plan->key());
        // Keep a ref so the plan survives removal from m_plans until cancelled below.
        deadPlans.append(plan);
    }
    for (CompilationKey key : deadPlanKeys)
        m_plans.remove(key);
    // Rebuild the queue, dropping the plans we are removing.
    Deque<RefPtr<Plan>> newQueue;
    while (!m_queue.isEmpty()) {
        RefPtr<Plan> plan = m_queue.takeFirst();
        if (!deadPlanKeys.contains(plan->key()))
            newQueue.append(WTFMove(plan));
    }
    m_queue = WTFMove(newQueue);
    m_readyPlans.removeAllMatching(
        [&] (RefPtr<Plan>& plan) -> bool {
            return deadPlanKeys.contains(plan->key());
        });
    // Cancel last, after all bookkeeping is consistent.
    for (auto& plan : deadPlans)
        plan->cancel();
}
465
466 size_t Worklist::queueLength()
467 {
468     LockHolder locker(*m_lock);
469     return m_queue.size();
470 }
471
// Public dump entry point: takes the lock, then delegates to the locked
// variant below.
void Worklist::dump(PrintStream& out) const
{
    LockHolder locker(*m_lock);
    dump(locker, out);
}
477
478 void Worklist::dump(const AbstractLocker&, PrintStream& out) const
479 {
480     out.print(
481         "Worklist(", RawPointer(this), ")[Queue Length = ", m_queue.size(),
482         ", Map Size = ", m_plans.size(), ", Num Ready = ", m_readyPlans.size(),
483         ", Num Active Threads = ", m_numberOfActiveThreads, "/", m_threads.size(), "]");
484 }
485
486 static Worklist* theGlobalDFGWorklist;
487
488 Worklist& ensureGlobalDFGWorklist()
489 {
490     static std::once_flag initializeGlobalWorklistOnceFlag;
491     std::call_once(initializeGlobalWorklistOnceFlag, [] {
492         theGlobalDFGWorklist = &Worklist::create("DFG Worklist", Options::numberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef();
493     });
494     return *theGlobalDFGWorklist;
495 }
496
497 Worklist* existingGlobalDFGWorklistOrNull()
498 {
499     return theGlobalDFGWorklist;
500 }
501
502 static Worklist* theGlobalFTLWorklist;
503
504 Worklist& ensureGlobalFTLWorklist()
505 {
506     static std::once_flag initializeGlobalWorklistOnceFlag;
507     std::call_once(initializeGlobalWorklistOnceFlag, [] {
508         theGlobalFTLWorklist = &Worklist::create("FTL Worklist", Options::numberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef();
509     });
510     return *theGlobalFTLWorklist;
511 }
512
513 Worklist* existingGlobalFTLWorklistOrNull()
514 {
515     return theGlobalFTLWorklist;
516 }
517
518 Worklist& ensureGlobalWorklistFor(CompilationMode mode)
519 {
520     switch (mode) {
521     case InvalidCompilationMode:
522         RELEASE_ASSERT_NOT_REACHED();
523         return ensureGlobalDFGWorklist();
524     case DFGMode:
525         return ensureGlobalDFGWorklist();
526     case FTLMode:
527     case FTLForOSREntryMode:
528         return ensureGlobalFTLWorklist();
529     }
530     RELEASE_ASSERT_NOT_REACHED();
531     return ensureGlobalDFGWorklist();
532 }
533
534 unsigned numberOfWorklists() { return 2; }
535
536 Worklist& ensureWorklistForIndex(unsigned index)
537 {
538     switch (index) {
539     case 0:
540         return ensureGlobalDFGWorklist();
541     case 1:
542         return ensureGlobalFTLWorklist();
543     default:
544         RELEASE_ASSERT_NOT_REACHED();
545         return ensureGlobalDFGWorklist();
546     }
547 }
548
549 Worklist* existingWorklistForIndexOrNull(unsigned index)
550 {
551     switch (index) {
552     case 0:
553         return existingGlobalDFGWorklistOrNull();
554     case 1:
555         return existingGlobalFTLWorklistOrNull();
556     default:
557         RELEASE_ASSERT_NOT_REACHED();
558         return 0;
559     }
560 }
561
562 Worklist& existingWorklistForIndex(unsigned index)
563 {
564     Worklist* result = existingWorklistForIndexOrNull(index);
565     RELEASE_ASSERT(result);
566     return *result;
567 }
568
569 void completeAllPlansForVM(VM& vm)
570 {
571     for (unsigned i = DFG::numberOfWorklists(); i--;) {
572         if (DFG::Worklist* worklist = DFG::existingWorklistForIndexOrNull(i))
573             worklist->completeAllPlansForVM(vm);
574     }
575 }
576
577 #else // ENABLE(DFG_JIT)
578
// No-op stub when the DFG JIT is disabled: there are no worklists to drain.
void completeAllPlansForVM(VM&)
{
}
582
// No-op stub when the DFG JIT is disabled.
void markCodeBlocks(VM&, SlotVisitor&)
{
}
586
587 #endif // ENABLE(DFG_JIT)
588
589 } } // namespace JSC::DFG
590
591