/*
 * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGWorklist.h"

#include "CodeBlock.h"
#include "DFGSafepoint.h"
#include "DeferGC.h"
#include "JSCInlines.h"
#include "ReleaseHeapAccessScope.h"
#include <mutex>

namespace JSC { namespace DFG {

#if ENABLE(DFG_JIT)

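// Each compiler thread is a ThreadBody: an AutomaticThread that polls the
// worklist queue, takes the next Plan, compiles it while holding its
// ThreadData's m_rightToRun lock, and publishes the finished plan to
// m_readyPlans.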
class Worklist::ThreadBody : public AutomaticThread {
public:
    ThreadBody(const AbstractLocker& locker, Worklist& worklist, ThreadData& data, Box<Lock> lock, Ref<AutomaticThreadCondition>&& condition, int relativePriority)
        : AutomaticThread(locker, lock, WTFMove(condition))
        , m_worklist(worklist)
        , m_data(data)
        , m_relativePriority(relativePriority)
    {
    }

    const char* name() const override
    {
        return m_worklist.m_threadName.data();
    }

protected:
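    // Called by AutomaticThread with the worklist lock held. A null plan in
    // the queue is the shutdown token: it tells this thread to stop.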
    PollResult poll(const AbstractLocker& locker) override
    {
        if (m_worklist.m_queue.isEmpty())
            return PollResult::Wait;

        m_plan = m_worklist.m_queue.takeFirst();
        if (!m_plan) {
            if (Options::verboseCompilationQueue()) {
                m_worklist.dump(locker, WTF::dataFile());
                dataLog(": Thread shutting down\n");
            }
            return PollResult::Stop;
        }
        RELEASE_ASSERT(m_plan->stage == Plan::Preparing);
        m_worklist.m_numberOfActiveThreads++;
        return PollResult::Work;
    }

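    // RAII scope for one unit of work: its destructor retakes the worklist
    // lock, clears m_plan, and decrements the active-thread count, keeping the
    // bookkeeping correct on every return path out of work().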
    class WorkScope;
    friend class WorkScope;
    class WorkScope {
    public:
        WorkScope(ThreadBody& thread)
            : m_thread(thread)
        {
            RELEASE_ASSERT(m_thread.m_plan);
            RELEASE_ASSERT(m_thread.m_worklist.m_numberOfActiveThreads);
        }

        ~WorkScope()
        {
            LockHolder locker(*m_thread.m_worklist.m_lock);
            m_thread.m_plan = nullptr;
            m_thread.m_worklist.m_numberOfActiveThreads--;
        }

    private:
        ThreadBody& m_thread;
    };

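    // Compiles the current plan. We hold m_data.m_rightToRun for the duration;
    // this is the lock that suspendAllThreads() takes, so owning it means no
    // collection can be safepointing us while we compile.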
    WorkResult work() override
    {
        WorkScope workScope(*this);

        LockHolder locker(m_data.m_rightToRun);
        {
            LockHolder locker(*m_worklist.m_lock);
            if (m_plan->stage == Plan::Cancelled)
                return WorkResult::Continue;
            m_plan->notifyCompiling();
        }

        if (Options::verboseCompilationQueue())
            dataLog(m_worklist, ": Compiling ", m_plan->key(), " asynchronously\n");

        // There's no way for the GC to be safepointing since we own rightToRun.
        if (m_plan->vm->heap.worldIsStopped()) {
            dataLog("Heap is stopped but here we are! (1)\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        m_plan->compileInThread(&m_data);
        if (m_plan->stage != Plan::Cancelled) {
            if (m_plan->vm->heap.worldIsStopped()) {
                dataLog("Heap is stopped but here we are! (2)\n");
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        {
            LockHolder locker(*m_worklist.m_lock);
            if (m_plan->stage == Plan::Cancelled)
                return WorkResult::Continue;

            m_plan->notifyReady();

            if (Options::verboseCompilationQueue()) {
                m_worklist.dump(locker, WTF::dataFile());
                dataLog(": Compiled ", m_plan->key(), " asynchronously\n");
            }

            m_worklist.m_readyPlans.append(m_plan);

            RELEASE_ASSERT(!m_plan->vm->heap.worldIsStopped());
            m_worklist.m_planCompiled.notifyAll();
        }

        return WorkResult::Continue;
    }

    void threadDidStart() override
    {
        if (Options::verboseCompilationQueue())
            dataLog(m_worklist, ": Thread started\n");

        if (m_relativePriority)
            Thread::current().changePriority(m_relativePriority);

        m_compilationScope = std::make_unique<CompilationScope>();
    }

    void threadIsStopping(const AbstractLocker&) override
    {
        // We're holding the Worklist::m_lock, so we should be careful not to deadlock.

        if (Options::verboseCompilationQueue())
            dataLog(m_worklist, ": Thread will stop\n");

        ASSERT(!m_plan);

        m_compilationScope = nullptr;
        m_plan = nullptr;
    }

private:
    Worklist& m_worklist;
    ThreadData& m_data;
    int m_relativePriority;
    std::unique_ptr<CompilationScope> m_compilationScope;
    RefPtr<Plan> m_plan;
};

Worklist::Worklist(CString worklistName)
    : m_threadName(toCString(worklistName, " Worker Thread"))
    , m_lock(Box<Lock>::create())
    , m_planEnqueued(AutomaticThreadCondition::create())
    , m_numberOfActiveThreads(0)
{
}

Worklist::~Worklist()
{
    {
        LockHolder locker(*m_lock);
        for (unsigned i = m_threads.size(); i--;)
            m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate.
        m_planEnqueued->notifyAll(locker);
    }
    for (unsigned i = m_threads.size(); i--;)
        m_threads[i]->m_thread->join();
    ASSERT(!m_numberOfActiveThreads);
}

void Worklist::finishCreation(unsigned numberOfThreads, int relativePriority)
{
    RELEASE_ASSERT(numberOfThreads);
    LockHolder locker(*m_lock);
    for (unsigned i = numberOfThreads; i--;)
        createNewThread(locker, relativePriority);
}

void Worklist::createNewThread(const AbstractLocker& locker, int relativePriority)
{
    std::unique_ptr<ThreadData> data = std::make_unique<ThreadData>(this);
    data->m_thread = adoptRef(new ThreadBody(locker, *this, *data, m_lock, m_planEnqueued.copyRef(), relativePriority));
    m_threads.append(WTFMove(data));
}

Ref<Worklist> Worklist::create(CString worklistName, unsigned numberOfThreads, int relativePriority)
{
    Ref<Worklist> result = adoptRef(*new Worklist(worklistName));
    result->finishCreation(numberOfThreads, relativePriority);
    return result;
}

bool Worklist::isActiveForVM(VM& vm) const
{
    LockHolder locker(*m_lock);
    PlanMap::const_iterator end = m_plans.end();
    for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
        if (iter->value->vm == &vm)
            return true;
    }
    return false;
}

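// Registers the plan under its key in m_plans, appends it to the queue, and
// wakes one compiler thread to pick it up.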
void Worklist::enqueue(Ref<Plan>&& plan)
{
    LockHolder locker(*m_lock);
    if (Options::verboseCompilationQueue()) {
        dump(locker, WTF::dataFile());
        dataLog(": Enqueueing plan to optimize ", plan->key(), "\n");
    }
    ASSERT(m_plans.find(plan->key()) == m_plans.end());
    m_plans.add(plan->key(), plan.copyRef());
    m_queue.append(WTFMove(plan));
    m_planEnqueued->notifyOne(locker);
}

Worklist::State Worklist::compilationState(CompilationKey key)
{
    LockHolder locker(*m_lock);
    PlanMap::iterator iter = m_plans.find(key);
    if (iter == m_plans.end())
        return NotKnown;
    return iter->value->stage == Plan::Ready ? Compiled : Compiling;
}

void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
{
    DeferGC deferGC(vm.heap);

    // While we are waiting for the compiler to finish, the collector might have already suspended
    // the compiler and then it will be waiting for us to stop. That's a deadlock. We avoid that
    // deadlock by relinquishing our heap access, so that the collector pretends that we are stopped
    // even if we aren't.
    ReleaseHeapAccessScope releaseHeapAccessScope(vm.heap);

    // Wait for all of the plans for the given VM to complete. The idea here
    // is that we want all of the caller VM's plans to be done. We don't care
    // about any other VM's plans, and we won't attempt to wait on those.
    // After we release this lock, we know that although other VMs may still
    // be adding plans, our VM will not be.

    LockHolder locker(*m_lock);

    if (Options::verboseCompilationQueue()) {
        dump(locker, WTF::dataFile());
        dataLog(": Waiting for all in VM to complete.\n");
    }

    for (;;) {
        bool allAreCompiled = true;
        PlanMap::iterator end = m_plans.end();
        for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
            if (iter->value->vm != &vm)
                continue;
            if (iter->value->stage != Plan::Ready) {
                allAreCompiled = false;
                break;
            }
        }

        if (allAreCompiled)
            break;

        m_planCompiled.wait(*m_lock);
    }
}

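// Moves every Ready plan belonging to vm out of m_readyPlans (and out of
// m_plans) into myReadyPlans. Removal is swap-with-last, which is why the
// index gets rewound after each hit.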
void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans)
{
    DeferGC deferGC(vm.heap);
    LockHolder locker(*m_lock);
    for (size_t i = 0; i < m_readyPlans.size(); ++i) {
        RefPtr<Plan> plan = m_readyPlans[i];
        if (plan->vm != &vm)
            continue;
        if (plan->stage != Plan::Ready)
            continue;
        myReadyPlans.append(plan);
        m_readyPlans[i--] = m_readyPlans.last();
        m_readyPlans.removeLast();
        m_plans.remove(plan->key());
    }
}

void Worklist::removeAllReadyPlansForVM(VM& vm)
{
    Vector<RefPtr<Plan>, 8> myReadyPlans;
    removeAllReadyPlansForVM(vm, myReadyPlans);
}

Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requestedKey)
{
    DeferGC deferGC(vm.heap);
    Vector<RefPtr<Plan>, 8> myReadyPlans;

    removeAllReadyPlansForVM(vm, myReadyPlans);

    State resultingState = NotKnown;

    while (!myReadyPlans.isEmpty()) {
        RefPtr<Plan> plan = myReadyPlans.takeLast();
        CompilationKey currentKey = plan->key();

        if (Options::verboseCompilationQueue())
            dataLog(*this, ": Completing ", currentKey, "\n");

        RELEASE_ASSERT(plan->stage == Plan::Ready);

        plan->finalizeAndNotifyCallback();

        if (currentKey == requestedKey)
            resultingState = Compiled;
    }

    if (!!requestedKey && resultingState == NotKnown) {
        LockHolder locker(*m_lock);
        if (m_plans.contains(requestedKey))
            resultingState = Compiling;
    }

    return resultingState;
}

void Worklist::completeAllPlansForVM(VM& vm)
{
    DeferGC deferGC(vm.heap);
    waitUntilAllPlansForVMAreReady(vm);
    completeAllReadyPlansForVM(vm);
}

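// The collector suspends the compiler threads by taking every thread's
// m_rightToRun, which blocks each thread at a safepoint boundary.
// m_suspensionLock is held from suspension to resumption so that
// setNumberOfThreads() and other suspenders cannot interleave.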
void Worklist::suspendAllThreads()
{
    m_suspensionLock.lock();
    for (unsigned i = m_threads.size(); i--;)
        m_threads[i]->m_rightToRun.lock();
}

void Worklist::resumeAllThreads()
{
    for (unsigned i = m_threads.size(); i--;)
        m_threads[i]->m_rightToRun.unlock();
    m_suspensionLock.unlock();
}

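// Called during GC, with all compiler threads suspended, to visit the children
// of every plan and in-flight safepoint belonging to the visitor's VM.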
void Worklist::visitWeakReferences(SlotVisitor& visitor)
{
    VM* vm = visitor.heap()->vm();
    {
        LockHolder locker(*m_lock);
        for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
            Plan* plan = iter->value.get();
            if (plan->vm != vm)
                continue;
            plan->checkLivenessAndVisitChildren(visitor);
        }
    }
    // This loop doesn't need locking because:
    // (1) m_threads cannot change under us: setNumberOfThreads() takes m_suspensionLock,
    //     which we hold because of a prior call to suspendAllThreads().
    // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be
    //     holding here because of a prior call to suspendAllThreads().
    for (unsigned i = m_threads.size(); i--;) {
        ThreadData* data = m_threads[i].get();
        Safepoint* safepoint = data->m_safepoint;
        if (safepoint && safepoint->vm() == vm)
            safepoint->checkLivenessAndVisitChildren(visitor);
    }
}

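// Called during GC to cull plans whose code is no longer live: live plans get
// finalizeInGC(), dead ones are cancelled and purged from m_plans, m_queue,
// and m_readyPlans. Any thread whose in-flight safepoint refers to dead code
// has that safepoint cancelled as well.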
void Worklist::removeDeadPlans(VM& vm)
{
    {
        LockHolder locker(*m_lock);
        HashSet<CompilationKey> deadPlanKeys;
        for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
            Plan* plan = iter->value.get();
            if (plan->vm != &vm)
                continue;
            if (plan->isKnownToBeLiveDuringGC()) {
                plan->finalizeInGC();
                continue;
            }
            RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet.
            ASSERT(!deadPlanKeys.contains(plan->key()));
            deadPlanKeys.add(plan->key());
        }
        if (!deadPlanKeys.isEmpty()) {
            for (HashSet<CompilationKey>::iterator iter = deadPlanKeys.begin(); iter != deadPlanKeys.end(); ++iter)
                m_plans.take(*iter)->cancel();
            Deque<RefPtr<Plan>> newQueue;
            while (!m_queue.isEmpty()) {
                RefPtr<Plan> plan = m_queue.takeFirst();
                if (plan->stage != Plan::Cancelled)
                    newQueue.append(plan);
            }
            m_queue.swap(newQueue);
            for (unsigned i = 0; i < m_readyPlans.size(); ++i) {
                if (m_readyPlans[i]->stage != Plan::Cancelled)
                    continue;
                m_readyPlans[i--] = m_readyPlans.last();
                m_readyPlans.removeLast();
            }
        }
    }

    // No locking needed for this part, see comment in visitWeakReferences().
    for (unsigned i = m_threads.size(); i--;) {
        ThreadData* data = m_threads[i].get();
        Safepoint* safepoint = data->m_safepoint;
        if (!safepoint)
            continue;
        if (safepoint->vm() != &vm)
            continue;
        if (safepoint->isKnownToBeLiveDuringGC())
            continue;
        safepoint->cancel();
    }
}

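// Cancels and removes every plan for vm that is not currently on a compiler
// thread, i.e. anything still queued, or ready but not yet finalized.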
void Worklist::removeNonCompilingPlansForVM(VM& vm)
{
    LockHolder locker(*m_lock);
    HashSet<CompilationKey> deadPlanKeys;
    Vector<RefPtr<Plan>> deadPlans;
    for (auto& entry : m_plans) {
        Plan* plan = entry.value.get();
        if (plan->vm != &vm)
            continue;
        if (plan->stage == Plan::Compiling)
            continue;
        deadPlanKeys.add(plan->key());
        deadPlans.append(plan);
    }
    for (CompilationKey key : deadPlanKeys)
        m_plans.remove(key);
    Deque<RefPtr<Plan>> newQueue;
    while (!m_queue.isEmpty()) {
        RefPtr<Plan> plan = m_queue.takeFirst();
        if (!deadPlanKeys.contains(plan->key()))
            newQueue.append(WTFMove(plan));
    }
    m_queue = WTFMove(newQueue);
    m_readyPlans.removeAllMatching(
        [&] (RefPtr<Plan>& plan) -> bool {
            return deadPlanKeys.contains(plan->key());
        });
    for (auto& plan : deadPlans)
        plan->cancel();
}

size_t Worklist::queueLength()
{
    LockHolder locker(*m_lock);
    return m_queue.size();
}

void Worklist::dump(PrintStream& out) const
{
    LockHolder locker(*m_lock);
    dump(locker, out);
}

void Worklist::dump(const AbstractLocker&, PrintStream& out) const
{
    out.print(
        "Worklist(", RawPointer(this), ")[Queue Length = ", m_queue.size(),
        ", Map Size = ", m_plans.size(), ", Num Ready = ", m_readyPlans.size(),
        ", Num Active Threads = ", m_numberOfActiveThreads, "/", m_threads.size(), "]");
}

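// Resizes the thread pool and returns the previous thread count. Shrinking
// enqueues one null "terminate" token per surplus thread, then stops or joins
// each of them; growing spawns new threads. m_suspensionLock is held
// throughout so this cannot race with suspendAllThreads().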
unsigned Worklist::setNumberOfThreads(unsigned numberOfThreads, int relativePriority)
{
    LockHolder locker(m_suspensionLock);
    auto currentNumberOfThreads = m_threads.size();
    if (numberOfThreads < currentNumberOfThreads) {
        {
            LockHolder locker(*m_lock);
            for (unsigned i = currentNumberOfThreads; i-- > numberOfThreads;) {
                if (m_threads[i]->m_thread->hasUnderlyingThread(locker)) {
                    m_queue.append(nullptr);
                    m_threads[i]->m_thread->notify(locker);
                }
            }
        }
        for (unsigned i = currentNumberOfThreads; i-- > numberOfThreads;) {
            bool isStopped = false;
            {
                LockHolder locker(*m_lock);
                isStopped = m_threads[i]->m_thread->tryStop(locker);
            }
            if (!isStopped)
                m_threads[i]->m_thread->join();
            m_threads.remove(i);
        }
        m_threads.shrinkToFit();
        ASSERT(m_numberOfActiveThreads <= numberOfThreads);
    } else if (numberOfThreads > currentNumberOfThreads) {
        LockHolder locker(*m_lock);
        for (unsigned i = currentNumberOfThreads; i < numberOfThreads; i++)
            createNewThread(locker, relativePriority);
    }
    return currentNumberOfThreads;
}

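// The DFG and FTL tiers each get one lazily created, process-wide worklist,
// shared by every VM in the process. Thread counts and priorities come from
// Options.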
static Worklist* theGlobalDFGWorklist;

Worklist& ensureGlobalDFGWorklist()
{
    static std::once_flag initializeGlobalWorklistOnceFlag;
    std::call_once(initializeGlobalWorklistOnceFlag, [] {
        theGlobalDFGWorklist = &Worklist::create("DFG Worklist", Options::numberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef();
    });
    return *theGlobalDFGWorklist;
}

Worklist* existingGlobalDFGWorklistOrNull()
{
    return theGlobalDFGWorklist;
}

static Worklist* theGlobalFTLWorklist;

Worklist& ensureGlobalFTLWorklist()
{
    static std::once_flag initializeGlobalWorklistOnceFlag;
    std::call_once(initializeGlobalWorklistOnceFlag, [] {
        theGlobalFTLWorklist = &Worklist::create("FTL Worklist", Options::numberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef();
    });
    return *theGlobalFTLWorklist;
}

Worklist* existingGlobalFTLWorklistOrNull()
{
    return theGlobalFTLWorklist;
}

Worklist& ensureGlobalWorklistFor(CompilationMode mode)
{
    switch (mode) {
    case InvalidCompilationMode:
        RELEASE_ASSERT_NOT_REACHED();
        return ensureGlobalDFGWorklist();
    case DFGMode:
        return ensureGlobalDFGWorklist();
    case FTLMode:
    case FTLForOSREntryMode:
        return ensureGlobalFTLWorklist();
    }
    RELEASE_ASSERT_NOT_REACHED();
    return ensureGlobalDFGWorklist();
}

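// Worklists are also addressable by index: 0 is the DFG worklist, 1 the FTL
// worklist, as wired up in ensureWorklistForIndex() below.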
unsigned numberOfWorklists() { return 2; }

Worklist& ensureWorklistForIndex(unsigned index)
{
    switch (index) {
    case 0:
        return ensureGlobalDFGWorklist();
    case 1:
        return ensureGlobalFTLWorklist();
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return ensureGlobalDFGWorklist();
    }
}

Worklist* existingWorklistForIndexOrNull(unsigned index)
{
    switch (index) {
    case 0:
        return existingGlobalDFGWorklistOrNull();
    case 1:
        return existingGlobalFTLWorklistOrNull();
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return nullptr;
    }
}

Worklist& existingWorklistForIndex(unsigned index)
{
    Worklist* result = existingWorklistForIndexOrNull(index);
    RELEASE_ASSERT(result);
    return *result;
}

void completeAllPlansForVM(VM& vm)
{
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::existingWorklistForIndexOrNull(i))
            worklist->completeAllPlansForVM(vm);
    }
}

#else // ENABLE(DFG_JIT)

void completeAllPlansForVM(VM&)
{
}

void markCodeBlocks(VM&, SlotVisitor&)
{
}

#endif // ENABLE(DFG_JIT)

} } // namespace JSC::DFG