1baace6044e660a8e5441fbad018ea90931a4c91
[WebKit-https.git] / Source / JavaScriptCore / runtime / VMTraps.cpp
1 /*
2  * Copyright (C) 2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "VMTraps.h"
28
29 #include "CallFrame.h"
30 #include "CodeBlock.h"
31 #include "CodeBlockSet.h"
32 #include "DFGCommonData.h"
33 #include "ExceptionHelpers.h"
34 #include "HeapInlines.h"
35 #include "LLIntPCRanges.h"
36 #include "MachineContext.h"
37 #include "MachineStackMarker.h"
38 #include "MacroAssembler.h"
39 #include "VM.h"
40 #include "VMInspector.h"
41 #include "Watchdog.h"
42 #include <wtf/ProcessID.h>
43 #include <wtf/ThreadMessage.h>
44 #include <wtf/threads/Signals.h>
45
46 namespace JSC {
47
// Recovers the owning VM from this VMTraps instance. VMTraps is embedded in VM
// as its m_traps member, so subtracting that member's offset from `this` yields
// the enclosing VM — no back-pointer needs to be stored.
ALWAYS_INLINE VM& VMTraps::vm() const
{
    return *bitwise_cast<VM*>(bitwise_cast<uintptr_t>(this) - OBJECT_OFFSETOF(VM, m_traps));
}
52
53 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
54
// Snapshot of the interrupted thread's machine state, built from a signal's
// mcontext. Used by the SIGTRAP handler and the thread-message callback to
// locate the trapping PC and the thread's stack/frame pointers.
struct SignalContext {
    SignalContext(mcontext_t& mcontext)
        : mcontext(mcontext)
        , trapPC(MachineContext::instructionPointer(mcontext))
        , stackPointer(MachineContext::stackPointer(mcontext))
        , framePointer(MachineContext::framePointer(mcontext))
    {
#if CPU(X86_64) || CPU(X86)
        // On X86_64, SIGTRAP reports the address after the trapping PC. So, dec by 1.
        trapPC = reinterpret_cast<uint8_t*>(trapPC) - 1;
#endif
    }

    // Rewinds the saved instruction pointer to the trapping instruction itself,
    // so that returning from the signal handler re-executes it (now that it has
    // been re-patched). Only meaningful on x86, where trapPC was adjusted above.
    void adjustPCToPointToTrappingInstruction()
    {
#if CPU(X86_64) || CPU(X86)
        MachineContext::instructionPointer(mcontext) = trapPC;
#endif
    }

    mcontext_t& mcontext; // Borrowed reference into the signal's ucontext; not owned.
    void* trapPC;         // PC of the trapping instruction (x86: reported PC minus 1 byte).
    void* stackPointer;
    void* framePointer;
};
80
81 inline static bool vmIsInactive(VM& vm)
82 {
83     return !vm.entryScope && !vm.ownerThread();
84 }
85
// Result of findActiveVMAndStackBounds(): the VM whose machine-thread stack
// contains the interrupted stack pointer (null if none matched), plus that
// thread's stack bounds.
struct VMAndStackBounds {
    VM* vm;
    StackBounds stackBounds;
};
90
// Determines which VM (if any) the interrupted thread was running in, by testing
// whether the signal context's stack pointer lies within the stack bounds of any
// machine thread registered with any live VM.
//
// Runs in signal-handler / thread-message context, so it only ever try-locks:
// if the VMInspector lock is contended — or every candidate VM's machineThreads
// lock is — it returns Error::LockUnavailable so the caller can retry later.
//
// On success, the returned VMAndStackBounds::vm may still be null, meaning no
// active VM owns this stack.
static Expected<VMAndStackBounds, VMTraps::Error> findActiveVMAndStackBounds(SignalContext& context)
{
    VMInspector& inspector = VMInspector::instance();
    auto locker = tryHoldLock(inspector.getLock());
    if (UNLIKELY(!locker))
        return makeUnexpected(VMTraps::Error::LockUnavailable);
    
    VM* activeVM = nullptr;
    StackBounds stackBounds = StackBounds::emptyBounds();
    void* stackPointer = context.stackPointer;
    bool unableToAcquireMachineThreadsLock = false;
    inspector.iterate(locker, [&] (VM& vm) {
        // Skip VMs that have never been entered and have no owner thread.
        if (vmIsInactive(vm))
            return VMInspector::FunctorStatus::Continue;

        auto& machineThreads = vm.heap.machineThreads();
        auto machineThreadsLocker = tryHoldLock(machineThreads.getLock());
        if (UNLIKELY(!machineThreadsLocker)) {
            unableToAcquireMachineThreadsLock = true;
            return VMInspector::FunctorStatus::Continue; // Try next VM.
        }

        const auto& threadList = machineThreads.threadsListHead(machineThreadsLocker);
        for (MachineThreads::MachineThread* thread = threadList.head(); thread; thread = thread->next()) {
            RELEASE_ASSERT(thread->stackBase());
            RELEASE_ASSERT(thread->stackEnd());
            // Stacks grow down: stackEnd <= sp <= stackBase means sp is on this thread's stack.
            if (stackPointer <= thread->stackBase() && stackPointer >= thread->stackEnd()) {
                activeVM = &vm;
                stackBounds = StackBounds(thread->stackBase(), thread->stackEnd());
                return VMInspector::FunctorStatus::Done;
            }
        }
        return VMInspector::FunctorStatus::Continue;
    });

    // A found VM is a definitive answer; only surface the lock failure when we
    // also failed to find one (some VM we couldn't inspect might have matched).
    if (!activeVM && unableToAcquireMachineThreadsLock)
        return makeUnexpected(VMTraps::Error::LockUnavailable);
    return VMAndStackBounds { activeVM, stackBounds };
}
130
// Registers the process-wide SIGTRAP handler that services VM trap breakpoints
// planted in optimized JIT code. Installed exactly once (see VMTraps::VMTraps).
static void installSignalHandler()
{
    installSignalHandler(Signal::Trap, [] (int, siginfo_t*, void* uap) -> SignalAction {
        SignalContext context(static_cast<ucontext_t*>(uap)->uc_mcontext);

        // Only claim traps that fired from JIT'ed code; anything else is not ours.
        if (!isJITPC(context.trapPC))
            return SignalAction::NotHandled;

        // FIXME: This currently eats all traps including jit asserts we should make sure this
        // always works. https://bugs.webkit.org/show_bug.cgi?id=171039
        auto activeVMAndStackBounds = findActiveVMAndStackBounds(context);
        if (!activeVMAndStackBounds)
            return SignalAction::Handled; // Let the SignalSender try again later.

        VM* vm = activeVMAndStackBounds.value().vm;
        if (vm) {
            VMTraps& traps = vm->traps();
            if (!traps.needTrapHandling())
                return SignalAction::Handled; // The polling code beat us to handling the trap already.

            auto expectedSuccess = traps.tryJettisonCodeBlocksOnStack(context);
            if (!expectedSuccess)
                return SignalAction::Handled; // Let the SignalSender try again later.
            if (expectedSuccess.value())
                return SignalAction::Handled; // We've successfully jettisoned the codeBlocks.
        }

        return SignalAction::Handled;
    });
}
161
// Normalizes vm.topCallFrame for the trap machinery: maps the debug-build
// "badbeef" poison value back to null so callers can treat null as "not set".
ALWAYS_INLINE static CallFrame* sanitizedTopCallFrame(CallFrame* topCallFrame)
{
#if !defined(NDEBUG) && !CPU(ARM) && !CPU(MIPS)
    // prepareForExternalCall() in DFGSpeculativeJIT.h may set topCallFrame to a bad word
    // before calling native functions, but tryInstallTrapBreakpoints() below expects
    // topCallFrame to be null if not set.
#if USE(JSVALUE64)
    const uintptr_t badBeefWord = 0xbadbeef0badbeef;
#else
    const uintptr_t badBeefWord = 0xbadbeef;
#endif
    if (topCallFrame == reinterpret_cast<CallFrame*>(badBeefWord))
        topCallFrame = nullptr;
#endif
    return topCallFrame;
}
178
179 static bool isSaneFrame(CallFrame* frame, CallFrame* calleeFrame, VMEntryFrame* entryFrame, StackBounds stackBounds)
180 {
181     if (reinterpret_cast<void*>(frame) >= reinterpret_cast<void*>(entryFrame))
182         return false;
183     if (calleeFrame >= frame)
184         return false;
185     return stackBounds.contains(frame);
186 }
187     
// Runs on the mutator thread via a thread message (see SignalSender::send) while
// a trap event is pending. Walks the stack from the interrupted frame looking
// for the nearest CodeBlock; if it is optimizing-JIT code, installs VM trap
// breakpoints at its invalidation points so the mutator will trap at a safe
// point. Every lock here is a try-lock: any contention means bail and let the
// SignalSender retry later.
void VMTraps::tryInstallTrapBreakpoints(SignalContext& context, StackBounds stackBounds)
{
    // This must be the initial signal to get the mutator thread's attention.
    // Let's get the thread to break at invalidation points if needed.
    VM& vm = this->vm();
    void* trapPC = context.trapPC;

    CallFrame* callFrame = reinterpret_cast<CallFrame*>(context.framePointer);

    auto codeBlockSetLocker = tryHoldLock(vm.heap.codeBlockSet().getLock());
    if (!codeBlockSetLocker)
        return; // Let the SignalSender try again later.

    {
        // Classify where we interrupted the mutator, to decide whether the frame
        // pointer is a usable CallFrame or we must fall back to vm.topCallFrame.
        auto& allocator = ExecutableAllocator::singleton();
        auto allocatorLocker = tryHoldLock(allocator.getLock());
        if (!allocatorLocker)
            return; // Let the SignalSender try again later.

        if (allocator.isValidExecutableMemory(allocatorLocker, trapPC)) {
            if (vm.isExecutingInRegExpJIT) {
                // We need to do this because a regExpJIT frame isn't a JS frame.
                callFrame = sanitizedTopCallFrame(vm.topCallFrame);
            }
        } else if (LLInt::isLLIntPC(trapPC)) {
            // The framePointer probably has the callFrame. We're good to go.
        } else {
            // We resort to topCallFrame to see if we can get anything
            // useful. We usually get here when we're executing C code.
            callFrame = sanitizedTopCallFrame(vm.topCallFrame);
        }
    }

    CodeBlock* foundCodeBlock = nullptr;
    VMEntryFrame* vmEntryFrame = vm.topVMEntryFrame;

    // We don't have a callee to start with. So, use the end of the stack to keep the
    // isSaneFrame() checker below happy for the first iteration. It will still check
    // to ensure that the address is in the stackBounds.
    CallFrame* calleeFrame = reinterpret_cast<CallFrame*>(stackBounds.end());

    if (!vmEntryFrame || !callFrame)
        return; // Not running JS code. Let the SignalSender try again later.

    // Walk caller-ward until we find a frame whose codeBlock is registered in
    // this VM's codeBlockSet, validating every frame along the way.
    do {
        if (!isSaneFrame(callFrame, calleeFrame, vmEntryFrame, stackBounds))
            return; // Let the SignalSender try again later.

        CodeBlock* candidateCodeBlock = callFrame->codeBlock();
        if (candidateCodeBlock && vm.heap.codeBlockSet().contains(codeBlockSetLocker, candidateCodeBlock)) {
            foundCodeBlock = candidateCodeBlock;
            break;
        }

        calleeFrame = callFrame;
        callFrame = callFrame->callerFrame(vmEntryFrame);

    } while (callFrame && vmEntryFrame);

    if (!foundCodeBlock) {
        // We may have just entered the frame and the codeBlock pointer is not
        // initialized yet. Just bail and let the SignalSender try again later.
        return;
    }

    // Only optimizing-JIT (DFG/FTL) codeBlocks have invalidation points we can
    // patch; for anything else there is nothing to install here.
    if (JITCode::isOptimizingJIT(foundCodeBlock->jitType())) {
        auto locker = tryHoldLock(m_lock);
        if (!locker)
            return; // Let the SignalSender try again later.

        if (!foundCodeBlock->hasInstalledVMTrapBreakpoints())
            foundCodeBlock->installVMTrapBreakpoints();
        return;
    }
}
263
// Called from the SIGTRAP handler when JIT code traps. Determines whether the
// trapping PC is one of our installed VM trap breakpoints; if so, jettisons the
// trapped codeBlock plus every optimizing codeBlock on the stack, and rewinds
// the saved PC so the now-patched invalidation point re-executes on return.
//
// Returns: makeUnexpected(LockUnavailable) if the codeBlockSet lock was
// contended (caller retries later); false if the trap was not ours; true if we
// jettisoned successfully.
auto VMTraps::tryJettisonCodeBlocksOnStack(SignalContext& context) -> Expected<bool, Error>
{
    VM& vm = this->vm();
    auto codeBlockSetLocker = tryHoldLock(vm.heap.codeBlockSet().getLock());
    if (!codeBlockSetLocker)
        return makeUnexpected(Error::LockUnavailable);

    CallFrame* topCallFrame = reinterpret_cast<CallFrame*>(context.framePointer);
    void* trapPC = context.trapPC;
    bool trapPCIsVMTrap = false;
    
    // Scan codeBlocks that have VM trap breakpoints installed, looking for the
    // one whose breakpoint the trapping PC hit.
    vm.heap.forEachCodeBlockIgnoringJITPlans(codeBlockSetLocker, [&] (CodeBlock* codeBlock) {
        if (!codeBlock->hasInstalledVMTrapBreakpoints())
            return false; // Not found yet.

        JITCode* jitCode = codeBlock->jitCode().get();
        ASSERT(JITCode::isOptimizingJIT(jitCode->jitType()));
        if (jitCode->dfgCommon()->isVMTrapBreakpoint(trapPC)) {
            trapPCIsVMTrap = true;
            // At the codeBlock trap point, we're guaranteed that:
            // 1. the pc is not in the middle of any range of JIT code which invalidation points
            //    may write over. Hence, it's now safe to patch those invalidation points and
            //    jettison the codeBlocks.
            // 2. The top frame must be an optimized JS frame.
            ASSERT(codeBlock == topCallFrame->codeBlock());
            codeBlock->jettison(Profiler::JettisonDueToVMTraps);
            return true;
        }

        return false; // Not found yet.
    });

    if (!trapPCIsVMTrap)
        return false;

    invalidateCodeBlocksOnStack(codeBlockSetLocker, topCallFrame);

    // Re-run the trapping instruction now that we've patched it with the invalidation
    // OSR exit off-ramp.
    context.adjustPCToPointToTrappingInstruction();
    return true;
}
306
307 void VMTraps::invalidateCodeBlocksOnStack()
308 {
309     invalidateCodeBlocksOnStack(vm().topCallFrame);
310 }
311
312 void VMTraps::invalidateCodeBlocksOnStack(ExecState* topCallFrame)
313 {
314     auto codeBlockSetLocker = holdLock(vm().heap.codeBlockSet().getLock());
315     invalidateCodeBlocksOnStack(codeBlockSetLocker, topCallFrame);
316 }
317     
318 void VMTraps::invalidateCodeBlocksOnStack(Locker<Lock>&, ExecState* topCallFrame)
319 {
320     if (!m_needToInvalidatedCodeBlocks)
321         return;
322
323     m_needToInvalidatedCodeBlocks = false;
324
325     VMEntryFrame* vmEntryFrame = vm().topVMEntryFrame;
326     CallFrame* callFrame = topCallFrame;
327
328     if (!vmEntryFrame)
329         return; // Not running JS code. Nothing to invalidate.
330
331     while (callFrame) {
332         CodeBlock* codeBlock = callFrame->codeBlock();
333         if (codeBlock && JITCode::isOptimizingJIT(codeBlock->jitType()))
334             codeBlock->jettison(Profiler::JettisonDueToVMTraps);
335         callFrame = callFrame->callerFrame(vmEntryFrame);
336     }
337 }
338
339 #endif // ENABLE(SIGNAL_BASED_VM_TRAPS)
340
// Lazily installs the process-wide SIGTRAP handler the first time any VM is
// constructed with signal-based (non-polling) traps enabled.
VMTraps::VMTraps()
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    if (!Options::usePollingTraps()) {
        // Install at most once per process, no matter how many VMs are created.
        static std::once_flag once;
        std::call_once(once, [] {
            installSignalHandler();
        });
    }
#endif
}
352
// Called as the VM shuts down: marks this VMTraps as shutting down and detaches
// every outstanding SignalSender so no signalling thread touches the dying VM.
void VMTraps::willDestroyVM()
{
    m_isShuttingDown = true;
    // Publish m_isShuttingDown before we start draining senders.
    WTF::storeStoreFence();
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    while (!m_signalSenders.isEmpty()) {
        RefPtr<SignalSender> sender;
        {
            // We don't want to be holding the VMTraps lock when calling
            // SignalSender::willDestroyVM() because SignalSender::willDestroyVM()
            // will acquire the SignalSender lock, and SignalSender::send() needs
            // to acquire these locks in the opposite order.
            auto locker = holdLock(m_lock);
            sender = m_signalSenders.takeAny();
            if (!sender)
                break;
        }
        sender->willDestroyVM();
    }
    ASSERT(m_signalSenders.isEmpty());
#endif
}
375
376 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
377 void VMTraps::addSignalSender(VMTraps::SignalSender* sender)
378 {
379     auto locker = holdLock(m_lock);
380     m_signalSenders.add(sender);
381 }
382
383 void VMTraps::removeSignalSender(VMTraps::SignalSender* sender)
384 {
385     auto locker = holdLock(m_lock);
386     m_signalSenders.remove(sender);
387 }
388
389 void VMTraps::SignalSender::willDestroyVM()
390 {
391     auto locker = holdLock(m_lock);
392     m_vm = nullptr;
393 }
394
// Runs on a dedicated signalling thread (spawned by fireTrap()). Repeatedly
// messages the mutator thread so it installs trap breakpoints, until the trap
// has been handled, the VM becomes inactive, or the VM starts dying (m_vm
// nulled by willDestroyVM()). Sleeps 1ms between attempts.
void VMTraps::SignalSender::send()
{
    while (true) {
        // We need a nested scope so that we'll release the lock before we sleep below.
        {
            auto locker = holdLock(m_lock);
            if (!m_vm)
                break;

            VM& vm = *m_vm;
            auto optionalOwnerThread = vm.ownerThread();
            if (optionalOwnerThread) {
                // Interrupt the owner thread; the callback runs on that thread
                // with its machine context and tries to install breakpoints.
                sendMessage(*optionalOwnerThread.value().get(), [] (siginfo_t*, ucontext_t* ucontext) -> void {
                    SignalContext context(ucontext->uc_mcontext);
                    auto activeVMAndStackBounds = findActiveVMAndStackBounds(context);
                    if (activeVMAndStackBounds) {
                        VM* vm = activeVMAndStackBounds.value().vm;
                        if (vm) {
                            StackBounds stackBounds = activeVMAndStackBounds.value().stackBounds;
                            VMTraps& traps = vm->traps();
                            if (traps.needTrapHandling())
                                traps.tryInstallTrapBreakpoints(context, stackBounds);
                        }
                    }
                });
                break;
            }

            // No owner thread: stop if the VM is idle, or if our event has
            // already been serviced.
            if (vmIsInactive(vm))
                break;

            VMTraps::Mask mask(m_eventType);
            if (!vm.needTrapHandling(mask))
                break;
        }

        sleepMS(1);
    }

    // Deregister ourselves; m_vm may have been nulled by willDestroyVM() while
    // we were sleeping, in which case the VM already dropped us.
    auto locker = holdLock(m_lock);
    if (m_vm)
        m_vm->traps().removeSignalSender(this);
}
438 #endif // ENABLE(SIGNAL_BASED_VM_TRAPS)
439
// Requests that the mutator thread handle the given trap event. Called from a
// non-mutator thread (asserted below). Sets the trap bit and, when signal-based
// traps are enabled, spawns a signalling thread to get the mutator's attention.
void VMTraps::fireTrap(VMTraps::EventType eventType)
{
    ASSERT(!vm().currentThreadIsHoldingAPILock());
    {
        auto locker = holdLock(m_lock);
        ASSERT(!m_isShuttingDown);
        setTrapForEvent(locker, eventType);
        m_needToInvalidatedCodeBlocks = true;
    }
    
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    if (!Options::usePollingTraps()) {
        // SignalSender::send() can loop until it has confirmation that the mutator
        // thread has received the trap request. We'll call it from another thread
        // so that fireTrap() does not block.
        RefPtr<SignalSender> sender = adoptRef(new SignalSender(vm(), eventType));
        addSignalSender(sender.get());
        Thread::create("jsc.vmtraps.signalling.thread", [sender] {
            sender->send();
        });
    }
#endif
}
463
// Services pending trap events on the mutator thread, draining them in priority
// order until none matching `mask` remain. NeedDebuggerBreak logs and
// invalidates; NeedWatchdogCheck consults the watchdog and falls through to
// termination if it says to stop; NeedTermination throws the
// TerminatedExecutionException and returns.
void VMTraps::handleTraps(ExecState* exec, VMTraps::Mask mask)
{
    VM& vm = this->vm();
    auto scope = DECLARE_THROW_SCOPE(vm);

    ASSERT(needTrapHandling(mask));
    while (needTrapHandling(mask)) {
        auto eventType = takeTopPriorityTrap(mask);
        switch (eventType) {
        case NeedDebuggerBreak:
            dataLog("VM ", RawPointer(&vm), " on pid ", getCurrentProcessID(), " received NeedDebuggerBreak trap\n");
            invalidateCodeBlocksOnStack(exec);
            break;
                
        case NeedWatchdogCheck:
            ASSERT(vm.watchdog());
            // If the watchdog says keep going, loop back for any other traps.
            if (LIKELY(!vm.watchdog()->shouldTerminate(exec)))
                continue;
            FALLTHROUGH;

        case NeedTermination:
            invalidateCodeBlocksOnStack(exec);
            throwException(exec, scope, createTerminatedExecutionException(&vm));
            return;

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
}
494
495 auto VMTraps::takeTopPriorityTrap(VMTraps::Mask mask) -> EventType
496 {
497     auto locker = holdLock(m_lock);
498     for (int i = 0; i < NumberOfEventTypes; ++i) {
499         EventType eventType = static_cast<EventType>(i);
500         if (hasTrapForEvent(locker, eventType, mask)) {
501             clearTrapForEvent(locker, eventType);
502             return eventType;
503         }
504     }
505     return Invalid;
506 }
507
508 } // namespace JSC