/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "VMTraps.h"

#include "CallFrame.h"
#include "CodeBlock.h"
#include "CodeBlockSet.h"
#include "DFGCommonData.h"
#include "ExceptionHelpers.h"
#include "HeapInlines.h"
#include "LLIntPCRanges.h"
#include "MachineContext.h"
#include "MachineStackMarker.h"
#include "MacroAssembler.h"
#include "VM.h"
#include "VMInspector.h"
#include "Watchdog.h"
#include <wtf/ProcessID.h>
#include <wtf/ThreadMessage.h>
#include <wtf/threads/Signals.h>

namespace JSC {

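// VMTraps is embedded in VM as its m_traps member, so we can recover the owning VM
// by subtracting the member's offset from this object's address.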
ALWAYS_INLINE VM& VMTraps::vm() const
{
    return *bitwise_cast<VM*>(bitwise_cast<uintptr_t>(this) - OBJECT_OFFSETOF(VM, m_traps));
}

#if ENABLE(SIGNAL_BASED_VM_TRAPS)

struct SignalContext {
    SignalContext(PlatformRegisters& registers)
        : registers(registers)
        , trapPC(MachineContext::instructionPointer(registers))
        , stackPointer(MachineContext::stackPointer(registers))
        , framePointer(MachineContext::framePointer(registers))
    {
#if CPU(X86_64) || CPU(X86)
        // On X86_64, SIGTRAP reports the address after the trapping PC. So, dec by 1.
        trapPC = reinterpret_cast<uint8_t*>(trapPC) - 1;
#endif
    }

    void adjustPCToPointToTrappingInstruction()
    {
#if CPU(X86_64) || CPU(X86)
        MachineContext::instructionPointer(registers) = trapPC;
#endif
    }

    PlatformRegisters& registers;
    void* trapPC;
    void* stackPointer;
    void* framePointer;
};

inline static bool vmIsInactive(VM& vm)
{
    return !vm.entryScope && !vm.ownerThread();
}

struct VMAndStackBounds {
    VM* vm;
    StackBounds stackBounds;
};

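// Determines which VM (if any) the interrupted thread was running in, by checking
// whether the interrupted stack pointer falls within the stack bounds of any machine
// thread registered with a VM. Since this can run inside a signal handler, all locks
// are acquired with try-locks; failure is reported as Error::LockUnavailable so the
// caller can retry instead of risking a deadlock.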
static Expected<VMAndStackBounds, VMTraps::Error> findActiveVMAndStackBounds(SignalContext& context)
{
    VMInspector& inspector = VMInspector::instance();
    auto locker = tryHoldLock(inspector.getLock());
    if (UNLIKELY(!locker))
        return makeUnexpected(VMTraps::Error::LockUnavailable);

    VM* activeVM = nullptr;
    StackBounds stackBounds = StackBounds::emptyBounds();
    void* stackPointer = context.stackPointer;
    bool unableToAcquireMachineThreadsLock = false;
    inspector.iterate(locker, [&] (VM& vm) {
        if (vmIsInactive(vm))
            return VMInspector::FunctorStatus::Continue;

        auto& machineThreads = vm.heap.machineThreads();
        auto machineThreadsLocker = tryHoldLock(machineThreads.getLock());
        if (UNLIKELY(!machineThreadsLocker)) {
            unableToAcquireMachineThreadsLock = true;
            return VMInspector::FunctorStatus::Continue; // Try next VM.
        }

        const auto& threadList = machineThreads.threadsListHead(machineThreadsLocker);
        for (MachineThreads::MachineThread* thread = threadList.head(); thread; thread = thread->next()) {
            RELEASE_ASSERT(thread->stackBase());
            RELEASE_ASSERT(thread->stackEnd());
            if (stackPointer <= thread->stackBase() && stackPointer >= thread->stackEnd()) {
                activeVM = &vm;
                stackBounds = StackBounds(thread->stackBase(), thread->stackEnd());
                return VMInspector::FunctorStatus::Done;
            }
        }
        return VMInspector::FunctorStatus::Continue;
    });

    if (!activeVM && unableToAcquireMachineThreadsLock)
        return makeUnexpected(VMTraps::Error::LockUnavailable);
    return VMAndStackBounds { activeVM, stackBounds };
}

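// The SIGTRAP handler. If the trapping PC is in JIT code, find the active VM and try
// to jettison the CodeBlocks whose VMTrap breakpoints fired. Whenever a lock cannot
// be acquired, we answer SignalAction::Handled anyway: the SignalSender thread will
// keep re-sending signals until the trap has actually been serviced.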
static void installSignalHandler()
{
    installSignalHandler(Signal::Trap, [] (Signal, SigInfo&, PlatformRegisters& registers) -> SignalAction {
        SignalContext context(registers);

        if (!isJITPC(context.trapPC))
            return SignalAction::NotHandled;

        // FIXME: This currently eats all traps, including JIT asserts; we should make sure
        // this always works. https://bugs.webkit.org/show_bug.cgi?id=171039
        auto activeVMAndStackBounds = findActiveVMAndStackBounds(context);
        if (!activeVMAndStackBounds)
            return SignalAction::Handled; // Let the SignalSender try again later.

        VM* vm = activeVMAndStackBounds.value().vm;
        if (vm) {
            VMTraps& traps = vm->traps();
            if (!traps.needTrapHandling())
                return SignalAction::Handled; // The polling code beat us to handling the trap already.

            auto expectedSuccess = traps.tryJettisonCodeBlocksOnStack(context);
            if (!expectedSuccess)
                return SignalAction::Handled; // Let the SignalSender try again later.
            if (expectedSuccess.value())
                return SignalAction::Handled; // We've successfully jettisoned the codeBlocks.
        }

        return SignalAction::Handled;
    });
}

ALWAYS_INLINE static CallFrame* sanitizedTopCallFrame(CallFrame* topCallFrame)
{
#if !defined(NDEBUG) && !CPU(ARM) && !CPU(MIPS)
    // prepareForExternalCall() in DFGSpeculativeJIT.h may set topCallFrame to a bad word
    // before calling native functions, but tryInstallTrapBreakpoints() below expects
    // topCallFrame to be null if not set.
#if USE(JSVALUE64)
    const uintptr_t badBeefWord = 0xbadbeef0badbeef;
#else
    const uintptr_t badBeefWord = 0xbadbeef;
#endif
    if (topCallFrame == reinterpret_cast<CallFrame*>(badBeefWord))
        topCallFrame = nullptr;
#endif
    return topCallFrame;
}

static bool isSaneFrame(CallFrame* frame, CallFrame* calleeFrame, VMEntryFrame* entryFrame, StackBounds stackBounds)
{
    if (reinterpret_cast<void*>(frame) >= reinterpret_cast<void*>(entryFrame))
        return false;
    if (calleeFrame >= frame)
        return false;
    return stackBounds.contains(frame);
}

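// First half of the signal-based trap protocol: walk the interrupted thread's stack
// looking for an optimizing JIT CodeBlock, and install breakpoints at its invalidation
// points. When the mutator subsequently hits one of those breakpoints,
// tryJettisonCodeBlocksOnStack() below finishes the job. All locks here are try-locks:
// bailing out early is safe because the SignalSender will signal us again later.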
void VMTraps::tryInstallTrapBreakpoints(SignalContext& context, StackBounds stackBounds)
{
    // This must be the initial signal to get the mutator thread's attention.
    // Let's get the thread to break at invalidation points if needed.
    VM& vm = this->vm();
    void* trapPC = context.trapPC;

    CallFrame* callFrame = reinterpret_cast<CallFrame*>(context.framePointer);

    auto codeBlockSetLocker = tryHoldLock(vm.heap.codeBlockSet().getLock());
    if (!codeBlockSetLocker)
        return; // Let the SignalSender try again later.

    {
        auto& allocator = ExecutableAllocator::singleton();
        auto allocatorLocker = tryHoldLock(allocator.getLock());
        if (!allocatorLocker)
            return; // Let the SignalSender try again later.

        if (allocator.isValidExecutableMemory(allocatorLocker, trapPC)) {
            if (vm.isExecutingInRegExpJIT) {
                // We need to do this because a regExpJIT frame isn't a JS frame.
                callFrame = sanitizedTopCallFrame(vm.topCallFrame);
            }
        } else if (LLInt::isLLIntPC(trapPC)) {
            // The framePointer probably has the callFrame. We're good to go.
        } else {
            // We resort to topCallFrame to see if we can get anything
            // useful. We usually get here when we're executing C code.
            callFrame = sanitizedTopCallFrame(vm.topCallFrame);
        }
    }

    CodeBlock* foundCodeBlock = nullptr;
    VMEntryFrame* vmEntryFrame = vm.topVMEntryFrame;

    // We don't have a callee to start with. So, use the end of the stack to keep the
    // isSaneFrame() checker below happy for the first iteration. It will still check
    // to ensure that the address is in the stackBounds.
    CallFrame* calleeFrame = reinterpret_cast<CallFrame*>(stackBounds.end());

    if (!vmEntryFrame || !callFrame)
        return; // Not running JS code. Let the SignalSender try again later.

    do {
        if (!isSaneFrame(callFrame, calleeFrame, vmEntryFrame, stackBounds))
            return; // Let the SignalSender try again later.

        CodeBlock* candidateCodeBlock = callFrame->codeBlock();
        if (candidateCodeBlock && vm.heap.codeBlockSet().contains(codeBlockSetLocker, candidateCodeBlock)) {
            foundCodeBlock = candidateCodeBlock;
            break;
        }

        calleeFrame = callFrame;
        callFrame = callFrame->callerFrame(vmEntryFrame);

    } while (callFrame && vmEntryFrame);

    if (!foundCodeBlock) {
        // We may have just entered the frame and the codeBlock pointer is not
        // initialized yet. Just bail and let the SignalSender try again later.
        return;
    }

    if (JITCode::isOptimizingJIT(foundCodeBlock->jitType())) {
        auto locker = tryHoldLock(m_lock);
        if (!locker)
            return; // Let the SignalSender try again later.

        if (!foundCodeBlock->hasInstalledVMTrapBreakpoints())
            foundCodeBlock->installVMTrapBreakpoints();
        return;
    }
}

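// Second half of the protocol: called when the mutator trapped on a breakpoint we
// installed. If the trapping PC is one of our VMTrap breakpoints, we jettison that
// CodeBlock, invalidate the remaining optimized CodeBlocks on the stack, and point
// the PC back at the trapping instruction so execution resumes through the freshly
// patched invalidation OSR exit off-ramp.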
auto VMTraps::tryJettisonCodeBlocksOnStack(SignalContext& context) -> Expected<bool, Error>
{
    VM& vm = this->vm();
    auto codeBlockSetLocker = tryHoldLock(vm.heap.codeBlockSet().getLock());
    if (!codeBlockSetLocker)
        return makeUnexpected(Error::LockUnavailable);

    CallFrame* topCallFrame = reinterpret_cast<CallFrame*>(context.framePointer);
    void* trapPC = context.trapPC;
    bool trapPCIsVMTrap = false;

    vm.heap.forEachCodeBlockIgnoringJITPlans(codeBlockSetLocker, [&] (CodeBlock* codeBlock) {
        if (!codeBlock->hasInstalledVMTrapBreakpoints())
            return false; // Not found yet.

        JITCode* jitCode = codeBlock->jitCode().get();
        ASSERT(JITCode::isOptimizingJIT(jitCode->jitType()));
        if (jitCode->dfgCommon()->isVMTrapBreakpoint(trapPC)) {
            trapPCIsVMTrap = true;
            // At the codeBlock trap point, we're guaranteed that:
            // 1. the pc is not in the middle of any range of JIT code which invalidation points
            //    may write over. Hence, it's now safe to patch those invalidation points and
            //    jettison the codeBlocks.
            // 2. The top frame must be an optimized JS frame.
            ASSERT(codeBlock == topCallFrame->codeBlock());
            codeBlock->jettison(Profiler::JettisonDueToVMTraps);
            return true;
        }

        return false; // Not found yet.
    });

    if (!trapPCIsVMTrap)
        return false;

    invalidateCodeBlocksOnStack(codeBlockSetLocker, topCallFrame);

    // Re-run the trapping instruction now that we've patched it with the invalidation
    // OSR exit off-ramp.
    context.adjustPCToPointToTrappingInstruction();
    return true;
}

void VMTraps::invalidateCodeBlocksOnStack()
{
    invalidateCodeBlocksOnStack(vm().topCallFrame);
}

void VMTraps::invalidateCodeBlocksOnStack(ExecState* topCallFrame)
{
    auto codeBlockSetLocker = holdLock(vm().heap.codeBlockSet().getLock());
    invalidateCodeBlocksOnStack(codeBlockSetLocker, topCallFrame);
}

void VMTraps::invalidateCodeBlocksOnStack(Locker<Lock>&, ExecState* topCallFrame)
{
    if (!m_needToInvalidatedCodeBlocks)
        return;

    m_needToInvalidatedCodeBlocks = false;

    VMEntryFrame* vmEntryFrame = vm().topVMEntryFrame;
    CallFrame* callFrame = topCallFrame;

    if (!vmEntryFrame)
        return; // Not running JS code. Nothing to invalidate.

    while (callFrame) {
        CodeBlock* codeBlock = callFrame->codeBlock();
        if (codeBlock && JITCode::isOptimizingJIT(codeBlock->jitType()))
            codeBlock->jettison(Profiler::JettisonDueToVMTraps);
        callFrame = callFrame->callerFrame(vmEntryFrame);
    }
}

#endif // ENABLE(SIGNAL_BASED_VM_TRAPS)

VMTraps::VMTraps()
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    if (!Options::usePollingTraps()) {
        static std::once_flag once;
        std::call_once(once, [] {
            installSignalHandler();
        });
    }
#endif
}

void VMTraps::willDestroyVM()
{
    m_isShuttingDown = true;
    WTF::storeStoreFence();
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    while (!m_signalSenders.isEmpty()) {
        RefPtr<SignalSender> sender;
        {
            // We don't want to be holding the VMTraps lock when calling
            // SignalSender::willDestroyVM() because SignalSender::willDestroyVM()
            // will acquire the SignalSender lock, and SignalSender::send() needs
            // to acquire these locks in the opposite order.
            auto locker = holdLock(m_lock);
            sender = m_signalSenders.takeAny();
            if (!sender)
                break;
        }
        sender->willDestroyVM();
    }
    ASSERT(m_signalSenders.isEmpty());
#endif
}

#if ENABLE(SIGNAL_BASED_VM_TRAPS)
void VMTraps::addSignalSender(VMTraps::SignalSender* sender)
{
    auto locker = holdLock(m_lock);
    m_signalSenders.add(sender);
}

void VMTraps::removeSignalSender(VMTraps::SignalSender* sender)
{
    auto locker = holdLock(m_lock);
    m_signalSenders.remove(sender);
}

void VMTraps::SignalSender::willDestroyVM()
{
    auto locker = holdLock(m_lock);
    m_vm = nullptr;
}

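// Keeps prodding the mutator thread until the trap request has been serviced: each
// iteration either delivers a thread message that installs trap breakpoints on the
// owner thread, or sleeps briefly and retries while the VM still needs trap handling.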
void VMTraps::SignalSender::send()
{
    while (true) {
        // We need a nested scope so that we'll release the lock before we sleep below.
        {
            auto locker = holdLock(m_lock);
            if (!m_vm)
                break;

            VM& vm = *m_vm;
            auto optionalOwnerThread = vm.ownerThread();
            if (optionalOwnerThread) {
                sendMessage(*optionalOwnerThread.value().get(), [] (PlatformRegisters& registers) -> void {
                    SignalContext context(registers);
                    auto activeVMAndStackBounds = findActiveVMAndStackBounds(context);
                    if (activeVMAndStackBounds) {
                        VM* vm = activeVMAndStackBounds.value().vm;
                        if (vm) {
                            StackBounds stackBounds = activeVMAndStackBounds.value().stackBounds;
                            VMTraps& traps = vm->traps();
                            if (traps.needTrapHandling())
                                traps.tryInstallTrapBreakpoints(context, stackBounds);
                        }
                    }
                });
                break;
            }

            if (vmIsInactive(vm))
                break;

            VMTraps::Mask mask(m_eventType);
            if (!vm.needTrapHandling(mask))
                break;
        }

        sleepMS(1);
    }

    auto locker = holdLock(m_lock);
    if (m_vm)
        m_vm->traps().removeSignalSender(this);
}

#endif // ENABLE(SIGNAL_BASED_VM_TRAPS)

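// Records the trap event and, when signal-based traps are in use, hands delivery off
// to a dedicated signalling thread so that fireTrap() itself never blocks.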
void VMTraps::fireTrap(VMTraps::EventType eventType)
{
    ASSERT(!vm().currentThreadIsHoldingAPILock());
    {
        auto locker = holdLock(m_lock);
        ASSERT(!m_isShuttingDown);
        setTrapForEvent(locker, eventType);
        m_needToInvalidatedCodeBlocks = true;
    }

#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    if (!Options::usePollingTraps()) {
        // sendSignal() can loop until it has confirmation that the mutator thread
        // has received the trap request. We'll call it from another thread so that
        // fireTrap() does not block.
        RefPtr<SignalSender> sender = adoptRef(new SignalSender(vm(), eventType));
        addSignalSender(sender.get());
        Thread::create("jsc.vmtraps.signalling.thread", [sender] {
            sender->send();
        });
    }
#endif
}

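// The polling half of the trap machinery: the mutator calls this from its traps check
// points once needTrapHandling() reports pending work, and services each pending event
// in priority order.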
void VMTraps::handleTraps(ExecState* exec, VMTraps::Mask mask)
{
    VM& vm = this->vm();
    auto scope = DECLARE_THROW_SCOPE(vm);

    ASSERT(needTrapHandling(mask));
    while (needTrapHandling(mask)) {
        auto eventType = takeTopPriorityTrap(mask);
        switch (eventType) {
        case NeedDebuggerBreak:
            dataLog("VM ", RawPointer(&vm), " on pid ", getCurrentProcessID(), " received NeedDebuggerBreak trap\n");
            invalidateCodeBlocksOnStack(exec);
            break;

        case NeedWatchdogCheck:
            ASSERT(vm.watchdog());
            if (LIKELY(!vm.watchdog()->shouldTerminate(exec)))
                continue;
            FALLTHROUGH;

        case NeedTermination:
            invalidateCodeBlocksOnStack(exec);
            throwException(exec, scope, createTerminatedExecutionException(&vm));
            return;

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
}

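// EventTypes are numbered by priority: the lowest-numbered pending event is taken first.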
auto VMTraps::takeTopPriorityTrap(VMTraps::Mask mask) -> EventType
{
    auto locker = holdLock(m_lock);
    for (int i = 0; i < NumberOfEventTypes; ++i) {
        EventType eventType = static_cast<EventType>(i);
        if (hasTrapForEvent(locker, eventType, mask)) {
            clearTrapForEvent(locker, eventType);