[Win] jsc.exe sometimes never exits.
[WebKit-https.git] / Source / JavaScriptCore / heap / MachineStackMarker.cpp
1 /*
2  *  Copyright (C) 2003-2009, 2015-2016 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *  Copyright (C) 2009 Acision BV. All rights reserved.
5  *
6  *  This library is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU Lesser General Public
8  *  License as published by the Free Software Foundation; either
9  *  version 2 of the License, or (at your option) any later version.
10  *
11  *  This library is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  Lesser General Public License for more details.
15  *
16  *  You should have received a copy of the GNU Lesser General Public
17  *  License along with this library; if not, write to the Free Software
18  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  */
21
22 #include "config.h"
23 #include "MachineStackMarker.h"
24
25 #include "ConservativeRoots.h"
26 #include "GPRInfo.h"
27 #include "Heap.h"
28 #include "JSArray.h"
29 #include "JSCInlines.h"
30 #include "LLIntPCRanges.h"
31 #include "MacroAssembler.h"
32 #include "VM.h"
33 #include <setjmp.h>
34 #include <stdlib.h>
35 #include <wtf/MainThread.h>
36 #include <wtf/StdLibExtras.h>
37
38 #if OS(DARWIN)
39
40 #include <mach/mach_init.h>
41 #include <mach/mach_port.h>
42 #include <mach/task.h>
43 #include <mach/thread_act.h>
44 #include <mach/vm_map.h>
45
46 #elif OS(WINDOWS)
47
48 #include <windows.h>
49 #include <malloc.h>
50
51 #elif OS(UNIX)
52
53 #include <sys/mman.h>
54 #include <unistd.h>
55
56 #if OS(SOLARIS)
57 #include <thread.h>
58 #else
59 #include <pthread.h>
60 #endif
61
62 #if HAVE(PTHREAD_NP_H)
63 #include <pthread_np.h>
64 #endif
65
66 #if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
67 #include <signal.h>
68
69 // We use SIGUSR2 to suspend and resume machine threads in JavaScriptCore.
70 static const int SigThreadSuspendResume = SIGUSR2;
71 static StaticLock globalSignalLock;
72 thread_local static std::atomic<JSC::MachineThreads::Thread*> threadLocalCurrentThread;
73
// Signal handler shared by Thread::suspend() and Thread::resume() on
// non-Darwin pthread platforms. The first (suspend) invocation saves the
// interrupted thread's machine context and parks the thread in sigsuspend();
// the second (resume) invocation merely returns, which lets that sigsuspend()
// complete. The two cases are distinguished via the thread's `suspended` flag.
static void pthreadSignalHandlerSuspendResume(int, siginfo_t*, void* ucontext)
{
    // Touching thread local atomic types from signal handlers is allowed.
    JSC::MachineThreads::Thread* thread = threadLocalCurrentThread.load();

    if (thread->suspended.load(std::memory_order_acquire)) {
        // This is signal handler invocation that is intended to be used to resume sigsuspend.
        // So this handler invocation itself should not process.
        //
        // When signal comes, first, the system calls signal handler. And later, sigsuspend will be resumed. Signal handler invocation always precedes.
        // So, the problem never happens that suspended.store(true, ...) will be executed before the handler is called.
        // http://pubs.opengroup.org/onlinepubs/009695399/functions/sigsuspend.html
        return;
    }

    // Save the register state at the point of interruption so the GC can
    // conservatively scan it while this thread is parked below.
    ucontext_t* userContext = static_cast<ucontext_t*>(ucontext);
#if CPU(PPC)
    thread->suspendedMachineContext = *userContext->uc_mcontext.uc_regs;
#else
    thread->suspendedMachineContext = userContext->uc_mcontext;
#endif

    // Allow suspend caller to see that this thread is suspended.
    // sem_post is async-signal-safe function. It means that we can call this from a signal handler.
    // http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html#tag_02_04_03
    //
    // And sem_post emits memory barrier that ensures that suspendedMachineContext is correctly saved.
    // http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_11
    sem_post(&thread->semaphoreForSuspendResume);

    // Reaching here, SigThreadSuspendResume is blocked in this handler (this is configured by sigaction's sa_mask).
    // So before calling sigsuspend, SigThreadSuspendResume to this thread is deferred. This ensures that the handler is not executed recursively.
    sigset_t blockedSignalSet;
    sigfillset(&blockedSignalSet);
    sigdelset(&blockedSignalSet, SigThreadSuspendResume);
    sigsuspend(&blockedSignalSet);

    // Allow resume caller to see that this thread is resumed.
    sem_post(&thread->semaphoreForSuspendResume);
}
114 #endif // USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
115
116 #endif
117
118 using namespace WTF;
119
120 namespace JSC {
121
122 using Thread = MachineThreads::Thread;
123
124 class ActiveMachineThreadsManager;
125 static ActiveMachineThreadsManager& activeMachineThreadsManager();
126
127 class ActiveMachineThreadsManager {
128     WTF_MAKE_NONCOPYABLE(ActiveMachineThreadsManager);
129 public:
130
131     class Locker {
132     public:
133         Locker(ActiveMachineThreadsManager& manager)
134             : m_locker(manager.m_lock)
135         {
136         }
137
138     private:
139         LockHolder m_locker;
140     };
141
142     void add(MachineThreads* machineThreads)
143     {
144         LockHolder managerLock(m_lock);
145         m_set.add(machineThreads);
146     }
147
148     void THREAD_SPECIFIC_CALL remove(MachineThreads* machineThreads)
149     {
150         LockHolder managerLock(m_lock);
151         auto recordedMachineThreads = m_set.take(machineThreads);
152         RELEASE_ASSERT(recordedMachineThreads = machineThreads);
153     }
154
155     bool contains(MachineThreads* machineThreads)
156     {
157         return m_set.contains(machineThreads);
158     }
159
160 private:
161     typedef HashSet<MachineThreads*> MachineThreadsSet;
162
163     ActiveMachineThreadsManager() { }
164     
165     Lock m_lock;
166     MachineThreadsSet m_set;
167
168     friend ActiveMachineThreadsManager& activeMachineThreadsManager();
169 };
170
171 static ActiveMachineThreadsManager& activeMachineThreadsManager()
172 {
173     static std::once_flag initializeManagerOnceFlag;
174     static ActiveMachineThreadsManager* manager = nullptr;
175
176     std::call_once(initializeManagerOnceFlag, [] {
177         manager = new ActiveMachineThreadsManager();
178     });
179     return *manager;
180 }
181     
// Returns the platform-specific identifier for the calling thread: a mach
// thread port on Darwin, a thread id on Windows, and a pthread_t elsewhere.
static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return GetCurrentThreadId();
#elif USE(PTHREADS)
    return pthread_self();
#endif
}
192
// Creates the per-heap thread registry. Installs removeThread as the
// thread-specific-key destructor so registered threads unregister themselves
// on exit, and records this registry in the global active-managers set.
MachineThreads::MachineThreads(Heap* heap)
    : m_registeredThreads(0)
    , m_threadSpecificForMachineThreads(0)
#if !ASSERT_DISABLED
    , m_heap(heap)
#endif
{
    UNUSED_PARAM(heap);
    threadSpecificKeyCreate(&m_threadSpecificForMachineThreads, removeThread);
    activeMachineThreadsManager().add(this);
}
204
205 MachineThreads::~MachineThreads()
206 {
207     activeMachineThreadsManager().remove(this);
208     threadSpecificKeyDelete(m_threadSpecificForMachineThreads);
209
210     LockHolder registeredThreadsLock(m_registeredThreadsMutex);
211     for (Thread* t = m_registeredThreads; t;) {
212         Thread* next = t->next;
213         delete t;
214         t = next;
215     }
216 }
217
// Builds a Thread record for the calling thread, using the stack bounds WTF
// captured for this thread. Caller owns the returned object.
Thread* MachineThreads::Thread::createForCurrentThread()
{
    auto stackBounds = wtfThreadData().stack();
    return new Thread(getCurrentPlatformThread(), stackBounds.origin(), stackBounds.end());
}
223
// Compares this record's platform thread identifier against another one.
bool MachineThreads::Thread::operator==(const PlatformThread& other) const
{
#if OS(DARWIN) || OS(WINDOWS)
    return platformThread == other;
#elif USE(PTHREADS)
    // pthread_t is opaque, so pthread_equal() is the only portable comparison.
    return !!pthread_equal(platformThread, other);
#else
#error Need a way to compare threads on this platform
#endif
}
234
// Registers the calling thread for conservative stack scanning. Idempotent:
// a thread already registered with this registry returns immediately.
void MachineThreads::addCurrentThread()
{
    ASSERT(!m_heap->vm()->hasExclusiveThread() || m_heap->vm()->exclusiveThread() == std::this_thread::get_id());

    // The thread-specific slot is non-null iff this thread already registered
    // itself with this MachineThreads instance.
    if (threadSpecificGet(m_threadSpecificForMachineThreads)) {
#ifndef NDEBUG
        LockHolder lock(m_registeredThreadsMutex);
        ASSERT(threadSpecificGet(m_threadSpecificForMachineThreads) == this);
#endif
        return;
    }

    Thread* thread = Thread::createForCurrentThread();
    // Mark this thread as registered before linking, so the exit destructor
    // (removeThread) will be invoked for it.
    threadSpecificSet(m_threadSpecificForMachineThreads, this);

    LockHolder lock(m_registeredThreadsMutex);

    // Push onto the head of the singly-linked registry list.
    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}
255
256 Thread* MachineThreads::machineThreadForCurrentThread()
257 {
258     LockHolder lock(m_registeredThreadsMutex);
259     PlatformThread platformThread = getCurrentPlatformThread();
260     for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
261         if (*thread == platformThread)
262             return thread;
263     }
264
265     RELEASE_ASSERT_NOT_REACHED();
266     return nullptr;
267 }
268
// Thread-specific-key destructor: invoked when a registered thread exits.
// `p` is the MachineThreads* stored in the thread-specific slot by
// addCurrentThread().
void THREAD_SPECIFIC_CALL MachineThreads::removeThread(void* p)
{
    auto& manager = activeMachineThreadsManager();
    ActiveMachineThreadsManager::Locker lock(manager);
    auto machineThreads = static_cast<MachineThreads*>(p);
    if (manager.contains(machineThreads)) {
        // There's a chance that the MachineThreads registry that this thread
        // was registered with was already destructed, and another one happened
        // to be instantiated at the same address. Hence, this thread may or
        // may not be found in this MachineThreads registry. We only need to
        // do a removal if this thread is found in it.

#if PLATFORM(WIN)
        // On Windows the thread specific destructor is also called when the
        // main thread is exiting. This may lead to the main thread waiting
        // forever for the machine thread lock when exiting, if the sampling
        // profiler thread was terminated by the system while holding the
        // machine thread lock.
        if (WTF::isMainThread())
            return;
#endif

        machineThreads->removeThreadIfFound(getCurrentPlatformThread());
    }
}
294
295 template<typename PlatformThread>
296 void MachineThreads::removeThreadIfFound(PlatformThread platformThread)
297 {
298     LockHolder lock(m_registeredThreadsMutex);
299     Thread* t = m_registeredThreads;
300     if (*t == platformThread) {
301         m_registeredThreads = m_registeredThreads->next;
302         delete t;
303     } else {
304         Thread* last = m_registeredThreads;
305         for (t = m_registeredThreads->next; t; t = t->next) {
306             if (*t == platformThread) {
307                 last->next = t->next;
308                 break;
309             }
310             last = t;
311         }
312         delete t;
313     }
314 }
315
// Records the identity and stack bounds of the (current) thread and sets up
// the platform machinery needed to later suspend/resume it from another
// thread.
MachineThreads::Thread::Thread(const PlatformThread& platThread, void* base, void* end)
    : platformThread(platThread)
    , stackBase(base)
    , stackEnd(end)
{
#if OS(WINDOWS)
    ASSERT(platformThread == GetCurrentThreadId());
    // A thread id cannot be passed to Suspend/ResumeThread; duplicate a real
    // handle to the current thread for later use.
    bool isSuccessful =
        DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
            &platformThreadHandle, 0, FALSE, DUPLICATE_SAME_ACCESS);
    RELEASE_ASSERT(isSuccessful);
#elif USE(PTHREADS) && !OS(DARWIN)
    // Publish this Thread* so pthreadSignalHandlerSuspendResume can find it.
    threadLocalCurrentThread.store(this);

    // Signal handlers are process global configuration.
    static std::once_flag initializeSignalHandler;
    std::call_once(initializeSignalHandler, [] {
        // Intentionally block SigThreadSuspendResume in the handler.
        // SigThreadSuspendResume will be allowed in the handler by sigsuspend.
        struct sigaction action;
        sigemptyset(&action.sa_mask);
        sigaddset(&action.sa_mask, SigThreadSuspendResume);

        action.sa_sigaction = pthreadSignalHandlerSuspendResume;
        action.sa_flags = SA_RESTART | SA_SIGINFO;
        sigaction(SigThreadSuspendResume, &action, 0);
    });

    // Make sure this thread can actually receive the suspend/resume signal
    // (it may have inherited a mask that blocks it).
    sigset_t mask;
    sigemptyset(&mask);
    sigaddset(&mask, SigThreadSuspendResume);
    pthread_sigmask(SIG_UNBLOCK, &mask, 0);

    sem_init(&semaphoreForSuspendResume, /* Only available in this process. */ 0, /* Initial value for the semaphore. */ 0);
#endif
}
352
// Releases the platform resources acquired in the constructor (the
// duplicated handle on Windows, the suspend/resume semaphore on pthreads).
MachineThreads::Thread::~Thread()
{
#if OS(WINDOWS)
    CloseHandle(platformThreadHandle);
#elif USE(PTHREADS) && !OS(DARWIN)
    sem_destroy(&semaphoreForSuspendResume);
#endif
}
361
// Suspends the target thread so its stack and registers can be scanned.
// Must not be called on the calling thread itself. On pthreads, suspension
// nests via suspendCount; only the first call actually signals the thread.
// Returns false if the thread could not be suspended (e.g. it already died).
bool MachineThreads::Thread::suspend()
{
#if OS(DARWIN)
    kern_return_t result = thread_suspend(platformThread);
    return result == KERN_SUCCESS;
#elif OS(WINDOWS)
    bool threadIsSuspended = (SuspendThread(platformThreadHandle) != (DWORD)-1);
    ASSERT(threadIsSuspended);
    return threadIsSuspended;
#elif USE(PTHREADS)
    ASSERT_WITH_MESSAGE(getCurrentPlatformThread() != platformThread, "Currently we don't support suspend the current thread itself.");
    {
        // During suspend, suspend or resume should not be executed from the other threads.
        // We use global lock instead of per thread lock.
        // Consider the following case, there are threads A and B.
        // And A attempt to suspend B and B attempt to suspend A.
        // A and B send signals. And later, signals are delivered to A and B.
        // In that case, both will be suspended.
        LockHolder lock(globalSignalLock);
        if (!suspendCount) {
            // Ideally, we would like to use pthread_sigqueue. It allows us to pass the argument to the signal handler.
            // But it can be used in a few platforms, like Linux.
            // Instead, we use Thread* stored in the thread local storage to pass it to the signal handler.
            if (pthread_kill(platformThread, SigThreadSuspendResume) == ESRCH)
                return false;
            // Wait for the handler's sem_post, i.e. until the target thread
            // has saved its machine context and parked in sigsuspend.
            sem_wait(&semaphoreForSuspendResume);
            // Release barrier ensures that this operation is always executed after all the above processing is done.
            suspended.store(true, std::memory_order_release);
        }
        ++suspendCount;
    }
    return true;
#else
#error Need a way to suspend threads on this platform
#endif
}
398
// Resumes a thread previously stopped by suspend(). On pthreads, only the
// outermost resume (suspendCount == 1) actually signals the thread; the
// signal unblocks the sigsuspend() the target is parked in.
void MachineThreads::Thread::resume()
{
#if OS(DARWIN)
    thread_resume(platformThread);
#elif OS(WINDOWS)
    ResumeThread(platformThreadHandle);
#elif USE(PTHREADS)
    {
        // During resume, suspend or resume should not be executed from the other threads.
        LockHolder lock(globalSignalLock);
        if (suspendCount == 1) {
            // When allowing SigThreadSuspendResume interrupt in the signal handler by sigsuspend and SigThreadSuspendResume is actually issued,
            // the signal handler itself will be called once again.
            // There are several ways to distinguish the handler invocation for suspend and resume.
            // 1. Use different signal numbers. And check the signal number in the handler.
            // 2. Use some arguments to distinguish suspend and resume in the handler. If pthread_sigqueue can be used, we can take this.
            // 3. Use thread local storage with atomic variables in the signal handler.
            // In this implementation, we take (3). suspended flag is used to distinguish it.
            if (pthread_kill(platformThread, SigThreadSuspendResume) == ESRCH)
                return;
            // Wait for the handler's second sem_post, confirming the target
            // thread has left sigsuspend and is running again.
            sem_wait(&semaphoreForSuspendResume);
            // Release barrier ensures that this operation is always executed after all the above processing is done.
            suspended.store(false, std::memory_order_release);
        }
        --suspendCount;
    }
#else
#error Need a way to resume threads on this platform
#endif
}
429
// Captures the suspended thread's register state into |registers|. Returns
// the number of bytes of |registers| that hold register values and must be
// conservatively scanned (0 on the generic pthreads path, where registers
// were already captured into suspendedMachineContext by the signal handler).
// Pair every call with freeRegisters() — the pthreads path allocates a
// pthread_attr_t here.
size_t MachineThreads::Thread::getRegisters(Thread::Registers& registers)
{
    Thread::Registers::PlatformRegisters& regs = registers.regs;
#if OS(DARWIN)
#if CPU(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif CPU(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif CPU(PPC) 
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif CPU(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif CPU(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#elif CPU(ARM64)
    unsigned user_count = ARM_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE64;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(uintptr_t);
// end OS(DARWIN)

#elif OS(WINDOWS)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
    GetThreadContext(platformThreadHandle, &regs);
    return sizeof(CONTEXT);
#elif USE(PTHREADS)
    // Fetch the thread attributes (used for stack bounds by stackPointer())
    // and copy in the machine context the signal handler saved at suspend time.
    pthread_attr_init(&regs.attribute);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
#if !OS(OPENBSD)
    // e.g. on FreeBSD 5.4, neundorf@kde.org
    pthread_attr_get_np(platformThread, &regs.attribute);
#endif
#else
    // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
    pthread_getattr_np(platformThread, &regs.attribute);
#endif
    regs.machineContext = suspendedMachineContext;
    return 0;
#else
#error Need a way to get thread registers on this platform
#endif
}
486
// Extracts the stack pointer from the register state captured by
// getRegisters(); the GC scans from here up to the stack base.
void* MachineThreads::Thread::Registers::stackPointer() const
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#elif CPU(ARM64)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif OS(WINDOWS)

#if CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.Sp);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.IntSp);
#elif CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.Rsp);
#else
#error Unknown Architecture
#endif

#elif USE(PTHREADS)

#if OS(FREEBSD) && ENABLE(JIT)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rsp);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_SP]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_sp);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[29]);
#else
#error Unknown Architecture
#endif

#elif defined(__GLIBC__) && ENABLE(JIT)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_ESP]);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RSP]);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_sp);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.sp);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[29]);
#else
#error Unknown Architecture
#endif

#else
    // Fallback when no saved machine context is available: derive the top of
    // the thread's stack from its pthread attributes instead of a register.
    void* stackBase = 0;
    size_t stackSize = 0;
#if OS(OPENBSD)
    // NOTE(review): this queries pthread_self(), i.e. the scanning thread,
    // not the target thread — confirm this is intended on OpenBSD.
    stack_t ss;
    int rc = pthread_stackseg_np(pthread_self(), &ss);
    stackBase = (void*)((size_t) ss.ss_sp - ss.ss_size);
    stackSize = ss.ss_size;
#else
    int rc = pthread_attr_getstack(&regs.attribute, &stackBase, &stackSize);
#endif
    (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
    ASSERT(stackBase);
    return static_cast<char*>(stackBase) + stackSize;
#endif

#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
590
591 #if ENABLE(SAMPLING_PROFILER)
// Extracts the frame pointer from the register state captured by
// getRegisters(). Used by the sampling profiler to walk the suspended
// thread's call frames.
void* MachineThreads::Thread::Registers::framePointer() const
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__ebp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rbp);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__r[11]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>(regs.__x[29]);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

    // NOTE(review): esp/rsp here look like a copy-paste from stackPointer();
    // a frame pointer read would be expected to use ebp/rbp. This
    // configuration is effectively dead on modern Darwin — confirm before
    // relying on it.
#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif OS(WINDOWS)

#if CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.R11);
#elif CPU(MIPS)
#error Dont know what to do with mips. Do we even need this?
#elif CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.Ebp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.Rbp);
#else
#error Unknown Architecture
#endif

#elif OS(FREEBSD)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_ebp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rbp);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_FP]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_x[29]);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[30]);
#else
#error Unknown Architecture
#endif

#elif defined(__GLIBC__)

// The following sequence depends on glibc's sys/ucontext.h.
#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_EBP]);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RBP]);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_fp);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.regs[29]);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[30]);
#else
#error Unknown Architecture
#endif

#else
#error Need a way to get the frame pointer for another thread on this platform
#endif
}
674
// Extracts the instruction pointer (program counter) from the register state
// captured by getRegisters(). Used by the sampling profiler.
void* MachineThreads::Thread::Registers::instructionPointer() const
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__eip);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rip);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__pc);
#elif CPU(ARM64)
    return reinterpret_cast<void*>(regs.__pc);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03
#if CPU(X86)
    return reinterpret_cast<void*>(regs.eip);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rip);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif OS(WINDOWS)

#if CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.Pc);
#elif CPU(MIPS)
#error Dont know what to do with mips. Do we even need this?
#elif CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.Eip);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.Rip);
#else
#error Unknown Architecture
#endif

#elif OS(FREEBSD)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_eip);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rip);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_PC]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_elr);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_pc);
#else
#error Unknown Architecture
#endif

#elif defined(__GLIBC__)

// The following sequence depends on glibc's sys/ucontext.h.
#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_EIP]);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RIP]);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_pc);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.pc);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.pc);
#else
#error Unknown Architecture
#endif

#else
#error Need a way to get the instruction pointer for another thread on this platform
#endif
}
// Returns the value of the register LLInt dedicates to its bytecode PC, so
// the sampling profiler can attribute samples taken inside the interpreter.
void* MachineThreads::Thread::Registers::llintPC() const
{
    // LLInt uses regT4 as PC.
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.__esi);
#elif CPU(X86_64)
    static_assert(LLInt::LLIntPC == X86Registers::r8, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.__r8);
#elif CPU(ARM)
    static_assert(LLInt::LLIntPC == ARMRegisters::r8, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.__r[8]);
#elif CPU(ARM64)
    static_assert(LLInt::LLIntPC == ARM64Registers::x4, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.__x[4]);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03
#if CPU(X86)
    static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.esi);
#elif CPU(X86_64)
    static_assert(LLInt::LLIntPC == X86Registers::r8, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.r8);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif OS(WINDOWS)

#if CPU(ARM)
    static_assert(LLInt::LLIntPC == ARMRegisters::r8, "Wrong LLInt PC.");
    return reinterpret_cast<void*>((uintptr_t) regs.R8);
#elif CPU(MIPS)
#error Dont know what to do with mips. Do we even need this?
#elif CPU(X86)
    static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
    return reinterpret_cast<void*>((uintptr_t) regs.Esi);
#elif CPU(X86_64)
    static_assert(LLInt::LLIntPC == X86Registers::r10, "Wrong LLInt PC.");
    return reinterpret_cast<void*>((uintptr_t) regs.R10);
#else
#error Unknown Architecture
#endif

#elif OS(FREEBSD)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_esi);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_r8);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_R8]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_x[4]);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[12]);
#else
#error Unknown Architecture
#endif

#elif defined(__GLIBC__)

// The following sequence depends on glibc's sys/ucontext.h.
#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_ESI]);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_R8]);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_r8);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.regs[4]);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[12]);
#else
#error Unknown Architecture
#endif

#else
#error Need a way to get the LLIntPC for another thread on this platform
#endif
}
847 #endif // ENABLE(SAMPLING_PROFILER)
848
// Releases resources acquired by getRegisters(). Only the generic pthreads
// path acquires anything: the pthread_attr_t initialized there.
void MachineThreads::Thread::freeRegisters(Thread::Registers& registers)
{
    Thread::Registers::PlatformRegisters& regs = registers.regs;
#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
    pthread_attr_destroy(&regs.attribute);
#else
    UNUSED_PARAM(regs);
#endif
}
858
859 static inline int osRedZoneAdjustment()
860 {
861     int redZoneAdjustment = 0;
862 #if !OS(WINDOWS)
863 #if CPU(X86_64)
864     // See http://people.freebsd.org/~obrien/amd64-elf-abi.pdf Section 3.2.2.
865     redZoneAdjustment = -128;
866 #elif CPU(ARM64)
867     // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html#//apple_ref/doc/uid/TP40013702-SW7
868     redZoneAdjustment = -128;
869 #endif
870 #endif // !OS(WINDOWS)
871     return redZoneAdjustment;
872 }
873
// Computes the address range of this thread's stack that must be
// conservatively scanned, given the stack pointer captured at suspend time.
// Returns {lowest address, byte length}.
std::pair<void*, size_t> MachineThreads::Thread::captureStack(void* stackTop)
{
    // Stacks grow down, so stackBase (the origin) is the highest address.
    char* begin = reinterpret_cast_ptr<char*>(stackBase);
    char* end = bitwise_cast<char*>(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(stackTop)));
    ASSERT(begin >= end);

    // Extend below the stack pointer to cover the ABI red zone, which may
    // hold live values (see osRedZoneAdjustment()).
    char* endWithRedZone = end + osRedZoneAdjustment();
    ASSERT(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(endWithRedZone)) == reinterpret_cast<uintptr_t>(endWithRedZone));

    // Never scan past the recorded end of the stack.
    if (endWithRedZone < stackEnd)
        endWithRedZone = reinterpret_cast_ptr<char*>(stackEnd);

    // Swap so the returned pair is in ascending-address order.
    std::swap(begin, endWithRedZone);
    return std::make_pair(begin, endWithRedZone - begin);
}
889
890 SUPPRESS_ASAN
891 static void copyMemory(void* dst, const void* src, size_t size)
892 {
893     size_t dstAsSize = reinterpret_cast<size_t>(dst);
894     size_t srcAsSize = reinterpret_cast<size_t>(src);
895     RELEASE_ASSERT(dstAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(dstAsSize));
896     RELEASE_ASSERT(srcAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(srcAsSize));
897     RELEASE_ASSERT(size == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(size));
898
899     intptr_t* dstPtr = reinterpret_cast<intptr_t*>(dst);
900     const intptr_t* srcPtr = reinterpret_cast<const intptr_t*>(src);
901     size /= sizeof(intptr_t);
902     while (size--)
903         *dstPtr++ = *srcPtr++;
904 }
905     
906
907
908 // This function must not call malloc(), free(), or any other function that might
909 // acquire a lock. Since 'thread' is suspended, trying to acquire a lock
910 // will deadlock if 'thread' holds that lock.
911 // This function, specifically the memory copying, was causing problems with Address Sanitizer in
912 // apps. Since we cannot blacklist the system memcpy we must use our own naive implementation,
913 // copyMemory, for ASan to work on either instrumented or non-instrumented builds. This is not a
914 // significant performance loss as tryCopyOtherThreadStack is only called as part of an O(heapsize)
915 // operation. As the heap is generally much larger than the stack the performance hit is minimal.
916 // See: https://bugs.webkit.org/show_bug.cgi?id=146297
917 void MachineThreads::tryCopyOtherThreadStack(Thread* thread, void* buffer, size_t capacity, size_t* size)
918 {
919     Thread::Registers registers;
920     size_t registersSize = thread->getRegisters(registers);
921     std::pair<void*, size_t> stack = thread->captureStack(registers.stackPointer());
922
923     bool canCopy = *size + registersSize + stack.second <= capacity;
924
925     if (canCopy)
926         copyMemory(static_cast<char*>(buffer) + *size, &registers, registersSize);
927     *size += registersSize;
928
929     if (canCopy)
930         copyMemory(static_cast<char*>(buffer) + *size, stack.first, stack.second);
931     *size += stack.second;
932
933     thread->freeRegisters(registers);
934 }
935
936 bool MachineThreads::tryCopyOtherThreadStacks(LockHolder&, void* buffer, size_t capacity, size_t* size)
937 {
938     // Prevent two VMs from suspending each other's threads at the same time,
939     // which can cause deadlock: <rdar://problem/20300842>.
940     static StaticLock mutex;
941     std::lock_guard<StaticLock> lock(mutex);
942
943     *size = 0;
944
945     PlatformThread currentPlatformThread = getCurrentPlatformThread();
946     int numberOfThreads = 0; // Using 0 to denote that we haven't counted the number of threads yet.
947     int index = 1;
948     Thread* threadsToBeDeleted = nullptr;
949
950     Thread* previousThread = nullptr;
951     for (Thread* thread = m_registeredThreads; thread; index++) {
952         if (*thread != currentPlatformThread) {
953             bool success = thread->suspend();
954 #if OS(DARWIN)
955             if (!success) {
956                 if (!numberOfThreads) {
957                     for (Thread* countedThread = m_registeredThreads; countedThread; countedThread = countedThread->next)
958                         numberOfThreads++;
959                 }
960                 
961                 // Re-do the suspension to get the actual failure result for logging.
962                 kern_return_t error = thread_suspend(thread->platformThread);
963                 ASSERT(error != KERN_SUCCESS);
964
965                 WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
966                     "JavaScript garbage collection encountered an invalid thread (err 0x%x): Thread [%d/%d: %p] platformThread %p.",
967                     error, index, numberOfThreads, thread, reinterpret_cast<void*>(thread->platformThread));
968
969                 // Put the invalid thread on the threadsToBeDeleted list.
970                 // We can't just delete it here because we have suspended other
971                 // threads, and they may still be holding the C heap lock which
972                 // we need for deleting the invalid thread. Hence, we need to
973                 // defer the deletion till after we have resumed all threads.
974                 Thread* nextThread = thread->next;
975                 thread->next = threadsToBeDeleted;
976                 threadsToBeDeleted = thread;
977
978                 if (previousThread)
979                     previousThread->next = nextThread;
980                 else
981                     m_registeredThreads = nextThread;
982                 thread = nextThread;
983                 continue;
984             }
985 #else
986             UNUSED_PARAM(numberOfThreads);
987             UNUSED_PARAM(previousThread);
988             ASSERT_UNUSED(success, success);
989 #endif
990         }
991         previousThread = thread;
992         thread = thread->next;
993     }
994
995     for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
996         if (*thread != currentPlatformThread)
997             tryCopyOtherThreadStack(thread, buffer, capacity, size);
998     }
999
1000     for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
1001         if (*thread != currentPlatformThread)
1002             thread->resume();
1003     }
1004
1005     for (Thread* thread = threadsToBeDeleted; thread; ) {
1006         Thread* nextThread = thread->next;
1007         delete thread;
1008         thread = nextThread;
1009     }
1010     
1011     return *size <= capacity;
1012 }
1013
1014 static void growBuffer(size_t size, void** buffer, size_t* capacity)
1015 {
1016     if (*buffer)
1017         fastFree(*buffer);
1018
1019     *capacity = WTF::roundUpToMultipleOf(WTF::pageSize(), size * 2);
1020     *buffer = fastMalloc(*capacity);
1021 }
1022
1023 void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
1024 {
1025     size_t size;
1026     size_t capacity = 0;
1027     void* buffer = nullptr;
1028     LockHolder lock(m_registeredThreadsMutex);
1029     while (!tryCopyOtherThreadStacks(lock, buffer, capacity, &size))
1030         growBuffer(size, &buffer, &capacity);
1031
1032     if (!buffer)
1033         return;
1034
1035     conservativeRoots.add(buffer, static_cast<char*>(buffer) + size, jitStubRoutines, codeBlocks);
1036     fastFree(buffer);
1037 }
1038
1039 } // namespace JSC