Unreviewed, rolling out r209766.
[WebKit.git] / Source / JavaScriptCore / heap / MachineStackMarker.cpp
1 /*
2  *  Copyright (C) 2003-2009, 2015-2016 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *  Copyright (C) 2009 Acision BV. All rights reserved.
5  *
6  *  This library is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU Lesser General Public
8  *  License as published by the Free Software Foundation; either
9  *  version 2 of the License, or (at your option) any later version.
10  *
11  *  This library is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  Lesser General Public License for more details.
15  *
16  *  You should have received a copy of the GNU Lesser General Public
17  *  License along with this library; if not, write to the Free Software
18  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  */
21
22 #include "config.h"
23 #include "MachineStackMarker.h"
24
25 #include "ConservativeRoots.h"
26 #include "GPRInfo.h"
27 #include "Heap.h"
28 #include "JSArray.h"
29 #include "JSCInlines.h"
30 #include "LLIntPCRanges.h"
31 #include "MacroAssembler.h"
32 #include "VM.h"
33 #include <setjmp.h>
34 #include <stdlib.h>
35 #include <wtf/StdLibExtras.h>
36
37 #if OS(DARWIN)
38
39 #include <mach/mach_init.h>
40 #include <mach/mach_port.h>
41 #include <mach/task.h>
42 #include <mach/thread_act.h>
43 #include <mach/vm_map.h>
44
45 #elif OS(WINDOWS)
46
47 #include <windows.h>
48 #include <malloc.h>
49
50 #elif OS(UNIX)
51
52 #include <sys/mman.h>
53 #include <unistd.h>
54
55 #if OS(SOLARIS)
56 #include <thread.h>
57 #else
58 #include <pthread.h>
59 #endif
60
61 #if HAVE(PTHREAD_NP_H)
62 #include <pthread_np.h>
63 #endif
64
65 #if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
66 #include <signal.h>
67
68 // We use SIGUSR2 to suspend and resume machine threads in JavaScriptCore.
69 static const int SigThreadSuspendResume = SIGUSR2;
70 static StaticLock globalSignalLock;
71 thread_local static std::atomic<JSC::MachineThreads::Thread*> threadLocalCurrentThread;
72
// Signal handler shared by the suspend and resume paths on POSIX platforms.
// First delivery (suspend): saves the interrupted thread's machine context,
// signals the suspend caller via the semaphore, and parks the thread in
// sigsuspend. Second delivery (resume): returns immediately, which unwinds
// the parked sigsuspend. Only async-signal-safe operations may be used here.
static void pthreadSignalHandlerSuspendResume(int, siginfo_t*, void* ucontext)
{
    // Touching thread local atomic types from signal handlers is allowed.
    JSC::MachineThreads::Thread* thread = threadLocalCurrentThread.load();

    if (thread->suspended.load(std::memory_order_acquire)) {
        // This is signal handler invocation that is intended to be used to resume sigsuspend.
        // So this handler invocation itself should not process.
        //
        // When signal comes, first, the system calls signal handler. And later, sigsuspend will be resumed. Signal handler invocation always precedes.
        // So, the problem never happens that suspended.store(true, ...) will be executed before the handler is called.
        // http://pubs.opengroup.org/onlinepubs/009695399/functions/sigsuspend.html
        return;
    }

    ucontext_t* userContext = static_cast<ucontext_t*>(ucontext);
#if CPU(PPC)
    // On PPC the register set sits behind an extra level of indirection.
    thread->suspendedMachineContext = *userContext->uc_mcontext.uc_regs;
#else
    thread->suspendedMachineContext = userContext->uc_mcontext;
#endif

    // Allow suspend caller to see that this thread is suspended.
    // sem_post is async-signal-safe function. It means that we can call this from a signal handler.
    // http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html#tag_02_04_03
    //
    // And sem_post emits memory barrier that ensures that suspendedMachineContext is correctly saved.
    // http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_11
    sem_post(&thread->semaphoreForSuspendResume);

    // Reaching here, SigThreadSuspendResume is blocked in this handler (this is configured by sigaction's sa_mask).
    // So before calling sigsuspend, SigThreadSuspendResume to this thread is deferred. This ensures that the handler is not executed recursively.
    sigset_t blockedSignalSet;
    sigfillset(&blockedSignalSet);
    sigdelset(&blockedSignalSet, SigThreadSuspendResume);
    sigsuspend(&blockedSignalSet);

    // Allow resume caller to see that this thread is resumed.
    sem_post(&thread->semaphoreForSuspendResume);
}
113 #endif // USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
114
115 #endif
116
117 using namespace WTF;
118
119 namespace JSC {
120
121 using Thread = MachineThreads::Thread;
122
123 class ActiveMachineThreadsManager;
124 static ActiveMachineThreadsManager& activeMachineThreadsManager();
125
126 class ActiveMachineThreadsManager {
127     WTF_MAKE_NONCOPYABLE(ActiveMachineThreadsManager);
128 public:
129
130     class Locker {
131     public:
132         Locker(ActiveMachineThreadsManager& manager)
133             : m_locker(manager.m_lock)
134         {
135         }
136
137     private:
138         LockHolder m_locker;
139     };
140
141     void add(MachineThreads* machineThreads)
142     {
143         LockHolder managerLock(m_lock);
144         m_set.add(machineThreads);
145     }
146
147     void THREAD_SPECIFIC_CALL remove(MachineThreads* machineThreads)
148     {
149         LockHolder managerLock(m_lock);
150         auto recordedMachineThreads = m_set.take(machineThreads);
151         RELEASE_ASSERT(recordedMachineThreads = machineThreads);
152     }
153
154     bool contains(MachineThreads* machineThreads)
155     {
156         return m_set.contains(machineThreads);
157     }
158
159 private:
160     typedef HashSet<MachineThreads*> MachineThreadsSet;
161
162     ActiveMachineThreadsManager() { }
163     
164     Lock m_lock;
165     MachineThreadsSet m_set;
166
167     friend ActiveMachineThreadsManager& activeMachineThreadsManager();
168 };
169
170 static ActiveMachineThreadsManager& activeMachineThreadsManager()
171 {
172     static std::once_flag initializeManagerOnceFlag;
173     static ActiveMachineThreadsManager* manager = nullptr;
174
175     std::call_once(initializeManagerOnceFlag, [] {
176         manager = new ActiveMachineThreadsManager();
177     });
178     return *manager;
179 }
180     
// Returns the calling thread's platform-level thread identifier: a Mach
// thread port on Darwin, a thread id on Windows, and a pthread_t elsewhere.
static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return GetCurrentThreadId();
#elif USE(PTHREADS)
    return pthread_self();
#endif
}
191
// Constructs an empty thread registry. The thread-specific key arranges for
// removeThread() to run when any registered thread exits, and the global
// manager records this instance so removeThread() can tell whether the
// registry is still alive at that point.
MachineThreads::MachineThreads(Heap* heap)
    : m_registeredThreads(0)
    , m_threadSpecificForMachineThreads(0)
#if !ASSERT_DISABLED
    , m_heap(heap)
#endif
{
    UNUSED_PARAM(heap);
    threadSpecificKeyCreate(&m_threadSpecificForMachineThreads, removeThread);
    activeMachineThreadsManager().add(this);
}
203
204 MachineThreads::~MachineThreads()
205 {
206     activeMachineThreadsManager().remove(this);
207     threadSpecificKeyDelete(m_threadSpecificForMachineThreads);
208
209     LockHolder registeredThreadsLock(m_registeredThreadsMutex);
210     for (Thread* t = m_registeredThreads; t;) {
211         Thread* next = t->next;
212         delete t;
213         t = next;
214     }
215 }
216
217 Thread* MachineThreads::Thread::createForCurrentThread()
218 {
219     auto stackBounds = wtfThreadData().stack();
220     return new Thread(getCurrentPlatformThread(), stackBounds.origin(), stackBounds.end());
221 }
222
// Compares this registered thread against a raw platform thread handle.
bool MachineThreads::Thread::operator==(const PlatformThread& other) const
{
#if OS(DARWIN) || OS(WINDOWS)
    return platformThread == other;
#elif USE(PTHREADS)
    // pthread_t is opaque; pthread_equal() is the only portable comparison.
    return !!pthread_equal(platformThread, other);
#else
#error Need a way to compare threads on this platform
#endif
}
233
// Registers the calling thread with this registry, idempotently: if this
// thread's thread-specific slot already points at this registry, nothing is
// done. Otherwise a new Thread record is prepended to the list.
void MachineThreads::addCurrentThread()
{
    ASSERT(!m_heap->vm()->hasExclusiveThread() || m_heap->vm()->exclusiveThread() == std::this_thread::get_id());

    if (threadSpecificGet(m_threadSpecificForMachineThreads)) {
#ifndef NDEBUG
        LockHolder lock(m_registeredThreadsMutex);
        ASSERT(threadSpecificGet(m_threadSpecificForMachineThreads) == this);
#endif
        return;
    }

    // Set the thread-specific slot before taking the lock; its destructor
    // callback (removeThread) is what unregisters this thread on exit.
    Thread* thread = Thread::createForCurrentThread();
    threadSpecificSet(m_threadSpecificForMachineThreads, this);

    LockHolder lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}
254
255 Thread* MachineThreads::machineThreadForCurrentThread()
256 {
257     LockHolder lock(m_registeredThreadsMutex);
258     PlatformThread platformThread = getCurrentPlatformThread();
259     for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
260         if (*thread == platformThread)
261             return thread;
262     }
263
264     RELEASE_ASSERT_NOT_REACHED();
265     return nullptr;
266 }
267
268 void THREAD_SPECIFIC_CALL MachineThreads::removeThread(void* p)
269 {
270     auto& manager = activeMachineThreadsManager();
271     ActiveMachineThreadsManager::Locker lock(manager);
272     auto machineThreads = static_cast<MachineThreads*>(p);
273     if (manager.contains(machineThreads)) {
274         // There's a chance that the MachineThreads registry that this thread
275         // was registered with was already destructed, and another one happened
276         // to be instantiated at the same address. Hence, this thread may or
277         // may not be found in this MachineThreads registry. We only need to
278         // do a removal if this thread is found in it.
279         machineThreads->removeThreadIfFound(getCurrentPlatformThread());
280     }
281 }
282
283 template<typename PlatformThread>
284 void MachineThreads::removeThreadIfFound(PlatformThread platformThread)
285 {
286     LockHolder lock(m_registeredThreadsMutex);
287     Thread* t = m_registeredThreads;
288     if (*t == platformThread) {
289         m_registeredThreads = m_registeredThreads->next;
290         delete t;
291     } else {
292         Thread* last = m_registeredThreads;
293         for (t = m_registeredThreads->next; t; t = t->next) {
294             if (*t == platformThread) {
295                 last->next = t->next;
296                 break;
297             }
298             last = t;
299         }
300         delete t;
301     }
302 }
303
// Records a thread's identity and stack bounds, and performs the per-platform
// setup needed to later suspend it: duplicating a real thread handle on
// Windows, or (POSIX, non-Darwin) installing the process-wide suspend/resume
// signal handler, publishing this Thread* in thread-local storage for that
// handler, and initializing the handshake semaphore.
MachineThreads::Thread::Thread(const PlatformThread& platThread, void* base, void* end)
    : platformThread(platThread)
    , stackBase(base)
    , stackEnd(end)
{
#if OS(WINDOWS)
    ASSERT(platformThread == GetCurrentThreadId());
    // GetCurrentThread() returns a pseudo-handle; duplicate it into a real
    // handle usable from other threads for SuspendThread/GetThreadContext.
    bool isSuccessful =
        DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
            &platformThreadHandle, 0, FALSE, DUPLICATE_SAME_ACCESS);
    RELEASE_ASSERT(isSuccessful);
#elif USE(PTHREADS) && !OS(DARWIN)
    threadLocalCurrentThread.store(this);

    // Signal handlers are process global configuration.
    static std::once_flag initializeSignalHandler;
    std::call_once(initializeSignalHandler, [] {
        // Intentionally block SigThreadSuspendResume in the handler.
        // SigThreadSuspendResume will be allowed in the handler by sigsuspend.
        struct sigaction action;
        sigemptyset(&action.sa_mask);
        sigaddset(&action.sa_mask, SigThreadSuspendResume);

        action.sa_sigaction = pthreadSignalHandlerSuspendResume;
        action.sa_flags = SA_RESTART | SA_SIGINFO;
        sigaction(SigThreadSuspendResume, &action, 0);
    });

    // Make sure this thread can receive the suspend/resume signal even if it
    // inherited a mask that blocks it.
    sigset_t mask;
    sigemptyset(&mask);
    sigaddset(&mask, SigThreadSuspendResume);
    pthread_sigmask(SIG_UNBLOCK, &mask, 0);

    sem_init(&semaphoreForSuspendResume, /* Only available in this process. */ 0, /* Initial value for the semaphore. */ 0);
#endif
}
340
// Releases the per-platform suspend/resume resources acquired in the
// constructor: the duplicated thread handle on Windows, the handshake
// semaphore on non-Darwin POSIX.
MachineThreads::Thread::~Thread()
{
#if OS(WINDOWS)
    CloseHandle(platformThreadHandle);
#elif USE(PTHREADS) && !OS(DARWIN)
    sem_destroy(&semaphoreForSuspendResume);
#endif
}
349
// Suspends this thread so its stack and registers can be scanned. Returns
// false if the thread could not be suspended (e.g. it already exited).
// Suspension nests via suspendCount; only the first call actually stops the
// thread.
bool MachineThreads::Thread::suspend()
{
#if OS(DARWIN)
    kern_return_t result = thread_suspend(platformThread);
    return result == KERN_SUCCESS;
#elif OS(WINDOWS)
    bool threadIsSuspended = (SuspendThread(platformThreadHandle) != (DWORD)-1);
    ASSERT(threadIsSuspended);
    return threadIsSuspended;
#elif USE(PTHREADS)
    ASSERT_WITH_MESSAGE(getCurrentPlatformThread() != platformThread, "Currently we don't support suspend the current thread itself.");
    {
        // During suspend, suspend or resume should not be executed from the other threads.
        // We use global lock instead of per thread lock.
        // Consider the following case, there are threads A and B.
        // And A attempt to suspend B and B attempt to suspend A.
        // A and B send signals. And later, signals are delivered to A and B.
        // In that case, both will be suspended.
        LockHolder lock(globalSignalLock);
        if (!suspendCount) {
            // Ideally, we would like to use pthread_sigqueue. It allows us to pass the argument to the signal handler.
            // But it can be used in a few platforms, like Linux.
            // Instead, we use Thread* stored in the thread local storage to pass it to the signal handler.
            if (pthread_kill(platformThread, SigThreadSuspendResume) == ESRCH)
                return false;
            // Wait for the handler to save the machine context and park in
            // sigsuspend before reporting success.
            sem_wait(&semaphoreForSuspendResume);
            // Release barrier ensures that this operation is always executed after all the above processing is done.
            suspended.store(true, std::memory_order_release);
        }
        ++suspendCount;
    }
    return true;
#else
#error Need a way to suspend threads on this platform
#endif
}
386
// Resumes a thread previously stopped by suspend(). Decrements the nesting
// count; only the transition from 1 to 0 actually restarts the thread.
void MachineThreads::Thread::resume()
{
#if OS(DARWIN)
    thread_resume(platformThread);
#elif OS(WINDOWS)
    ResumeThread(platformThreadHandle);
#elif USE(PTHREADS)
    {
        // During resume, suspend or resume should not be executed from the other threads.
        LockHolder lock(globalSignalLock);
        if (suspendCount == 1) {
            // When allowing SigThreadSuspendResume interrupt in the signal handler by sigsuspend and SigThreadSuspendResume is actually issued,
            // the signal handler itself will be called once again.
            // There are several ways to distinguish the handler invocation for suspend and resume.
            // 1. Use different signal numbers. And check the signal number in the handler.
            // 2. Use some arguments to distinguish suspend and resume in the handler. If pthread_sigqueue can be used, we can take this.
            // 3. Use thread local storage with atomic variables in the signal handler.
            // In this implementaiton, we take (3). suspended flag is used to distinguish it.
            if (pthread_kill(platformThread, SigThreadSuspendResume) == ESRCH)
                return;
            // Wait until the target thread has actually left sigsuspend.
            sem_wait(&semaphoreForSuspendResume);
            // Release barrier ensures that this operation is always executed after all the above processing is done.
            suspended.store(false, std::memory_order_release);
        }
        --suspendCount;
    }
#else
#error Need a way to resume threads on this platform
#endif
}
417
// Captures this (suspended) thread's register state into |registers| and
// returns how many bytes of it are meaningful for conservative scanning.
// On the PTHREADS path the machine context saved by the suspend signal
// handler is copied instead, and 0 is returned (no raw register bytes).
size_t MachineThreads::Thread::getRegisters(Thread::Registers& registers)
{
    Thread::Registers::PlatformRegisters& regs = registers.regs;
#if OS(DARWIN)
    // Select the Mach thread-state flavor and size for the target CPU.
#if CPU(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif CPU(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif CPU(PPC) 
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif CPU(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif CPU(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#elif CPU(ARM64)
    unsigned user_count = ARM_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE64;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(uintptr_t);
// end OS(DARWIN)

#elif OS(WINDOWS)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
    GetThreadContext(platformThreadHandle, &regs);
    return sizeof(CONTEXT);
#elif USE(PTHREADS)
    // Also capture the thread's pthread attributes so stackPointer() can fall
    // back to the stack region when no machine context field applies.
    pthread_attr_init(&regs.attribute);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
#if !OS(OPENBSD)
    // e.g. on FreeBSD 5.4, neundorf@kde.org
    pthread_attr_get_np(platformThread, &regs.attribute);
#endif
#else
    // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
    pthread_getattr_np(platformThread, &regs.attribute);
#endif
    regs.machineContext = suspendedMachineContext;
    return 0;
#else
#error Need a way to get thread registers on this platform
#endif
}
474
// Extracts the stack pointer from the captured register set. Which field
// holds it is entirely platform- and architecture-specific, hence the ladder
// of #if branches. On PTHREADS platforms without a usable machine context,
// the top of the thread's stack region is returned as a conservative bound
// instead of the precise stack pointer.
void* MachineThreads::Thread::Registers::stackPointer() const
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#elif CPU(ARM64)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif OS(WINDOWS)

#if CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.Sp);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.IntSp);
#elif CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.Rsp);
#else
#error Unknown Architecture
#endif

#elif USE(PTHREADS)

#if OS(FREEBSD) && ENABLE(JIT)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rsp);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_SP]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_sp);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[29]);
#else
#error Unknown Architecture
#endif

#elif defined(__GLIBC__) && ENABLE(JIT)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_ESP]);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RSP]);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_sp);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.sp);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[29]);
#else
#error Unknown Architecture
#endif

#else
    // Fallback: derive the top of the stack region from the thread's
    // attributes captured in getRegisters().
    void* stackBase = 0;
    size_t stackSize = 0;
#if OS(OPENBSD)
    stack_t ss;
    int rc = pthread_stackseg_np(pthread_self(), &ss);
    stackBase = (void*)((size_t) ss.ss_sp - ss.ss_size);
    stackSize = ss.ss_size;
#else
    int rc = pthread_attr_getstack(&regs.attribute, &stackBase, &stackSize);
#endif
    (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
    ASSERT(stackBase);
    return static_cast<char*>(stackBase) + stackSize;
#endif

#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
578
579 #if ENABLE(SAMPLING_PROFILER)
// Extracts the frame pointer from the captured register set, used by the
// sampling profiler to walk stack frames.
void* MachineThreads::Thread::Registers::framePointer() const
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__ebp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rbp);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__r[11]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>(regs.__x[29]);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

    // NOTE(review): these read the esp/rsp stack-pointer fields even though
    // this accessor is framePointer(); the __DARWIN_UNIX03 branch above uses
    // ebp/rbp. Looks suspicious — confirm against the pre-UNIX03 register
    // struct before relying on this path.
#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif OS(WINDOWS)

#if CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.R11);
#elif CPU(MIPS)
#error Dont know what to do with mips. Do we even need this?
#elif CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.Ebp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.Rbp);
#else
#error Unknown Architecture
#endif

#elif OS(FREEBSD)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_ebp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rbp);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_FP]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_x[29]);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[30]);
#else
#error Unknown Architecture
#endif

#elif defined(__GLIBC__)

// The following sequence depends on glibc's sys/ucontext.h.
#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_EBP]);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RBP]);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_fp);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.regs[29]);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[30]);
#else
#error Unknown Architecture
#endif

#else
#error Need a way to get the frame pointer for another thread on this platform
#endif
}
662
// Extracts the program counter from the captured register set, used by the
// sampling profiler to attribute samples to code.
void* MachineThreads::Thread::Registers::instructionPointer() const
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__eip);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rip);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__pc);
#elif CPU(ARM64)
    return reinterpret_cast<void*>(regs.__pc);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03
#if CPU(X86)
    return reinterpret_cast<void*>(regs.eip);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rip);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif OS(WINDOWS)

#if CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.Pc);
#elif CPU(MIPS)
#error Dont know what to do with mips. Do we even need this?
#elif CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.Eip);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.Rip);
#else
#error Unknown Architecture
#endif

#elif OS(FREEBSD)

#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_eip);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rip);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_PC]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_elr);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_pc);
#else
#error Unknown Architecture
#endif

#elif defined(__GLIBC__)

// The following sequence depends on glibc's sys/ucontext.h.
#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_EIP]);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RIP]);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_pc);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.pc);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.pc);
#else
#error Unknown Architecture
#endif

#else
#error Need a way to get the instruction pointer for another thread on this platform
#endif
}
// Extracts the register that the LLInt interpreter uses as its bytecode PC,
// so the sampling profiler can attribute samples taken inside LLInt code.
// The static_asserts pin each platform's register choice to LLInt::LLIntPC.
void* MachineThreads::Thread::Registers::llintPC() const
{
    // LLInt uses regT4 as PC.
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.__esi);
#elif CPU(X86_64)
    static_assert(LLInt::LLIntPC == X86Registers::r8, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.__r8);
#elif CPU(ARM)
    static_assert(LLInt::LLIntPC == ARMRegisters::r8, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.__r[8]);
#elif CPU(ARM64)
    static_assert(LLInt::LLIntPC == ARM64Registers::x4, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.__x[4]);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03
#if CPU(X86)
    static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.esi);
#elif CPU(X86_64)
    static_assert(LLInt::LLIntPC == X86Registers::r8, "Wrong LLInt PC.");
    return reinterpret_cast<void*>(regs.r8);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif OS(WINDOWS)

#if CPU(ARM)
    static_assert(LLInt::LLIntPC == ARMRegisters::r8, "Wrong LLInt PC.");
    return reinterpret_cast<void*>((uintptr_t) regs.R8);
#elif CPU(MIPS)
#error Dont know what to do with mips. Do we even need this?
#elif CPU(X86)
    static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
    return reinterpret_cast<void*>((uintptr_t) regs.Esi);
#elif CPU(X86_64)
    static_assert(LLInt::LLIntPC == X86Registers::r10, "Wrong LLInt PC.");
    return reinterpret_cast<void*>((uintptr_t) regs.R10);
#else
#error Unknown Architecture
#endif

#elif OS(FREEBSD)

    // NOTE(review): unlike the branches above, the FreeBSD and glibc paths
    // have no static_assert tying the field to LLInt::LLIntPC — verify the
    // register choices stay in sync with LLInt.
#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_esi);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_r8);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_R8]);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_x[4]);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[12]);
#else
#error Unknown Architecture
#endif

#elif defined(__GLIBC__)

// The following sequence depends on glibc's sys/ucontext.h.
#if CPU(X86)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_ESI]);
#elif CPU(X86_64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_R8]);
#elif CPU(ARM)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_r8);
#elif CPU(ARM64)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.regs[4]);
#elif CPU(MIPS)
    return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[12]);
#else
#error Unknown Architecture
#endif

#else
#error Need a way to get the LLIntPC for another thread on this platform
#endif
}
835 #endif // ENABLE(SAMPLING_PROFILER)
836
// Releases resources acquired by getRegisters(): on non-Darwin POSIX, the
// pthread attribute object initialized there. Other platforms capture no
// heap-backed state, so nothing to free.
void MachineThreads::Thread::freeRegisters(Thread::Registers& registers)
{
    Thread::Registers::PlatformRegisters& regs = registers.regs;
#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
    pthread_attr_destroy(&regs.attribute);
#else
    UNUSED_PARAM(regs);
#endif
}
846
847 static inline int osRedZoneAdjustment()
848 {
849     int redZoneAdjustment = 0;
850 #if !OS(WINDOWS)
851 #if CPU(X86_64)
852     // See http://people.freebsd.org/~obrien/amd64-elf-abi.pdf Section 3.2.2.
853     redZoneAdjustment = -128;
854 #elif CPU(ARM64)
855     // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html#//apple_ref/doc/uid/TP40013702-SW7
856     redZoneAdjustment = -128;
857 #endif
858 #endif // !OS(WINDOWS)
859     return redZoneAdjustment;
860 }
861
862 std::pair<void*, size_t> MachineThreads::Thread::captureStack(void* stackTop)
863 {
864     char* begin = reinterpret_cast_ptr<char*>(stackBase);
865     char* end = bitwise_cast<char*>(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(stackTop)));
866     ASSERT(begin >= end);
867
868     char* endWithRedZone = end + osRedZoneAdjustment();
869     ASSERT(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(endWithRedZone)) == reinterpret_cast<uintptr_t>(endWithRedZone));
870
871     if (endWithRedZone < stackEnd)
872         endWithRedZone = reinterpret_cast_ptr<char*>(stackEnd);
873
874     std::swap(begin, endWithRedZone);
875     return std::make_pair(begin, endWithRedZone - begin);
876 }
877
878 SUPPRESS_ASAN
879 static void copyMemory(void* dst, const void* src, size_t size)
880 {
881     size_t dstAsSize = reinterpret_cast<size_t>(dst);
882     size_t srcAsSize = reinterpret_cast<size_t>(src);
883     RELEASE_ASSERT(dstAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(dstAsSize));
884     RELEASE_ASSERT(srcAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(srcAsSize));
885     RELEASE_ASSERT(size == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(size));
886
887     intptr_t* dstPtr = reinterpret_cast<intptr_t*>(dst);
888     const intptr_t* srcPtr = reinterpret_cast<const intptr_t*>(src);
889     size /= sizeof(intptr_t);
890     while (size--)
891         *dstPtr++ = *srcPtr++;
892 }
893     
894
895
896 // This function must not call malloc(), free(), or any other function that might
897 // acquire a lock. Since 'thread' is suspended, trying to acquire a lock
898 // will deadlock if 'thread' holds that lock.
899 // This function, specifically the memory copying, was causing problems with Address Sanitizer in
900 // apps. Since we cannot blacklist the system memcpy we must use our own naive implementation,
901 // copyMemory, for ASan to work on either instrumented or non-instrumented builds. This is not a
902 // significant performance loss as tryCopyOtherThreadStack is only called as part of an O(heapsize)
903 // operation. As the heap is generally much larger than the stack the performance hit is minimal.
904 // See: https://bugs.webkit.org/show_bug.cgi?id=146297
void MachineThreads::tryCopyOtherThreadStack(Thread* thread, void* buffer, size_t capacity, size_t* size)
{
    // Snapshot the suspended thread's register file; its stack pointer tells
    // us how much of the stack is live.
    Thread::Registers registers;
    size_t registersSize = thread->getRegisters(registers);
    std::pair<void*, size_t> stack = thread->captureStack(registers.stackPointer());

    // Decide up front whether both the registers and the stack fit; we either
    // copy both or neither, so the buffer never holds a partial snapshot.
    bool canCopy = *size + registersSize + stack.second <= capacity;

    if (canCopy)
        copyMemory(static_cast<char*>(buffer) + *size, &registers, registersSize);
    *size += registersSize;

    if (canCopy)
        copyMemory(static_cast<char*>(buffer) + *size, stack.first, stack.second);
    *size += stack.second;

    // Note: *size is advanced even when nothing was copied, so the caller
    // learns the total capacity required and can grow the buffer and retry.

    thread->freeRegisters(registers);
}
923
// Suspends every registered thread except the current one, copies each
// thread's registers and stack into 'buffer', then resumes them. Sets *size
// to the number of bytes required; returns true only if everything fit in
// 'capacity' (the caller grows the buffer and retries otherwise). The
// LockHolder parameter attests that m_registeredThreadsMutex is held.
bool MachineThreads::tryCopyOtherThreadStacks(LockHolder&, void* buffer, size_t capacity, size_t* size)
{
    // Prevent two VMs from suspending each other's threads at the same time,
    // which can cause deadlock: <rdar://problem/20300842>.
    static StaticLock mutex;
    std::lock_guard<StaticLock> lock(mutex);

    *size = 0;

    PlatformThread currentPlatformThread = getCurrentPlatformThread();
    int numberOfThreads = 0; // Using 0 to denote that we haven't counted the number of threads yet.
    int index = 1;
    Thread* threadsToBeDeleted = nullptr;

    // Phase 1: suspend all other threads. On Darwin, a thread that cannot be
    // suspended is treated as dead: it is unlinked from m_registeredThreads
    // and queued for deferred deletion.
    Thread* previousThread = nullptr;
    for (Thread* thread = m_registeredThreads; thread; index++) {
        if (*thread != currentPlatformThread) {
            bool success = thread->suspend();
#if OS(DARWIN)
            if (!success) {
                // Count the registered threads lazily, only for the log line.
                if (!numberOfThreads) {
                    for (Thread* countedThread = m_registeredThreads; countedThread; countedThread = countedThread->next)
                        numberOfThreads++;
                }
                
                // Re-do the suspension to get the actual failure result for logging.
                kern_return_t error = thread_suspend(thread->platformThread);
                ASSERT(error != KERN_SUCCESS);

                WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                    "JavaScript garbage collection encountered an invalid thread (err 0x%x): Thread [%d/%d: %p] platformThread %p.",
                    error, index, numberOfThreads, thread, reinterpret_cast<void*>(thread->platformThread));

                // Put the invalid thread on the threadsToBeDeleted list.
                // We can't just delete it here because we have suspended other
                // threads, and they may still be holding the C heap lock which
                // we need for deleting the invalid thread. Hence, we need to
                // defer the deletion till after we have resumed all threads.
                Thread* nextThread = thread->next;
                thread->next = threadsToBeDeleted;
                threadsToBeDeleted = thread;

                // Unlink the invalid thread; previousThread stays put because
                // the node it points past has been removed.
                if (previousThread)
                    previousThread->next = nextThread;
                else
                    m_registeredThreads = nextThread;
                thread = nextThread;
                continue;
            }
#else
            UNUSED_PARAM(numberOfThreads);
            UNUSED_PARAM(previousThread);
            ASSERT_UNUSED(success, success);
#endif
        }
        previousThread = thread;
        thread = thread->next;
    }

    // Phase 2: with every other thread stopped, copy their registers and
    // stacks. This accumulates *size even when the buffer is too small.
    for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
        if (*thread != currentPlatformThread)
            tryCopyOtherThreadStack(thread, buffer, capacity, size);
    }

    // Phase 3: resume everything we suspended.
    for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
        if (*thread != currentPlatformThread)
            thread->resume();
    }

    // Now that all threads are running again (and any lock they hold can be
    // released), it is safe to delete the invalid threads found in phase 1.
    for (Thread* thread = threadsToBeDeleted; thread; ) {
        Thread* nextThread = thread->next;
        delete thread;
        thread = nextThread;
    }
    
    return *size <= capacity;
}
1001
1002 static void growBuffer(size_t size, void** buffer, size_t* capacity)
1003 {
1004     if (*buffer)
1005         fastFree(*buffer);
1006
1007     *capacity = WTF::roundUpToMultipleOf(WTF::pageSize(), size * 2);
1008     *buffer = fastMalloc(*capacity);
1009 }
1010
1011 void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
1012 {
1013     size_t size;
1014     size_t capacity = 0;
1015     void* buffer = nullptr;
1016     LockHolder lock(m_registeredThreadsMutex);
1017     while (!tryCopyOtherThreadStacks(lock, buffer, capacity, &size))
1018         growBuffer(size, &buffer, &capacity);
1019
1020     if (!buffer)
1021         return;
1022
1023     conservativeRoots.add(buffer, static_cast<char*>(buffer) + size, jitStubRoutines, codeBlocks);
1024     fastFree(buffer);
1025 }
1026
1027 } // namespace JSC