Improve use of NeverDestroyed
[WebKit-https.git] / Source / JavaScriptCore / heap / MachineStackMarker.cpp
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *  Copyright (C) 2009 Acision BV. All rights reserved.
5  *
6  *  This library is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU Lesser General Public
8  *  License as published by the Free Software Foundation; either
9  *  version 2 of the License, or (at your option) any later version.
10  *
11  *  This library is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  Lesser General Public License for more details.
15  *
16  *  You should have received a copy of the GNU Lesser General Public
17  *  License along with this library; if not, write to the Free Software
18  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  */
21
22 #include "config.h"
23 #include "MachineStackMarker.h"
24
25 #include "ConservativeRoots.h"
26 #include "GPRInfo.h"
27 #include "Heap.h"
28 #include "JSArray.h"
29 #include "JSCInlines.h"
30 #include "LLIntPCRanges.h"
31 #include "MacroAssembler.h"
32 #include "VM.h"
33 #include <setjmp.h>
34 #include <stdlib.h>
35 #include <wtf/MainThread.h>
36 #include <wtf/StdLibExtras.h>
37
38 using namespace WTF;
39
40 namespace JSC {
41
42 class ActiveMachineThreadsManager;
43 static ActiveMachineThreadsManager& activeMachineThreadsManager();
44
45 class ActiveMachineThreadsManager {
46     WTF_MAKE_NONCOPYABLE(ActiveMachineThreadsManager);
47 public:
48
49     class Locker {
50     public:
51         Locker(ActiveMachineThreadsManager& manager)
52             : m_locker(manager.m_lock)
53         {
54         }
55
56     private:
57         LockHolder m_locker;
58     };
59
60     void add(MachineThreads* machineThreads)
61     {
62         LockHolder managerLock(m_lock);
63         m_set.add(machineThreads);
64     }
65
66     void THREAD_SPECIFIC_CALL remove(MachineThreads* machineThreads)
67     {
68         LockHolder managerLock(m_lock);
69         auto recordedMachineThreads = m_set.take(machineThreads);
70         RELEASE_ASSERT(recordedMachineThreads == machineThreads);
71     }
72
73     bool contains(MachineThreads* machineThreads)
74     {
75         return m_set.contains(machineThreads);
76     }
77
78 private:
79     typedef HashSet<MachineThreads*> MachineThreadsSet;
80
81     ActiveMachineThreadsManager() { }
82     
83     Lock m_lock;
84     MachineThreadsSet m_set;
85
86     friend ActiveMachineThreadsManager& activeMachineThreadsManager();
87 };
88
89 static ActiveMachineThreadsManager& activeMachineThreadsManager()
90 {
91     static std::once_flag initializeManagerOnceFlag;
92     static ActiveMachineThreadsManager* manager = nullptr;
93
94     std::call_once(initializeManagerOnceFlag, [] {
95         manager = new ActiveMachineThreadsManager();
96     });
97     return *manager;
98 }
99
// These macros pre-load recognizable sentinel values (0xc0defefe000000NN,
// where NN identifies the fill site) into callee-saved / caller-saved
// registers on X86_64 Darwin, so register dumps from crashes in
// MachineThreads::MachineThreads() are easier to attribute to a specific
// point in the constructor. On other configurations they expand to nothing.
#if CPU(X86_64) && OS(DARWIN)
#define FILL_CALLEE_SAVES_FOR_CRASH_INFO(number)     \
    asm volatile(                                    \
        "movq $0xc0defefe000000" number ", %%rbx;" \
        "movq $0xc0defefe000000" number ", %%r12;" \
        "movq $0xc0defefe000000" number ", %%r13;" \
        "movq $0xc0defefe000000" number ", %%r14;" \
        "movq $0xc0defefe000000" number ", %%r15;" \
        :                                            \
        :                                            \
        : "%rbx", "%r12", "%r13", "%r14", "%r15"     \
    );

#define FILL_CALLER_SAVES_FOR_CRASH_INFO(number)     \
    asm volatile(                                    \
        "movq $0xc0defefe000000" number ", %%rax;" \
        "movq $0xc0defefe000000" number ", %%rdi;" \
        "movq $0xc0defefe000000" number ", %%rsi;" \
        "movq $0xc0defefe000000" number ", %%rdx;" \
        "movq $0xc0defefe000000" number ", %%rcx;" \
        "movq $0xc0defefe000000" number ", %%r8;"  \
        "movq $0xc0defefe000000" number ", %%r9;"  \
        "movq $0xc0defefe000000" number ", %%r10;" \
        "movq $0xc0defefe000000" number ", %%r11;" \
        :                                            \
        :                                            \
        : "%rax", "%rdi", "%rsi", "%rdx", "%rcx", "%r8", "%r9", "%r10", "%r11" \
    );
#else
#define FILL_CALLEE_SAVES_FOR_CRASH_INFO(number)
#define FILL_CALLER_SAVES_FOR_CRASH_INFO(number)
#endif
132
// Sets up a registry of threads whose stacks should be conservatively
// scanned, and registers it with the global ActiveMachineThreadsManager.
MachineThreads::MachineThreads()
    : m_registeredThreads()
    , m_threadSpecificForMachineThreads(0)
{
    // The FILL_* macros stamp sentinel values into registers between steps
    // so a crash here can be attributed from a register dump (Darwin only).
    FILL_CALLEE_SAVES_FOR_CRASH_INFO("01");
    // removeThread runs at exit of every thread that registered with us.
    threadSpecificKeyCreate(&m_threadSpecificForMachineThreads, removeThread);
    FILL_CALLEE_SAVES_FOR_CRASH_INFO("02");
    activeMachineThreadsManager().add(this);
    FILL_CALLER_SAVES_FOR_CRASH_INFO("03");
}
143
144 MachineThreads::~MachineThreads()
145 {
146     activeMachineThreadsManager().remove(this);
147     threadSpecificKeyDelete(m_threadSpecificForMachineThreads);
148
149     LockHolder registeredThreadsLock(m_registeredThreadsMutex);
150     for (MachineThread* current = m_registeredThreads.head(); current;) {
151         MachineThread* next = current->next();
152         delete current;
153         current = next;
154     }
155 }
156
157 void MachineThreads::addCurrentThread()
158 {
159     if (threadSpecificGet(m_threadSpecificForMachineThreads)) {
160 #ifndef NDEBUG
161         LockHolder lock(m_registeredThreadsMutex);
162         ASSERT(threadSpecificGet(m_threadSpecificForMachineThreads) == this);
163 #endif
164         return;
165     }
166
167     MachineThread* thread = new MachineThread();
168     threadSpecificSet(m_threadSpecificForMachineThreads, this);
169
170     LockHolder lock(m_registeredThreadsMutex);
171
172     m_registeredThreads.append(thread);
173 }
174
175 auto MachineThreads::machineThreadForCurrentThread() -> MachineThread*
176 {
177     LockHolder lock(m_registeredThreadsMutex);
178     ThreadIdentifier id = currentThread();
179     for (MachineThread* thread = m_registeredThreads.head(); thread; thread = thread->next()) {
180         if (thread->threadID() == id)
181             return thread;
182     }
183
184     RELEASE_ASSERT_NOT_REACHED();
185     return nullptr;
186 }
187
188 void THREAD_SPECIFIC_CALL MachineThreads::removeThread(void* p)
189 {
190     auto& manager = activeMachineThreadsManager();
191     ActiveMachineThreadsManager::Locker lock(manager);
192     auto machineThreads = static_cast<MachineThreads*>(p);
193     if (manager.contains(machineThreads)) {
194         // There's a chance that the MachineThreads registry that this thread
195         // was registered with was already destructed, and another one happened
196         // to be instantiated at the same address. Hence, this thread may or
197         // may not be found in this MachineThreads registry. We only need to
198         // do a removal if this thread is found in it.
199
200 #if OS(WINDOWS)
201         // On Windows the thread specific destructor is also called when the
202         // main thread is exiting. This may lead to the main thread waiting
203         // forever for the machine thread lock when exiting, if the sampling
204         // profiler thread was terminated by the system while holding the
205         // machine thread lock.
206         if (WTF::isMainThread())
207             return;
208 #endif
209
210         machineThreads->removeThreadIfFound(currentThread());
211     }
212 }
213
214 void MachineThreads::removeThreadIfFound(ThreadIdentifier id)
215 {
216     LockHolder lock(m_registeredThreadsMutex);
217     for (MachineThread* current = m_registeredThreads.head(); current; current = current->next()) {
218         if (current->threadID() == id) {
219             m_registeredThreads.remove(current);
220             delete current;
221             break;
222         }
223     }
224 }
225
226 SUPPRESS_ASAN
227 void MachineThreads::gatherFromCurrentThread(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, CurrentThreadState& currentThreadState)
228 {
229     if (currentThreadState.registerState) {
230         void* registersBegin = currentThreadState.registerState;
231         void* registersEnd = reinterpret_cast<void*>(roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(currentThreadState.registerState + 1)));
232         conservativeRoots.add(registersBegin, registersEnd, jitStubRoutines, codeBlocks);
233     }
234
235     conservativeRoots.add(currentThreadState.stackTop, currentThreadState.stackOrigin, jitStubRoutines, codeBlocks);
236 }
237
238 MachineThreads::MachineThread::MachineThread()
239     : m_thread(WTF::Thread::current())
240 {
241     auto stackBounds = wtfThreadData().stack();
242     m_stackBase = stackBounds.origin();
243     m_stackEnd = stackBounds.end();
244 }
245
246 size_t MachineThreads::MachineThread::getRegisters(MachineThread::Registers& registers)
247 {
248     WTF::PlatformRegisters& regs = registers.regs;
249     return m_thread->getRegisters(regs);
250 }
251
// Extracts the stack pointer from the captured platform register state.
void* MachineThreads::MachineThread::Registers::stackPointer() const
{
    return MachineContext::stackPointer(regs);
}
256
#if ENABLE(SAMPLING_PROFILER)
// Extracts the frame pointer from the captured register state. Only needed
// by the sampling profiler; platforms without a way to read another thread's
// registers fail at compile time rather than silently returning garbage.
void* MachineThreads::MachineThread::Registers::framePointer() const
{
#if OS(WINDOWS) || HAVE(MACHINE_CONTEXT)
    return MachineContext::framePointer(regs);
#else
#error Need a way to get the frame pointer for another thread on this platform
#endif
}

// Extracts the instruction pointer from the captured register state.
void* MachineThreads::MachineThread::Registers::instructionPointer() const
{
#if OS(WINDOWS) || HAVE(MACHINE_CONTEXT)
    return MachineContext::instructionPointer(regs);
#else
#error Need a way to get the instruction pointer for another thread on this platform
#endif
}

// Extracts the LLInt program counter from the captured register state.
void* MachineThreads::MachineThread::Registers::llintPC() const
{
    // LLInt uses regT4 as PC.
#if OS(WINDOWS) || HAVE(MACHINE_CONTEXT)
    return MachineContext::llintInstructionPointer(regs);
#else
#error Need a way to get the LLIntPC for another thread on this platform
#endif
}
#endif // ENABLE(SAMPLING_PROFILER)
286
287 static inline int osRedZoneAdjustment()
288 {
289     int redZoneAdjustment = 0;
290 #if !OS(WINDOWS)
291 #if CPU(X86_64)
292     // See http://people.freebsd.org/~obrien/amd64-elf-abi.pdf Section 3.2.2.
293     redZoneAdjustment = -128;
294 #elif CPU(ARM64)
295     // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html#//apple_ref/doc/uid/TP40013702-SW7
296     redZoneAdjustment = -128;
297 #endif
298 #endif // !OS(WINDOWS)
299     return redZoneAdjustment;
300 }
301
302 std::pair<void*, size_t> MachineThreads::MachineThread::captureStack(void* stackTop)
303 {
304     char* begin = reinterpret_cast_ptr<char*>(m_stackBase);
305     char* end = bitwise_cast<char*>(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(stackTop)));
306     ASSERT(begin >= end);
307
308     char* endWithRedZone = end + osRedZoneAdjustment();
309     ASSERT(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(endWithRedZone)) == reinterpret_cast<uintptr_t>(endWithRedZone));
310
311     if (endWithRedZone < m_stackEnd)
312         endWithRedZone = reinterpret_cast_ptr<char*>(m_stackEnd);
313
314     std::swap(begin, endWithRedZone);
315     return std::make_pair(begin, endWithRedZone - begin);
316 }
317
318 SUPPRESS_ASAN
319 static void copyMemory(void* dst, const void* src, size_t size)
320 {
321     size_t dstAsSize = reinterpret_cast<size_t>(dst);
322     size_t srcAsSize = reinterpret_cast<size_t>(src);
323     RELEASE_ASSERT(dstAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(dstAsSize));
324     RELEASE_ASSERT(srcAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(srcAsSize));
325     RELEASE_ASSERT(size == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(size));
326
327     intptr_t* dstPtr = reinterpret_cast<intptr_t*>(dst);
328     const intptr_t* srcPtr = reinterpret_cast<const intptr_t*>(src);
329     size /= sizeof(intptr_t);
330     while (size--)
331         *dstPtr++ = *srcPtr++;
332 }
333     
334
335
// This function must not call malloc(), free(), or any other function that might
// acquire a lock. Since 'thread' is suspended, trying to acquire a lock
// will deadlock if 'thread' holds that lock.
// This function, specifically the memory copying, was causing problems with Address Sanitizer in
// apps. Since we cannot blacklist the system memcpy we must use our own naive implementation,
// copyMemory, for ASan to work on either instrumented or non-instrumented builds. This is not a
// significant performance loss as tryCopyOtherThreadStack is only called as part of an O(heapsize)
// operation. As the heap is generally much larger than the stack the performance hit is minimal.
// See: https://bugs.webkit.org/show_bug.cgi?id=146297
//
// Appends 'thread''s register state and stack contents to 'buffer' at offset
// *size. *size is always advanced by the amount that WOULD have been written,
// even when 'buffer' is too small, so the caller can learn the required
// capacity (see tryCopyOtherThreadStacks / growBuffer).
void MachineThreads::tryCopyOtherThreadStack(MachineThread* thread, void* buffer, size_t capacity, size_t* size)
{
    MachineThread::Registers registers;
    size_t registersSize = thread->getRegisters(registers);

    // This is a workaround for <rdar://problem/27607384>. libdispatch recycles work
    // queue threads without running pthread exit destructors. This can cause us to scan a
    // thread during work queue initialization, when the stack pointer is null.
    if (UNLIKELY(!registers.stackPointer())) {
        *size = 0;
        return;
    }

    std::pair<void*, size_t> stack = thread->captureStack(registers.stackPointer());

    // Copy only when everything fits; either way, keep accumulating *size.
    bool canCopy = *size + registersSize + stack.second <= capacity;

    if (canCopy)
        copyMemory(static_cast<char*>(buffer) + *size, &registers, registersSize);
    *size += registersSize;

    if (canCopy)
        copyMemory(static_cast<char*>(buffer) + *size, stack.first, stack.second);
    *size += stack.second;
}
370
// Suspends every registered thread other than the caller, copies their
// register state and stacks into 'buffer', and resumes them. Returns true if
// everything fit within 'capacity'; on false, *size tells the caller how big
// a buffer is needed (see gatherConservativeRoots' retry loop).
bool MachineThreads::tryCopyOtherThreadStacks(const AbstractLocker&, void* buffer, size_t capacity, size_t* size)
{
    // Prevent two VMs from suspending each other's threads at the same time,
    // which can cause deadlock: <rdar://problem/20300842>.
    static StaticLock mutex;
    std::lock_guard<StaticLock> lock(mutex);

    *size = 0;

    ThreadIdentifier id = currentThread();
    int numberOfThreads = 0; // Using 0 to denote that we haven't counted the number of threads yet.
    int index = 1;
    DoublyLinkedList<MachineThread> threadsToBeDeleted;

    // Pass 1: suspend every other registered thread. On Darwin, suspension
    // can fail for a stale record; such records are unlinked here and
    // destroyed after everything has been resumed.
    for (MachineThread* thread = m_registeredThreads.head(); thread; index++) {
        if (thread->threadID() != id) {
            auto result = thread->suspend();
#if OS(DARWIN)
            if (!result) {
                if (!numberOfThreads)
                    numberOfThreads = m_registeredThreads.size();

                ASSERT(result.error() != KERN_SUCCESS);

                WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                    "JavaScript garbage collection encountered an invalid thread (err 0x%x): Thread [%d/%d: %p] id %u.",
                    result.error(), index, numberOfThreads, thread, thread->threadID());

                // Put the invalid thread on the threadsToBeDeleted list.
                // We can't just delete it here because we have suspended other
                // threads, and they may still be holding the C heap lock which
                // we need for deleting the invalid thread. Hence, we need to
                // defer the deletion till after we have resumed all threads.
                MachineThread* nextThread = thread->next();
                m_registeredThreads.remove(thread);
                threadsToBeDeleted.append(thread);
                thread = nextThread;
                continue;
            }
#else
            UNUSED_PARAM(numberOfThreads);
            ASSERT_UNUSED(result, result);
#endif
        }
        thread = thread->next();
    }

    // Pass 2: copy each suspended thread's registers and stack; *size keeps
    // growing even when the buffer is too small (see tryCopyOtherThreadStack).
    for (MachineThread* thread = m_registeredThreads.head(); thread; thread = thread->next()) {
        if (thread->threadID() != id)
            tryCopyOtherThreadStack(thread, buffer, capacity, size);
    }

    // Pass 3: resume everything we suspended.
    for (MachineThread* thread = m_registeredThreads.head(); thread; thread = thread->next()) {
        if (thread->threadID() != id)
            thread->resume();
    }

    // Pass 4: now that all threads run again, it is safe to free the invalid
    // records collected in pass 1.
    for (MachineThread* thread = threadsToBeDeleted.head(); thread; ) {
        MachineThread* nextThread = thread->next();
        delete thread;
        thread = nextThread;
    }
    
    return *size <= capacity;
}
436
// Replaces (not reallocs) *buffer with one big enough for 'size' bytes plus
// headroom: the old contents are a failed partial copy and will be fully
// rewritten by the next tryCopyOtherThreadStacks attempt.
static void growBuffer(size_t size, void** buffer, size_t* capacity)
{
    if (*buffer)
        fastFree(*buffer);

    // Double the required size and round up to a whole number of pages.
    *capacity = WTF::roundUpToMultipleOf(WTF::pageSize(), size * 2);
    *buffer = fastMalloc(*capacity);
}
445
446 void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, CurrentThreadState* currentThreadState)
447 {
448     if (currentThreadState)
449         gatherFromCurrentThread(conservativeRoots, jitStubRoutines, codeBlocks, *currentThreadState);
450
451     size_t size;
452     size_t capacity = 0;
453     void* buffer = nullptr;
454     LockHolder lock(m_registeredThreadsMutex);
455     while (!tryCopyOtherThreadStacks(lock, buffer, capacity, &size))
456         growBuffer(size, &buffer, &capacity);
457
458     if (!buffer)
459         return;
460
461     conservativeRoots.add(buffer, static_cast<char*>(buffer) + size, jitStubRoutines, codeBlocks);
462     fastFree(buffer);
463 }
464
// Captures the calling thread's current state into `state` (via the
// DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE macro) and invokes `lambda` with
// it. NEVER_INLINE keeps this frame distinct from the caller's so the
// captured state refers to a real, separate stack frame.
NEVER_INLINE int callWithCurrentThreadState(const ScopedLambda<void(CurrentThreadState&)>& lambda)
{
    DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE(state);
    lambda(state);
    return 42; // Suppress tail call optimization.
}
471
472 } // namespace JSC