/*
 *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *  Copyright (C) 2009 Acision BV. All rights reserved.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "MachineStackMarker.h"

#include "ConservativeRoots.h"
#include "MachineContext.h"
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/BitVector.h>
#include <wtf/PageBlock.h>
#include <wtf/StdLibExtras.h>

using namespace WTF;

namespace JSC {

MachineThreads::MachineThreads()
    : m_threadGroup(ThreadGroup::create())
{
}

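// Conservatively scans the current thread's saved register state (when one is provided)
// and its stack, from the current stack top up to the stack origin, for possible GC roots.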
SUPPRESS_ASAN
void MachineThreads::gatherFromCurrentThread(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, CurrentThreadState& currentThreadState)
{
    if (currentThreadState.registerState) {
        void* registersBegin = currentThreadState.registerState;
        void* registersEnd = reinterpret_cast<void*>(roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(currentThreadState.registerState + 1)));
        conservativeRoots.add(registersBegin, registersEnd, jitStubRoutines, codeBlocks);
    }

    conservativeRoots.add(currentThreadState.stackTop, currentThreadState.stackOrigin, jitStubRoutines, codeBlocks);
}

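// The x86_64 System V and ARM64 ABIs let functions use a 128-byte "red zone" below the
// stack pointer without adjusting it, so values that must be scanned can live below the
// reported stack top on those targets. Windows has no red zone.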
static inline int osRedZoneAdjustment()
{
    int redZoneAdjustment = 0;
#if !OS(WINDOWS)
#if CPU(X86_64)
    // See http://people.freebsd.org/~obrien/amd64-elf-abi.pdf Section 3.2.2.
    redZoneAdjustment = -128;
#elif CPU(ARM64)
    // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html#//apple_ref/doc/uid/TP40013702-SW7
    redZoneAdjustment = -128;
#endif
#endif // !OS(WINDOWS)
    return redZoneAdjustment;
}

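// Computes the span of 'thread''s stack that needs scanning: from the given stack top,
// rounded to pointer alignment, extended downward by the red zone, and clamped to the
// stack bounds, up to the stack origin. Returns (lowest address, length in bytes).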
static std::pair<void*, size_t> captureStack(Thread& thread, void* stackTop)
{
    char* begin = reinterpret_cast_ptr<char*>(thread.stack().origin());
    char* end = bitwise_cast<char*>(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(stackTop)));
    ASSERT(begin >= end);

    char* endWithRedZone = end + osRedZoneAdjustment();
    ASSERT(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(endWithRedZone)) == reinterpret_cast<uintptr_t>(endWithRedZone));

    if (endWithRedZone < thread.stack().end())
        endWithRedZone = reinterpret_cast_ptr<char*>(thread.stack().end());

    std::swap(begin, endWithRedZone);
    return std::make_pair(begin, endWithRedZone - begin);
}

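// Word-at-a-time replacement for memcpy(); the pointers and the size must all be
// pointer-size aligned. See the comment above tryCopyOtherThreadStack() for why the
// system memcpy cannot be used here.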
SUPPRESS_ASAN
static void copyMemory(void* dst, const void* src, size_t size)
{
    size_t dstAsSize = reinterpret_cast<size_t>(dst);
    size_t srcAsSize = reinterpret_cast<size_t>(src);
    RELEASE_ASSERT(dstAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(dstAsSize));
    RELEASE_ASSERT(srcAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(srcAsSize));
    RELEASE_ASSERT(size == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(size));

    intptr_t* dstPtr = reinterpret_cast<intptr_t*>(dst);
    const intptr_t* srcPtr = reinterpret_cast<const intptr_t*>(src);
    size /= sizeof(intptr_t);
    while (size--)
        *dstPtr++ = *srcPtr++;
}

// This function must not call malloc(), free(), or any other function that might
// acquire a lock. Since 'thread' is suspended, trying to acquire a lock
// will deadlock if 'thread' holds that lock.
// This function, specifically the memory copying, was causing problems with Address Sanitizer in
// apps. Since we cannot blacklist the system memcpy we must use our own naive implementation,
// copyMemory, for ASan to work on either instrumented or non-instrumented builds. This is not a
// significant performance loss as tryCopyOtherThreadStack is only called as part of an O(heapsize)
// operation. As the heap is generally much larger than the stack the performance hit is minimal.
// See: https://bugs.webkit.org/show_bug.cgi?id=146297
void MachineThreads::tryCopyOtherThreadStack(Thread& thread, void* buffer, size_t capacity, size_t* size)
{
    PlatformRegisters registers;
    size_t registersSize = thread.getRegisters(registers);

    // This is a workaround for <rdar://problem/27607384>. libdispatch recycles work
    // queue threads without running pthread exit destructors. This can cause us to scan a
    // thread during work queue initialization, when the stack pointer is null.
    if (UNLIKELY(!MachineContext::stackPointer(registers))) {
        *size = 0;
        return;
    }

    std::pair<void*, size_t> stack = captureStack(thread, MachineContext::stackPointer(registers));

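    // Even when the remaining capacity is too small to hold the data, keep accumulating
    // the required size so the caller can grow the buffer and retry.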
    bool canCopy = *size + registersSize + stack.second <= capacity;

    if (canCopy)
        copyMemory(static_cast<char*>(buffer) + *size, &registers, registersSize);
    *size += registersSize;

    if (canCopy)
        copyMemory(static_cast<char*>(buffer) + *size, stack.first, stack.second);
    *size += stack.second;
}

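// Suspends every registered thread except the current one, copies each suspended thread's
// register state and stack into 'buffer', and resumes them. Returns false if 'buffer' was
// too small; in that case *size reports the capacity needed for a successful retry.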
bool MachineThreads::tryCopyOtherThreadStacks(const AbstractLocker& locker, void* buffer, size_t capacity, size_t* size)
{
    // Prevent two VMs from suspending each other's threads at the same time,
    // which can cause deadlock: <rdar://problem/20300842>.
    static StaticLock mutex;
    std::lock_guard<StaticLock> lock(mutex);

    *size = 0;

    Thread& currentThread = Thread::current();
    const auto& threads = m_threadGroup->threads(locker);
    BitVector isSuspended(threads.size());

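    // First pass: suspend every thread other than the current one.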
    {
        unsigned index = 0;
        for (auto& thread : threads) {
            if (thread.ptr() != &currentThread) {
                auto result = thread->suspend();
                if (result)
                    isSuspended.set(index);
                else {
#if OS(DARWIN)
                    // These threads will be removed from the ThreadGroup. Thus, we do not do anything here except for reporting.
                    ASSERT(result.error() != KERN_SUCCESS);
                    WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                        "JavaScript garbage collection encountered an invalid thread (err 0x%x): Thread [%d/%d: %p].",
                        result.error(), index, threads.size(), thread.ptr());
#endif
                }
            }
            ++index;
        }
    }

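    // Second pass: copy each suspended thread's registers and stack into the buffer.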
    {
        unsigned index = 0;
        for (auto& thread : threads) {
            if (isSuspended.get(index))
                tryCopyOtherThreadStack(thread.get(), buffer, capacity, size);
            ++index;
        }
    }

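    // Third pass: resume every thread that was suspended above.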
    {
        unsigned index = 0;
        for (auto& thread : threads) {
            if (isSuspended.get(index))
                thread->resume();
            ++index;
        }
    }

    return *size <= capacity;
}

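// Replaces the buffer with a fresh allocation of at least twice the required size, rounded
// up to a whole number of pages. The old contents are not preserved; the caller redoes the
// copy from scratch after growing.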
static void growBuffer(size_t size, void** buffer, size_t* capacity)
{
    if (*buffer)
        fastFree(*buffer);

    *capacity = WTF::roundUpToMultipleOf(WTF::pageSize(), size * 2);
    *buffer = fastMalloc(*capacity);
}

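// Gathers conservative roots from the current thread (when a CurrentThreadState is given)
// and from every other registered thread. Other threads' stacks are copied into a temporary
// buffer while they are briefly suspended, then scanned after they have been resumed.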
void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, CurrentThreadState* currentThreadState)
{
    if (currentThreadState)
        gatherFromCurrentThread(conservativeRoots, jitStubRoutines, codeBlocks, *currentThreadState);

    size_t size;
    size_t capacity = 0;
    void* buffer = nullptr;
    auto locker = holdLock(m_threadGroup->getLock());
    while (!tryCopyOtherThreadStacks(locker, buffer, capacity, &size))
        growBuffer(size, &buffer, &capacity);

    if (!buffer)
        return;

    conservativeRoots.add(buffer, static_cast<char*>(buffer) + size, jitStubRoutines, codeBlocks);
    fastFree(buffer);
}

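// Captures the calling thread's stack bounds and (where available) register state into a
// CurrentThreadState on this frame and hands it to the lambda, so the lambda can
// conservatively scan the caller's frames. A sketch of a typical call (hypothetical
// variable names, assuming WTF's scopedLambda helper):
//
//     callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>([&] (CurrentThreadState& state) {
//         machineThreads.gatherConservativeRoots(roots, jitStubRoutines, codeBlocks, &state);
//     }));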
NEVER_INLINE int callWithCurrentThreadState(const ScopedLambda<void(CurrentThreadState&)>& lambda)
{
    DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE(state);
    lambda(state);
    return 42; // Suppress tail call optimization.
}

} // namespace JSC