/*
 * Copyright (C) 2009, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ExecutableAllocator.h"

#include "JSCInlines.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#include "CodeProfiling.h"
#include "ExecutableAllocationFuzz.h"
#include <errno.h>
#if !PLATFORM(WIN)
#include <unistd.h>
#endif
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>

#if OS(DARWIN)
#include <sys/mman.h>
#endif

#if OS(LINUX)
#include <stdio.h>
#endif

#include "LinkBuffer.h"
#include "MacroAssembler.h"

#if PLATFORM(MAC) || (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
#define HAVE_REMAP_JIT 1
#endif

#if HAVE(REMAP_JIT)
#if CPU(ARM64) && PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000
#define USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION 1
#endif
#endif
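
// HAVE(REMAP_JIT) enables the separated writable/executable JIT heaps on macOS and
// recent iOS: the executable pool gets a second, writable mapping at a randomized
// address, and writes to JIT memory are funneled through a dedicated thunk. On ARM64
// iOS the thunk can additionally be made execute-only
// (USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION), so the location of the writable alias is
// baked into unreadable code rather than stored anywhere readable.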

#if OS(DARWIN)
#include <mach/mach.h>
extern "C" {
    /* Routine mach_vm_remap */
#ifdef mig_external
    mig_external
#else
    extern
#endif /* mig_external */
    kern_return_t mach_vm_remap
    (
     vm_map_t target_task,
     mach_vm_address_t *target_address,
     mach_vm_size_t size,
     mach_vm_offset_t mask,
     int flags,
     vm_map_t src_task,
     mach_vm_address_t src_address,
     boolean_t copy,
     vm_prot_t *cur_protection,
     vm_prot_t *max_protection,
     vm_inherit_t inheritance
     );
}

#endif

using namespace WTF;

namespace JSC {

JS_EXPORTDATA uintptr_t startOfFixedExecutableMemoryPool;
JS_EXPORTDATA uintptr_t endOfFixedExecutableMemoryPool;

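// When the separated W^X heaps are in use, jitWriteFunction points at the write thunk
// and callers are expected to route every write to JIT memory through it, passing an
// offset relative to startOfFixedExecutableMemoryPool rather than a raw pointer.
// A minimal sketch of such a caller (illustrative only; the real entry point lives in
// ExecutableAllocator.h and its exact shape may differ):
//
//     static void* performJITMemcpy(void* dst, const void* src, size_t n)
//     {
//         if (jitWriteFunction) {
//             off_t offset = reinterpret_cast<uintptr_t>(dst) - startOfFixedExecutableMemoryPool;
//             jitWriteFunction(offset, src, n); // copies via the hidden writable alias
//             return dst;
//         }
//         return memcpy(dst, src, n);
//     }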
JS_EXPORTDATA JITWriteFunction jitWriteFunction;

#if !USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) && HAVE(REMAP_JIT)
static uintptr_t startOfFixedWritableMemoryPool;
#endif

class FixedVMPoolExecutableAllocator : public MetaAllocator {
    WTF_MAKE_FAST_ALLOCATED;
public:
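    // The constructor reserves one contiguous virtual memory range (with guard pages)
    // for all JIT code. When Options::useSeparatedWXHeap() is set, the first page of
    // that reservation is carved off to hold the JIT write thunk, and only the
    // remainder is handed to the MetaAllocator as allocatable pool space.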
    FixedVMPoolExecutableAllocator()
        : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
    {
        size_t reservationSize;
        if (Options::jitMemoryReservationSize())
            reservationSize = Options::jitMemoryReservationSize();
        else
            reservationSize = fixedExecutableMemoryPoolSize;
        reservationSize = roundUpToMultipleOf(pageSize(), reservationSize);
        m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
        if (m_reservation) {
            ASSERT(m_reservation.size() == reservationSize);
            void* reservationBase = m_reservation.base();

            if (Options::useSeparatedWXHeap()) {
                // First page of our JIT allocation is reserved.
                ASSERT(reservationSize >= pageSize() * 2);
                reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
                reservationSize -= pageSize();
                initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
            }

            addFreshFreeSpace(reservationBase, reservationSize);

            startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(reservationBase);
            endOfFixedExecutableMemoryPool = startOfFixedExecutableMemoryPool + reservationSize;
        }
    }

    virtual ~FixedVMPoolExecutableAllocator();

protected:
    void* allocateNewSpace(size_t&) override
    {
        // We're operating in a fixed pool, so new allocation is always prohibited.
        return 0;
    }

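    // Page lifecycle: on platforms with USE(MADV_FREE_FOR_JIT_MEMORY) nothing needs to
    // happen when a page is about to be used, and freed pages are handed back to the
    // kernel lazily with madvise(MADV_FREE); elsewhere pages are explicitly committed
    // and decommitted through the PageReservation.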
    void notifyNeedPage(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        UNUSED_PARAM(page);
#else
        m_reservation.commit(page, pageSize());
#endif
    }

    void notifyPageIsFree(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        for (;;) {
            int result = madvise(page, pageSize(), MADV_FREE);
            if (!result)
                return;
            ASSERT(result == -1);
            if (errno != EAGAIN) {
                ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
                break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
            }
        }
#else
        m_reservation.decommit(page, pageSize());
#endif
    }

private:
#if OS(DARWIN) && HAVE(REMAP_JIT)
    void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
    {
        mach_vm_address_t writableAddr = 0;

        // Create a second mapping of the JIT region at a random address.
        vm_prot_t cur, max;
        int remapFlags = VM_FLAGS_ANYWHERE;
#if defined(VM_FLAGS_RANDOM_ADDR)
        remapFlags |= VM_FLAGS_RANDOM_ADDR;
#endif
        kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
            remapFlags,
            mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
            &cur, &max, VM_INHERIT_DEFAULT);

        bool remapSucceeded = (ret == KERN_SUCCESS);
        if (!remapSucceeded)
            return;

        // Assemble a thunk that will serve as the means for writing into the JIT region.
        MacroAssemblerCodeRef writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);

        int result = 0;

#if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
        // Prevent reading the write thunk code.
        result = mprotect(stubBase, stubSize, VM_PROT_EXECUTE_ONLY);
        RELEASE_ASSERT(!result);
#endif

        // Prevent writing into the executable JIT mapping.
        result = mprotect(jitBase, jitSize, VM_PROT_READ | VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);

        // Prevent execution in the writable JIT mapping.
        result = mprotect((void*)writableAddr, jitSize, VM_PROT_READ | VM_PROT_WRITE);
        RELEASE_ASSERT(!result);

        // Zero out writableAddr to avoid leaking the address of the writable mapping.
        memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));

        jitWriteFunction = reinterpret_cast<JITWriteFunction>(writeThunk.code().executableAddress());
    }

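    // Both thunk generators below produce code with the JITWriteFunction signature:
    // (offset into the JIT region, source pointer, byte count). In the execute-only
    // ARM64 variant the writable base address is baked into the thunk itself; the
    // generic fallback instead stashes it in startOfFixedWritableMemoryPool.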
#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
    MacroAssemblerCodeRef jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
    {
        using namespace ARM64Registers;
        using TrustedImm32 = MacroAssembler::TrustedImm32;

        MacroAssembler jit;

        // On entry: x0 = offset into the JIT region, x1 = source, x2 = byte count.
        // Rebase the offset onto the writable alias; only this thunk knows writableAddr.
        jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
        jit.addPtr(x7, x0);

        // x3 is the destination cursor. Copies of fewer than 64 bytes take the small path.
        jit.move(x0, x3);
        MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));

        // Bulk path: round the destination cursor up to a 32-byte boundary, load the
        // first 32 source bytes into x12-x15 for the unaligned head, and advance the
        // source past the head.
        jit.add64(TrustedImm32(32), x3);
        jit.and64(TrustedImm32(-32), x3);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.sub64(x3, x0, x5);
        jit.addPtr(x5, x1);

        // Preload the first aligned 32-byte block into x8-x11, deduct the head from the
        // remaining count, store the head, and fall through to the main loop unless only
        // a tail remains.
        jit.loadPair64(x1, x8, x9);
        jit.loadPair64(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.sub64(x5, x2);
        jit.storePair64(x12, x13, x0);
        jit.storePair64(x14, x15, x0, TrustedImm32(16));
        MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);

        // Main loop: store the block loaded on the previous iteration using non-temporal
        // pair stores while loading the next 32 bytes.
        MacroAssembler::Label copyLoop = jit.label();
        jit.storePair64WithNonTemporalAccess(x8, x9, x3);
        jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
        jit.add64(TrustedImm32(32), x3);
        jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
        jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);

        // Tail: flush the outstanding block, then copy the final 32 source bytes to the
        // final 32 destination bytes (these stores may overlap work already done, which
        // is harmless for a copy).
        cleanup.link(&jit);
        jit.add64(x2, x1);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.storePair64(x8, x9, x3);
        jit.storePair64(x10, x11, x3, TrustedImm32(16));
        jit.addPtr(x2, x3);
        jit.storePair64(x12, x13, x3, TrustedImm32(32));
        jit.storePair64(x14, x15, x3, TrustedImm32(48));
        jit.ret();

        // Small path: copy 8 bytes at a time, then any remaining 0-7 bytes one at a time.
        MacroAssembler::Label local0 = jit.label();
        jit.load64(x1, PostIndex(8), x6);
        jit.store64(x6, x3, PostIndex(8));
        smallCopy.link(&jit);
        jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
        MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
        MacroAssembler::Label local1 = jit.label();
        jit.load8(x1, PostIndex(1), x6);
        jit.store8(x6, x3, PostIndex(1));
        jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
        local2.link(&jit);
        jit.ret();

        // The thunk is linked directly into the reserved stub page at the start of the
        // fixed reservation.
        LinkBuffer linkBuffer(jit, stubBase, stubSize);
        return FINALIZE_CODE(linkBuffer, ("Bulletproof JIT write thunk"));
    }
#else // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
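    // Fallback when an execute-only thunk is not available: remember the writable alias
    // in a static and do the copy from ordinary C++ code. All JIT writes still funnel
    // through a single function, but the writable address remains readable from within
    // the process.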
    static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
    {
        memcpy((void*)(startOfFixedWritableMemoryPool + offset), data, dataSize);
    }

    MacroAssemblerCodeRef jitWriteThunkGenerator(void* address, void*, size_t)
    {
        startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
        uintptr_t function = (uintptr_t)((void*)&genericWriteToJITRegion);
#if CPU(ARM_THUMB2)
        // Handle thumb offset
        function -= 1;
#endif
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr((void*)function));
    }
#endif

#else // OS(DARWIN) && HAVE(REMAP_JIT)
    void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
    {
    }
#endif

private:
    PageReservation m_reservation;
};

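// The one process-wide pool allocator. It is created by
// ExecutableAllocator::initializeAllocator() before any ExecutableAllocator instance is
// constructed (see the ASSERTs below) and is never deleted.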
static FixedVMPoolExecutableAllocator* allocator;

void ExecutableAllocator::initializeAllocator()
{
    ASSERT(!allocator);
    allocator = new FixedVMPoolExecutableAllocator();
    CodeProfiling::notifyAllocator(allocator);
}

ExecutableAllocator::ExecutableAllocator(VM&)
{
    ASSERT(allocator);
}

ExecutableAllocator::~ExecutableAllocator()
{
}

FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
{
    m_reservation.deallocate();
}

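// The reservation can fail (for example when address space is exhausted). In that case
// the constructor never adds any free space, bytesReserved() stays zero, and isValid()
// reports the pool as unusable.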
bool ExecutableAllocator::isValid() const
{
    return !!allocator->bytesReserved();
}

bool ExecutableAllocator::underMemoryPressure()
{
    MetaAllocator::Statistics statistics = allocator->currentStatistics();
    return statistics.bytesAllocated > statistics.bytesReserved / 2;
}

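// The pool keeps a fraction of the reservation (executablePoolReservationFraction) in
// reserve; the soft limit is bytesAvailable = bytesReserved * (1 - fraction), and the
// multiplier returned below is bytesAvailable / (bytesAvailable - bytesAllocated),
// clamped to at least 1. Illustrative arithmetic (assumed numbers, not a measurement):
// with 24 MB available, 12 MB allocated yields a multiplier of 2 and 18 MB yields 4.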
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    MetaAllocator::Statistics statistics = allocator->currentStatistics();
    ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
    size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
    size_t bytesAvailable = static_cast<size_t>(
        statistics.bytesReserved * (1 - executablePoolReservationFraction));
    if (bytesAllocated >= bytesAvailable)
        bytesAllocated = bytesAvailable;
    double result = 1.0;
    size_t divisor = bytesAvailable - bytesAllocated;
    if (divisor)
        result = static_cast<double>(bytesAvailable) / divisor;
    if (result < 1.0)
        result = 1.0;
    return result;
}

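// allocate() has three distinct failure-handling paths: loud reporting (with a
// backtrace) when a must-succeed allocation is requested while
// Options::reportMustSucceedExecutableAllocations() is set, simulated failure via the
// executable allocation fuzzer, and a quiet refusal of can-fail allocations once the
// pool is down to its reserve fraction. Genuine exhaustion on a must-succeed path crashes.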
RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
        WTFReportBacktrace();
    }

    if (effort == JITCompilationCanFail
        && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
        return nullptr;

    if (effort == JITCompilationCanFail) {
        // Don't allow allocations if we are down to reserve.
        MetaAllocator::Statistics statistics = allocator->currentStatistics();
        size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
        size_t bytesAvailable = static_cast<size_t>(
            statistics.bytesReserved * (1 - executablePoolReservationFraction));
        if (bytesAllocated > bytesAvailable)
            return nullptr;
    }

    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
    if (!result) {
        if (effort != JITCompilationCanFail) {
            dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
            CRASH();
        }
        return nullptr;
    }
    return result;
}


bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address)
{
    return allocator->isInAllocatedMemory(locker, address);
}

Lock& ExecutableAllocator::getLock() const
{
    return allocator->getLock();
}

size_t ExecutableAllocator::committedByteCount()
{
    return allocator->bytesCommitted();
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void ExecutableAllocator::dumpProfile()
{
    allocator->dumpProfile();
}
#endif

} // namespace JSC


#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)