Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
/*
 * Copyright (C) 2009, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ExecutableAllocator.h"

#include "JSCInlines.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#include "CodeProfiling.h"
#include "ExecutableAllocationFuzz.h"
#include <errno.h>
#if !PLATFORM(WIN)
#include <unistd.h>
#endif
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>

#if OS(DARWIN)
#include <sys/mman.h>
#endif

#if OS(LINUX)
#include <stdio.h>
#endif

#if ENABLE(SEPARATED_WX_HEAP)
#include "LinkBuffer.h"
#include "MacroAssembler.h"

#if OS(DARWIN)
#include <mach/mach.h>
extern "C" {
    /* Routine mach_vm_remap */
#ifdef mig_external
    mig_external
#else
    extern
#endif /* mig_external */
    kern_return_t mach_vm_remap
    (
     vm_map_t target_task,
     mach_vm_address_t *target_address,
     mach_vm_size_t size,
     mach_vm_offset_t mask,
     int flags,
     vm_map_t src_task,
     mach_vm_address_t src_address,
     boolean_t copy,
     vm_prot_t *cur_protection,
     vm_prot_t *max_protection,
     vm_inherit_t inheritance
     );
}

#endif

#endif

using namespace WTF;

namespace JSC {

JS_EXPORTDATA uintptr_t startOfFixedExecutableMemoryPool;
JS_EXPORTDATA uintptr_t endOfFixedExecutableMemoryPool;

#if ENABLE(SEPARATED_WX_HEAP)
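// Executable address of the generated JIT write thunk. When the separated W^X
// heap is in use, writes into the executable pool are expected to go through
// this thunk rather than through ordinary stores (see initializeSeparatedWXHeaps()
// below for how it is set up).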
JS_EXPORTDATA uintptr_t jitWriteFunctionAddress;
#endif

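// A MetaAllocator backed by a single fixed-size virtual memory reservation.
// The whole executable pool is reserved up front and never grows (see
// allocateNewSpace() below); individual pages are committed and decommitted
// on demand as the MetaAllocator hands pages out and takes them back.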
class FixedVMPoolExecutableAllocator : public MetaAllocator {
    WTF_MAKE_FAST_ALLOCATED;
public:
    FixedVMPoolExecutableAllocator()
        : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
    {
        size_t reservationSize;
        if (Options::jitMemoryReservationSize())
            reservationSize = Options::jitMemoryReservationSize();
        else
            reservationSize = fixedExecutableMemoryPoolSize;
        reservationSize = roundUpToMultipleOf(pageSize(), reservationSize);
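        // Reserve the address range (plus guard pages at its edges) without
        // committing physical memory; pages are committed lazily in
        // notifyNeedPage() as the allocator actually hands them out.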
        m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
        if (m_reservation) {
            ASSERT(m_reservation.size() == reservationSize);
            void* reservationBase = m_reservation.base();

#if ENABLE(SEPARATED_WX_HEAP)
            if (Options::useSeparatedWXHeap()) {
                // The first page of the JIT reservation is set aside to hold the
                // JIT write thunk; the executable pool proper starts one page in.
                ASSERT(reservationSize >= pageSize() * 2);
                reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
                reservationSize -= pageSize();
                initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
            }
#endif

            addFreshFreeSpace(reservationBase, reservationSize);

            startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(reservationBase);
            endOfFixedExecutableMemoryPool = startOfFixedExecutableMemoryPool + reservationSize;
        }
    }

    virtual ~FixedVMPoolExecutableAllocator();

protected:
    void* allocateNewSpace(size_t&) override
    {
        // We're operating in a fixed pool, so new allocation is always prohibited.
        return 0;
    }

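    // Page lifecycle hooks from MetaAllocator. On platforms that use MADV_FREE
    // for JIT memory there is no explicit commit step; instead, pages that fall
    // out of use are handed back to the kernel with madvise(MADV_FREE), which
    // lets it reclaim them lazily. Elsewhere we commit and decommit pages through
    // the PageReservation.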
    void notifyNeedPage(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        UNUSED_PARAM(page);
#else
        m_reservation.commit(page, pageSize());
#endif
    }

    void notifyPageIsFree(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        for (;;) {
            int result = madvise(page, pageSize(), MADV_FREE);
            if (!result)
                return;
            ASSERT(result == -1);
            if (errno != EAGAIN) {
                ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
                break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
            }
        }
#else
        m_reservation.decommit(page, pageSize());
#endif
    }

private:
#if ENABLE(SEPARATED_WX_HEAP)
    void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
    {
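        // Set up the separated W^X scheme: the JIT region gets a second, writable
        // mapping of the same memory at a randomized address, while the primary
        // mapping stays read+execute only. A small write thunk, emitted into the
        // stub page, embeds the writable address and is the only code that knows
        // where the writable alias lives; once the protections are in place, the
        // local copy of that address is scrubbed.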
        mach_vm_address_t writableAddr = 0;

        // Create a second mapping of the JIT region at a random address.
        vm_prot_t cur, max;
        int remapFlags = VM_FLAGS_ANYWHERE;
#if defined(VM_FLAGS_RANDOM_ADDR)
        remapFlags |= VM_FLAGS_RANDOM_ADDR;
#endif
        kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
            remapFlags,
            mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
            &cur, &max, VM_INHERIT_DEFAULT);

        bool remapSucceeded = (ret == KERN_SUCCESS);
        if (!remapSucceeded)
            writableAddr = (mach_vm_address_t)jitBase;

        // Assemble a thunk that will serve as the means for writing into the JIT region.
        MacroAssemblerCodeRef writeThunk = jitWriteThunkGenerator(writableAddr, stubBase, stubSize);

        int result = 0;

        // Only lock the mappings down if the remap actually gave us two of them;
        // when it failed, writableAddr aliases jitBase and must stay writable.
        if (remapSucceeded) {
#if defined(VM_PROT_EXECUTE_ONLY)
            // Prevent reading the write thunk code.
            result = mprotect(stubBase, stubSize, VM_PROT_EXECUTE_ONLY);
            RELEASE_ASSERT(!result);
#endif

            // Prevent writing into the executable JIT mapping.
            result = mprotect(jitBase, jitSize, VM_PROT_READ | VM_PROT_EXECUTE);
            RELEASE_ASSERT(!result);

            // Prevent execution in the writable JIT mapping.
            result = mprotect((void*)writableAddr, jitSize, VM_PROT_READ | VM_PROT_WRITE);
            RELEASE_ASSERT(!result);

            // Zero out writableAddr to avoid leaking the address of the writable mapping.
            memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));
        }
        jitWriteFunctionAddress = (uintptr_t)writeThunk.code().executableAddress();
    }

#if CPU(ARM64)
    MacroAssemblerCodeRef jitWriteThunkGenerator(mach_vm_address_t writableAddr, void* stubBase, size_t stubSize)
    {
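        // The generated thunk is effectively a memcpy into the writable alias of
        // the JIT region. On entry, x0 is expected to hold the destination offset
        // within the region (the writable base is added to it below), x1 the
        // source, and x2 the byte count. Copies of 64 bytes or more take the bulk
        // path: a 32-byte unaligned head, then a 32-bytes-per-iteration loop, then
        // tail stores; smaller copies take the 8-byte / 1-byte path at the end.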
        using namespace ARM64Registers;
        using TrustedImm32 = MacroAssembler::TrustedImm32;

        MacroAssembler jit;

        jit.move(MacroAssembler::TrustedImmPtr((const void*)writableAddr), x7);
        jit.addPtr(x7, x0);

        jit.move(x0, x3);
        MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));

        jit.add64(TrustedImm32(32), x3);
        jit.and64(TrustedImm32(-32), x3);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.sub64(x3, x0, x5);
        jit.addPtr(x5, x1);

        jit.loadPair64(x1, x8, x9);
        jit.loadPair64(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.sub64(x5, x2);
        jit.storePair64(x12, x13, x0);
        jit.storePair64(x14, x15, x0, TrustedImm32(16));
        MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);

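        // Bulk loop: copy 32 bytes per iteration, using non-temporal pair
        // loads/stores so the copied code does not pollute the data cache.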
        MacroAssembler::Label copyLoop = jit.label();
        jit.storePair64WithNonTemporalAccess(x8, x9, x3);
        jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
        jit.add64(TrustedImm32(32), x3);
        jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
        jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);

        cleanup.link(&jit);
        jit.add64(x2, x1);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.storePair64(x8, x9, x3);
        jit.storePair64(x10, x11, x3, TrustedImm32(16));
        jit.addPtr(x2, x3);
        jit.storePair64(x12, x13, x3, TrustedImm32(32));
        jit.storePair64(x14, x15, x3, TrustedImm32(48));
        jit.ret();

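        // Small-copy path (fewer than 64 bytes remaining): copy 8 bytes at a
        // time while at least 8 bytes remain, then finish byte by byte.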
        MacroAssembler::Label local0 = jit.label();
        jit.load64(x1, PostIndex(8), x6);
        jit.store64(x6, x3, PostIndex(8));
        smallCopy.link(&jit);
        jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
        MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
        MacroAssembler::Label local1 = jit.label();
        jit.load8(x1, PostIndex(1), x6);
        jit.store8(x6, x3, PostIndex(1));
        jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
        local2.link(&jit);
        jit.ret();

        LinkBuffer linkBuffer(jit, stubBase, stubSize);
        return FINALIZE_CODE(linkBuffer, ("Bulletproof JIT write thunk"));
    }
#endif // CPU(ARM64)
#endif // ENABLE(SEPARATED_WX_HEAP)

private:
    PageReservation m_reservation;
};

static FixedVMPoolExecutableAllocator* allocator;

void ExecutableAllocator::initializeAllocator()
{
    ASSERT(!allocator);
    allocator = new FixedVMPoolExecutableAllocator();
    CodeProfiling::notifyAllocator(allocator);
}

ExecutableAllocator::ExecutableAllocator(VM&)
{
    ASSERT(allocator);
}

ExecutableAllocator::~ExecutableAllocator()
{
}

FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
{
    m_reservation.deallocate();
}

bool ExecutableAllocator::isValid() const
{
    return !!allocator->bytesReserved();
}

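// The pool is considered to be under memory pressure once more than half of the
// reserved bytes are allocated.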
bool ExecutableAllocator::underMemoryPressure()
{
    MetaAllocator::Statistics statistics = allocator->currentStatistics();
    return statistics.bytesAllocated > statistics.bytesReserved / 2;
}

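// Computes a multiplier that grows as the executable pool fills up. With
// bytesAvailable = bytesReserved * (1 - executablePoolReservationFraction), the
// result is bytesAvailable / (bytesAvailable - bytesAllocated), clamped to at
// least 1; for example, once half of the usable pool is allocated the multiplier
// is 2, and it grows large as allocation approaches the reserve limit.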
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    MetaAllocator::Statistics statistics = allocator->currentStatistics();
    ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
    size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
    size_t bytesAvailable = static_cast<size_t>(
        statistics.bytesReserved * (1 - executablePoolReservationFraction));
    if (bytesAllocated >= bytesAvailable)
        bytesAllocated = bytesAvailable;
    double result = 1.0;
    size_t divisor = bytesAvailable - bytesAllocated;
    if (divisor)
        result = static_cast<double>(bytesAvailable) / divisor;
    if (result < 1.0)
        result = 1.0;
    return result;
}

RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
        WTFReportBacktrace();
    }

    if (effort == JITCompilationCanFail
        && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
        return nullptr;

    if (effort == JITCompilationCanFail) {
        // Don't allow allocations if we are down to reserve.
        MetaAllocator::Statistics statistics = allocator->currentStatistics();
        size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
        size_t bytesAvailable = static_cast<size_t>(
            statistics.bytesReserved * (1 - executablePoolReservationFraction));
        if (bytesAllocated > bytesAvailable)
            return nullptr;
    }

    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
    if (!result) {
        if (effort != JITCompilationCanFail) {
            dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
            CRASH();
        }
        return nullptr;
    }
    return result;
}

bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address)
{
    return allocator->isInAllocatedMemory(locker, address);
}

Lock& ExecutableAllocator::getLock() const
{
    return allocator->getLock();
}

size_t ExecutableAllocator::committedByteCount()
{
    return allocator->bytesCommitted();
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void ExecutableAllocator::dumpProfile()
{
    allocator->dumpProfile();
}
#endif

} // namespace JSC

#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)