// Source/JavaScriptCore/ftl/FTLCompile.cpp
/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2014 Samsung Electronics
 * Copyright (C) 2014 University of Szeged
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLCompile.h"

#if ENABLE(FTL_JIT)

#include "CodeBlockWithJITType.h"
#include "CCallHelpers.h"
#include "DFGCommon.h"
#include "DFGGraphSafepoint.h"
#include "DataView.h"
#include "Disassembler.h"
#include "FTLExitThunkGenerator.h"
#include "FTLInlineCacheSize.h"
#include "FTLJITCode.h"
#include "FTLThunks.h"
#include "FTLUnwindInfo.h"
#include "LLVMAPI.h"
#include "LinkBuffer.h"

namespace JSC { namespace FTL {

using namespace DFG;

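// The mm* functions below are memory-manager callbacks handed to LLVM's
// SimpleMCJITMemoryManager in compile(). They route LLVM's section allocations
// into JSC's executable allocator and into DataSections owned by the
// FTL::JITCode, and they record the sections (llvm_stackmaps, unwind data)
// that the stackmap-based fix-up pass needs later.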
static uint8_t* mmAllocateCodeSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
{
    State& state = *static_cast<State*>(opaqueState);
    
    RELEASE_ASSERT(alignment <= jitAllocationGranule);
    
    RefPtr<ExecutableMemoryHandle> result =
        state.graph.m_vm.executableAllocator.allocate(
            state.graph.m_vm, size, state.graph.m_codeBlock, JITCompilationCanFail);
    
    if (!result) {
        // Signal failure. This compilation will get tossed.
        state.allocationFailed = true;
        
        // Fake an allocation, since LLVM cannot handle failures in the memory manager.
        RefPtr<DataSection> fakeSection = adoptRef(new DataSection(size, jitAllocationGranule));
        state.jitCode->addDataSection(fakeSection);
        return bitwise_cast<uint8_t*>(fakeSection->base());
    }
    
    // LLVM used to put __compact_unwind in a code section. We keep this here defensively,
    // for clients that use older LLVMs.
    if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
        state.unwindDataSection = result->start();
        state.unwindDataSectionSize = result->sizeInBytes();
    }
    
    state.jitCode->addHandle(result);
    state.codeSectionNames.append(sectionName);
    
    return static_cast<uint8_t*>(result->start());
}

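// Data sections (other than the GOT, which must be reachable from code and is
// therefore allocated as a code section) are kept alive as DataSections on the
// FTL::JITCode. The llvm_stackmaps section and the platform's unwind section are
// additionally remembered on the State so they can be parsed after compilation.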
static uint8_t* mmAllocateDataSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
    const char* sectionName, LLVMBool isReadOnly)
{
    UNUSED_PARAM(sectionID);
    UNUSED_PARAM(isReadOnly);

    // Allocate the GOT in the code section to make it reachable for all code.
    if (!strcmp(sectionName, SECTION_NAME("got")))
        return mmAllocateCodeSection(opaqueState, size, alignment, sectionID, sectionName);

    State& state = *static_cast<State*>(opaqueState);

    RefPtr<DataSection> section = adoptRef(new DataSection(size, alignment));

    if (!strcmp(sectionName, SECTION_NAME("llvm_stackmaps")))
        state.stackmapsSection = section;
    else {
        state.jitCode->addDataSection(section);
        state.dataSectionNames.append(sectionName);
#if OS(DARWIN)
        if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
#elif OS(LINUX)
        if (!strcmp(sectionName, SECTION_NAME("eh_frame"))) {
#else
#error "Unrecognized OS"
#endif
            state.unwindDataSection = section->base();
            state.unwindDataSectionSize = size;
        }
    }

    return bitwise_cast<uint8_t*>(section->base());
}

static LLVMBool mmApplyPermissions(void*, char**)
{
    return false;
}

static void mmDestroy(void*)
{
}

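// Dumps a data section one 64-bit word per line; used by the disassembly dumps
// in compile() below.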
static void dumpDataSection(DataSection* section, const char* prefix)
{
    for (unsigned j = 0; j < section->size() / sizeof(int64_t); ++j) {
        char buf[32];
        int64_t* wordPointer = static_cast<int64_t*>(section->base()) + j;
        snprintf(buf, sizeof(buf), "0x%lx", static_cast<unsigned long>(bitwise_cast<uintptr_t>(wordPointer)));
        dataLogF("%s%16s: 0x%016llx\n", prefix, buf, static_cast<long long>(*wordPointer));
    }
}

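// Reads the single-location stackmap record that captured a stack region's address.
// The location is expected to be callFrameRegister plus a Register-aligned addend;
// the addend is returned as an offset in Register-sized slots. A stackmap ID of
// UINT_MAX means the region does not exist and yields an offset of 0.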
static int offsetOfStackRegion(StackMaps::RecordMap& recordMap, uint32_t stackmapID)
{
    if (stackmapID == UINT_MAX)
        return 0;
    
    StackMaps::RecordMap::iterator iter = recordMap.find(stackmapID);
    RELEASE_ASSERT(iter != recordMap.end());
    RELEASE_ASSERT(iter->value.size() == 1);
    RELEASE_ASSERT(iter->value[0].record.locations.size() == 1);
    Location capturedLocation =
        Location::forStackmaps(nullptr, iter->value[0].record.locations[0]);
    RELEASE_ASSERT(capturedLocation.kind() == Location::Register);
    RELEASE_ASSERT(capturedLocation.gpr() == GPRInfo::callFrameRegister);
    RELEASE_ASSERT(!(capturedLocation.addend() % sizeof(Register)));
    return capturedLocation.addend() / sizeof(Register);
}

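// Links the code assembled in `code` into the patchpoint area reserved inside the
// LLVM-generated function if it fits; otherwise the code is linked into a fresh
// out-of-line allocation, the inline area gets a jump to it, and the out-of-line
// code jumps back to the instruction after the inline area. The callback receives
// the LinkBuffer that was actually used and whether the code ended up inline.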
static void generateInlineIfPossibleOutOfLineIfNot(State& state, VM& vm, CodeBlock* codeBlock, CCallHelpers& code, char* startOfInlineCode, size_t sizeOfInlineCode, const char* codeDescription, const std::function<void(LinkBuffer&, CCallHelpers&, bool wasCompiledInline)>& callback)
{
    std::unique_ptr<LinkBuffer> codeLinkBuffer;
    size_t actualCodeSize = code.m_assembler.buffer().codeSize();

    if (actualCodeSize <= sizeOfInlineCode) {
        LinkBuffer codeLinkBuffer(vm, code, startOfInlineCode, sizeOfInlineCode);

        // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
        MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + actualCodeSize, sizeOfInlineCode - actualCodeSize);

        callback(codeLinkBuffer, code, true);

        return;
    }

    if (Options::assertICSizing() || Options::dumpFailedICSizing()) {
        static size_t maxSize = 0;
        if (maxSize < actualCodeSize)
            maxSize = actualCodeSize;
        dataLogF("ALERT: Under-estimated FTL Inline Cache Size for %s: estimated %zu, actual %zu, max %zu\n", codeDescription, sizeOfInlineCode, actualCodeSize, maxSize);
        if (Options::assertICSizing())
            CRASH();
    }

    // If there isn't enough space in the provided inline code area, allocate out of line
    // executable memory to link the provided code. Place a jump at the beginning of the
    // inline area and jump to the out of line code. Similarly return by appending a jump
    // to the provided code that goes to the instruction after the inline code.
    // Fill the middle with nop's.
    MacroAssembler::Jump returnToMainline = code.jump();

    // Allocate out of line executable memory and link the provided code there.
    codeLinkBuffer = std::make_unique<LinkBuffer>(vm, code, codeBlock, JITCompilationMustSucceed);

    // Plant a jmp in the inline buffer to the out of line code.
    MacroAssembler callToOutOfLineCode;
    MacroAssembler::Jump jumpToOutOfLine = callToOutOfLineCode.jump();
    LinkBuffer inlineBuffer(vm, callToOutOfLineCode, startOfInlineCode, sizeOfInlineCode);
    inlineBuffer.link(jumpToOutOfLine, codeLinkBuffer->entrypoint());

    // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
    MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + inlineBuffer.size(), sizeOfInlineCode - inlineBuffer.size());

    // Link the end of the out of line code to right after the inline area.
    codeLinkBuffer->link(returnToMainline, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(startOfInlineCode)).labelAtOffset(sizeOfInlineCode));

    callback(*codeLinkBuffer.get(), code, false);

    state.finalizer->outOfLineCodeInfos.append(OutOfLineCodeInfo(WTF::move(codeLinkBuffer), codeDescription));
}

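// Emits the fast path of a get/put-by-id inline cache into the patchpoint area that
// LLVM reserved for the IC's stackmap record, and wires its slow-path jump and
// "done" jump to the slow-path code that fixFunctionBasedOnStackMaps() emitted into
// the side code link buffer.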
template<typename DescriptorType>
void generateICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }
    
    Vector<StackMaps::RecordAndIndex>& records = iter->value;
    
    RELEASE_ASSERT(records.size() == ic.m_generators.size());
    
    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i].record;
        auto generator = ic.m_generators[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        generator.generateFastPath(fastPathJIT);
        
        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "inline cache fast path", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i],
                CodeLocationLabel(startOfIC + sizeOfIC));

            linkBuffer.link(generator.slowPathJump(),
                state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));

            generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
        });
    }
}

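// Same idea as generateICFastPath(), but for "in" (CheckIn) caches: the fast path is
// just a patchable jump plus a done label, and the StructureStubInfo deltas are
// computed by hand here rather than by a JITInlineCacheGenerator.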
static void generateCheckInICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }
    
    Vector<StackMaps::RecordAndIndex>& records = iter->value;
    
    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i].record;
        auto generator = ic.m_generators[i];

        StructureStubInfo& stubInfo = *generator.m_stub;
        auto call = generator.m_slowCall;
        auto slowPathBegin = generator.m_beginLabel;

        CCallHelpers fastPathJIT(&vm, codeBlock);
        
        auto jump = fastPathJIT.patchableJump();
        auto done = fastPathJIT.label();

        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        auto postLink = [&] (LinkBuffer& fastPath, CCallHelpers&, bool) {
            LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;

            state.finalizer->sideCodeLinkBuffer->link(
                ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

            CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
            fastPath.link(jump, slowPathBeginLoc);

            CodeLocationCall callReturnLocation = slowPath.locationOf(call);

            stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(done));

            stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(jump));
            stubInfo.callReturnLocation = callReturnLocation;
            stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, slowPathBeginLoc);
        };

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "CheckIn inline cache", postLink);
    }
}


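// The register set an IC must preserve: either everything (when the option to assume
// all registers are live in FTL ICs is set) or the registers the stackmap record
// reports as used plus the callee-saves.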
static RegisterSet usedRegistersFor(const StackMaps::Record& record)
{
    if (Options::assumeAllRegsInFTLICAreLive())
        return RegisterSet::allRegisters();
    return RegisterSet(record.usedRegisterSet(), RegisterSet::calleeSaveRegisters());
}

template<typename CallType>
void adjustCallICsForStackmaps(Vector<CallType>& calls, StackMaps::RecordMap& recordMap)
{
    // Handling JS calls is weird: we need to ensure that we sort them by the PC in LLVM
    // generated code. That implies first pruning the ones that LLVM didn't generate.

    Vector<CallType> oldCalls;
    oldCalls.swap(calls);
    
    for (unsigned i = 0; i < oldCalls.size(); ++i) {
        CallType& call = oldCalls[i];
        
        StackMaps::RecordMap::iterator iter = recordMap.find(call.stackmapID());
        if (iter == recordMap.end())
            continue;
        
        for (unsigned j = 0; j < iter->value.size(); ++j) {
            CallType copy = call;
            copy.m_instructionOffset = iter->value[j].record.instructionOffset;
            calls.append(copy);
        }
    }

    std::sort(calls.begin(), calls.end());
}

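// This is the post-LLVM fix-up pass. Working from the parsed stackmaps it:
//  - rebases inline call frame and OSR exit data by the real stack frame layout,
//  - builds the out-of-line exception handling and OSR exit thunk code,
//  - emits slow paths and fast paths for GetById/PutById/CheckIn/lazy-slow-path ICs,
//  - links JS calls, varargs calls, and tail calls into their patchpoint areas,
//  - and patches stack overflow, exception, and OSR exit sites in the LLVM-generated
//    machine code with jumps to the right handlers.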
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps& stackmaps = jitCode->stackmaps;
    
    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }

        if (graph.hasDebuggerEnabled())
            codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    }
    
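    // Build the two out-of-line exception entry points: one for exceptions thrown with
    // the current frame on top, and one (stackOverflowException) for the stack overflow
    // case, which has to look up the handler from the caller's frame.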
    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

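    // Turn each exit descriptor into one concrete OSRExit per stackmap record that LLVM
    // actually emitted, generate the exit thunks, and rebase every exit value and
    // materialization by localsOffset now that the real frame layout is known.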
    RELEASE_ASSERT(state.jitCode->osrExit.size() == 0);
    for (unsigned i = 0; i < state.jitCode->osrExitDescriptors.size(); i++) {
        OSRExitDescriptor& exitDescriptor = state.jitCode->osrExitDescriptors[i];
        auto iter = recordMap.find(exitDescriptor.m_stackmapID);
        if (iter == recordMap.end()) {
            // It was optimized out.
            continue;
        }

        for (unsigned j = 0; j < iter->value.size(); j++) {
            uint32_t stackmapRecordIndex = iter->value[j].index;
            OSRExit exit(exitDescriptor, stackmapRecordIndex);
            state.jitCode->osrExit.append(exit);
            state.finalizer->osrExit.append(OSRExitCompilationInfo());
        }
    }
    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_descriptor.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_descriptor.m_values.size(); j--;)
                exit.m_descriptor.m_values[j] = exit.m_descriptor.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_descriptor.m_values, &context), "\n");
                if (!exit.m_descriptor.m_materializations.isEmpty()) {
                    dataLog("    Materializations: \n");
                    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor.m_materializations)
                        dataLog("        Materialize(", pointerDump(materialization), ")\n");
                }
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

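    // Inline caches: first emit all the slow paths into one side buffer (slowPathJIT),
    // recording the generators, then once that buffer is linked, emit each fast path
    // into its patchpoint area with generateICFastPath()/generateCheckInICFastPath().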
    if (!state.getByIds.isEmpty()
        || !state.putByIds.isEmpty()
        || !state.checkIns.isEmpty()
        || !state.lazySlowPaths.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = getById.codeOrigin();
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i].record;
            
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, codeOrigin, state.jitCode->common.addUniqueCallSiteIndex(codeOrigin), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result));
                
                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationGetByIdOptimize, result, CCallHelpers::TrustedImmPtr(gen.stubInfo()),
                    base, CCallHelpers::TrustedImmPtr(getById.uid())).call();

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = putById.codeOrigin();
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i].record;
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, codeOrigin, state.jitCode->common.addUniqueCallSiteIndex(codeOrigin), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    gen.slowPathFunction(), InvalidGPRReg,
                    CCallHelpers::TrustedImmPtr(gen.stubInfo()), value, base,
                    CCallHelpers::TrustedImmPtr(putById.uid())).call();
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = checkIn.codeOrigin();
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i].record;
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(AccessType::In);
                stubInfo->codeOrigin = codeOrigin;
                stubInfo->callSiteIndex = state.jitCode->common.addUniqueCallSiteIndex(codeOrigin);
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationInOptimize, result, CCallHelpers::TrustedImmPtr(stubInfo), obj,
                    CCallHelpers::TrustedImmPtr(checkIn.uid())).call();

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }

        for (unsigned i = state.lazySlowPaths.size(); i--;) {
            LazySlowPathDescriptor& descriptor = state.lazySlowPaths[i];

            if (verboseCompilationEnabled())
                dataLog("Handling lazySlowPath stackmap #", descriptor.stackmapID(), "\n");

            auto iter = recordMap.find(descriptor.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            CodeOrigin codeOrigin = descriptor.codeOrigin();
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i].record;
                RegisterSet usedRegisters = usedRegistersFor(record);
                Vector<Location> locations;
                for (auto location : record.locations)
                    locations.append(Location::forStackmaps(&stackmaps, location));

                char* startOfIC =
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
                CodeLocationLabel patchpoint((MacroAssemblerCodePtr(startOfIC)));
                CodeLocationLabel exceptionTarget =
                    state.finalizer->handleExceptionsLinkBuffer->entrypoint();

                std::unique_ptr<LazySlowPath> lazySlowPath = std::make_unique<LazySlowPath>(
                    patchpoint, exceptionTarget, usedRegisters, state.jitCode->common.addUniqueCallSiteIndex(codeOrigin),
                    descriptor.m_linker->run(locations));

                CCallHelpers::Label begin = slowPathJIT.label();

                slowPathJIT.pushToSaveImmediateWithoutTouchingRegisters(
                    CCallHelpers::TrustedImm32(state.jitCode->lazySlowPaths.size()));
                CCallHelpers::Jump generatorJump = slowPathJIT.jump();
                
                descriptor.m_generators.append(std::make_tuple(lazySlowPath.get(), begin));

                state.jitCode->lazySlowPaths.append(WTF::move(lazySlowPath));
                state.finalizer->lazySlowPathGeneratorJumps.append(generatorJump);
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
        if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn());
        }
        for (unsigned i = state.lazySlowPaths.size(); i--;) {
            LazySlowPathDescriptor& lazySlowPath = state.lazySlowPaths[i];
            for (auto& tuple : lazySlowPath.m_generators) {
                MacroAssembler::replaceWithJump(
                    std::get<0>(tuple)->patchpoint(),
                    state.finalizer->sideCodeLinkBuffer->locationOf(std::get<1>(tuple)));
            }
        }
    }
    
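    // Calls: prune each call descriptor list to the records LLVM kept, sort by code
    // position, then emit each call's fast path into its patchpoint area and link it.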
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, state);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, state, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        });
    }

    adjustCallICsForStackmaps(state.jsTailCalls, recordMap);

    for (unsigned i = state.jsTailCalls.size(); i--;) {
        JSTailCall& call = state.jsTailCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(*state.jitCode.get(), fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = call.estimatedSize();

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i].record;
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i].record;
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
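    // Finally, patch each OSR exit site. Invalidation points are recorded as deferred
    // jump replacements (only written when the code is invalidated); every other exit
    // site gets a jump to its exit thunk right now.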
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        
        Vector<const void*> codeAddresses;
        
        StackMaps::Record& record = jitCode->stackmaps.records[exit.m_stackmapRecordIndex];
        
        CodeLocationLabel source = CodeLocationLabel(
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
        
        codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
        
        if (exit.m_descriptor.m_isInvalidationPoint)
            jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
        else
            MacroAssembler::replaceWithJump(source, info.m_thunkAddress);
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}

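// Drives one FTL compilation: sets up an LLVM MCJIT execution engine with the
// memory-manager callbacks above (under a GraphSafepoint so a GC can proceed while
// LLVM runs), runs the LLVM optimization passes, pulls out the generated function,
// then parses the unwind info and the llvm_stackmaps section and hands the result
// to fixFunctionBasedOnStackMaps().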
void compile(State& state, Safepoint::Result& safepointResult)
{
    char* error = 0;
    
    {
        GraphSafepoint safepoint(state.graph, safepointResult);
        
        LLVMMCJITCompilerOptions options;
        llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
        options.OptLevel = Options::llvmBackendOptimizationLevel();
        options.NoFramePointerElim = true;
        if (Options::useLLVMSmallCodeModel())
            options.CodeModel = LLVMCodeModelSmall;
        options.EnableFastISel = enableLLVMFastISel;
        options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
            &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);
    
        LLVMExecutionEngineRef engine;
        
        if (isARM64()) {
#if OS(DARWIN)
            llvm->SetTarget(state.module, "arm64-apple-ios");
#elif OS(LINUX)
            llvm->SetTarget(state.module, "aarch64-linux-gnu");
#else
#error "Unrecognized OS"
#endif
        }

        if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
            dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
            CRASH();
        }
        
        // At this point we no longer own the module.
        LModule module = state.module;
        state.module = nullptr;

        // The data layout also has to be set in the module. Get the data layout from the MCJIT and apply
        // it to the module.
        LLVMTargetMachineRef targetMachine = llvm->GetExecutionEngineTargetMachine(engine);
        LLVMTargetDataRef targetData = llvm->GetExecutionEngineTargetData(engine);
        char* stringRepOfTargetData = llvm->CopyStringRepOfTargetData(targetData);
        llvm->SetDataLayout(module, stringRepOfTargetData);
        free(stringRepOfTargetData);

        LLVMPassManagerRef functionPasses = 0;
        LLVMPassManagerRef modulePasses;

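        // Two optimization pipelines: a hand-picked list of module passes when
        // llvmSimpleOpt() is set, or the stock PassManagerBuilder pipeline otherwise.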
        if (Options::llvmSimpleOpt()) {
            modulePasses = llvm->CreatePassManager();
            llvm->AddTargetData(targetData, modulePasses);
            llvm->AddAnalysisPasses(targetMachine, modulePasses);
            llvm->AddPromoteMemoryToRegisterPass(modulePasses);
            llvm->AddGlobalOptimizerPass(modulePasses);
            llvm->AddFunctionInliningPass(modulePasses);
            llvm->AddPruneEHPass(modulePasses);
            llvm->AddGlobalDCEPass(modulePasses);
            llvm->AddConstantPropagationPass(modulePasses);
            llvm->AddAggressiveDCEPass(modulePasses);
            llvm->AddInstructionCombiningPass(modulePasses);
            // BEGIN - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
            llvm->AddBasicAliasAnalysisPass(modulePasses);
            // END - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddGVNPass(modulePasses);
            llvm->AddCFGSimplificationPass(modulePasses);
            llvm->AddDeadStoreEliminationPass(modulePasses);
            
            if (enableLLVMFastISel)
                llvm->AddLowerSwitchPass(modulePasses);

            llvm->RunPassManager(modulePasses, module);
        } else {
            LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
            llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
            llvm->PassManagerBuilderUseInlinerWithThreshold(passBuilder, 275);
            llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());
        
            functionPasses = llvm->CreateFunctionPassManagerForModule(module);
            modulePasses = llvm->CreatePassManager();
        
            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
        
            llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
            llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);
        
            llvm->PassManagerBuilderDispose(passBuilder);
        
            llvm->InitializeFunctionPassManager(functionPasses);
            for (LValue function = llvm->GetFirstFunction(module); function; function = llvm->GetNextFunction(function))
                llvm->RunFunctionPassManager(functionPasses, function);
            llvm->FinalizeFunctionPassManager(functionPasses);
        
            llvm->RunPassManager(modulePasses, module);
        }

        if (shouldDumpDisassembly() || verboseCompilationEnabled())
            state.dumpState(module, "after optimization");
        
        // FIXME: Need to add support for the case where JIT memory allocation failed.
        // https://bugs.webkit.org/show_bug.cgi?id=113620
        state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
        if (functionPasses)
            llvm->DisposePassManager(functionPasses);
        llvm->DisposePassManager(modulePasses);
        llvm->DisposeExecutionEngine(engine);
    }

    if (safepointResult.didGetCancelled())
        return;
    RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());
    
    if (state.allocationFailed)
        return;
    
    if (shouldDumpDisassembly()) {
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            dataLog(
                "Generated LLVM code for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.codeSectionNames[i], ":\n");
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "    ", WTF::dataFile(), LLVMSubset);
        }
        
        for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
            DataSection* section = state.jitCode->dataSections()[i].get();
            dataLog(
                "Generated LLVM data section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.dataSectionNames[i], ":\n");
            dumpDataSection(section, "    ");
        }
    }
    
    std::unique_ptr<RegisterAtOffsetList> registerOffsets = parseUnwindInfo(
        state.unwindDataSection, state.unwindDataSectionSize,
        state.generatedFunction);
    if (shouldDumpDisassembly()) {
        dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
        dataLog("    ", *registerOffsets, "\n");
    }
    state.graph.m_codeBlock->setCalleeSaveRegisters(WTF::move(registerOffsets));
    
    if (state.stackmapsSection && state.stackmapsSection->size()) {
        if (shouldDumpDisassembly()) {
            dataLog(
                "Generated LLVM stackmaps section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
            dataLog("    Raw data:\n");
            dumpDataSection(state.stackmapsSection.get(), "    ");
        }
        
        RefPtr<DataView> stackmapsData = DataView::create(
            ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
        state.jitCode->stackmaps.parse(stackmapsData.get());
    
        if (shouldDumpDisassembly()) {
            dataLog("    Structured data:\n");
            state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
        }
        
        StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
        fixFunctionBasedOnStackMaps(
            state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
            recordMap);
        if (state.allocationFailed)
            return;
        
        if (shouldDumpDisassembly() || Options::asyncDisassembly()) {
            for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
                if (state.codeSectionNames[i] != SECTION_NAME("text"))
                    continue;
                
                ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
                
                CString header = toCString(
                    "Generated LLVM code after stackmap-based fix-up for ",
                    CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                    " in ", state.graph.m_plan.mode, " #", i, ", ",
                    state.codeSectionNames[i], ":\n");
                
                if (Options::asyncDisassembly()) {
                    disassembleAsynchronously(
                        header, MacroAssemblerCodeRef(handle), handle->sizeInBytes(), "    ",
                        LLVMSubset);
                    continue;
                }
                
                dataLog(header);
                disassemble(
                    MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                    "    ", WTF::dataFile(), LLVMSubset);
            }
        }
    }
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)