1 /*
2  * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
3  * Copyright (C) 2014 Samsung Electronics
4  * Copyright (C) 2014 University of Szeged
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
19  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26  */
27
28 #include "config.h"
29 #include "FTLCompile.h"
30
31 #if ENABLE(FTL_JIT) && !FTL_USES_B3
32
33 #include "CodeBlockWithJITType.h"
34 #include "CCallHelpers.h"
35 #include "DFGCommon.h"
36 #include "DFGGraphSafepoint.h"
37 #include "DFGOperations.h"
38 #include "DataView.h"
39 #include "Disassembler.h"
40 #include "FTLCompileBinaryOp.h"
41 #include "FTLExceptionHandlerManager.h"
42 #include "FTLExitThunkGenerator.h"
43 #include "FTLInlineCacheDescriptorInlines.h"
44 #include "FTLInlineCacheSize.h"
45 #include "FTLJITCode.h"
46 #include "FTLThunks.h"
47 #include "FTLUnwindInfo.h"
48 #include "LLVMAPI.h"
49 #include "LinkBuffer.h"
50 #include "ScratchRegisterAllocator.h"
51
52 namespace JSC { namespace FTL {
53
54 using namespace DFG;
55
56 static RegisterSet usedRegistersFor(const StackMaps::Record&);
57
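// The mm* functions below are the MCJIT memory-manager callbacks that we register with LLVM
// via CreateSimpleMCJITMemoryManager() in compile(). Code sections come out of JSC's
// executable allocator, while data sections are DataSection objects; the stackmaps and
// unwind sections are remembered on the State so they can be parsed after compilation.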
58 static uint8_t* mmAllocateCodeSection(
59     void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
60 {
61     State& state = *static_cast<State*>(opaqueState);
62     
63     RELEASE_ASSERT(alignment <= jitAllocationGranule);
64     
65     RefPtr<ExecutableMemoryHandle> result =
66         state.graph.m_vm.executableAllocator.allocate(
67             state.graph.m_vm, size, state.graph.m_codeBlock, JITCompilationCanFail);
68     
69     if (!result) {
70         // Signal failure. This compilation will get tossed.
71         state.allocationFailed = true;
72         
73         // Fake an allocation, since LLVM cannot handle failures in the memory manager.
74         RefPtr<DataSection> fakeSection = adoptRef(new DataSection(size, jitAllocationGranule));
75         state.jitCode->addDataSection(fakeSection);
76         return bitwise_cast<uint8_t*>(fakeSection->base());
77     }
78     
79     // LLVM used to put __compact_unwind in a code section. We keep this here defensively,
80     // for clients that use older LLVMs.
81     if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
82         state.unwindDataSection = result->start();
83         state.unwindDataSectionSize = result->sizeInBytes();
84     }
85     
86     state.jitCode->addHandle(result);
87     state.codeSectionNames.append(sectionName);
88     
89     return static_cast<uint8_t*>(result->start());
90 }
91
92 static uint8_t* mmAllocateDataSection(
93     void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
94     const char* sectionName, LLVMBool isReadOnly)
95 {
96     UNUSED_PARAM(sectionID);
97     UNUSED_PARAM(isReadOnly);
98
99     // Allocate the GOT in the code section to make it reachable for all code.
100     if (!strcmp(sectionName, SECTION_NAME("got")))
101         return mmAllocateCodeSection(opaqueState, size, alignment, sectionID, sectionName);
102
103     State& state = *static_cast<State*>(opaqueState);
104
105     RefPtr<DataSection> section = adoptRef(new DataSection(size, alignment));
106
107     if (!strcmp(sectionName, SECTION_NAME("llvm_stackmaps")))
108         state.stackmapsSection = section;
109     else {
110         state.jitCode->addDataSection(section);
111         state.dataSectionNames.append(sectionName);
112 #if OS(DARWIN)
113         if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
114 #elif OS(LINUX)
115         if (!strcmp(sectionName, SECTION_NAME("eh_frame"))) {
116 #else
117 #error "Unrecognized OS"
118 #endif
119             state.unwindDataSection = section->base();
120             state.unwindDataSectionSize = size;
121         }
122     }
123
124     return bitwise_cast<uint8_t*>(section->base());
125 }
126
127 static LLVMBool mmApplyPermissions(void*, char**)
128 {
129     return false;
130 }
131
132 static void mmDestroy(void*)
133 {
134 }
135
136 static void dumpDataSection(DataSection* section, const char* prefix)
137 {
138     for (unsigned j = 0; j < section->size() / sizeof(int64_t); ++j) {
139         char buf[32];
140         int64_t* wordPointer = static_cast<int64_t*>(section->base()) + j;
141         snprintf(buf, sizeof(buf), "0x%lx", static_cast<unsigned long>(bitwise_cast<uintptr_t>(wordPointer)));
142         dataLogF("%s%16s: 0x%016llx\n", prefix, buf, static_cast<long long>(*wordPointer));
143     }
144 }
145
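// Returns the offset, in Register-sized units, of the stack region captured by the given
// stackmap ID. The captured location must be the callFrameRegister plus a Register-aligned
// addend; a stackmap ID of UINT_MAX means there is no such region and yields an offset of 0.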
146 static int offsetOfStackRegion(StackMaps::RecordMap& recordMap, uint32_t stackmapID)
147 {
148     if (stackmapID == UINT_MAX)
149         return 0;
150     
151     StackMaps::RecordMap::iterator iter = recordMap.find(stackmapID);
152     RELEASE_ASSERT(iter != recordMap.end());
153     RELEASE_ASSERT(iter->value.size() == 1);
154     RELEASE_ASSERT(iter->value[0].record.locations.size() == 1);
155     Location capturedLocation =
156         Location::forStackmaps(nullptr, iter->value[0].record.locations[0]);
157     RELEASE_ASSERT(capturedLocation.kind() == Location::Register);
158     RELEASE_ASSERT(capturedLocation.gpr() == GPRInfo::callFrameRegister);
159     RELEASE_ASSERT(!(capturedLocation.addend() % sizeof(Register)));
160     return capturedLocation.addend() / sizeof(Register);
161 }
162
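// Links the code assembled in 'code' either directly into the fixed-size inline area reserved
// at a patchpoint or, if it doesn't fit, into freshly allocated out-of-line executable memory
// that the inline area jumps to and returns from. Either way, the callback is handed the
// LinkBuffer that was used, so the caller can do its own linking against it.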
163 static void generateInlineIfPossibleOutOfLineIfNot(State& state, VM& vm, CodeBlock* codeBlock, CCallHelpers& code, char* startOfInlineCode, size_t sizeOfInlineCode, const char* codeDescription, const std::function<void(LinkBuffer&, CCallHelpers&, bool wasCompiledInline)>& callback)
164 {
165     std::unique_ptr<LinkBuffer> codeLinkBuffer;
166     size_t actualCodeSize = code.m_assembler.buffer().codeSize();
167
168     if (actualCodeSize <= sizeOfInlineCode) {
169         LinkBuffer codeLinkBuffer(vm, code, startOfInlineCode, sizeOfInlineCode);
170
171         // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
172         MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + actualCodeSize, sizeOfInlineCode - actualCodeSize);
173
174         callback(codeLinkBuffer, code, true);
175
176         return;
177     }
178
179     if (Options::assertICSizing() || Options::dumpFailedICSizing()) {
180         static size_t maxSize = 0;
181         if (maxSize < actualCodeSize)
182             maxSize = actualCodeSize;
183         dataLogF("ALERT: Under-estimated FTL Inline Cache Size for %s: estimated %zu, actual %zu, max %zu\n", codeDescription, sizeOfInlineCode, actualCodeSize, maxSize);
184         if (Options::assertICSizing())
185             CRASH();
186     }
187
188     // If there isn't enough space in the provided inline code area, allocate out-of-line
189     // executable memory and link the provided code there. Plant a jump at the beginning of
190     // the inline area to the out-of-line code, and return by appending a jump to the provided
191     // code that targets the instruction right after the inline area. Fill the remainder of
192     // the inline area with nops.
193     MacroAssembler::Jump returnToMainline = code.jump();
194
195     // Allocate out of line executable memory and link the provided code there.
196     codeLinkBuffer = std::make_unique<LinkBuffer>(vm, code, codeBlock, JITCompilationMustSucceed);
197
198     // Plant a jmp in the inline buffer to the out of line code.
199     MacroAssembler callToOutOfLineCode;
200     MacroAssembler::Jump jumpToOutOfLine = callToOutOfLineCode.jump();
201     LinkBuffer inlineBuffer(vm, callToOutOfLineCode, startOfInlineCode, sizeOfInlineCode);
202     inlineBuffer.link(jumpToOutOfLine, codeLinkBuffer->entrypoint());
203
204     // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
205     MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + inlineBuffer.size(), sizeOfInlineCode - inlineBuffer.size());
206
207     // Link the end of the out of line code to right after the inline area.
208     codeLinkBuffer->link(returnToMainline, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(startOfInlineCode)).labelAtOffset(sizeOfInlineCode));
209
210     callback(*codeLinkBuffer.get(), code, false);
211
212     state.finalizer->outOfLineCodeInfos.append(OutOfLineCodeInfo(WTF::move(codeLinkBuffer), codeDescription));
213 }
214
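// For each stackmap record associated with this IC's stackmap ID, assemble the generator's
// fast path into the space reserved in the LLVM-generated code and cross-link it with the
// slow path that was emitted into the finalizer's side code link buffer.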
215 template<typename DescriptorType>
216 void generateICFastPath(
217     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
218     StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
219 {
220     VM& vm = state.graph.m_vm;
221
222     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
223     if (iter == recordMap.end()) {
224         // It was optimized out.
225         return;
226     }
227     
228     Vector<StackMaps::RecordAndIndex>& records = iter->value;
229     
230     RELEASE_ASSERT(records.size() == ic.m_generators.size());
231     
232     for (unsigned i = records.size(); i--;) {
233         StackMaps::Record& record = records[i].record;
234         auto generator = ic.m_generators[i];
235
236         CCallHelpers fastPathJIT(&vm, codeBlock);
237         generator.generateFastPath(fastPathJIT);
238         
239         char* startOfIC =
240             bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
241
242         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "inline cache fast path", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
243             state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i],
244                 CodeLocationLabel(startOfIC + sizeOfIC));
245
246             linkBuffer.link(generator.slowPathJump(),
247                 state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));
248
249             generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
250         });
251     }
252 }
253
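// Like generateICFastPath(), but a CheckIn fast path is just a patchable jump and a done
// label; the main job here is filling in the StructureStubInfo's patch offsets relative to
// the slow path call so that the IC repatching machinery can locate everything later.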
254 static void generateCheckInICFastPath(
255     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
256     StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
257 {
258     VM& vm = state.graph.m_vm;
259
260     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
261     if (iter == recordMap.end()) {
262         // It was optimized out.
263         return;
264     }
265     
266     Vector<StackMaps::RecordAndIndex>& records = iter->value;
267     
268     RELEASE_ASSERT(records.size() == ic.m_generators.size());
269
270     for (unsigned i = records.size(); i--;) {
271         StackMaps::Record& record = records[i].record;
272         auto generator = ic.m_generators[i];
273
274         StructureStubInfo& stubInfo = *generator.m_stub;
275         auto call = generator.m_slowCall;
276         auto slowPathBegin = generator.m_beginLabel;
277
278         CCallHelpers fastPathJIT(&vm, codeBlock);
279         
280         auto jump = fastPathJIT.patchableJump();
281         auto done = fastPathJIT.label();
282
283         char* startOfIC =
284             bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
285
286         auto postLink = [&] (LinkBuffer& fastPath, CCallHelpers&, bool) {
287             LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;
288
289             state.finalizer->sideCodeLinkBuffer->link(
290                 ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
291
292             CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
293             fastPath.link(jump, slowPathBeginLoc);
294
295             CodeLocationCall callReturnLocation = slowPath.locationOf(call);
296
297             stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
298                 callReturnLocation, fastPath.locationOf(done));
299
300             stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
301                 callReturnLocation, fastPath.locationOf(jump));
302             stubInfo.callReturnLocation = callReturnLocation;
303             stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
304                 callReturnLocation, slowPathBeginLoc);
305         };
306
307         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "CheckIn inline cache", postLink);
308     }
309 }
310
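// Binary op ICs: emit the fast path via generateBinaryOpFastPath() into the reserved space,
// then link its 'done' jump past the IC and its 'slowPathStart' jump to the slow path that
// was emitted into the side code link buffer.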
311 static void generateBinaryOpICFastPath(
312     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
313     StackMaps::RecordMap& recordMap, BinaryOpDescriptor& ic)
314 {
315     VM& vm = state.graph.m_vm;
316     size_t sizeOfIC = ic.size();
317
318     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
319     if (iter == recordMap.end())
320         return; // It was optimized out.
321
322     Vector<StackMaps::RecordAndIndex>& records = iter->value;
323
324     RELEASE_ASSERT(records.size() == ic.m_slowPathStarts.size());
325
326     for (unsigned i = records.size(); i--;) {
327         StackMaps::Record& record = records[i].record;
328
329         CCallHelpers fastPathJIT(&vm, codeBlock);
330
331         GPRReg result = record.locations[0].directGPR();
332         GPRReg left = record.locations[1].directGPR();
333         GPRReg right = record.locations[2].directGPR();
334         RegisterSet usedRegisters = usedRegistersFor(record);
335
336         CCallHelpers::Jump done;
337         CCallHelpers::Jump slowPathStart;
338
339         generateBinaryOpFastPath(ic, fastPathJIT, result, left, right, usedRegisters, done, slowPathStart);
340
341         char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
342         const char* fastPathICName = ic.fastPathICName();
343         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, fastPathICName, [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
344             linkBuffer.link(done, CodeLocationLabel(startOfIC + sizeOfIC));
345             state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
346             
347             linkBuffer.link(slowPathStart, state.finalizer->sideCodeLinkBuffer->locationOf(ic.m_slowPathStarts[i]));
348         });
349     }
350 }
351
352 #if ENABLE(MASM_PROBE)
353
354 static void generateProbe(
355     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
356     StackMaps::RecordMap& recordMap, ProbeDescriptor& ic)
357 {
358     VM& vm = state.graph.m_vm;
359     size_t sizeOfIC = sizeOfProbe();
360
361     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
362     if (iter == recordMap.end())
363         return; // It was optimized out.
364
365     CCallHelpers fastPathJIT(&vm, codeBlock);
366     Vector<StackMaps::RecordAndIndex>& records = iter->value;
367     for (unsigned i = records.size(); i--;) {
368         StackMaps::Record& record = records[i].record;
369
370         fastPathJIT.probe(ic.probeFunction());
371         CCallHelpers::Jump done = fastPathJIT.jump();
372
373         char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
374         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "Probe", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
375             linkBuffer.link(done, CodeLocationLabel(startOfIC + sizeOfIC));
376         });
377     }
378 }
379
380 #endif // ENABLE(MASM_PROBE)
381
382 static RegisterSet usedRegistersFor(const StackMaps::Record& record)
383 {
384     if (Options::assumeAllRegsInFTLICAreLive())
385         return RegisterSet::allRegisters();
386     return RegisterSet(record.usedRegisterSet(), RegisterSet::calleeSaveRegisters());
387 }
388
389 template<typename CallType>
390 void adjustCallICsForStackmaps(Vector<CallType>& calls, StackMaps::RecordMap& recordMap, ExceptionHandlerManager& exceptionHandlerManager)
391 {
392     // Handling JS calls is weird: we need to sort them by their PC in the LLVM-generated
393     // code, which means first pruning the ones that LLVM didn't generate.
394
395     Vector<CallType> oldCalls;
396     oldCalls.swap(calls);
397     
398     for (unsigned i = 0; i < oldCalls.size(); ++i) {
399         CallType& call = oldCalls[i];
400         
401         StackMaps::RecordMap::iterator iter = recordMap.find(call.stackmapID());
402         if (iter == recordMap.end())
403             continue;
404         
405         for (unsigned j = 0; j < iter->value.size(); ++j) {
406             CallType copy = call;
407             copy.m_instructionOffset = iter->value[j].record.instructionOffset;
408             copy.setCallSiteIndex(exceptionHandlerManager.procureCallSiteIndex(iter->value[j].index, copy));
409             copy.setCorrespondingGenericUnwindOSRExit(exceptionHandlerManager.getCallOSRExit(iter->value[j].index, copy));
410
411             calls.append(copy);
412         }
413     }
414
415     std::sort(calls.begin(), calls.end());
416 }
417
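// This is where most of the post-LLVM surgery happens. Using the parsed stackmaps, we rebase
// operands by the captured locals offset, build OSR exits (including the extra exits needed
// for exception handling), emit the exit thunks and the shared IC slow paths, fill in the
// inline cache and call patchpoints, and plant jump replacements for exits, invalidation
// points, and the stack overflow / exception check patchpoints.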
418 static void fixFunctionBasedOnStackMaps(
419     State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
420     StackMaps::RecordMap& recordMap)
421 {
422     Graph& graph = state.graph;
423     VM& vm = graph.m_vm;
424     StackMaps& stackmaps = jitCode->stackmaps;
425
426     ExceptionHandlerManager exceptionHandlerManager(state);
427     
428     int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
429     int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
430     int osrExitFromGenericUnwindStackSpillSlot = offsetOfStackRegion(recordMap, state.exceptionHandlingSpillSlotStackmapID);
431     jitCode->osrExitFromGenericUnwindStackSpillSlot = osrExitFromGenericUnwindStackSpillSlot;
432     
433     for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
434         InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
435         
436         if (inlineCallFrame->argumentCountRegister.isValid())
437             inlineCallFrame->argumentCountRegister += localsOffset;
438         
439         for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
440             inlineCallFrame->arguments[argument] =
441                 inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
442         }
443         
444         if (inlineCallFrame->isClosureCall) {
445             inlineCallFrame->calleeRecovery =
446                 inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
447         }
448
449         if (graph.hasDebuggerEnabled())
450             codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
451     }
452     
453     MacroAssembler::Label stackOverflowException;
454
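    // Build the out-of-line code that in-band exception checks funnel into: one entry point
    // that looks up the handler for the current frame, and a second, used for stack overflow,
    // that starts the lookup from the caller's frame. Both first copy the callee saves into
    // the VM's buffer and then jump to the handler.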
455     {
456         CCallHelpers checkJIT(&vm, codeBlock);
457         
458         // At this point it's perfectly fair to just blow away all state and restore the
459         // JS JIT view of the universe.
460         checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
461         checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
462         checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
463         MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
464         checkJIT.jumpToExceptionHandler();
465
466         stackOverflowException = checkJIT.label();
467         checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
468         checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
469         checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
470         MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
471         checkJIT.jumpToExceptionHandler();
472
473         auto linkBuffer = std::make_unique<LinkBuffer>(
474             vm, checkJIT, codeBlock, JITCompilationCanFail);
475         if (linkBuffer->didFailToAllocate()) {
476             state.allocationFailed = true;
477             return;
478         }
479         linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
480         linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
481
482         state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
483     }
484
485     RELEASE_ASSERT(state.jitCode->osrExit.size() == 0);
486     HashMap<OSRExitDescriptor*, OSRExitDescriptorImpl*> genericUnwindOSRExitDescriptors;
487     for (unsigned i = 0; i < state.jitCode->osrExitDescriptors.size(); i++) {
488         OSRExitDescriptor* exitDescriptor = &state.jitCode->osrExitDescriptors[i];
489         auto iter = recordMap.find(exitDescriptor->m_stackmapID);
490         if (iter == recordMap.end()) {
491             // It was optimized out.
492             continue;
493         }
494
495         OSRExitDescriptorImpl& exitDescriptorImpl = state.osrExitDescriptorImpls[i];
496         if (exceptionTypeWillArriveAtOSRExitFromGenericUnwind(exitDescriptorImpl.m_exceptionType))
497             genericUnwindOSRExitDescriptors.add(exitDescriptor, &exitDescriptorImpl);
498
499         for (unsigned j = exitDescriptor->m_values.size(); j--;)
500             exitDescriptor->m_values[j] = exitDescriptor->m_values[j].withLocalsOffset(localsOffset);
501         for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations)
502             materialization->accountForLocalsOffset(localsOffset);
503
504         for (unsigned j = 0; j < iter->value.size(); j++) {
505             {
506                 uint32_t stackmapRecordIndex = iter->value[j].index;
507                 OSRExit exit(exitDescriptor, exitDescriptorImpl, stackmapRecordIndex);
508                 state.jitCode->osrExit.append(exit);
509                 state.finalizer->osrExit.append(OSRExitCompilationInfo());
510             }
511
512             OSRExit& exit = state.jitCode->osrExit.last();
513             if (exit.willArriveAtExitFromIndirectExceptionCheck()) {
514                 StackMaps::Record& record = iter->value[j].record;
515                 RELEASE_ASSERT(exitDescriptorImpl.m_semanticCodeOriginForCallFrameHeader.isSet());
516                 CallSiteIndex callSiteIndex = state.jitCode->common.addUniqueCallSiteIndex(exitDescriptorImpl.m_semanticCodeOriginForCallFrameHeader);
517                 exit.m_exceptionHandlerCallSiteIndex = callSiteIndex;
518
519                 OSRExit* callOperationExit = nullptr;
520                 if (exitDescriptorImpl.m_exceptionType == ExceptionType::BinaryOpGenerator) {
521                     exceptionHandlerManager.addNewCallOperationExit(iter->value[j].index, state.jitCode->osrExit.size() - 1);
522                     callOperationExit = &exit;
523                 } else
524                     exceptionHandlerManager.addNewExit(iter->value[j].index, state.jitCode->osrExit.size() - 1);
525                 
526                 if (exitDescriptorImpl.m_exceptionType == ExceptionType::GetById || exitDescriptorImpl.m_exceptionType == ExceptionType::PutById) {
527                     // We create two different OSRExits for GetById and PutById: one that is
528                     // reached from the genericUnwind exception handler path, and another that
529                     // is reached from the callOperation exception handler path. The code here
530                     // generates the second, callOperation variant.
531                     uint32_t stackmapRecordIndex = iter->value[j].index;
532                     OSRExit exit(exitDescriptor, exitDescriptorImpl, stackmapRecordIndex);
533                     if (exitDescriptorImpl.m_exceptionType == ExceptionType::GetById)
534                         exit.m_exceptionType = ExceptionType::GetByIdCallOperation;
535                     else
536                         exit.m_exceptionType = ExceptionType::PutByIdCallOperation;
537                     CallSiteIndex callSiteIndex = state.jitCode->common.addUniqueCallSiteIndex(exitDescriptorImpl.m_semanticCodeOriginForCallFrameHeader);
538                     exit.m_exceptionHandlerCallSiteIndex = callSiteIndex;
539
540                     state.jitCode->osrExit.append(exit);
541                     state.finalizer->osrExit.append(OSRExitCompilationInfo());
542
543                     exceptionHandlerManager.addNewCallOperationExit(iter->value[j].index, state.jitCode->osrExit.size() - 1);
544                     callOperationExit = &state.jitCode->osrExit.last();
545                 }
546
547                 // Subs and GetByIds have an interesting register preservation story;
548                 // see the comment below at GetById to read about it.
549                 //
550                 // We record the registers that need spilling here because they must be known
551                 // before we generate the OSR exits, so that each exit can do the proper recovery.
552                 if (exitDescriptorImpl.m_exceptionType == ExceptionType::JSCall) {
553                     // Call patchpoints might hold values we want to do value recovery on
554                     // in volatile registers. We collect those volatile registers here
555                     // because they must be preserved to the stack before the call, so that
556                     // the OSR exit exception handler can recover them into the proper
557                     // registers.
558                     exit.gatherRegistersToSpillForCallIfException(stackmaps, record);
559                 } else if (exitDescriptorImpl.m_exceptionType == ExceptionType::GetById) {
560                     GPRReg result = record.locations[0].directGPR();
561                     GPRReg base = record.locations[1].directGPR();
562                     if (base == result)
563                         callOperationExit->registersToPreserveForCallThatMightThrow.set(base);
564                 } else if (exitDescriptorImpl.m_exceptionType == ExceptionType::BinaryOpGenerator) {
565                     GPRReg result = record.locations[0].directGPR();
566                     GPRReg left = record.locations[1].directGPR();
567                     GPRReg right = record.locations[2].directGPR();
568                     if (result == left || result == right)
569                         callOperationExit->registersToPreserveForCallThatMightThrow.set(result);
570                 }
571             }
572         }
573     }
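    // Emit the per-exit thunks. For each OSR exit we record the thunk's address and the offset
    // of its patchable jump; exits that are reached via the genericUnwind path additionally get
    // a HandlerInfo registered on the CodeBlock whose nativeCode points at the thunk.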
574     ExitThunkGenerator exitThunkGenerator(state);
575     exitThunkGenerator.emitThunks();
576     if (exitThunkGenerator.didThings()) {
577         RELEASE_ASSERT(state.finalizer->osrExit.size());
578         
579         auto linkBuffer = std::make_unique<LinkBuffer>(
580             vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
581         if (linkBuffer->didFailToAllocate()) {
582             state.allocationFailed = true;
583             return;
584         }
585         
586         RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
587         
588         codeBlock->clearExceptionHandlers();
589
590         for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
591             OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
592             OSRExit& exit = state.jitCode->osrExit[i];
593             
594             if (verboseCompilationEnabled())
595                 dataLog("Handling OSR stackmap #", exit.m_descriptor->m_stackmapID, " for ", exit.m_codeOrigin, "\n");
596
597             info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
598             exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
599
600             if (exit.willArriveAtOSRExitFromGenericUnwind()) {
601                 HandlerInfo newHandler = genericUnwindOSRExitDescriptors.get(exit.m_descriptor)->m_baselineExceptionHandler;
602                 newHandler.start = exit.m_exceptionHandlerCallSiteIndex.bits();
603                 newHandler.end = exit.m_exceptionHandlerCallSiteIndex.bits() + 1;
604                 newHandler.nativeCode = info.m_thunkAddress;
605                 codeBlock->appendExceptionHandler(newHandler);
606             }
607
608             if (verboseCompilationEnabled()) {
609                 DumpContext context;
610                 dataLog("    Exit values: ", inContext(exit.m_descriptor->m_values, &context), "\n");
611                 if (!exit.m_descriptor->m_materializations.isEmpty()) {
612                     dataLog("    Materializations: \n");
613                     for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations)
614                         dataLog("        Materialize(", pointerDump(materialization), ")\n");
615                 }
616             }
617         }
618         
619         state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
620     }
621
622     if (!state.getByIds.isEmpty()
623         || !state.putByIds.isEmpty()
624         || !state.checkIns.isEmpty()
625         || !state.binaryOps.isEmpty()
626         || !state.lazySlowPaths.isEmpty()) {
627         CCallHelpers slowPathJIT(&vm, codeBlock);
628         
629         CCallHelpers::JumpList exceptionTarget;
630
631         Vector<std::pair<CCallHelpers::JumpList, CodeLocationLabel>> exceptionJumpsToLink;
632         auto addNewExceptionJumpIfNecessary = [&] (uint32_t recordIndex) {
633             CodeLocationLabel exceptionTarget = exceptionHandlerManager.callOperationExceptionTarget(recordIndex);
634             if (!exceptionTarget)
635                 return false;
636             exceptionJumpsToLink.append(
637                 std::make_pair(CCallHelpers::JumpList(), exceptionTarget));
638             return true;
639         };
640         
641         for (unsigned i = state.getByIds.size(); i--;) {
642             GetByIdDescriptor& getById = state.getByIds[i];
643             
644             if (verboseCompilationEnabled())
645                 dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
646             
647             auto iter = recordMap.find(getById.stackmapID());
648             if (iter == recordMap.end()) {
649                 // It was optimized out.
650                 continue;
651             }
652             
653             CodeOrigin codeOrigin = getById.codeOrigin();
654             for (unsigned i = 0; i < iter->value.size(); ++i) {
655                 StackMaps::Record& record = iter->value[i].record;
656             
657                 RegisterSet usedRegisters = usedRegistersFor(record);
658                 
659                 GPRReg result = record.locations[0].directGPR();
660                 GPRReg base = record.locations[1].directGPR();
661                 
662                 JITGetByIdGenerator gen(
663                     codeBlock, codeOrigin, exceptionHandlerManager.procureCallSiteIndex(iter->value[i].index, codeOrigin), usedRegisters, JSValueRegs(base),
664                     JSValueRegs(result));
665                 
666                 bool addedUniqueExceptionJump = addNewExceptionJumpIfNecessary(iter->value[i].index);
667                 MacroAssembler::Label begin = slowPathJIT.label();
668                 if (result == base) {
669                     // This situation has a really interesting story. We may have a GetById inside
670                     // a try block where LLVM assigns the result and the base to the same register.
671                     // The inline cache may miss and we may end up at this slow path callOperation.
672                     // Because the base and the result share a register, the return value of the
673                     // C call gets stored into the original base register. If operationGetByIdOptimize
674                     // throws, it will return "undefined" and we will be stuck with "undefined" in the
675                     // base register that we would like to do value recovery on. We prevent this from
676                     // ever happening by spilling the original base value and then recovering it from
677                     // the spill slot as the first step in OSR exit.
678                     if (OSRExit* exit = exceptionHandlerManager.callOperationOSRExit(iter->value[i].index))
679                         exit->spillRegistersToSpillSlot(slowPathJIT, osrExitFromGenericUnwindStackSpillSlot);
680                 }
681                 MacroAssembler::Call call = callOperation(
682                     state, usedRegisters, slowPathJIT, codeOrigin, addedUniqueExceptionJump ? &exceptionJumpsToLink.last().first : &exceptionTarget,
683                     operationGetByIdOptimize, result, CCallHelpers::TrustedImmPtr(gen.stubInfo()),
684                     base, CCallHelpers::TrustedImmPtr(getById.uid())).call();
685
686                 gen.reportSlowPathCall(begin, call);
687
688                 getById.m_slowPathDone.append(slowPathJIT.jump());
689                 getById.m_generators.append(gen);
690             }
691         }
692         
693         for (unsigned i = state.putByIds.size(); i--;) {
694             PutByIdDescriptor& putById = state.putByIds[i];
695             
696             if (verboseCompilationEnabled())
697                 dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
698             
699             auto iter = recordMap.find(putById.stackmapID());
700             if (iter == recordMap.end()) {
701                 // It was optimized out.
702                 continue;
703             }
704             
705             CodeOrigin codeOrigin = putById.codeOrigin();
706             for (unsigned i = 0; i < iter->value.size(); ++i) {
707                 StackMaps::Record& record = iter->value[i].record;
708                 
709                 RegisterSet usedRegisters = usedRegistersFor(record);
710                 
711                 GPRReg base = record.locations[0].directGPR();
712                 GPRReg value = record.locations[1].directGPR();
713                 
714                 JITPutByIdGenerator gen(
715                     codeBlock, codeOrigin, exceptionHandlerManager.procureCallSiteIndex(iter->value[i].index, codeOrigin), usedRegisters, JSValueRegs(base),
716                     JSValueRegs(value), GPRInfo::patchpointScratchRegister, putById.ecmaMode(), putById.putKind());
717                 
718                 bool addedUniqueExceptionJump = addNewExceptionJumpIfNecessary(iter->value[i].index);
719
720                 MacroAssembler::Label begin = slowPathJIT.label();
721
722                 MacroAssembler::Call call = callOperation(
723                     state, usedRegisters, slowPathJIT, codeOrigin, addedUniqueExceptionJump ? &exceptionJumpsToLink.last().first : &exceptionTarget,
724                     gen.slowPathFunction(), InvalidGPRReg,
725                     CCallHelpers::TrustedImmPtr(gen.stubInfo()), value, base,
726                     CCallHelpers::TrustedImmPtr(putById.uid())).call();
727                 
728                 gen.reportSlowPathCall(begin, call);
729                 
730                 putById.m_slowPathDone.append(slowPathJIT.jump());
731                 putById.m_generators.append(gen);
732             }
733         }
734
735         for (unsigned i = state.checkIns.size(); i--;) {
736             CheckInDescriptor& checkIn = state.checkIns[i];
737             
738             if (verboseCompilationEnabled())
739                 dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
740             
741             auto iter = recordMap.find(checkIn.stackmapID());
742             if (iter == recordMap.end()) {
743                 // It was optimized out.
744                 continue;
745             }
746             
747             CodeOrigin codeOrigin = checkIn.codeOrigin();
748             for (unsigned i = 0; i < iter->value.size(); ++i) {
749                 StackMaps::Record& record = iter->value[i].record;
750                 RegisterSet usedRegisters = usedRegistersFor(record);
751                 GPRReg result = record.locations[0].directGPR();
752                 GPRReg obj = record.locations[1].directGPR();
753                 StructureStubInfo* stubInfo = codeBlock->addStubInfo(AccessType::In); 
754                 stubInfo->codeOrigin = codeOrigin;
755                 stubInfo->callSiteIndex = state.jitCode->common.addUniqueCallSiteIndex(codeOrigin);
756                 stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
757                 stubInfo->patch.valueGPR = static_cast<int8_t>(result);
758                 stubInfo->patch.usedRegisters = usedRegisters;
759
760                 MacroAssembler::Label begin = slowPathJIT.label();
761
762                 MacroAssembler::Call slowCall = callOperation(
763                     state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
764                     operationInOptimize, result, CCallHelpers::TrustedImmPtr(stubInfo), obj,
765                     CCallHelpers::TrustedImmPtr(checkIn.uid())).call();
766
767                 checkIn.m_slowPathDone.append(slowPathJIT.jump());
768                 
769                 checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
770             }
771         }
772
773         for (size_t i = state.binaryOps.size(); i--;) {
774             BinaryOpDescriptor& binaryOp = state.binaryOps[i];
775             
776             if (verboseCompilationEnabled())
777                 dataLog("Handling ", binaryOp.name(), " stackmap #", binaryOp.stackmapID(), "\n");
778             
779             auto iter = recordMap.find(binaryOp.stackmapID());
780             if (iter == recordMap.end())
781                 continue; // It was optimized out.
782             
783             CodeOrigin codeOrigin = binaryOp.codeOrigin();
784             for (unsigned i = 0; i < iter->value.size(); ++i) {
785                 StackMaps::Record& record = iter->value[i].record;
786                 RegisterSet usedRegisters = usedRegistersFor(record);
787
788                 GPRReg result = record.locations[0].directGPR();
789                 GPRReg left = record.locations[1].directGPR();
790                 GPRReg right = record.locations[2].directGPR();
791
792                 binaryOp.m_slowPathStarts.append(slowPathJIT.label());
793                 bool addedUniqueExceptionJump = addNewExceptionJumpIfNecessary(iter->value[i].index);
794                 if (result == left || result == right) {
795                     // This situation has a really interesting register preservation story.
796                     // See comment above for GetByIds.
797                     if (OSRExit* exit = exceptionHandlerManager.callOperationOSRExit(iter->value[i].index))
798                         exit->spillRegistersToSpillSlot(slowPathJIT, osrExitFromGenericUnwindStackSpillSlot);
799                 }
800
801                 callOperation(state, usedRegisters, slowPathJIT, codeOrigin, addedUniqueExceptionJump ? &exceptionJumpsToLink.last().first : &exceptionTarget,
802                     binaryOp.slowPathFunction(), result, left, right).call();
803
804                 binaryOp.m_slowPathDone.append(slowPathJIT.jump());
805             }
806         }
807
808         for (unsigned i = state.lazySlowPaths.size(); i--;) {
809             LazySlowPathDescriptor& descriptor = state.lazySlowPaths[i];
810
811             if (verboseCompilationEnabled())
812                 dataLog("Handling lazySlowPath stackmap #", descriptor.stackmapID(), "\n");
813
814             auto iter = recordMap.find(descriptor.stackmapID());
815             if (iter == recordMap.end()) {
816                 // It was optimized out.
817                 continue;
818             }
819             CodeOrigin codeOrigin = descriptor.codeOrigin();
820             for (unsigned i = 0; i < iter->value.size(); ++i) {
821                 StackMaps::Record& record = iter->value[i].record;
822                 RegisterSet usedRegisters = usedRegistersFor(record);
823                 char* startOfIC =
824                     bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
825                 CodeLocationLabel patchpoint((MacroAssemblerCodePtr(startOfIC)));
826                 CodeLocationLabel exceptionTarget = exceptionHandlerManager.lazySlowPathExceptionTarget(iter->value[i].index);
827                 if (!exceptionTarget)
828                     exceptionTarget = state.finalizer->handleExceptionsLinkBuffer->entrypoint();
829
830                 ScratchRegisterAllocator scratchAllocator(usedRegisters);
831                 GPRReg newZero = InvalidGPRReg;
832                 Vector<Location> locations;
833                 for (auto stackmapLocation : record.locations) {
834                     FTL::Location location = Location::forStackmaps(&stackmaps, stackmapLocation);
835                     if (isARM64()) {
836                         // If LLVM proves that something is zero, it may pass us the zero register (aka the stack pointer). Our assembler
837                         // isn't prepared to handle this well, so we move the value into a different register when that case arises.
838                         if (location.isGPR() && location.gpr() == MacroAssembler::stackPointerRegister) {
839                             if (newZero == InvalidGPRReg) {
840                                 newZero = scratchAllocator.allocateScratchGPR();
841                                 usedRegisters.set(newZero);
842                             }
843                             location = FTL::Location::forRegister(DWARFRegister(static_cast<uint16_t>(newZero)), 0); // DWARF GPRs for arm64 are sensibly numbered.
844                         }
845                     }
846                     locations.append(location);
847                 }
848
849                 std::unique_ptr<LazySlowPath> lazySlowPath = std::make_unique<LazySlowPath>(
850                     patchpoint, exceptionTarget, usedRegisters, exceptionHandlerManager.procureCallSiteIndex(iter->value[i].index, codeOrigin),
851                     descriptor.m_linker->run(locations), newZero, scratchAllocator);
852
853                 CCallHelpers::Label begin = slowPathJIT.label();
854
855                 slowPathJIT.pushToSaveImmediateWithoutTouchingRegisters(
856                     CCallHelpers::TrustedImm32(state.jitCode->lazySlowPaths.size()));
857                 CCallHelpers::Jump generatorJump = slowPathJIT.jump();
858                 
859                 descriptor.m_generators.append(std::make_tuple(lazySlowPath.get(), begin));
860
861                 state.jitCode->lazySlowPaths.append(WTF::move(lazySlowPath));
862                 state.finalizer->lazySlowPathGeneratorJumps.append(generatorJump);
863             }
864         }
865         
866         exceptionTarget.link(&slowPathJIT);
867         MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
868         
869         state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
870         if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
871             state.allocationFailed = true;
872             return;
873         }
874         state.finalizer->sideCodeLinkBuffer->link(
875             exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
876         
877         for (unsigned i = state.getByIds.size(); i--;) {
878             generateICFastPath(
879                 state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
880                 sizeOfGetById());
881         }
882         for (unsigned i = state.putByIds.size(); i--;) {
883             generateICFastPath(
884                 state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
885                 sizeOfPutById());
886         }
887         for (unsigned i = state.checkIns.size(); i--;) {
888             generateCheckInICFastPath(
889                 state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
890                 sizeOfIn()); 
891         }
892         for (unsigned i = state.binaryOps.size(); i--;) {
893             BinaryOpDescriptor& binaryOp = state.binaryOps[i];
894             generateBinaryOpICFastPath(state, codeBlock, generatedFunction, recordMap, binaryOp);
895         }
896         for (unsigned i = state.lazySlowPaths.size(); i--;) {
897             LazySlowPathDescriptor& lazySlowPath = state.lazySlowPaths[i];
898             for (auto& tuple : lazySlowPath.m_generators) {
899                 MacroAssembler::replaceWithJump(
900                     std::get<0>(tuple)->patchpoint(),
901                     state.finalizer->sideCodeLinkBuffer->locationOf(std::get<1>(tuple)));
902             }
903         }
904 #if ENABLE(MASM_PROBE)
905         for (unsigned i = state.probes.size(); i--;) {
906             ProbeDescriptor& probe = state.probes[i];
907             generateProbe(state, codeBlock, generatedFunction, recordMap, probe);
908         }
909 #endif
910         for (auto& pair : exceptionJumpsToLink)
911             state.finalizer->sideCodeLinkBuffer->link(pair.first, pair.second);
912     }
913     
914     adjustCallICsForStackmaps(state.jsCalls, recordMap, exceptionHandlerManager);
915     
916     for (unsigned i = state.jsCalls.size(); i--;) {
917         JSCall& call = state.jsCalls[i];
918
919         CCallHelpers fastPathJIT(&vm, codeBlock);
920         call.emit(fastPathJIT, state, osrExitFromGenericUnwindStackSpillSlot);
921
922         char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
923
924         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
925             call.link(vm, linkBuffer);
926         });
927     }
928     
929     adjustCallICsForStackmaps(state.jsCallVarargses, recordMap, exceptionHandlerManager);
930     
931     for (unsigned i = state.jsCallVarargses.size(); i--;) {
932         JSCallVarargs& call = state.jsCallVarargses[i];
933         
934         CCallHelpers fastPathJIT(&vm, codeBlock);
935         call.emit(fastPathJIT, state, varargsSpillSlotsOffset, osrExitFromGenericUnwindStackSpillSlot);
936
937         char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
938         size_t sizeOfIC = sizeOfICFor(call.node());
939
940         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
941             call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
942         });
943     }
944
945     adjustCallICsForStackmaps(state.jsTailCalls, recordMap, exceptionHandlerManager);
946
947     for (unsigned i = state.jsTailCalls.size(); i--;) {
948         JSTailCall& call = state.jsTailCalls[i];
949
950         CCallHelpers fastPathJIT(&vm, codeBlock);
951         call.emit(*state.jitCode.get(), fastPathJIT);
952
953         char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
954         size_t sizeOfIC = call.estimatedSize();
955
956         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
957             call.link(vm, linkBuffer);
958         });
959     }
960     
961     auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
962     // It's remotely possible that we won't have an in-band exception handling path
963     // for some kinds of functions.
964     if (iter != recordMap.end()) {
965         for (unsigned i = iter->value.size(); i--;) {
966             StackMaps::Record& record = iter->value[i].record;
967             
968             CodeLocationLabel source = CodeLocationLabel(
969                 bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
970
971             RELEASE_ASSERT(stackOverflowException.isSet());
972
973             MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
974         }
975     }
976     
977     iter = recordMap.find(state.handleExceptionStackmapID);
978     // It's remotely possible that we won't have an in-band exception handling path
979     // for some kinds of functions.
980     if (iter != recordMap.end()) {
981         for (unsigned i = iter->value.size(); i--;) {
982             StackMaps::Record& record = iter->value[i].record;
983             
984             CodeLocationLabel source = CodeLocationLabel(
985                 bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
986             
987             MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
988         }
989     }
990     
991     for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
992         OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
993         OSRExit& exit = jitCode->osrExit[exitIndex];
994         Vector<const void*> codeAddresses;
995
996         if (exit.willArriveAtExitFromIndirectExceptionCheck()) // This jump doesn't happen directly from a patchpoint/stackmap we compile. It happens indirectly through an exception check somewhere.
997             continue;
998         
999         StackMaps::Record& record = jitCode->stackmaps.records[exit.m_stackmapRecordIndex];
1000         
1001         CodeLocationLabel source = CodeLocationLabel(
1002             bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
1003         
1004         codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
1005         
1006         if (exit.m_descriptor->m_isInvalidationPoint)
1007             jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
1008         else
1009             MacroAssembler::replaceWithJump(source, info.m_thunkAddress);
1010         
1011         if (graph.compilation())
1012             graph.compilation()->addOSRExitSite(codeAddresses);
1013     }
1014 }
1015
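// Drives the LLVM backend for a single FTL compilation: create the MCJIT execution engine with
// our memory manager, run either the hand-picked "simple opt" pass pipeline or a
// PassManagerBuilder-configured one, pull out the generated function, and then parse the unwind
// info and stackmaps and patch the generated code in fixFunctionBasedOnStackMaps(). The LLVM
// work runs inside a GraphSafepoint so the rest of the VM can make progress, and the whole
// compilation bails out quietly if executable memory allocation fails along the way.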
1016 void compile(State& state, Safepoint::Result& safepointResult)
1017 {
1018     char* error = 0;
1019     
1020     {
1021         GraphSafepoint safepoint(state.graph, safepointResult);
1022         
1023         LLVMMCJITCompilerOptions options;
1024         llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
1025         options.OptLevel = Options::llvmBackendOptimizationLevel();
1026         options.NoFramePointerElim = true;
1027         if (Options::useLLVMSmallCodeModel())
1028             options.CodeModel = LLVMCodeModelSmall;
1029         options.EnableFastISel = enableLLVMFastISel;
1030         options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
1031             &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);
1032     
1033         LLVMExecutionEngineRef engine;
1034         
1035         if (isARM64()) {
1036 #if OS(DARWIN)
1037             llvm->SetTarget(state.module, "arm64-apple-ios");
1038 #elif OS(LINUX)
1039             llvm->SetTarget(state.module, "aarch64-linux-gnu");
1040 #else
1041 #error "Unrecognized OS"
1042 #endif
1043         }
1044
1045         if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
1046             dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
1047             CRASH();
1048         }
1049         
1050         // At this point we no longer own the module.
1051         LModule module = state.module;
1052         state.module = nullptr;
1053
1054         // The data layout also has to be set in the module. Get the data layout from the MCJIT and apply
1055         // it to the module.
1056         LLVMTargetMachineRef targetMachine = llvm->GetExecutionEngineTargetMachine(engine);
1057         LLVMTargetDataRef targetData = llvm->GetExecutionEngineTargetData(engine);
1058         char* stringRepOfTargetData = llvm->CopyStringRepOfTargetData(targetData);
1059         llvm->SetDataLayout(module, stringRepOfTargetData);
1060         free(stringRepOfTargetData);
1061
1062         LLVMPassManagerRef functionPasses = 0;
1063         LLVMPassManagerRef modulePasses;
1064
1065         if (Options::llvmSimpleOpt()) {
1066             modulePasses = llvm->CreatePassManager();
1067             llvm->AddTargetData(targetData, modulePasses);
1068             llvm->AddAnalysisPasses(targetMachine, modulePasses);
1069             llvm->AddPromoteMemoryToRegisterPass(modulePasses);
1070             llvm->AddGlobalOptimizerPass(modulePasses);
1071             llvm->AddFunctionInliningPass(modulePasses);
1072             llvm->AddPruneEHPass(modulePasses);
1073             llvm->AddGlobalDCEPass(modulePasses);
1074             llvm->AddConstantPropagationPass(modulePasses);
1075             llvm->AddAggressiveDCEPass(modulePasses);
1076             llvm->AddInstructionCombiningPass(modulePasses);
1077             // BEGIN - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
1078             llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
1079             llvm->AddBasicAliasAnalysisPass(modulePasses);
1080             // END - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
1081             llvm->AddGVNPass(modulePasses);
1082             llvm->AddCFGSimplificationPass(modulePasses);
1083             llvm->AddDeadStoreEliminationPass(modulePasses);
1084             
1085             if (enableLLVMFastISel)
1086                 llvm->AddLowerSwitchPass(modulePasses);
1087
1088             llvm->RunPassManager(modulePasses, module);
1089         } else {
1090             LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
1091             llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
1092             llvm->PassManagerBuilderUseInlinerWithThreshold(passBuilder, 275);
1093             llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());
1094         
1095             functionPasses = llvm->CreateFunctionPassManagerForModule(module);
1096             modulePasses = llvm->CreatePassManager();
1097         
1098             llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
1099         
1100             llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
1101             llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);
1102         
1103             llvm->PassManagerBuilderDispose(passBuilder);
1104         
1105             llvm->InitializeFunctionPassManager(functionPasses);
1106             for (LValue function = llvm->GetFirstFunction(module); function; function = llvm->GetNextFunction(function))
1107                 llvm->RunFunctionPassManager(functionPasses, function);
1108             llvm->FinalizeFunctionPassManager(functionPasses);
1109         
1110             llvm->RunPassManager(modulePasses, module);
1111         }
1112
1113         if (shouldDumpDisassembly() || verboseCompilationEnabled())
1114             state.dumpState(module, "after optimization");
1115         
1116         // FIXME: Need to add support for the case where JIT memory allocation failed.
1117         // https://bugs.webkit.org/show_bug.cgi?id=113620
1118         state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
1119         if (functionPasses)
1120             llvm->DisposePassManager(functionPasses);
1121         llvm->DisposePassManager(modulePasses);
1122         llvm->DisposeExecutionEngine(engine);
1123     }
1124
1125     if (safepointResult.didGetCancelled())
1126         return;
1127     RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());
1128     
1129     if (state.allocationFailed)
1130         return;
1131     
1132     if (shouldDumpDisassembly()) {
1133         for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
1134             ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
1135             dataLog(
1136                 "Generated LLVM code for ",
1137                 CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
1138                 " #", i, ", ", state.codeSectionNames[i], ":\n");
1139             disassemble(
1140                 MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
1141                 "    ", WTF::dataFile(), LLVMSubset);
1142         }
1143         
1144         for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
1145             DataSection* section = state.jitCode->dataSections()[i].get();
1146             dataLog(
1147                 "Generated LLVM data section for ",
1148                 CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
1149                 " #", i, ", ", state.dataSectionNames[i], ":\n");
1150             dumpDataSection(section, "    ");
1151         }
1152     }
1153     
1154     std::unique_ptr<RegisterAtOffsetList> registerOffsets = parseUnwindInfo(
1155         state.unwindDataSection, state.unwindDataSectionSize,
1156         state.generatedFunction);
1157     if (shouldDumpDisassembly()) {
1158         dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
1159         dataLog("    ", *registerOffsets, "\n");
1160     }
1161     state.graph.m_codeBlock->setCalleeSaveRegisters(WTF::move(registerOffsets));
1162     
1163     if (state.stackmapsSection && state.stackmapsSection->size()) {
1164         if (shouldDumpDisassembly()) {
1165             dataLog(
1166                 "Generated LLVM stackmaps section for ",
1167                 CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
1168             dataLog("    Raw data:\n");
1169             dumpDataSection(state.stackmapsSection.get(), "    ");
1170         }
1171         
1172         RefPtr<DataView> stackmapsData = DataView::create(
1173             ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
1174         state.jitCode->stackmaps.parse(stackmapsData.get());
1175     
1176         if (shouldDumpDisassembly()) {
1177             dataLog("    Structured data:\n");
1178             state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
1179         }
1180         
1181         StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
1182         fixFunctionBasedOnStackMaps(
1183             state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
1184             recordMap);
1185         if (state.allocationFailed)
1186             return;
1187         
1188         if (shouldDumpDisassembly() || Options::asyncDisassembly()) {
1189             for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
1190                 if (state.codeSectionNames[i] != SECTION_NAME("text"))
1191                     continue;
1192                 
1193                 ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
1194                 
1195                 CString header = toCString(
1196                     "Generated LLVM code after stackmap-based fix-up for ",
1197                     CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
1198                     " in ", state.graph.m_plan.mode, " #", i, ", ",
1199                     state.codeSectionNames[i], ":\n");
1200                 
1201                 if (Options::asyncDisassembly()) {
1202                     disassembleAsynchronously(
1203                         header, MacroAssemblerCodeRef(handle), handle->sizeInBytes(), "    ",
1204                         LLVMSubset);
1205                     continue;
1206                 }
1207                 
1208                 dataLog(header);
1209                 disassemble(
1210                     MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
1211                     "    ", WTF::dataFile(), LLVMSubset);
1212             }
1213         }
1214     }
1215 }
1216
1217 } } // namespace JSC::FTL
1218
1219 #endif // ENABLE(FTL_JIT) && !FTL_USES_B3
1220