Source/JavaScriptCore/ftl/FTLCompile.cpp
1 /*
2  * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
3  * Copyright (C) 2014 Samsung Electronics
4  * Copyright (C) 2014 University of Szeged
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
19  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26  */
27
28 #include "config.h"
29 #include "FTLCompile.h"
30
31 #if ENABLE(FTL_JIT) && !FTL_USES_B3
32
33 #include "CodeBlockWithJITType.h"
34 #include "CCallHelpers.h"
35 #include "DFGCommon.h"
36 #include "DFGGraphSafepoint.h"
37 #include "DFGOperations.h"
38 #include "DataView.h"
39 #include "Disassembler.h"
40 #include "FTLCompileBinaryOp.h"
41 #include "FTLExceptionHandlerManager.h"
42 #include "FTLExitThunkGenerator.h"
43 #include "FTLInlineCacheDescriptorInlines.h"
44 #include "FTLInlineCacheSize.h"
45 #include "FTLJITCode.h"
46 #include "FTLThunks.h"
47 #include "FTLUnwindInfo.h"
48 #include "LLVMAPI.h"
49 #include "LinkBuffer.h"
50 #include "ScratchRegisterAllocator.h"
51
52 namespace JSC { namespace FTL {
53
54 using namespace DFG;
55
56 static RegisterSet usedRegistersFor(const StackMaps::Record&);
57
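// These mm* functions are the memory-manager callbacks we hand to LLVM's MCJIT (via
// CreateSimpleMCJITMemoryManager in compile() below). Code sections come out of the VM's
// executable allocator; on allocation failure we give LLVM a fake data section and mark
// the compilation as failed so it gets thrown away later.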
58 static uint8_t* mmAllocateCodeSection(
59     void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
60 {
61     State& state = *static_cast<State*>(opaqueState);
62     
63     RELEASE_ASSERT(alignment <= jitAllocationGranule);
64     
65     RefPtr<ExecutableMemoryHandle> result =
66         state.graph.m_vm.executableAllocator.allocate(
67             state.graph.m_vm, size, state.graph.m_codeBlock, JITCompilationCanFail);
68     
69     if (!result) {
70         // Signal failure. This compilation will get tossed.
71         state.allocationFailed = true;
72         
73         // Fake an allocation, since LLVM cannot handle failures in the memory manager.
74         RefPtr<DataSection> fakeSection = adoptRef(new DataSection(size, jitAllocationGranule));
75         state.jitCode->addDataSection(fakeSection);
76         return bitwise_cast<uint8_t*>(fakeSection->base());
77     }
78     
79     // LLVM used to put __compact_unwind in a code section. We keep this here defensively,
80     // for clients that use older LLVMs.
81     if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
82         state.unwindDataSection = result->start();
83         state.unwindDataSectionSize = result->sizeInBytes();
84     }
85     
86     state.jitCode->addHandle(result);
87     state.codeSectionNames.append(sectionName);
88     
89     return static_cast<uint8_t*>(result->start());
90 }
91
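// Data sections live in malloc'd DataSection objects. The GOT is redirected into the code
// allocator so it is reachable from all generated code, and the stackmaps and unwind
// sections are remembered so they can be parsed once compilation finishes.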
92 static uint8_t* mmAllocateDataSection(
93     void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
94     const char* sectionName, LLVMBool isReadOnly)
95 {
96     UNUSED_PARAM(sectionID);
97     UNUSED_PARAM(isReadOnly);
98
99     // Allocate the GOT in the code section to make it reachable for all code.
100     if (!strcmp(sectionName, SECTION_NAME("got")))
101         return mmAllocateCodeSection(opaqueState, size, alignment, sectionID, sectionName);
102
103     State& state = *static_cast<State*>(opaqueState);
104
105     RefPtr<DataSection> section = adoptRef(new DataSection(size, alignment));
106
107     if (!strcmp(sectionName, SECTION_NAME("llvm_stackmaps")))
108         state.stackmapsSection = section;
109     else {
110         state.jitCode->addDataSection(section);
111         state.dataSectionNames.append(sectionName);
112 #if OS(DARWIN)
113         if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
114 #elif OS(LINUX)
115         if (!strcmp(sectionName, SECTION_NAME("eh_frame"))) {
116 #else
117 #error "Unrecognized OS"
118 #endif
119             state.unwindDataSection = section->base();
120             state.unwindDataSectionSize = size;
121         }
122     }
123
124     return bitwise_cast<uint8_t*>(section->base());
125 }
126
127 static LLVMBool mmApplyPermissions(void*, char**)
128 {
129     return false;
130 }
131
132 static void mmDestroy(void*)
133 {
134 }
135
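// Dumps a data section one 64-bit word per line; used for the verbose section dumps in
// compile() below.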
136 static void dumpDataSection(DataSection* section, const char* prefix)
137 {
138     for (unsigned j = 0; j < section->size() / sizeof(int64_t); ++j) {
139         char buf[32];
140         int64_t* wordPointer = static_cast<int64_t*>(section->base()) + j;
141         snprintf(buf, sizeof(buf), "0x%lx", static_cast<unsigned long>(bitwise_cast<uintptr_t>(wordPointer)));
142         dataLogF("%s%16s: 0x%016llx\n", prefix, buf, static_cast<unsigned long long>(*wordPointer));
143     }
144 }
145
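// Reads the single location recorded for the given stackmap ID, asserts that it is the
// call frame register plus an addend, and returns that addend in units of Register. A
// stackmap ID of UINT_MAX means the region does not exist and yields 0.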
146 static int offsetOfStackRegion(StackMaps::RecordMap& recordMap, uint32_t stackmapID)
147 {
148     if (stackmapID == UINT_MAX)
149         return 0;
150     
151     StackMaps::RecordMap::iterator iter = recordMap.find(stackmapID);
152     RELEASE_ASSERT(iter != recordMap.end());
153     RELEASE_ASSERT(iter->value.size() == 1);
154     RELEASE_ASSERT(iter->value[0].record.locations.size() == 1);
155     Location capturedLocation =
156         Location::forStackmaps(nullptr, iter->value[0].record.locations[0]);
157     RELEASE_ASSERT(capturedLocation.kind() == Location::Register);
158     RELEASE_ASSERT(capturedLocation.gpr() == GPRInfo::callFrameRegister);
159     RELEASE_ASSERT(!(capturedLocation.addend() % sizeof(Register)));
160     return capturedLocation.addend() / sizeof(Register);
161 }
162
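// Links the code assembled into 'code' directly into the inline patchpoint area if it fits;
// otherwise the code is linked into freshly allocated out-of-line memory that the inline
// area jumps to. The callback receives the LinkBuffer that was actually used and whether
// the code ended up inline.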
163 static void generateInlineIfPossibleOutOfLineIfNot(State& state, VM& vm, CodeBlock* codeBlock, CCallHelpers& code, char* startOfInlineCode, size_t sizeOfInlineCode, const char* codeDescription, const std::function<void(LinkBuffer&, CCallHelpers&, bool wasCompiledInline)>& callback)
164 {
165     std::unique_ptr<LinkBuffer> codeLinkBuffer;
166     size_t actualCodeSize = code.m_assembler.buffer().codeSize();
167
168     if (actualCodeSize <= sizeOfInlineCode) {
169         LinkBuffer codeLinkBuffer(vm, code, startOfInlineCode, sizeOfInlineCode);
170
171         // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
172         MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + actualCodeSize, sizeOfInlineCode - actualCodeSize);
173
174         callback(codeLinkBuffer, code, true);
175
176         return;
177     }
178
179     if (Options::assertICSizing() || Options::dumpFailedICSizing()) {
180         static size_t maxSize = 0;
181         if (maxSize < actualCodeSize)
182             maxSize = actualCodeSize;
183         dataLogF("ALERT: Under-estimated FTL Inline Cache Size for %s: estimated %zu, actual %zu, max %zu\n", codeDescription, sizeOfInlineCode, actualCodeSize, maxSize);
184         if (Options::assertICSizing())
185             CRASH();
186     }
187
188     // If there isn't enough space in the provided inline code area, allocate out-of-line
189     // executable memory and link the provided code there. Plant a jump at the beginning of
190     // the inline area to the out-of-line code, and return by appending a jump to the provided
191     // code that targets the instruction right after the inline area.
192     // Fill the rest of the inline area with nops.
193     MacroAssembler::Jump returnToMainline = code.jump();
194
195     // Allocate out of line executable memory and link the provided code there.
196     codeLinkBuffer = std::make_unique<LinkBuffer>(vm, code, codeBlock, JITCompilationMustSucceed);
197
198     // Plant a jmp in the inline buffer to the out of line code.
199     MacroAssembler callToOutOfLineCode;
200     MacroAssembler::Jump jumpToOutOfLine = callToOutOfLineCode.jump();
201     LinkBuffer inlineBuffer(vm, callToOutOfLineCode, startOfInlineCode, sizeOfInlineCode);
202     inlineBuffer.link(jumpToOutOfLine, codeLinkBuffer->entrypoint());
203
204     // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
205     MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + inlineBuffer.size(), sizeOfInlineCode - inlineBuffer.size());
206
207     // Link the end of the out of line code to right after the inline area.
208     codeLinkBuffer->link(returnToMainline, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(startOfInlineCode)).labelAtOffset(sizeOfInlineCode));
209
210     callback(*codeLinkBuffer.get(), code, false);
211
212     state.finalizer->outOfLineCodeInfos.append(OutOfLineCodeInfo(WTF::move(codeLinkBuffer), codeDescription));
213 }
214
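// For each stackmap record belonging to a GetById or PutById descriptor, assembles the IC
// fast path at its patchpoint and links it against the slow path code that was emitted
// into the side code link buffer in fixFunctionBasedOnStackMaps().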
215 template<typename DescriptorType>
216 void generateICFastPath(
217     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
218     StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
219 {
220     VM& vm = state.graph.m_vm;
221
222     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
223     if (iter == recordMap.end()) {
224         // It was optimized out.
225         return;
226     }
227     
228     Vector<StackMaps::RecordAndIndex>& records = iter->value;
229     
230     RELEASE_ASSERT(records.size() == ic.m_generators.size());
231     
232     for (unsigned i = records.size(); i--;) {
233         StackMaps::Record& record = records[i].record;
234         auto generator = ic.m_generators[i];
235
236         CCallHelpers fastPathJIT(&vm, codeBlock);
237         generator.generateFastPath(fastPathJIT);
238         
239         char* startOfIC =
240             bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
241
242         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "inline cache fast path", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
243             state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i],
244                 CodeLocationLabel(startOfIC + sizeOfIC));
245
246             linkBuffer.link(generator.slowPathJump(),
247                 state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));
248
249             generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
250         });
251     }
252 }
253
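// Like generateICFastPath(), but the CheckIn fast path is just a patchable jump, and we
// fill in the StructureStubInfo call/jump deltas by hand.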
254 static void generateCheckInICFastPath(
255     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
256     StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
257 {
258     VM& vm = state.graph.m_vm;
259
260     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
261     if (iter == recordMap.end()) {
262         // It was optimized out.
263         return;
264     }
265     
266     Vector<StackMaps::RecordAndIndex>& records = iter->value;
267     
268     RELEASE_ASSERT(records.size() == ic.m_generators.size());
269
270     for (unsigned i = records.size(); i--;) {
271         StackMaps::Record& record = records[i].record;
272         auto generator = ic.m_generators[i];
273
274         StructureStubInfo& stubInfo = *generator.m_stub;
275         auto call = generator.m_slowCall;
276         auto slowPathBegin = generator.m_beginLabel;
277
278         CCallHelpers fastPathJIT(&vm, codeBlock);
279         
280         auto jump = fastPathJIT.patchableJump();
281         auto done = fastPathJIT.label();
282
283         char* startOfIC =
284             bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
285
286         auto postLink = [&] (LinkBuffer& fastPath, CCallHelpers&, bool) {
287             LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;
288
289             state.finalizer->sideCodeLinkBuffer->link(
290                 ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
291
292             CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
293             fastPath.link(jump, slowPathBeginLoc);
294
295             CodeLocationCall callReturnLocation = slowPath.locationOf(call);
296
297             stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
298                 callReturnLocation, fastPath.locationOf(done));
299
300             stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
301                 callReturnLocation, fastPath.locationOf(jump));
302             stubInfo.callReturnLocation = callReturnLocation;
303             stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
304                 callReturnLocation, slowPathBeginLoc);
305         };
306
307         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "CheckIn inline cache", postLink);
308     }
309 }
310
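// Emits the inline fast path for a binary op IC (see generateBinaryOpFastPath(), declared
// in FTLCompileBinaryOp.h) and links its done and slow-path edges to the side code.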
311 static void generateBinaryOpICFastPath(
312     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
313     StackMaps::RecordMap& recordMap, BinaryOpDescriptor& ic)
314 {
315     VM& vm = state.graph.m_vm;
316     size_t sizeOfIC = ic.size();
317
318     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
319     if (iter == recordMap.end())
320         return; // It was optimized out.
321
322     Vector<StackMaps::RecordAndIndex>& records = iter->value;
323
324     RELEASE_ASSERT(records.size() == ic.m_slowPathStarts.size());
325
326     for (unsigned i = records.size(); i--;) {
327         StackMaps::Record& record = records[i].record;
328
329         CCallHelpers fastPathJIT(&vm, codeBlock);
330
331         GPRReg result = record.locations[0].directGPR();
332         GPRReg left = record.locations[1].directGPR();
333         GPRReg right = record.locations[2].directGPR();
334         RegisterSet usedRegisters = usedRegistersFor(record);
335
336         CCallHelpers::Jump done;
337         CCallHelpers::Jump slowPathStart;
338
339         generateBinaryOpFastPath(ic, fastPathJIT, result, left, right, usedRegisters, done, slowPathStart);
340
341         char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
342         const char* fastPathICName = ic.fastPathICName();
343         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, fastPathICName, [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
344             linkBuffer.link(done, CodeLocationLabel(startOfIC + sizeOfIC));
345             state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
346             
347             linkBuffer.link(slowPathStart, state.finalizer->sideCodeLinkBuffer->locationOf(ic.m_slowPathStarts[i]));
348         });
349     }
350 }
351
352 #if ENABLE(MASM_PROBE)
353
354 static void generateProbe(
355     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
356     StackMaps::RecordMap& recordMap, ProbeDescriptor& ic)
357 {
358     VM& vm = state.graph.m_vm;
359     size_t sizeOfIC = sizeOfProbe();
360
361     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
362     if (iter == recordMap.end())
363         return; // It was optimized out.
364
365     CCallHelpers fastPathJIT(&vm, codeBlock);
366     Vector<StackMaps::RecordAndIndex>& records = iter->value;
367     for (unsigned i = records.size(); i--;) {
368         StackMaps::Record& record = records[i].record;
369
370         fastPathJIT.probe(ic.probeFunction());
371         CCallHelpers::Jump done = fastPathJIT.jump();
372
373         char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
374         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "Probe", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
375             linkBuffer.link(done, CodeLocationLabel(startOfIC + sizeOfIC));
376         });
377     }
378 }
379
380 #endif // ENABLE(MASM_PROBE)
381
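// Registers a patchpoint must treat as live: either every register, if the option that
// assumes all registers are live in FTL ICs is set, or the record's used registers plus
// the callee-saves.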
382 static RegisterSet usedRegistersFor(const StackMaps::Record& record)
383 {
384     if (Options::assumeAllRegsInFTLICAreLive())
385         return RegisterSet::allRegisters();
386     return RegisterSet(record.usedRegisterSet(), RegisterSet::calleeSaveRegisters());
387 }
388
389 template<typename CallType>
390 void adjustCallICsForStackmaps(Vector<CallType>& calls, StackMaps::RecordMap& recordMap, ExceptionHandlerManager& exceptionHandlerManager)
391 {
392     // Handling JS calls is weird: we need to sort them by their PC in the LLVM-generated
393     // code, which means first pruning the ones that LLVM didn't actually generate.
394
395     Vector<CallType> oldCalls;
396     oldCalls.swap(calls);
397     
398     for (unsigned i = 0; i < oldCalls.size(); ++i) {
399         CallType& call = oldCalls[i];
400         
401         StackMaps::RecordMap::iterator iter = recordMap.find(call.stackmapID());
402         if (iter == recordMap.end())
403             continue;
404         
405         for (unsigned j = 0; j < iter->value.size(); ++j) {
406             CallType copy = call;
407             copy.m_instructionOffset = iter->value[j].record.instructionOffset;
408             copy.setCallSiteIndex(exceptionHandlerManager.procureCallSiteIndex(iter->value[j].index, copy));
409             copy.setCorrespondingGenericUnwindOSRExit(exceptionHandlerManager.getCallOSRExit(iter->value[j].index, copy));
410
411             calls.append(copy);
412         }
413     }
414
415     std::sort(calls.begin(), calls.end());
416 }
417
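// The heart of FTL patching: walks the parsed stackmaps and rewrites the LLVM-generated
// machine code in place, building OSR exit thunks, exception handling paths, inline
// caches, JS call ICs, and lazy slow paths at the patchpoint sites LLVM left for us.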
418 static void fixFunctionBasedOnStackMaps(
419     State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
420     StackMaps::RecordMap& recordMap)
421 {
422     Graph& graph = state.graph;
423     VM& vm = graph.m_vm;
424     StackMaps& stackmaps = jitCode->stackmaps;
425
426     ExceptionHandlerManager exceptionHandlerManager(state);
427     
428     int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
429     int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
430     int jsCallThatMightThrowSpillOffset = offsetOfStackRegion(recordMap, state.exceptionHandlingSpillSlotStackmapID);
431     
432     for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
433         InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
434         
435         if (inlineCallFrame->argumentCountRegister.isValid())
436             inlineCallFrame->argumentCountRegister += localsOffset;
437         
438         for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
439             inlineCallFrame->arguments[argument] =
440                 inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
441         }
442         
443         if (inlineCallFrame->isClosureCall) {
444             inlineCallFrame->calleeRecovery =
445                 inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
446         }
447
448         if (graph.hasDebuggerEnabled())
449             codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
450     }
451     
452     MacroAssembler::Label stackOverflowException;
453
454     {
455         CCallHelpers checkJIT(&vm, codeBlock);
456         
457         // At this point it's perfectly fair to just blow away all state and restore the
458         // JS JIT view of the universe.
459         checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
460         checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
461         checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
462         MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
463         checkJIT.jumpToExceptionHandler();
464
465         stackOverflowException = checkJIT.label();
466         checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
467         checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
468         checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
469         MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
470         checkJIT.jumpToExceptionHandler();
471
472         auto linkBuffer = std::make_unique<LinkBuffer>(
473             vm, checkJIT, codeBlock, JITCompilationCanFail);
474         if (linkBuffer->didFailToAllocate()) {
475             state.allocationFailed = true;
476             return;
477         }
478         linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
479         linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
480
481         state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
482     }
483
484     RELEASE_ASSERT(state.jitCode->osrExit.size() == 0);
485     HashMap<OSRExitDescriptor*, OSRExitDescriptorImpl*> genericUnwindOSRExitDescriptors;
486     for (unsigned i = 0; i < state.jitCode->osrExitDescriptors.size(); i++) {
487         OSRExitDescriptor* exitDescriptor = &state.jitCode->osrExitDescriptors[i];
488         auto iter = recordMap.find(exitDescriptor->m_stackmapID);
489         if (iter == recordMap.end()) {
490             // It was optimized out.
491             continue;
492         }
493
494         OSRExitDescriptorImpl& exitDescriptorImpl = state.osrExitDescriptorImpls[i];
495         if (exceptionTypeWillArriveAtOSRExitFromGenericUnwind(exitDescriptorImpl.m_exceptionType))
496             genericUnwindOSRExitDescriptors.add(exitDescriptor, &exitDescriptorImpl);
497
498         for (unsigned j = exitDescriptor->m_values.size(); j--;)
499             exitDescriptor->m_values[j] = exitDescriptor->m_values[j].withLocalsOffset(localsOffset);
500         for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations)
501             materialization->accountForLocalsOffset(localsOffset);
502
503         for (unsigned j = 0; j < iter->value.size(); j++) {
504             {
505                 uint32_t stackmapRecordIndex = iter->value[j].index;
506                 OSRExit exit(exitDescriptor, exitDescriptorImpl, stackmapRecordIndex);
507                 state.jitCode->osrExit.append(exit);
508                 state.finalizer->osrExit.append(OSRExitCompilationInfo());
509             }
510
511             OSRExit& exit = state.jitCode->osrExit.last();
512             if (exit.willArriveAtExitFromIndirectExceptionCheck()) {
513                 StackMaps::Record& record = iter->value[j].record;
514                 RELEASE_ASSERT(exitDescriptorImpl.m_semanticCodeOriginForCallFrameHeader.isSet());
515                 CallSiteIndex callSiteIndex = state.jitCode->common.addUniqueCallSiteIndex(exitDescriptorImpl.m_semanticCodeOriginForCallFrameHeader);
516                 exit.m_exceptionHandlerCallSiteIndex = callSiteIndex;
517
518                 OSRExit* callOperationExit = nullptr;
519                 if (exitDescriptorImpl.m_exceptionType == ExceptionType::BinaryOpGenerator) {
520                     exceptionHandlerManager.addNewCallOperationExit(iter->value[j].index, state.jitCode->osrExit.size() - 1);
521                     callOperationExit = &exit;
522                 } else
523                     exceptionHandlerManager.addNewExit(iter->value[j].index, state.jitCode->osrExit.size() - 1);
524                 
525                 if (exitDescriptorImpl.m_exceptionType == ExceptionType::GetById || exitDescriptorImpl.m_exceptionType == ExceptionType::PutById) {
526                     // We create two different OSRExits for GetById and PutById. One is arrived at
527                     // from the genericUnwind exception handler path, and the other from the
528                     // callOperation exception handler path. The code here generates the second,
529                     // callOperation, variant.
530                     uint32_t stackmapRecordIndex = iter->value[j].index;
531                     OSRExit exit(exitDescriptor, exitDescriptorImpl, stackmapRecordIndex);
532                     if (exitDescriptorImpl.m_exceptionType == ExceptionType::GetById)
533                         exit.m_exceptionType = ExceptionType::GetByIdCallOperation;
534                     else
535                         exit.m_exceptionType = ExceptionType::PutByIdCallOperation;
536                     CallSiteIndex callSiteIndex = state.jitCode->common.addUniqueCallSiteIndex(exitDescriptorImpl.m_semanticCodeOriginForCallFrameHeader);
537                     exit.m_exceptionHandlerCallSiteIndex = callSiteIndex;
538
539                     state.jitCode->osrExit.append(exit);
540                     state.finalizer->osrExit.append(OSRExitCompilationInfo());
541
542                     exceptionHandlerManager.addNewCallOperationExit(iter->value[j].index, state.jitCode->osrExit.size() - 1);
543                     callOperationExit = &state.jitCode->osrExit.last();
544                 }
545
546                 // Subs and GetByIds have an interesting register preservation story;
547                 // see the comment below at GetById to read about it.
548                 //
549                 // We set the registers needing spillage here because they need to be set
550                 // before we generate OSR exits so the exit knows to do the proper recovery.
551                 if (exitDescriptorImpl.m_exceptionType == ExceptionType::JSCall) {
552                     // Call patchpoints might have values we want to do value recovery
553                     // on inside volatile registers. We need to collect the volatile
554                     // registers we want to do value recovery on here because they must
555                     // be preserved to the stack before the call, so that the OSR exit
556                     // exception handler can recover them into the proper registers.
557                     exit.gatherRegistersToSpillForCallIfException(stackmaps, record);
558                 } else if (exitDescriptorImpl.m_exceptionType == ExceptionType::GetById) {
559                     GPRReg result = record.locations[0].directGPR();
560                     GPRReg base = record.locations[1].directGPR();
561                     if (base == result)
562                         callOperationExit->registersToPreserveForCallThatMightThrow.set(base);
563                 } else if (exitDescriptorImpl.m_exceptionType == ExceptionType::BinaryOpGenerator) {
564                     GPRReg result = record.locations[0].directGPR();
565                     GPRReg left = record.locations[1].directGPR();
566                     GPRReg right = record.locations[2].directGPR();
567                     if (result == left || result == right)
568                         callOperationExit->registersToPreserveForCallThatMightThrow.set(result);
569                 }
570             }
571         }
572     }
573     ExitThunkGenerator exitThunkGenerator(state);
574     exitThunkGenerator.emitThunks(jsCallThatMightThrowSpillOffset);
575     if (exitThunkGenerator.didThings()) {
576         RELEASE_ASSERT(state.finalizer->osrExit.size());
577         
578         auto linkBuffer = std::make_unique<LinkBuffer>(
579             vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
580         if (linkBuffer->didFailToAllocate()) {
581             state.allocationFailed = true;
582             return;
583         }
584         
585         RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
586         
587         codeBlock->clearExceptionHandlers();
588
589         for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
590             OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
591             OSRExit& exit = state.jitCode->osrExit[i];
592             
593             if (verboseCompilationEnabled())
594                 dataLog("Handling OSR stackmap #", exit.m_descriptor->m_stackmapID, " for ", exit.m_codeOrigin, "\n");
595
596             info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
597             exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
598
599             if (exit.willArriveAtOSRExitFromGenericUnwind()) {
600                 HandlerInfo newHandler = genericUnwindOSRExitDescriptors.get(exit.m_descriptor)->m_baselineExceptionHandler;
601                 newHandler.start = exit.m_exceptionHandlerCallSiteIndex.bits();
602                 newHandler.end = exit.m_exceptionHandlerCallSiteIndex.bits() + 1;
603                 newHandler.nativeCode = info.m_thunkAddress;
604                 codeBlock->appendExceptionHandler(newHandler);
605             }
606
607             if (verboseCompilationEnabled()) {
608                 DumpContext context;
609                 dataLog("    Exit values: ", inContext(exit.m_descriptor->m_values, &context), "\n");
610                 if (!exit.m_descriptor->m_materializations.isEmpty()) {
611                     dataLog("    Materializations: \n");
612                     for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations)
613                         dataLog("        Materialize(", pointerDump(materialization), ")\n");
614                 }
615             }
616         }
617         
618         state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
619     }
620
621     if (!state.getByIds.isEmpty()
622         || !state.putByIds.isEmpty()
623         || !state.checkIns.isEmpty()
624         || !state.binaryOps.isEmpty()
625         || !state.lazySlowPaths.isEmpty()) {
626         CCallHelpers slowPathJIT(&vm, codeBlock);
627         
628         CCallHelpers::JumpList exceptionTarget;
629
630         Vector<std::pair<CCallHelpers::JumpList, CodeLocationLabel>> exceptionJumpsToLink;
631         auto addNewExceptionJumpIfNecessary = [&] (uint32_t recordIndex) {
632             CodeLocationLabel exceptionTarget = exceptionHandlerManager.callOperationExceptionTarget(recordIndex);
633             if (!exceptionTarget)
634                 return false;
635             exceptionJumpsToLink.append(
636                 std::make_pair(CCallHelpers::JumpList(), exceptionTarget));
637             return true;
638         };
639         
640         for (unsigned i = state.getByIds.size(); i--;) {
641             GetByIdDescriptor& getById = state.getByIds[i];
642             
643             if (verboseCompilationEnabled())
644                 dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
645             
646             auto iter = recordMap.find(getById.stackmapID());
647             if (iter == recordMap.end()) {
648                 // It was optimized out.
649                 continue;
650             }
651             
652             CodeOrigin codeOrigin = getById.codeOrigin();
653             for (unsigned i = 0; i < iter->value.size(); ++i) {
654                 StackMaps::Record& record = iter->value[i].record;
655             
656                 RegisterSet usedRegisters = usedRegistersFor(record);
657                 
658                 GPRReg result = record.locations[0].directGPR();
659                 GPRReg base = record.locations[1].directGPR();
660                 
661                 JITGetByIdGenerator gen(
662                     codeBlock, codeOrigin, exceptionHandlerManager.procureCallSiteIndex(iter->value[i].index, codeOrigin), usedRegisters, JSValueRegs(base),
663                     JSValueRegs(result));
664                 
665                 bool addedUniqueExceptionJump = addNewExceptionJumpIfNecessary(iter->value[i].index);
666                 MacroAssembler::Label begin = slowPathJIT.label();
667                 if (result == base) {
668                     // This situation has a really interesting story. We may have a GetById inside
669                     // a try block where LLVM assigns the result and the base to the same register.
670                     // The inline cache may miss and we may end up at this slow path callOperation.
671                     // Because the base and the result share a register, the return value of the
672                     // C call gets stored into the original base register. If operationGetByIdOptimize
673                     // throws, it will return "undefined" and we will be stuck with "undefined" in the
674                     // base register that we would like to do value recovery on. We prevent this from
675                     // ever happening by spilling the original base value and then recovering it from
676                     // the spill slot as the first step in OSR exit.
677                     if (OSRExit* exit = exceptionHandlerManager.callOperationOSRExit(iter->value[i].index))
678                         exit->spillRegistersToSpillSlot(slowPathJIT, jsCallThatMightThrowSpillOffset);
679                 }
680                 MacroAssembler::Call call = callOperation(
681                     state, usedRegisters, slowPathJIT, codeOrigin, addedUniqueExceptionJump ? &exceptionJumpsToLink.last().first : &exceptionTarget,
682                     operationGetByIdOptimize, result, CCallHelpers::TrustedImmPtr(gen.stubInfo()),
683                     base, CCallHelpers::TrustedImmPtr(getById.uid())).call();
684
685                 gen.reportSlowPathCall(begin, call);
686
687                 getById.m_slowPathDone.append(slowPathJIT.jump());
688                 getById.m_generators.append(gen);
689             }
690         }
691         
692         for (unsigned i = state.putByIds.size(); i--;) {
693             PutByIdDescriptor& putById = state.putByIds[i];
694             
695             if (verboseCompilationEnabled())
696                 dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
697             
698             auto iter = recordMap.find(putById.stackmapID());
699             if (iter == recordMap.end()) {
700                 // It was optimized out.
701                 continue;
702             }
703             
704             CodeOrigin codeOrigin = putById.codeOrigin();
705             for (unsigned i = 0; i < iter->value.size(); ++i) {
706                 StackMaps::Record& record = iter->value[i].record;
707                 
708                 RegisterSet usedRegisters = usedRegistersFor(record);
709                 
710                 GPRReg base = record.locations[0].directGPR();
711                 GPRReg value = record.locations[1].directGPR();
712                 
713                 JITPutByIdGenerator gen(
714                     codeBlock, codeOrigin, exceptionHandlerManager.procureCallSiteIndex(iter->value[i].index, codeOrigin), usedRegisters, JSValueRegs(base),
715                     JSValueRegs(value), GPRInfo::patchpointScratchRegister, putById.ecmaMode(), putById.putKind());
716                 
717                 bool addedUniqueExceptionJump = addNewExceptionJumpIfNecessary(iter->value[i].index);
718
719                 MacroAssembler::Label begin = slowPathJIT.label();
720
721                 MacroAssembler::Call call = callOperation(
722                     state, usedRegisters, slowPathJIT, codeOrigin, addedUniqueExceptionJump ? &exceptionJumpsToLink.last().first : &exceptionTarget,
723                     gen.slowPathFunction(), InvalidGPRReg,
724                     CCallHelpers::TrustedImmPtr(gen.stubInfo()), value, base,
725                     CCallHelpers::TrustedImmPtr(putById.uid())).call();
726                 
727                 gen.reportSlowPathCall(begin, call);
728                 
729                 putById.m_slowPathDone.append(slowPathJIT.jump());
730                 putById.m_generators.append(gen);
731             }
732         }
733
734         for (unsigned i = state.checkIns.size(); i--;) {
735             CheckInDescriptor& checkIn = state.checkIns[i];
736             
737             if (verboseCompilationEnabled())
738                 dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
739             
740             auto iter = recordMap.find(checkIn.stackmapID());
741             if (iter == recordMap.end()) {
742                 // It was optimized out.
743                 continue;
744             }
745             
746             CodeOrigin codeOrigin = checkIn.codeOrigin();
747             for (unsigned i = 0; i < iter->value.size(); ++i) {
748                 StackMaps::Record& record = iter->value[i].record;
749                 RegisterSet usedRegisters = usedRegistersFor(record);
750                 GPRReg result = record.locations[0].directGPR();
751                 GPRReg obj = record.locations[1].directGPR();
752                 StructureStubInfo* stubInfo = codeBlock->addStubInfo(AccessType::In); 
753                 stubInfo->codeOrigin = codeOrigin;
754                 stubInfo->callSiteIndex = state.jitCode->common.addUniqueCallSiteIndex(codeOrigin);
755                 stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
756                 stubInfo->patch.valueGPR = static_cast<int8_t>(result);
757                 stubInfo->patch.usedRegisters = usedRegisters;
758
759                 MacroAssembler::Label begin = slowPathJIT.label();
760
761                 MacroAssembler::Call slowCall = callOperation(
762                     state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
763                     operationInOptimize, result, CCallHelpers::TrustedImmPtr(stubInfo), obj,
764                     CCallHelpers::TrustedImmPtr(checkIn.uid())).call();
765
766                 checkIn.m_slowPathDone.append(slowPathJIT.jump());
767                 
768                 checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
769             }
770         }
771
772         for (size_t i = state.binaryOps.size(); i--;) {
773             BinaryOpDescriptor& binaryOp = state.binaryOps[i];
774             
775             if (verboseCompilationEnabled())
776                 dataLog("Handling ", binaryOp.name(), " stackmap #", binaryOp.stackmapID(), "\n");
777             
778             auto iter = recordMap.find(binaryOp.stackmapID());
779             if (iter == recordMap.end())
780                 continue; // It was optimized out.
781             
782             CodeOrigin codeOrigin = binaryOp.codeOrigin();
783             for (unsigned i = 0; i < iter->value.size(); ++i) {
784                 StackMaps::Record& record = iter->value[i].record;
785                 RegisterSet usedRegisters = usedRegistersFor(record);
786
787                 GPRReg result = record.locations[0].directGPR();
788                 GPRReg left = record.locations[1].directGPR();
789                 GPRReg right = record.locations[2].directGPR();
790
791                 binaryOp.m_slowPathStarts.append(slowPathJIT.label());
792                 bool addedUniqueExceptionJump = addNewExceptionJumpIfNecessary(iter->value[i].index);
793                 if (result == left || result == right) {
794                     // This situation has a really interesting register preservation story.
795                     // See comment above for GetByIds.
796                     if (OSRExit* exit = exceptionHandlerManager.callOperationOSRExit(iter->value[i].index))
797                         exit->spillRegistersToSpillSlot(slowPathJIT, jsCallThatMightThrowSpillOffset);
798                 }
799
800                 callOperation(state, usedRegisters, slowPathJIT, codeOrigin, addedUniqueExceptionJump ? &exceptionJumpsToLink.last().first : &exceptionTarget,
801                     binaryOp.slowPathFunction(), result, left, right).call();
802
803                 binaryOp.m_slowPathDone.append(slowPathJIT.jump());
804             }
805         }
806
807         for (unsigned i = state.lazySlowPaths.size(); i--;) {
808             LazySlowPathDescriptor& descriptor = state.lazySlowPaths[i];
809
810             if (verboseCompilationEnabled())
811                 dataLog("Handling lazySlowPath stackmap #", descriptor.stackmapID(), "\n");
812
813             auto iter = recordMap.find(descriptor.stackmapID());
814             if (iter == recordMap.end()) {
815                 // It was optimized out.
816                 continue;
817             }
818             CodeOrigin codeOrigin = descriptor.codeOrigin();
819             for (unsigned i = 0; i < iter->value.size(); ++i) {
820                 StackMaps::Record& record = iter->value[i].record;
821                 RegisterSet usedRegisters = usedRegistersFor(record);
822                 char* startOfIC =
823                     bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
824                 CodeLocationLabel patchpoint((MacroAssemblerCodePtr(startOfIC)));
825                 CodeLocationLabel exceptionTarget = exceptionHandlerManager.lazySlowPathExceptionTarget(iter->value[i].index);
826                 if (!exceptionTarget)
827                     exceptionTarget = state.finalizer->handleExceptionsLinkBuffer->entrypoint();
828
829                 ScratchRegisterAllocator scratchAllocator(usedRegisters);
830                 GPRReg newZero = InvalidGPRReg;
831                 Vector<Location> locations;
832                 for (auto stackmapLocation : record.locations) {
833                     FTL::Location location = Location::forStackmaps(&stackmaps, stackmapLocation);
834                     if (isARM64()) {
835                         // If LLVM proves that something is zero, it may pass us the zero register (which shares its encoding with the stack pointer). Our assembler
836                         // isn't prepared to handle this well. We need to move it into a different register if such a case arises.
837                         if (location.isGPR() && location.gpr() == MacroAssembler::stackPointerRegister) {
838                             if (newZero == InvalidGPRReg) {
839                                 newZero = scratchAllocator.allocateScratchGPR();
840                                 usedRegisters.set(newZero);
841                             }
842                             location = FTL::Location::forRegister(DWARFRegister(static_cast<uint16_t>(newZero)), 0); // DWARF GPRs for arm64 are sensibly numbered.
843                         }
844                     }
845                     locations.append(location);
846                 }
847
848                 std::unique_ptr<LazySlowPath> lazySlowPath = std::make_unique<LazySlowPath>(
849                     patchpoint, exceptionTarget, usedRegisters, exceptionHandlerManager.procureCallSiteIndex(iter->value[i].index, codeOrigin),
850                     descriptor.m_linker->run(locations), newZero, scratchAllocator);
851
852                 CCallHelpers::Label begin = slowPathJIT.label();
853
854                 slowPathJIT.pushToSaveImmediateWithoutTouchingRegisters(
855                     CCallHelpers::TrustedImm32(state.jitCode->lazySlowPaths.size()));
856                 CCallHelpers::Jump generatorJump = slowPathJIT.jump();
857                 
858                 descriptor.m_generators.append(std::make_tuple(lazySlowPath.get(), begin));
859
860                 state.jitCode->lazySlowPaths.append(WTF::move(lazySlowPath));
861                 state.finalizer->lazySlowPathGeneratorJumps.append(generatorJump);
862             }
863         }
864         
865         exceptionTarget.link(&slowPathJIT);
866         MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
867         
868         state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
869         if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
870             state.allocationFailed = true;
871             return;
872         }
873         state.finalizer->sideCodeLinkBuffer->link(
874             exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
875         
876         for (unsigned i = state.getByIds.size(); i--;) {
877             generateICFastPath(
878                 state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
879                 sizeOfGetById());
880         }
881         for (unsigned i = state.putByIds.size(); i--;) {
882             generateICFastPath(
883                 state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
884                 sizeOfPutById());
885         }
886         for (unsigned i = state.checkIns.size(); i--;) {
887             generateCheckInICFastPath(
888                 state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
889                 sizeOfIn()); 
890         }
891         for (unsigned i = state.binaryOps.size(); i--;) {
892             BinaryOpDescriptor& binaryOp = state.binaryOps[i];
893             generateBinaryOpICFastPath(state, codeBlock, generatedFunction, recordMap, binaryOp);
894         }
895         for (unsigned i = state.lazySlowPaths.size(); i--;) {
896             LazySlowPathDescriptor& lazySlowPath = state.lazySlowPaths[i];
897             for (auto& tuple : lazySlowPath.m_generators) {
898                 MacroAssembler::replaceWithJump(
899                     std::get<0>(tuple)->patchpoint(),
900                     state.finalizer->sideCodeLinkBuffer->locationOf(std::get<1>(tuple)));
901             }
902         }
903 #if ENABLE(MASM_PROBE)
904         for (unsigned i = state.probes.size(); i--;) {
905             ProbeDescriptor& probe = state.probes[i];
906             generateProbe(state, codeBlock, generatedFunction, recordMap, probe);
907         }
908 #endif
909         for (auto& pair : exceptionJumpsToLink)
910             state.finalizer->sideCodeLinkBuffer->link(pair.first, pair.second);
911     }
912     
913     adjustCallICsForStackmaps(state.jsCalls, recordMap, exceptionHandlerManager);
914     
915     for (unsigned i = state.jsCalls.size(); i--;) {
916         JSCall& call = state.jsCalls[i];
917
918         CCallHelpers fastPathJIT(&vm, codeBlock);
919         call.emit(fastPathJIT, state, jsCallThatMightThrowSpillOffset);
920
921         char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
922
923         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
924             call.link(vm, linkBuffer);
925         });
926     }
927     
928     adjustCallICsForStackmaps(state.jsCallVarargses, recordMap, exceptionHandlerManager);
929     
930     for (unsigned i = state.jsCallVarargses.size(); i--;) {
931         JSCallVarargs& call = state.jsCallVarargses[i];
932         
933         CCallHelpers fastPathJIT(&vm, codeBlock);
934         call.emit(fastPathJIT, state, varargsSpillSlotsOffset, jsCallThatMightThrowSpillOffset);
935
936         char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
937         size_t sizeOfIC = sizeOfICFor(call.node());
938
939         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
940             call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
941         });
942     }
943
944     adjustCallICsForStackmaps(state.jsTailCalls, recordMap, exceptionHandlerManager);
945
946     for (unsigned i = state.jsTailCalls.size(); i--;) {
947         JSTailCall& call = state.jsTailCalls[i];
948
949         CCallHelpers fastPathJIT(&vm, codeBlock);
950         call.emit(*state.jitCode.get(), fastPathJIT);
951
952         char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
953         size_t sizeOfIC = call.estimatedSize();
954
955         generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
956             call.link(vm, linkBuffer);
957         });
958     }
959     
960     auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
961     // It's sort of remotely possible that we won't have an in-band exception handling
962     // path, for some kinds of functions.
963     if (iter != recordMap.end()) {
964         for (unsigned i = iter->value.size(); i--;) {
965             StackMaps::Record& record = iter->value[i].record;
966             
967             CodeLocationLabel source = CodeLocationLabel(
968                 bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
969
970             RELEASE_ASSERT(stackOverflowException.isSet());
971
972             MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
973         }
974     }
975     
976     iter = recordMap.find(state.handleExceptionStackmapID);
977     // It's sort of remotely possible that we won't have an in-band exception handling
978     // path, for some kinds of functions.
979     if (iter != recordMap.end()) {
980         for (unsigned i = iter->value.size(); i--;) {
981             StackMaps::Record& record = iter->value[i].record;
982             
983             CodeLocationLabel source = CodeLocationLabel(
984                 bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
985             
986             MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
987         }
988     }
989     
990     for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
991         OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
992         OSRExit& exit = jitCode->osrExit[exitIndex];
993         Vector<const void*> codeAddresses;
994
995         if (exit.willArriveAtExitFromIndirectExceptionCheck()) // This jump doesn't happen directly from a patchpoint/stackmap we compile. It happens indirectly through an exception check somewhere.
996             continue;
997         
998         StackMaps::Record& record = jitCode->stackmaps.records[exit.m_stackmapRecordIndex];
999         
1000         CodeLocationLabel source = CodeLocationLabel(
1001             bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
1002         
1003         codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
1004         
1005         if (exit.m_descriptor->m_isInvalidationPoint)
1006             jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
1007         else
1008             MacroAssembler::replaceWithJump(source, info.m_thunkAddress);
1009         
1010         if (graph.compilation())
1011             graph.compilation()->addOSRExitSite(codeAddresses);
1012     }
1013 }
1014
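// Drives the LLVM MCJIT backend: sets up the memory manager and target, runs either the
// simple pass pipeline or a PassManagerBuilder-driven one, fetches the generated function,
// then parses the stackmaps section and calls fixFunctionBasedOnStackMaps() to patch the
// code. The LLVM work runs under a GraphSafepoint, so the compilation may get cancelled.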
1015 void compile(State& state, Safepoint::Result& safepointResult)
1016 {
1017     char* error = 0;
1018     
1019     {
1020         GraphSafepoint safepoint(state.graph, safepointResult);
1021         
1022         LLVMMCJITCompilerOptions options;
1023         llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
1024         options.OptLevel = Options::llvmBackendOptimizationLevel();
1025         options.NoFramePointerElim = true;
1026         if (Options::useLLVMSmallCodeModel())
1027             options.CodeModel = LLVMCodeModelSmall;
1028         options.EnableFastISel = enableLLVMFastISel;
1029         options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
1030             &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);
1031     
1032         LLVMExecutionEngineRef engine;
1033         
1034         if (isARM64()) {
1035 #if OS(DARWIN)
1036             llvm->SetTarget(state.module, "arm64-apple-ios");
1037 #elif OS(LINUX)
1038             llvm->SetTarget(state.module, "aarch64-linux-gnu");
1039 #else
1040 #error "Unrecognized OS"
1041 #endif
1042         }
1043
1044         if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
1045             dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
1046             CRASH();
1047         }
1048         
1049         // At this point we no longer own the module.
1050         LModule module = state.module;
1051         state.module = nullptr;
1052
1053         // The data layout also has to be set in the module. Get the data layout from the MCJIT and apply
1054         // it to the module.
1055         LLVMTargetMachineRef targetMachine = llvm->GetExecutionEngineTargetMachine(engine);
1056         LLVMTargetDataRef targetData = llvm->GetExecutionEngineTargetData(engine);
1057         char* stringRepOfTargetData = llvm->CopyStringRepOfTargetData(targetData);
1058         llvm->SetDataLayout(module, stringRepOfTargetData);
1059         free(stringRepOfTargetData);
1060
1061         LLVMPassManagerRef functionPasses = 0;
1062         LLVMPassManagerRef modulePasses;
1063
1064         if (Options::llvmSimpleOpt()) {
1065             modulePasses = llvm->CreatePassManager();
1066             llvm->AddTargetData(targetData, modulePasses);
1067             llvm->AddAnalysisPasses(targetMachine, modulePasses);
1068             llvm->AddPromoteMemoryToRegisterPass(modulePasses);
1069             llvm->AddGlobalOptimizerPass(modulePasses);
1070             llvm->AddFunctionInliningPass(modulePasses);
1071             llvm->AddPruneEHPass(modulePasses);
1072             llvm->AddGlobalDCEPass(modulePasses);
1073             llvm->AddConstantPropagationPass(modulePasses);
1074             llvm->AddAggressiveDCEPass(modulePasses);
1075             llvm->AddInstructionCombiningPass(modulePasses);
1076             // BEGIN - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
1077             llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
1078             llvm->AddBasicAliasAnalysisPass(modulePasses);
1079             // END - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
1080             llvm->AddGVNPass(modulePasses);
1081             llvm->AddCFGSimplificationPass(modulePasses);
1082             llvm->AddDeadStoreEliminationPass(modulePasses);
1083             
1084             if (enableLLVMFastISel)
1085                 llvm->AddLowerSwitchPass(modulePasses);
1086
1087             llvm->RunPassManager(modulePasses, module);
1088         } else {
1089             LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
1090             llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
1091             llvm->PassManagerBuilderUseInlinerWithThreshold(passBuilder, 275);
1092             llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());
1093         
1094             functionPasses = llvm->CreateFunctionPassManagerForModule(module);
1095             modulePasses = llvm->CreatePassManager();
1096         
1097             llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
1098         
1099             llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
1100             llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);
1101         
1102             llvm->PassManagerBuilderDispose(passBuilder);
1103         
1104             llvm->InitializeFunctionPassManager(functionPasses);
1105             for (LValue function = llvm->GetFirstFunction(module); function; function = llvm->GetNextFunction(function))
1106                 llvm->RunFunctionPassManager(functionPasses, function);
1107             llvm->FinalizeFunctionPassManager(functionPasses);
1108         
1109             llvm->RunPassManager(modulePasses, module);
1110         }
1111
1112         if (shouldDumpDisassembly() || verboseCompilationEnabled())
1113             state.dumpState(module, "after optimization");
1114         
1115         // FIXME: Need to add support for the case where JIT memory allocation failed.
1116         // https://bugs.webkit.org/show_bug.cgi?id=113620
1117         state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
1118         if (functionPasses)
1119             llvm->DisposePassManager(functionPasses);
1120         llvm->DisposePassManager(modulePasses);
1121         llvm->DisposeExecutionEngine(engine);
1122     }
1123
1124     if (safepointResult.didGetCancelled())
1125         return;
1126     RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());
1127     
1128     if (state.allocationFailed)
1129         return;
1130     
1131     if (shouldDumpDisassembly()) {
1132         for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
1133             ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
1134             dataLog(
1135                 "Generated LLVM code for ",
1136                 CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
1137                 " #", i, ", ", state.codeSectionNames[i], ":\n");
1138             disassemble(
1139                 MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
1140                 "    ", WTF::dataFile(), LLVMSubset);
1141         }
1142         
1143         for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
1144             DataSection* section = state.jitCode->dataSections()[i].get();
1145             dataLog(
1146                 "Generated LLVM data section for ",
1147                 CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
1148                 " #", i, ", ", state.dataSectionNames[i], ":\n");
1149             dumpDataSection(section, "    ");
1150         }
1151     }
1152     
1153     std::unique_ptr<RegisterAtOffsetList> registerOffsets = parseUnwindInfo(
1154         state.unwindDataSection, state.unwindDataSectionSize,
1155         state.generatedFunction);
1156     if (shouldDumpDisassembly()) {
1157         dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
1158         dataLog("    ", *registerOffsets, "\n");
1159     }
1160     state.graph.m_codeBlock->setCalleeSaveRegisters(WTF::move(registerOffsets));
1161     
1162     if (state.stackmapsSection && state.stackmapsSection->size()) {
1163         if (shouldDumpDisassembly()) {
1164             dataLog(
1165                 "Generated LLVM stackmaps section for ",
1166                 CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
1167             dataLog("    Raw data:\n");
1168             dumpDataSection(state.stackmapsSection.get(), "    ");
1169         }
1170         
1171         RefPtr<DataView> stackmapsData = DataView::create(
1172             ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
1173         state.jitCode->stackmaps.parse(stackmapsData.get());
1174     
1175         if (shouldDumpDisassembly()) {
1176             dataLog("    Structured data:\n");
1177             state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
1178         }
1179         
1180         StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
1181         fixFunctionBasedOnStackMaps(
1182             state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
1183             recordMap);
1184         if (state.allocationFailed)
1185             return;
1186         
1187         if (shouldDumpDisassembly() || Options::asyncDisassembly()) {
1188             for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
1189                 if (state.codeSectionNames[i] != SECTION_NAME("text"))
1190                     continue;
1191                 
1192                 ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
1193                 
1194                 CString header = toCString(
1195                     "Generated LLVM code after stackmap-based fix-up for ",
1196                     CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
1197                     " in ", state.graph.m_plan.mode, " #", i, ", ",
1198                     state.codeSectionNames[i], ":\n");
1199                 
1200                 if (Options::asyncDisassembly()) {
1201                     disassembleAsynchronously(
1202                         header, MacroAssemblerCodeRef(handle), handle->sizeInBytes(), "    ",
1203                         LLVMSubset);
1204                     continue;
1205                 }
1206                 
1207                 dataLog(header);
1208                 disassemble(
1209                     MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
1210                     "    ", WTF::dataFile(), LLVMSubset);
1211             }
1212         }
1213     }
1214 }
1215
1216 } } // namespace JSC::FTL
1217
1218 #endif // ENABLE(FTL_JIT) && !FTL_USES_B3
1219