Replace WTF::move with WTFMove
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
{
    if (shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = std::make_unique<Disassembler>(dfg);
}

JITCompiler::~JITCompiler()
{
}

void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (m_graph.compilation()) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

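    // For each OSR exit, link its failure jumps here, record the exit index in
    // vm()->osrExitIndex so the exit generation thunk knows which OSRExit to
    // compile, and plant a patchable jump that link() later points at that thunk.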
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();

        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
}

void JITCompiler::compileSetupRegistersForEntry()
{
    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToVMCalleeSavesBuffer();

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));

        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToVMCalleeSavesBuffer();

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));

        jumpToExceptionHandler();
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

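    // Link switch jump tables: every slot starts out pointing at the fall-through
    // target, and the slots for explicit cases are then overwritten with the
    // address of each case's target block.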
    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

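    // For each In structure stub, record the distances from the slow-path call's
    // return location to the done label, the inline jump, and the slow-path entry.
    // These deltas are what later repatching of the stub relies on.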
    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
        info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        JSCallRecord& record = m_jsCalls[i];
        CallLinkInfo& info = *record.m_info;
        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
        info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
            linkBuffer.locationOf(record.m_targetToCheck),
            linkBuffer.locationOfNearCall(record.m_fastCall));
    }

    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow,
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is* set, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }
}

void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer));
}

void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
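    // A non-zero result from the arity check operation means the frame needs fixing
    // up; pass it to the arity fixup thunk (m_callArityFixup is linked against the
    // arityFixupGenerator stub below) before entering at fromArityCheck.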
    emitStoreCodeOrigin(CodeOrigin(0));
    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer), withArityCheck);
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldDumpDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (m_graph.m_plan.compilation)
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
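    // Return a stable address for this double constant, reusing an existing
    // constant-pool slot when one with the same bit pattern already exists.
    // Keying the map by bit pattern rather than by value means doubles with
    // distinct encodings (for example -0 versus +0) get separate slots.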
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeHeapTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeHeapTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedMachineInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

void JITCompiler::appendExceptionHandlingOSRExit(unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
{
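    // Record an OSR exit that serves as the landing pad for an exception handled
    // in this machine frame; link() later turns those reached from genericUnwind
    // into HandlerInfo entries covering the given call site index.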
    OSRExit exit(Uncountable, JSValueRegs(), graph().methodOfGettingAValueProfileFor(nullptr), m_speculative.get(), eventStreamIndex);
    exit.m_willArriveAtOSRExitFromGenericUnwind = jumpsToFail.empty(); // If jumps are empty, we're going to jump here from genericUnwind from a child call frame.
    exit.m_isExceptionHandler = true;
    exit.m_codeOrigin = opCatchOrigin;
    exit.m_exceptionHandlerCallSiteIndex = callSite;
    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
    jitCode()->appendOSRExit(exit);
    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
}

void JITCompiler::exceptionCheck()
{
    // It's important that we use origin.forExit here. Consider if we hoist string
    // addition outside a loop, and that we exit at the point of that concatenation
    // from an out of memory exception.
    // If the original loop had a try/catch around string concatenation, if we "catch"
    // that exception inside the loop, then the loop's induction variable will be undefined
    // in the OSR exit value recovery. It's more defensible for the string concatenation,
    // then, to not be caught by the for loop's try/catch.
    // Here is the program I'm speaking about:
    //
    // >>>> let's presume "c = a + b" gets hoisted here.
    // for (var i = 0; i < length; i++) {
    //     try {
    //         c = a + b
    //     } catch(e) {
    //         If we threw an out of memory error, and we caught the exception
    //         right here, then "i" would almost certainly be undefined, which
    //         would make no sense.
    //         ...
    //     }
    // }
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
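    // If the exception will be caught in this machine frame, route it through an
    // exception-handling OSR exit; otherwise add the check to m_exceptionChecks,
    // which compileExceptionHandlers() links to the generic lookupExceptionHandler path.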
    if (willCatchException) {
        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex != UINT_MAX ? m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck();
        // We assume here that this is called after callOperation()/appendCall() is called.
        appendExceptionHandlingOSRExit(streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
    } else
        m_exceptionChecks.append(emitExceptionCheck());
}

CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
{
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
    if (willCatchException)
        appendExceptionHandlingOSRExit(eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
    return callSite;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)