Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    fprintf(stderr, "OSR exit for Node @%d (", (int)exit.m_nodeIndex);
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        fprintf(stderr, "bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        fprintf(stderr, " -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    fprintf(stderr, ")  ");
    exit.dump(stderr);
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_jit.codeBlock();
    debugInfo->nodeIndex = exit.m_nodeIndex;
    
    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif
    
#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif
    
#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif
    
    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    
    GPRReg alreadyBoxed = InvalidGPRReg;
    
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;
            
        case BooleanSpeculationCheck:
            m_jit.xorPtr(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;
            
        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.
    
    if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
        if (exit.m_jsValueSource.isAddress()) {
            // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
            // since we know how to restore it.
            m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
            m_jit.storePtr(GPRInfo::tagTypeNumberRegister, exit.m_valueProfile->specFailBucket(0));
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(TagTypeNumber)), GPRInfo::tagTypeNumberRegister);
        } else
            m_jit.storePtr(exit.m_jsValueSource.gpr(), exit.m_valueProfile->specFailBucket(0));
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.
            
    Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;
    
    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    
    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        switch (recovery.technique()) {
        case Int32DisplacedInRegisterFile:
        case DoubleDisplacedInRegisterFile:
        case DisplacedInRegisterFile:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);
            
            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
                switch (exit.m_variables[recovery.virtualRegister()].technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;
            
        case UnboxedInt32InGPR:
        case AlreadyInRegisterFileAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;
            
        case InFPR:
            haveFPRs = true;
            break;
            
        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;
            
        default:
            break;
        }
    }
    
#if DFG_ENABLE(DEBUG_VERBOSE)
    fprintf(stderr, "  ");
    if (numberOfPoisonedVirtualRegisters)
        fprintf(stderr, "Poisoned=%u ", numberOfPoisonedVirtualRegisters);
    if (numberOfDisplacedVirtualRegisters)
        fprintf(stderr, "Displaced=%u ", numberOfDisplacedVirtualRegisters);
    if (haveUnboxedInt32s)
        fprintf(stderr, "UnboxedInt32 ");
    if (haveFPRs)
        fprintf(stderr, "FPR ");
    if (haveConstants)
        fprintf(stderr, "Constants ");
    if (haveUndefined)
        fprintf(stderr, "Undefined ");
    fprintf(stderr, " ");
#endif
    
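    // The scratch buffer needs one slot per poisoned virtual register, plus one slot
    // per displaced virtual register if the displaced registers do not all fit in
    // GPRs (i.e. there are more of them than GPRInfo::numberOfRegisters).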
    EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters))));

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.
    
    // 5) Perform all reboxing of integers.
    
    if (haveUnboxedInt32s) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            switch (recovery.technique()) {
            case UnboxedInt32InGPR:
                if (recovery.gpr() != alreadyBoxed)
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                break;
                
            case AlreadyInRegisterFileAsUnboxedInt32:
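                // The 32-bit payload is already in the register file; storing the high
                // word of TagTypeNumber over the tag half of the slot boxes it in place.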
                m_jit.store32(AssemblyHelpers::Imm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;
                
            default:
                break;
            }
        }
    }
    
    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.
    
    unsigned scratchIndex = 0;
    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        int operand = exit.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)])
                m_jit.storePtr(recovery.gpr(), scratchBuffer + scratchIndex++);
            else
                m_jit.storePtr(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
            break;
        default:
            break;
        }
    }
    
    // At this point all GPRs are available for scratch use.
    
    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)
        
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
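            // Pair each FPR with a distinct GPR by index; boxDouble() moves the raw bits
            // into the GPR and applies the JSValue double encoding there.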
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            m_jit.boxDouble(fpr, gpr);
        }
        
        // 8) Dump all doubles into the register file, or to the scratch storage if
        //    the destination virtual register is poisoned.
        
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)])
                m_jit.storePtr(gpr, scratchBuffer + scratchIndex++);
            else
                m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }
    
    ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters);
    
    // 9) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.
    
    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.
        
            unsigned displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;
                    
                case Int32DisplacedInRegisterFile: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }
                    
                case DoubleDisplacedInRegisterFile: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }
                    
                default:
                    break;
                }
            }
        
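            // Every displaced value is now held in a register, so the stores below
            // cannot clobber a register-file slot that still needs to be read.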
            displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                case Int32DisplacedInRegisterFile:
                case DoubleDisplacedInRegisterFile:
                    m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                    
                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.
        
            // For now this code is engineered to be correct but probably not fast.
            // In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.
        
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
                    break;
                    
                case Int32DisplacedInRegisterFile: {
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
                    break;
                }
                    
                case DoubleDisplacedInRegisterFile: {
                    m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
                    break;
                }
                    
                default:
                    break;
                }
            }
        
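            // The displaced values were written after the poisoned-register slots, so
            // start reading the scratch buffer back at that offset.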
            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                case Int32DisplacedInRegisterFile:
                case DoubleDisplacedInRegisterFile:
                    m_jit.loadPtr(scratchBuffer + scratchIndex++, GPRInfo::regT0);
                    m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                    
                default:
                    break;
                }
            }
        
            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }
    
    // 10) Dump all poisoned virtual registers.
    
    scratchIndex = 0;
    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;
            
            const ValueRecovery& recovery = exit.m_variables[virtualRegister];
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case InFPR:
                m_jit.loadPtr(scratchBuffer + scratchIndex++, GPRInfo::regT0);
                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                break;
                
            default:
                break;
            }
        }
    }
    ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters);
    
    // 11) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined)
            m_jit.move(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0);
        
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
            else
                m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }
    
    // 12) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().
    
    m_jit.add32(AssemblyHelpers::Imm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
    
    m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
    
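    // regT2 holds the speculation failure count and regT1 the success count; this
    // exit counts as one more failure and one fewer success.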
    m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
    m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
    m_jit.add32(AssemblyHelpers::Imm32(1), GPRInfo::regT2);
    m_jit.add32(AssemblyHelpers::Imm32(-1), GPRInfo::regT1);
    m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
    m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
    
    m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
    
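    // Reoptimize on the next invocation only if there have been many failures and
    // the failures, scaled by the desired success/fail ratio, exceed the successes;
    // otherwise schedule a long warm-up in the baseline code block.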
    AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::Imm32(m_jit.codeBlock()->largeFailCountThreshold()));
    m_jit.mul32(AssemblyHelpers::Imm32(Heuristics::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
    
    AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
    
    // Reoptimize as soon as possible.
    m_jit.store32(AssemblyHelpers::Imm32(Heuristics::executionCounterValueForOptimizeNextInvocation), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
    AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
    
    fewFails.link(&m_jit);
    lowFailRate.link(&m_jit);
    
    m_jit.store32(AssemblyHelpers::Imm32(m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp()), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
    
    doneAdjusting.link(&m_jit);
    
    // 13) Load the result of the last bytecode operation into regT0.
    
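    // An m_lastSetOperand of std::numeric_limits<int>::max() means the exiting
    // operation did not set an operand, so there is nothing to load.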
    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
    
    // 14) Fix call frame(s).
    
    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));
    
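    // Walk the chain of inlined call frames from the exit origin outward, materializing
    // the call frame entries the baseline JIT expects for each inlined call.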
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);
        
        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);
        
        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.addPtr(AssemblyHelpers::Imm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
        
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsNumber(inlineCallFrame->arguments.size()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
    }
    
    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::Imm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
    
    // 15) Jump into the corresponding baseline JIT code.
    
    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);
    
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);
    
    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
    
    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
    
    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);
    
    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);
    m_jit.jump(GPRInfo::regT1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    fprintf(stderr, "-> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)