/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"

namespace JSC { namespace DFG {

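// Note on the 32_64 value representation assumed throughout this file: a JSValue is an
// 8-byte EncodedValueDescriptor whose asBits member exposes separate 32-bit 'tag' and
// 'payload' halves, with tags such as JSValue::Int32Tag, JSValue::CellTag,
// JSValue::BooleanTag and JSValue::EmptyValueTag. AssemblyHelpers::tagFor() and
// payloadFor() address the two halves of a virtual register's call frame slot, which is
// why the stores below come in pairs of 32-bit stores rather than one 64-bit store.
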
void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    fprintf(stderr, "OSR exit for Node @%d (", (int)exit.m_nodeIndex);
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        fprintf(stderr, "bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        fprintf(stderr, " -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    fprintf(stderr, ") at JIT offset 0x%x  ", m_jit.debugOffset());
    exit.dump(stderr);
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_jit.codeBlock();
    debugInfo->nodeIndex = exit.m_nodeIndex;

    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }
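    // For SpeculativeAdd the destination register has already been mutated by the add when
    // the speculation check fails, so the sub32 above simply undoes that add to recover the
    // original operand value. BooleanSpeculationCheck needs no undo here.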

    // 3) Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
        if (exit.m_jsValueSource.isAddress()) {
            // Save a register so we can use it.
            GPRReg scratch = GPRInfo::regT0;
            if (scratch == exit.m_jsValueSource.base())
                scratch = GPRInfo::regT1;
            EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t)));
            m_jit.store32(scratch, scratchBuffer);
            m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
            m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
            m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
            m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
            m_jit.load32(scratchBuffer, scratch);
        } else if (exit.m_jsValueSource.hasKnownTag()) {
            m_jit.store32(AssemblyHelpers::Imm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
            m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
        } else {
            m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
            m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
        }
    }
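    // The stores above stash the offending value (tag and payload separately) into the
    // ValueProfile's first speculation-failure bucket, so that later recompilation
    // decisions can take into account the value that defeated the speculation.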

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32InRegisterFile = false;
    bool haveUnboxedCellInRegisterFile = false;
    bool haveUnboxedBooleanInRegisterFile = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;

    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        switch (recovery.technique()) {
        case DisplacedInRegisterFile:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
                switch (exit.m_variables[recovery.virtualRegister()].technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UnboxedBooleanInGPR:
                case InPair:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case AlreadyInRegisterFileAsUnboxedInt32:
            haveUnboxedInt32InRegisterFile = true;
            break;

        case AlreadyInRegisterFileAsUnboxedCell:
            haveUnboxedCellInRegisterFile = true;
            break;

        case AlreadyInRegisterFileAsUnboxedBoolean:
            haveUnboxedBooleanInRegisterFile = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        default:
            break;
        }
    }
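    // Poisoning example (hypothetical numbering): suppose a DisplacedInRegisterFile
    // recovery must read its value out of local r5's slot, while r5's own recovered value
    // is currently sitting in a GPR. Writing r5's slot immediately would clobber the
    // displaced value, so r5 is marked poisoned: its register value is parked in the
    // scratch buffer (steps 6-8) and only flushed to the register file in step 10, after
    // step 9 has read the displaced values out.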

    EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters))));
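    // The buffer holds one EncodedJSValue per poisoned virtual register, plus one per
    // displaced virtual register in the slow case where the displaced values cannot all be
    // held in GPRs at once (each displaced value needs two GPRs here, one for the tag and
    // one for the payload, hence the "* 2" in the comparison).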

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers and cells, except for those in registers.

    if (haveUnboxedInt32InRegisterFile || haveUnboxedCellInRegisterFile || haveUnboxedBooleanInRegisterFile) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            switch (recovery.technique()) {
            case AlreadyInRegisterFileAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;

            case AlreadyInRegisterFileAsUnboxedCell:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;

            case AlreadyInRegisterFileAsUnboxedBoolean:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;

            default:
                break;
            }
        }
    }
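    // For the AlreadyInRegisterFileAs... techniques the payload word is already sitting in
    // the baseline slot; only the matching tag (Int32Tag, CellTag or BooleanTag) needs to
    // be written to turn the unboxed value back into a full JSValue.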

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    unsigned scratchIndex = 0;
    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        int operand = exit.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)])
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
            else {
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        case InPair:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                scratchIndex++;
            } else {
                m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.payloadGPR(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        default:
            break;
        }
    }

    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)
        //    For JSValue32_64, no need to box doubles.

        // 8) Dump all doubles into the register file, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)])
                m_jit.storeDouble(recovery.fpr(), scratchBuffer + scratchIndex++);
            else
                m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters);

    // 9) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                if (recovery.technique() != DisplacedInRegisterFile)
                    continue;
                m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
            }

            displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                if (recovery.technique() != DisplacedInRegisterFile)
                    continue;
                m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
            }
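            // Loading every displaced tag/payload pair into registers before doing any of
            // the stores is what makes this safe when a displaced source slot is also some
            // other recovery's destination slot: nothing is overwritten until all sources
            // have been read.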
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                if (recovery.technique() != DisplacedInRegisterFile)
                    continue;
                m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                scratchIndex++;
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                if (recovery.technique() != DisplacedInRegisterFile)
                    continue;
                m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                scratchIndex++;
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 10) Dump all poisoned virtual registers.

    scratchIndex = 0;
    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = exit.m_variables[virtualRegister];
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UnboxedBooleanInGPR: {
                m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;
            }

            case InFPR:
            case InPair:
                m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                scratchIndex++;
                break;

            default:
                break;
            }
        }
    }
    ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters);
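    // Poisoned values now move from the scratch buffer into their baseline slots. For the
    // InGPR-family techniques only the payload was parked in step 6, so the tag is rebuilt
    // here as an immediate; for InFPR/InPair both halves were parked and both are copied.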

    // 11) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined) {
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
        }

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined()) {
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
            } else {
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
            }
        }
    }

    // 12) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

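    // A sketch of what the code below emits: regT2 <- ++speculativeFailCounter and
    // regT1 <- --speculativeSuccessCounter are written back to the DFG code block; then,
    // with regT0 pointing at the baseline CodeBlock, the baseline execute counter is set
    // to executionCounterValueForOptimizeNextInvocation if the fail count exceeds
    // largeFailCountThreshold() and fails * desiredSpeculativeSuccessFailRatio exceeds
    // the success count, and to counterValueForOptimizeAfterLongWarmUp() otherwise.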
    m_jit.add32(AssemblyHelpers::Imm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);

    m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
    m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
    m_jit.add32(AssemblyHelpers::Imm32(1), GPRInfo::regT2);
    m_jit.add32(AssemblyHelpers::Imm32(-1), GPRInfo::regT1);
    m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
    m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));

    m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::Imm32(m_jit.codeBlock()->largeFailCountThreshold()));
    m_jit.mul32(AssemblyHelpers::Imm32(Heuristics::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);

    AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    // Reoptimize as soon as possible.
    m_jit.store32(AssemblyHelpers::Imm32(Heuristics::executionCounterValueForOptimizeNextInvocation), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
    AssemblyHelpers::Jump doneAdjusting = m_jit.jump();

    fewFails.link(&m_jit);
    lowFailRate.link(&m_jit);

    m_jit.store32(AssemblyHelpers::Imm32(m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp()), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));

    doneAdjusting.link(&m_jit);

    // 13) Load the result of the last bytecode operation into the cached result registers.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
        m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
        m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
    }
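    // On JSVALUE32_64 the cached result is a register pair: cachedResultRegister receives
    // the payload and cachedResultRegister2 the tag, the pair in which the baseline JIT
    // expects to find the result of the most recently executed bytecode.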

    // 14) Fix call frame(s).

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));

    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.add32(AssemblyHelpers::Imm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
        m_jit.store32(AssemblyHelpers::Imm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.store32(AssemblyHelpers::Imm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
        m_jit.store32(AssemblyHelpers::Imm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
        m_jit.store32(AssemblyHelpers::Imm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
        m_jit.store32(AssemblyHelpers::Imm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
    }
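    // Each iteration materializes one inlined call frame that the DFG had elided: it fills
    // in the frame's CodeBlock, ScopeChain, CallerFrame, ReturnPC, ArgumentCount and Callee
    // header slots at inlineCallFrame->stackOffset, with the ReturnPC pointing at the
    // baseline machine code just after the corresponding op_call in the caller.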

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::Imm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 15) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT2 != GPRInfo::cachedResultRegister && GPRInfo::regT2 != GPRInfo::cachedResultRegister2);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    m_jit.jump(GPRInfo::regT2);

#if DFG_ENABLE(DEBUG_VERBOSE)
    fprintf(stderr, "   -> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)