Rename dataLog() and dataLogV() to dataLogF() and dataLogFV()
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGOSRExitCompiler64.cpp
/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(")  ");
    dumpOperands(operands, WTF::dataFile());
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_jit.codeBlock();
    debugInfo->nodeIndex = exit.m_nodeIndex;

    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
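    //
    //    For example, a speculative integer add may already have written into its
    //    destination GPR by the time the speculation check fails; the SpeculativeAdd
    //    case below undoes the add by subtracting the source back out and then
    //    re-tags the result as an int32.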

    GPRReg alreadyBoxed = InvalidGPRReg;

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;

        case BooleanSpeculationCheck:
            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }

    // 3) Refine some array and/or value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);

                m_jit.push(scratch1);
                m_jit.push(scratch2);

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.gpr();

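                // The next few instructions record the cell's Structure as the
                // profile's last-seen structure and then set one bit in arrayModes:
                // assuming arrayModes is the usual one-bit-per-IndexingType bitfield,
                // the bit set is (1 << indexingType) for the type just observed.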
                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
            dataLogF("  (have exit profile, bucket %p)  ", bucket);
#endif

            if (exit.m_jsValueSource.isAddress()) {
                // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
                // since we know how to restore it.
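                // (tagTypeNumberRegister is pinned to the TagTypeNumber constant, so
                // "restoring" it is just the immediate move two instructions below.)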
                m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
                m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
                m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
            } else
                m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
        }
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveUnboxedDoubles = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveUInt32s = false;
    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case DisplacedInJSStack:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
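            //
            // Example: a local whose recovered value currently lives in a GPR (InGPR)
            // while its stack slot is also the source of this displaced recovery.
            // Storing the GPR into the slot right away would clobber the displaced
            // value, so the local is marked poisoned: its register is parked in the
            // scratch buffer in step 6 and only written back in step 11.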
            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
                switch (operands.local(recovery.virtualRegister()).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UnboxedInt32InGPR:
        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedDouble:
            haveUnboxedDoubles = true;
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;

        default:
            break;
        }
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("  ");
    if (numberOfPoisonedVirtualRegisters)
        dataLogF("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
    if (numberOfDisplacedVirtualRegisters)
        dataLogF("Displaced=%u ", numberOfDisplacedVirtualRegisters);
    if (haveUnboxedInt32s)
        dataLogF("UnboxedInt32 ");
    if (haveUnboxedDoubles)
        dataLogF("UnboxedDoubles ");
    if (haveUInt32s)
        dataLogF("UInt32 ");
    if (haveFPRs)
        dataLogF("FPR ");
    if (haveConstants)
        dataLogF("Constants ");
    if (haveUndefined)
        dataLogF("Undefined ");
    dataLogF(" ");
#endif

    ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
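    // Scratch sizing, as computed above: the UInt32InGPR path needs two slots (one to
    // spill a GPR, one to spill an FPR), each poisoned virtual register needs one, and
    // if there are more displaced virtual registers than GPRs each of those needs one
    // too; the buffer covers the larger of the two demands.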

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers.

    if (haveUnboxedInt32s || haveUInt32s) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case UnboxedInt32InGPR:
                if (recovery.gpr() != alreadyBoxed)
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                break;

            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case UInt32InGPR: {
                // This occurs when the speculative JIT left an unsigned 32-bit integer
                // in a GPR. If it's positive, we can just box the int. Otherwise we
                // need to turn it into a boxed double.

                // We don't try to be clever with register allocation here; we assume
                // that the program is using FPRs and we don't try to figure out which
                // ones it is using. Instead just temporarily save fpRegT0 and then
                // restore it. This makes sense because this path is not cheap to begin
                // with, and should happen very rarely.
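                //
                // Arithmetic note: a uint32 with its sign bit set looks negative when
                // treated as an int32, so convertInt32ToDouble() below produces
                // (value - 2^32); adding twoToThe32 back recovers the intended
                // unsigned value before it is boxed as a double.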

                GPRReg addressGPR = GPRInfo::regT0;
                if (addressGPR == recovery.gpr())
                    addressGPR = GPRInfo::regT1;

                m_jit.store64(addressGPR, scratchDataBuffer);
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

                AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

                m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
                m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
                m_jit.boxDouble(FPRInfo::fpRegT0, recovery.gpr());

                AssemblyHelpers::Jump done = m_jit.jump();

                positive.link(&m_jit);

                m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());

                done.link(&m_jit);

                m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
                m_jit.load64(scratchDataBuffer, addressGPR);
                break;
            }

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
            break;
        default:
            break;
        }
    }

    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)
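        //    Each FPR is boxed into the GPR with the same index (the
        //    GPRInfo::toRegister(FPRInfo::toIndex(...)) mapping below), so no two
        //    doubles compete for the same scratch GPR.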

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            m_jit.boxDouble(fpr, gpr);
        }

        // 8) Dump all doubles into the stack, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(gpr, scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // At this point all GPRs and FPRs are available for scratch use.

    // 9) Box all unboxed doubles in the stack.
    if (haveUnboxedDoubles) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != AlreadyInJSStackAsUnboxedDouble)
                continue;
            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 10) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;

                case Int32DisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.store64(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.
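            //
            // Cost note: for N displaced registers this is two loads and two stores
            // per value (stack -> GPR -> scratch, then scratch -> GPR -> stack), but
            // it needs no cycle detection, which keeps this rarely-taken path simple.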

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];

                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;

                case Int32DisplacedInJSStack: {
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.load64(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 11) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = operands.local(virtualRegister);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UInt32InGPR:
            case InFPR:
                m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 12) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined)
            m_jit.move(AssemblyHelpers::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
            else
                m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().
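    //
    //     (The heuristic itself is implemented by handleExitCounts(), called below;
    //     presumably that helper lives in the OSR exit compiler code shared with the
    //     32-bit back end, since it is not defined in this file.)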

    handleExitCounts(exit);

    // 14) Reify inlined call frames.

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

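    // The loop below walks from the exit's code origin out to the machine call frame
    // and, for each inlined frame, fills in the call frame header slots the baseline
    // JIT expects: CodeBlock, ScopeChain, CallerFrame, ReturnPC (the mapped machine
    // address of the return bytecode in the caller), ArgumentCount, and Callee.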
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        m_jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }

    // 15) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;
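        // NullableHashTraits is used here presumably so that a null InlineCallFrame*
        // (standing for the machine call frame) can be used as a key; the loop below
        // may add exactly such a null pointer to this set.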

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
                    m_jit.setupArguments(GPRInfo::regT0);
                } else
                    m_jit.setupArgumentsExecState();
                m_jit.move(
                    AssemblyHelpers::TrustedImmPtr(
                        bitwise_cast<void*>(operationCreateArguments)),
                    GPRInfo::nonArgGPR0);
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
                m_jit.store64(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
        }
    }

    // 16) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);

    // 17) Adjust the call frame pointer.

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 18) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);

    m_jit.jump(GPRInfo::regT1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("-> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)