Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "Operations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(") at JIT offset 0x%x  ", m_jit.debugOffset());
    dataLog(operands);
#endif
    
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        
        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }
    
#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif
    
#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
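            // The speculative add has already modified the destination register by the
            // time the overflow check fails, so undo it by subtracting the source back
            // out, restoring the value the baseline JIT expects to see.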
            m_jit.sub32(recovery->src(), recovery->dest());
            break;
            
        case BooleanSpeculationCheck:
            break;
            
        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.
    
    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).
            
            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.
            
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }
                
                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
                
                m_jit.push(scratch1);
                m_jit.push(scratch2);
                
                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();
                
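                // Record the cell's structure as the profile's last-seen structure, and
                // OR the bit for its indexing type into the observed ArrayModes. Note
                // that lshift32(scratch1, scratch2) shifts scratch2 left by the amount
                // held in scratch1, so scratch2 ends up holding (1 << indexingType).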
                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
                
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
            }
        }
        
        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
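            // Record the value that caused this speculation failure into the profile's
            // spec-fail bucket, so the baseline JIT's value profiling can learn from it.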
        
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
                
                m_jit.push(scratch);

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
                
                m_jit.pop(scratch);
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }
    
    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.
            
    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;
    
    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32InJSStack = false;
    bool haveUnboxedCellInJSStack = false;
    bool haveUnboxedBooleanInJSStack = false;
    bool haveUInt32s = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveArguments = false;
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack: {
            numberOfDisplacedVirtualRegisters++;
            ASSERT(recovery.virtualRegister().isLocal());
            
            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            int local = recovery.virtualRegister().toLocal();
            if (local < (int)operands.numberOfLocals()) {
                switch (operands.local(local).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UnboxedBooleanInGPR:
                case UInt32InGPR:
                case InPair:
                case InFPR:
                    if (!poisonedVirtualRegisters[local]) {
                        poisonedVirtualRegisters[local] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;
        }
        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32InJSStack = true;
            break;
            
        case AlreadyInJSStackAsUnboxedCell:
            haveUnboxedCellInJSStack = true;
            break;
            
        case AlreadyInJSStackAsUnboxedBoolean:
            haveUnboxedBooleanInJSStack = true;
            break;
            
        case InFPR:
            haveFPRs = true;
            break;
            
        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;
            
        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;
            
        default:
            break;
        }
    }
    
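    // Size the scratch buffer: one EncodedJSValue slot per poisoned register, plus one
    // per displaced register if they will not all fit in GPRs, plus two extra slots that
    // the UInt32InGPR reboxing below uses to spill a GPR and fpRegT0.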
    unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.
    
    // 5) Perform all reboxing of integers and cells, except for those in registers.

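    // For values that are already in the JSStack but unboxed, the payload word is
    // already in place; we only need to write the matching tag word.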
    if (haveUnboxedInt32InJSStack || haveUnboxedCellInJSStack || haveUnboxedBooleanInJSStack) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case AlreadyInJSStackAsUnboxedCell:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case AlreadyInJSStackAsUnboxedBoolean:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.
    
    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
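            // If the destination slot is poisoned, park the payload in the scratch
            // buffer for now; otherwise box it straight into its stack slot with the
            // tag implied by the recovery technique.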
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        case InPair:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.payloadGPR(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        case UInt32InGPR: {
            EncodedJSValue* myScratch = scratchDataBuffer + scratchBufferLengthBeforeUInt32s;
            
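            // A UInt32 value may not fit in an int32. If the sign bit is clear we can
            // box it as an Int32; otherwise we convert the signed interpretation to a
            // double and add 2^32 so the stored double equals the unsigned value. We
            // borrow a GPR and fpRegT0 for this, spilling both to the scratch slots
            // reserved above.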
            GPRReg addressGPR = GPRInfo::regT0;
            if (addressGPR == recovery.gpr())
                addressGPR = GPRInfo::regT1;
            
            m_jit.storePtr(addressGPR, myScratch);
            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
            
            AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));
            
            m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
            m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
            } else
                m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor((VirtualRegister)operand));
            
            AssemblyHelpers::Jump done = m_jit.jump();
            
            positive.link(&m_jit);
            
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
            } else {
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
            }
            
            done.link(&m_jit);
            
            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
            m_jit.loadPtr(myScratch, addressGPR);
            
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            }
            break;
        }
        default:
            break;
        }
    }
    
    // 7) Dump all doubles into the stack, or to the scratch storage if the
    //    destination virtual register is poisoned.
    if (haveFPRs) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }
    
    // At this point all GPRs are available for scratch use.
    
    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);
    
    // 8) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.
    
    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.
        
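            // Two passes: first load every displaced value (payload and tag) into GPRs,
            // then store them all to their destination slots. Because nothing is written
            // until everything has been read, overlapping source and destination slots
            // cannot clobber each other.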
            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;
                case Int32DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case CellDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case BooleanDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                default:
                    break;
                }
            }
        
            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case CellDisplacedInJSStack:
                case BooleanDisplacedInJSStack:
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.
        
            // For now this code is engineered to be correct but probably not
            // super efficient. In particular, it correctly handles cases where
            // for example the displacements are a permutation of the destination
            // values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.
        
            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                    scratchIndex++;
                    break;
                case Int32DisplacedInJSStack:
                case CellDisplacedInJSStack:
                case BooleanDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    break;
                default:
                    break;
                }
            }
        
            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    scratchIndex++;
                    break;
                case Int32DisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                case CellDisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                case BooleanDisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }
        
            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }
    
    // 9) Dump all poisoned virtual registers.
    
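    // Now that the displaced registers above have been read, it is safe to write the
    // values that were parked in the scratch buffer into their destination stack slots.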
    if (numberOfPoisonedVirtualRegisters) {
        for (int localIndex = 0; localIndex < (int)operands.numberOfLocals(); ++localIndex) {
            if (!poisonedVirtualRegisters[localIndex])
                continue;
            
            VirtualRegister virtualRegister = virtualRegisterForLocal(localIndex);

            const ValueRecovery& recovery = operands.local(localIndex);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UnboxedBooleanInGPR: {
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(localIndex)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(virtualRegister));
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor(virtualRegister));
                break;
            }

            case InFPR:
            case InPair:
            case UInt32InGPR:
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(localIndex)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(localIndex)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(virtualRegister));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor(virtualRegister));
                break;
                
            default:
                break;
            }
        }
    }
    
    // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined) {
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
        }
        
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined()) {
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
            } else {
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
            }
        }
    }
    
    // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want a 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().
    
    handleExitCounts(m_jit, exit);
    
    // 12) Reify inlined call frames.
    
    reifyInlinedCallFrames(m_jit, exit);
    
    // 13) Create arguments if necessary and place them into the appropriate aliased
    //     registers.
    
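    // Each ArgumentsThatWereNotCreated recovery aliases the arguments object of some
    // (possibly inlined) call frame. We create that object once per frame, tracked by
    // didCreateArgumentsObject, and then copy the resulting cell into every aliased slot.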
    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.setupArgumentsWithExecState(
                        AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
                    m_jit.move(
                        AssemblyHelpers::TrustedImmPtr(
                            bitwise_cast<void*>(operationCreateInlinedArguments)),
                        GPRInfo::nonArgGPR0);
                } else {
                    m_jit.setupArgumentsExecState();
                    m_jit.move(
                        AssemblyHelpers::TrustedImmPtr(
                            bitwise_cast<void*>(operationCreateArguments)),
                        GPRInfo::nonArgGPR0);
                }
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store32(
                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                    AssemblyHelpers::tagFor(argumentsRegister));
                m_jit.store32(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::payloadFor(argumentsRegister));
                m_jit.store32(
                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                    AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.store32(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
        }
    }
    
    // 14) Load the result of the last bytecode operation into the cached result
    //     registers (payload and tag).
    
    if (exit.m_lastSetOperand.isValid()) {
        m_jit.load32(AssemblyHelpers::payloadFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
        m_jit.load32(AssemblyHelpers::tagFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
    }
    
    // 15) And finish.
    
    adjustAndJumpToTarget(m_jit, exit);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)