Rolling out r221832: Regresses Speedometer by ~4% and Dromaeo CSS YUI by ~20%.
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGOSRExit.cpp
1 /*
2  * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGOSRExit.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "AssemblyHelpers.h"
32 #include "DFGGraph.h"
33 #include "DFGMayExit.h"
34 #include "DFGOSRExitCompilerCommon.h"
35 #include "DFGOSRExitPreparation.h"
36 #include "DFGOperations.h"
37 #include "DFGSpeculativeJIT.h"
38 #include "FrameTracers.h"
39 #include "JSCInlines.h"
40 #include "OperandsInlines.h"
41
42 namespace JSC { namespace DFG {
43
44 OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex)
45     : OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic, jit->m_origin.wasHoisted)
46     , m_jsValueSource(jsValueSource)
47     , m_valueProfile(valueProfile)
48     , m_recoveryIndex(recoveryIndex)
49     , m_streamIndex(streamIndex)
50 {
51     bool canExit = jit->m_origin.exitOK;
52     if (!canExit && jit->m_currentNode) {
53         ExitMode exitMode = mayExit(jit->m_jit.graph(), jit->m_currentNode);
54         canExit = exitMode == ExitMode::Exits || exitMode == ExitMode::ExitsForExceptions;
55     }
56     DFG_ASSERT(jit->m_jit.graph(), jit->m_currentNode, canExit);
57 }
58
59 void OSRExit::setPatchableCodeOffset(MacroAssembler::PatchableJump check)
60 {
61     m_patchableCodeOffset = check.m_jump.m_label.m_offset;
62 }
63
64 MacroAssembler::Jump OSRExit::getPatchableCodeOffsetAsJump() const
65 {
66     return MacroAssembler::Jump(AssemblerLabel(m_patchableCodeOffset));
67 }
68
69 CodeLocationJump OSRExit::codeLocationForRepatch(CodeBlock* dfgCodeBlock) const
70 {
71     return CodeLocationJump(dfgCodeBlock->jitCode()->dataAddressAtOffset(m_patchableCodeOffset));
72 }
73
74 void OSRExit::correctJump(LinkBuffer& linkBuffer)
75 {
76     MacroAssembler::Label label;
77     label.m_label.m_offset = m_patchableCodeOffset;
78     m_patchableCodeOffset = linkBuffer.offsetOf(label);
79 }
80
81 void OSRExit::emitRestoreArguments(CCallHelpers& jit, const Operands<ValueRecovery>& operands)
82 {
83     HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
84     for (size_t index = 0; index < operands.size(); ++index) {
85         const ValueRecovery& recovery = operands[index];
86         int operand = operands.operandForIndex(index);
87
88         if (recovery.technique() != DirectArgumentsThatWereNotCreated
89             && recovery.technique() != ClonedArgumentsThatWereNotCreated)
90             continue;
91
92         MinifiedID id = recovery.nodeID();
93         auto iter = alreadyAllocatedArguments.find(id);
94         if (iter != alreadyAllocatedArguments.end()) {
95             JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
96             jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
97             jit.storeValue(regs, CCallHelpers::addressFor(operand));
98             continue;
99         }
100
101         InlineCallFrame* inlineCallFrame =
102             jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();
103
104         int stackOffset;
105         if (inlineCallFrame)
106             stackOffset = inlineCallFrame->stackOffset;
107         else
108             stackOffset = 0;
109
110         if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
111             jit.loadPtr(
112                 AssemblyHelpers::addressFor(stackOffset + CallFrameSlot::callee),
113                 GPRInfo::regT0);
114         } else {
115             jit.move(
116                 AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
117                 GPRInfo::regT0);
118         }
119
120         if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
121             jit.load32(
122                 AssemblyHelpers::payloadFor(stackOffset + CallFrameSlot::argumentCount),
123                 GPRInfo::regT1);
124         } else {
125             jit.move(
126                 AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis),
127                 GPRInfo::regT1);
128         }
129
130         jit.setupArgumentsWithExecState(
131             AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
132         switch (recovery.technique()) {
133         case DirectArgumentsThatWereNotCreated:
134             jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
135             break;
136         case ClonedArgumentsThatWereNotCreated:
137             jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
138             break;
139         default:
140             RELEASE_ASSERT_NOT_REACHED();
141             break;
142         }
143         jit.call(GPRInfo::nonArgGPR0);
144         jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
145
146         alreadyAllocatedArguments.add(id, operand);
147     }
148 }
149
150 void JIT_OPERATION OSRExit::compileOSRExit(ExecState* exec)
151 {
152     VM* vm = &exec->vm();
153     auto scope = DECLARE_THROW_SCOPE(*vm);
154
155     if (vm->callFrameForCatch)
156         RELEASE_ASSERT(vm->callFrameForCatch == exec);
157
158     CodeBlock* codeBlock = exec->codeBlock();
159     ASSERT(codeBlock);
160     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
161
162     // It's sort of preferable that we don't GC while in here. Anyway, doing so wouldn't
163     // really be profitable.
164     DeferGCForAWhile deferGC(vm->heap);
165
166     uint32_t exitIndex = vm->osrExitIndex;
167     OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];
168
169     ASSERT(!vm->callFrameForCatch || exit.m_kind == GenericUnwind);
170     EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());
171     
172     prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
173
174     // Compute the value recoveries.
175     Operands<ValueRecovery> operands;
176     codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
177
178     SpeculationRecovery* recovery = 0;
179     if (exit.m_recoveryIndex != UINT_MAX)
180         recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];
181
182     {
183         CCallHelpers jit(codeBlock);
184
185         if (exit.m_kind == GenericUnwind) {
186             // We are acting as a de facto op_catch because we arrive here from genericUnwind().
187             // So, we must restore our call frame and stack pointer.
188             jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm);
189             jit.loadPtr(vm->addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
190         }
191         jit.addPtr(
192             CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
193             GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
194
195         jit.jitAssertHasValidCallFrame();
196
197         if (UNLIKELY(vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
198             Profiler::Database& database = *vm->m_perBytecodeProfiler;
199             Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
200
201             Profiler::OSRExit* profilerExit = compilation->addOSRExit(
202                 exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
203                 exit.m_kind, exit.m_kind == UncountableInvalidation);
204             jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
205         }
206
207         compileExit(jit, *vm, exit, operands, recovery);
208
209         LinkBuffer patchBuffer(jit, codeBlock);
210         exit.m_code = FINALIZE_CODE_IF(
211             shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseDFGOSRExit(),
212             patchBuffer,
213             ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
214                 exitIndex, toCString(exit.m_codeOrigin).data(),
215                 exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
216                 toCString(ignoringContext<DumpContext>(operands)).data()));
217     }
218
219     MacroAssembler::repatchJump(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
220
221     vm->osrExitJumpDestination = exit.m_code.code().executableAddress();
222 }
223
224 void OSRExit::compileExit(CCallHelpers& jit, VM& vm, const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
225 {
226     jit.jitAssertTagsInPlace();
227
228     // Pro-forma stuff.
229     if (Options::printEachOSRExit()) {
230         SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
231         debugInfo->codeBlock = jit.codeBlock();
232         debugInfo->kind = exit.m_kind;
233         debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
234
235         jit.debugCall(vm, debugOperationPrintSpeculationFailure, debugInfo);
236     }
237
238     // Perform speculation recovery. This only comes into play when an operation
239     // starts mutating state before verifying the speculation it has already made.
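    //
    // For example (a sketch, not literally the emitted code): a speculative integer add
    // is compiled roughly as
    //
    //     add32 src, dest
    //     jo    <this OSR exit>        ; overflow means the int32 speculation was wrong
    //
    // so by the time we get here, dest has already been clobbered. The SpeculativeAdd
    // case below undoes that mutation with sub32 and, on 64-bit, re-tags dest as a boxed
    // int32 so the generic recovery code sees a well-formed JSValue.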
240
241     if (recovery) {
242         switch (recovery->type()) {
243         case SpeculativeAdd:
244             jit.sub32(recovery->src(), recovery->dest());
245 #if USE(JSVALUE64)
246             jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
247 #endif
248             break;
249
250         case SpeculativeAddImmediate:
251             jit.sub32(AssemblyHelpers::Imm32(recovery->immediate()), recovery->dest());
252 #if USE(JSVALUE64)
253             jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
254 #endif
255             break;
256
257         case BooleanSpeculationCheck:
258 #if USE(JSVALUE64)
259             jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
260 #endif
261             break;
262
263         default:
264             break;
265         }
266     }
267
268     // Refine some array and/or value profile, if appropriate.
269
270     if (!!exit.m_jsValueSource) {
271         if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
272             // If the instruction that this originated from has an array profile, then
273             // refine it. If it doesn't, then do nothing. The latter could happen for
274             // hoisted checks, or checks emitted for operations that didn't have array
275             // profiling - either ops that aren't array accesses at all, or weren't
276  * known to be array accesses in the bytecode. The latter case is a FIXME
277             // while the former case is an outcome of a CheckStructure not knowing why
278  * it was emitted (could be either due to an inline cache of a property
279  * access, or due to an array profile).
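            //
            // Concretely, "refining" the array profile below means recording the cell's
            // StructureID in the profile's last-seen-structure slot and OR-ing the bit for
            // the cell's indexing type (its ArrayModes bit, i.e. 1 << indexingType) into
            // the profile's observed array modes, which is what the scratch-register
            // sequence below does.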
280
281             CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
282             if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
283 #if USE(JSVALUE64)
284                 GPRReg usedRegister;
285                 if (exit.m_jsValueSource.isAddress())
286                     usedRegister = exit.m_jsValueSource.base();
287                 else
288                     usedRegister = exit.m_jsValueSource.gpr();
289 #else
290                 GPRReg usedRegister1;
291                 GPRReg usedRegister2;
292                 if (exit.m_jsValueSource.isAddress()) {
293                     usedRegister1 = exit.m_jsValueSource.base();
294                     usedRegister2 = InvalidGPRReg;
295                 } else {
296                     usedRegister1 = exit.m_jsValueSource.payloadGPR();
297                     if (exit.m_jsValueSource.hasKnownTag())
298                         usedRegister2 = InvalidGPRReg;
299                     else
300                         usedRegister2 = exit.m_jsValueSource.tagGPR();
301                 }
302 #endif
303
304                 GPRReg scratch1;
305                 GPRReg scratch2;
306 #if USE(JSVALUE64)
307                 scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
308                 scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
309 #else
310                 scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
311                 scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
312 #endif
313
314                 if (isARM64()) {
315                     jit.pushToSave(scratch1);
316                     jit.pushToSave(scratch2);
317                 } else {
318                     jit.push(scratch1);
319                     jit.push(scratch2);
320                 }
321
322                 GPRReg value;
323                 if (exit.m_jsValueSource.isAddress()) {
324                     value = scratch1;
325                     jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
326                 } else
327                     value = exit.m_jsValueSource.payloadGPR();
328
329                 jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
330                 jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID());
331 #if USE(JSVALUE64)
332                 jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeAndMiscOffset()), scratch1);
333 #else
334                 jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeIncludingHistoryOffset()), scratch1);
335 #endif
336                 jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
337                 jit.lshift32(scratch1, scratch2);
338                 jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
339
340                 if (isARM64()) {
341                     jit.popToRestore(scratch2);
342                     jit.popToRestore(scratch1);
343                 } else {
344                     jit.pop(scratch2);
345                     jit.pop(scratch1);
346                 }
347             }
348         }
349
350         if (MethodOfGettingAValueProfile profile = exit.m_valueProfile) {
351 #if USE(JSVALUE64)
352             if (exit.m_jsValueSource.isAddress()) {
353                 // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
354                 // since we know how to restore it.
355                 jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
356                 profile.emitReportValue(jit, JSValueRegs(GPRInfo::tagTypeNumberRegister));
357                 jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
358             } else
359                 profile.emitReportValue(jit, JSValueRegs(exit.m_jsValueSource.gpr()));
360 #else // not USE(JSVALUE64)
361             if (exit.m_jsValueSource.isAddress()) {
362                 // Save a register so we can use it.
363                 GPRReg scratchPayload = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
364                 GPRReg scratchTag = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base(), scratchPayload);
365                 jit.pushToSave(scratchPayload);
366                 jit.pushToSave(scratchTag);
367
368                 JSValueRegs scratch(scratchTag, scratchPayload);
369                 
370                 jit.loadValue(exit.m_jsValueSource.asAddress(), scratch);
371                 profile.emitReportValue(jit, scratch);
372                 
373                 jit.popToRestore(scratchTag);
374                 jit.popToRestore(scratchPayload);
375             } else if (exit.m_jsValueSource.hasKnownTag()) {
376                 GPRReg scratchTag = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.payloadGPR());
377                 jit.pushToSave(scratchTag);
378                 jit.move(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), scratchTag);
379                 JSValueRegs value(scratchTag, exit.m_jsValueSource.payloadGPR());
380                 profile.emitReportValue(jit, value);
381                 jit.popToRestore(scratchTag);
382             } else
383                 profile.emitReportValue(jit, exit.m_jsValueSource.regs());
384 #endif // USE(JSVALUE64)
385         }
386     }
387
388     // What follows is an intentionally simple OSR exit implementation that generates
389     // fairly poor code but is very easy to hack. In particular, it dumps all state that
390     // needs conversion into a scratch buffer so that in step 6, where we actually do the
391     // conversions, we know that all temp registers are free to use and the variable is
392     // definitely in a well-known spot in the scratch buffer regardless of whether it had
393     // originally been in a register or spilled. This allows us to decouple "where was
394     // the variable" from "how was it represented". Consider the
395     // Int32DisplacedInJSStack recovery: it tells us that the value is in a
396     // particular place and that that place holds an unboxed int32. We have two different
397     // places that a value could be (displaced, register) and a bunch of different
398     // ways of representing a value. The number of recoveries is two * a bunch. The code
399     // below means that we have to have two + a bunch cases rather than two * a bunch.
400     // Once we have loaded the value from wherever it was, the reboxing is the same
401     // regardless of its location. Likewise, before we do the reboxing, the way we get to
402     // the value (i.e. where we load it from) is the same regardless of its type. Because
403     // the code below always dumps everything into a scratch buffer first, the two
404     // questions become orthogonal, which simplifies adding new types and adding new
405     // locations.
406     //
407     // This raises the question: does using such a suboptimal implementation of OSR exit,
408     // where we always emit code to dump all state into a scratch buffer only to then
409     // dump it right back into the stack, hurt us in any way? The answer is that OSR exits
410     // are rare. Our tiering strategy ensures this. This is because if an OSR exit is
411     // taken more than ~100 times, we jettison the DFG code block along with all of its
412     // exits. It is impossible for an OSR exit - i.e. the code we compile below - to
413     // execute frequently enough for the codegen to matter that much. It probably matters
414     // enough that we don't want to turn this into some super-slow function call, but so
415     // long as we're generating straight-line code, that code can be pretty bad. Also
416     // because we tend to exit only along one OSR exit from any DFG code block - that's an
417     // empirical result that we're extremely confident about - the code size of this
418     // doesn't matter much. Hence any attempt to optimize the codegen here is just purely
419     // harmful to the system: it probably won't reduce either net memory usage or net
420     // execution time. It will only prevent us from cleanly decoupling "where was the
421     // variable" from "how was it represented", which will make it more difficult to add
422     // features in the future and it will make it harder to reason about bugs.
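    //
    // Schematically (a sketch of the structure of the code below, not new behavior):
    //
    //     for (each operand)                  // phase 1: normalize *location*
    //         copy it from wherever it lives (GPR, FPR, or the stack) into scratch[index];
    //     for (each operand)                  // phase 2: normalize *representation*
    //         load scratch[index], rebox it according to its recovery technique,
    //         and store it into the operand's stack slot;
    //
    // so adding a new location only touches phase 1, and adding a new representation
    // only touches phase 2.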
423
424     // Save all state from GPRs into the scratch buffer.
425
426     ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
427     EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
428
429     for (size_t index = 0; index < operands.size(); ++index) {
430         const ValueRecovery& recovery = operands[index];
431
432         switch (recovery.technique()) {
433         case UnboxedInt32InGPR:
434         case UnboxedCellInGPR:
435 #if USE(JSVALUE64)
436         case InGPR:
437         case UnboxedInt52InGPR:
438         case UnboxedStrictInt52InGPR:
439             jit.store64(recovery.gpr(), scratch + index);
440             break;
441 #else
442         case UnboxedBooleanInGPR:
443             jit.store32(
444                 recovery.gpr(),
445                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
446             break;
447             
448         case InPair:
449             jit.store32(
450                 recovery.tagGPR(),
451                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
452             jit.store32(
453                 recovery.payloadGPR(),
454                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
455             break;
456 #endif
457
458         default:
459             break;
460         }
461     }
462
463     // And voila, all GPRs are free to reuse.
464
465     // Save all state from FPRs into the scratch buffer.
466
467     for (size_t index = 0; index < operands.size(); ++index) {
468         const ValueRecovery& recovery = operands[index];
469
470         switch (recovery.technique()) {
471         case UnboxedDoubleInFPR:
472         case InFPR:
473             jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
474             jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
475             break;
476
477         default:
478             break;
479         }
480     }
481
482     // Now, all FPRs are also free.
483
484     // Save all state from the stack into the scratch buffer. For simplicity we
485     // do this even for state that's already in the right place on the stack.
486     // It makes things simpler later.
487
488     for (size_t index = 0; index < operands.size(); ++index) {
489         const ValueRecovery& recovery = operands[index];
490
491         switch (recovery.technique()) {
492         case DisplacedInJSStack:
493         case CellDisplacedInJSStack:
494         case BooleanDisplacedInJSStack:
495         case Int32DisplacedInJSStack:
496         case DoubleDisplacedInJSStack:
497 #if USE(JSVALUE64)
498         case Int52DisplacedInJSStack:
499         case StrictInt52DisplacedInJSStack:
500             jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
501             jit.store64(GPRInfo::regT0, scratch + index);
502             break;
503 #else
504             jit.load32(
505                 AssemblyHelpers::tagFor(recovery.virtualRegister()),
506                 GPRInfo::regT0);
507             jit.load32(
508                 AssemblyHelpers::payloadFor(recovery.virtualRegister()),
509                 GPRInfo::regT1);
510             jit.store32(
511                 GPRInfo::regT0,
512                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
513             jit.store32(
514                 GPRInfo::regT1,
515                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
516             break;
517 #endif
518
519         default:
520             break;
521         }
522     }
523
524     // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
525     // could toast some stack that the DFG used. We need to do it before storing to stack offsets
526     // used by baseline.
527     jit.addPtr(
528         CCallHelpers::TrustedImm32(
529             -jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
530         CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
531
532     // Restore the DFG callee saves and then save the ones the baseline JIT uses.
533     jit.emitRestoreCalleeSaves();
534     jit.emitSaveCalleeSavesFor(jit.baselineCodeBlock());
535
536     // The tag registers are needed to materialize recoveries below.
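    // (On 64-bit this reloads GPRInfo::tagTypeNumberRegister and tagMaskRegister with
    // their expected constant values; the int32/int52 reboxing below relies on
    // tagTypeNumberRegister.)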
537     jit.emitMaterializeTagCheckRegisters();
538
539     if (exit.isExceptionHandler())
540         jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm);
541
542     // Do all data format conversions and store the results into the stack.
543
544     for (size_t index = 0; index < operands.size(); ++index) {
545         const ValueRecovery& recovery = operands[index];
546         VirtualRegister reg = operands.virtualRegisterForIndex(index);
547
548         if (reg.isLocal() && reg.toLocal() < static_cast<int>(jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters()))
549             continue;
550
551         int operand = reg.offset();
552
553         switch (recovery.technique()) {
554         case DisplacedInJSStack:
555         case InFPR:
556 #if USE(JSVALUE64)
557         case InGPR:
558         case UnboxedCellInGPR:
559         case CellDisplacedInJSStack:
560         case BooleanDisplacedInJSStack:
561             jit.load64(scratch + index, GPRInfo::regT0);
562             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
563             break;
564 #else // not USE(JSVALUE64)
565         case InPair:
566             jit.load32(
567                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
568                 GPRInfo::regT0);
569             jit.load32(
570                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
571                 GPRInfo::regT1);
572             jit.store32(
573                 GPRInfo::regT0,
574                 AssemblyHelpers::tagFor(operand));
575             jit.store32(
576                 GPRInfo::regT1,
577                 AssemblyHelpers::payloadFor(operand));
578             break;
579
580         case UnboxedCellInGPR:
581         case CellDisplacedInJSStack:
582             jit.load32(
583                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
584                 GPRInfo::regT0);
585             jit.store32(
586                 AssemblyHelpers::TrustedImm32(JSValue::CellTag),
587                 AssemblyHelpers::tagFor(operand));
588             jit.store32(
589                 GPRInfo::regT0,
590                 AssemblyHelpers::payloadFor(operand));
591             break;
592
593         case UnboxedBooleanInGPR:
594         case BooleanDisplacedInJSStack:
595             jit.load32(
596                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
597                 GPRInfo::regT0);
598             jit.store32(
599                 AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
600                 AssemblyHelpers::tagFor(operand));
601             jit.store32(
602                 GPRInfo::regT0,
603                 AssemblyHelpers::payloadFor(operand));
604             break;
605 #endif // USE(JSVALUE64)
606
607         case UnboxedInt32InGPR:
608         case Int32DisplacedInJSStack:
609 #if USE(JSVALUE64)
610             jit.load64(scratch + index, GPRInfo::regT0);
611             jit.zeroExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0);
612             jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
613             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
614 #else
615             jit.load32(
616                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
617                 GPRInfo::regT0);
618             jit.store32(
619                 AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
620                 AssemblyHelpers::tagFor(operand));
621             jit.store32(
622                 GPRInfo::regT0,
623                 AssemblyHelpers::payloadFor(operand));
624 #endif
625             break;
626
627 #if USE(JSVALUE64)
628         case UnboxedInt52InGPR:
629         case Int52DisplacedInJSStack:
630             jit.load64(scratch + index, GPRInfo::regT0);
631             jit.rshift64(
632                 AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
633             jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
634             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
635             break;
636
637         case UnboxedStrictInt52InGPR:
638         case StrictInt52DisplacedInJSStack:
639             jit.load64(scratch + index, GPRInfo::regT0);
640             jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
641             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
642             break;
643 #endif
644
645         case UnboxedDoubleInFPR:
646         case DoubleDisplacedInJSStack:
647             jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
648             jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
649             jit.purifyNaN(FPRInfo::fpRegT0);
650 #if USE(JSVALUE64)
651             jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
652             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
653 #else
654             jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
655 #endif
656             break;
657
658         case Constant:
659 #if USE(JSVALUE64)
660             jit.store64(
661                 AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())),
662                 AssemblyHelpers::addressFor(operand));
663 #else
664             jit.store32(
665                 AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
666                 AssemblyHelpers::tagFor(operand));
667             jit.store32(
668                 AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
669                 AssemblyHelpers::payloadFor(operand));
670 #endif
671             break;
672
673         case DirectArgumentsThatWereNotCreated:
674         case ClonedArgumentsThatWereNotCreated:
675             // Don't do this, yet.
676             break;
677
678         default:
679             RELEASE_ASSERT_NOT_REACHED();
680             break;
681         }
682     }
683
684     // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
685     // recoveries don't recursively refer to each other. But, we don't try to assume that they only
686     // refer to certain ranges of locals. Hence we need to do this here, once the stack is sensible.
687     // Note that we also roughly assume that the arguments might still be materialized outside of their
688     // inline call frame scope - but for now the DFG wouldn't do that.
689
690     emitRestoreArguments(jit, operands);
691
692     // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
693     // that all new calls into this code will go to the new JIT, so the execute
694     // counter only affects call frames that performed OSR exit and call frames
695     // that were still executing the old JIT at the time of another call frame's
696     // OSR exit. We want to ensure that the following is true:
697     //
698     // (a) Code that performs an OSR exit gets a chance to reenter optimized
699     //     code eventually, since optimized code is faster. But we don't
700     //     want to do such reentry too aggressively (see (c) below).
701     //
702     // (b) If there is code on the call stack that is still running the old
703     //     JIT's code and has never OSR'd, then it should get a chance to
704     //     perform OSR entry despite the fact that we've exited.
705     //
706     // (c) Code that performs an OSR exit should not immediately retry OSR
707     //     entry, since both forms of OSR are expensive. OSR entry is
708     //     particularly expensive.
709     //
710     // (d) Frequent OSR failures, even those that do not result in the code
711     //     running in a hot loop, result in recompilation getting triggered.
712     //
713     // To ensure (c), we'd like to set the execute counter to
714     // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
715     // (a) and (b), since then every OSR exit would delay the opportunity for
716     // every call frame to perform OSR entry. Essentially, if OSR exit happens
717     // frequently and the function has few loops, then the counter will never
718     // become non-negative and OSR entry will never be triggered. OSR entry
719     // will only happen if a loop gets hot in the old JIT, which does a pretty
720     // good job of ensuring (a) and (b). But that doesn't take care of (d),
721     // since each speculation failure would reset the execute counter.
722     // So we check here if the number of speculation failures is significantly
723     // larger than the number of successes (we want 90% success rate), and if
724     // there have been a large enough number of failures. If so, we set the
725     // counter to 0; otherwise we set the counter to
726     // counterValueForOptimizeAfterWarmUp().
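    //
    // Roughly, in pseudocode (handleExitCounts() below emits the real version of this;
    // the exact thresholds live in CodeBlock and Options):
    //
    //     if (enough failures have accumulated
    //         && failures significantly outnumber successes)   // i.e. we fell below ~90% success
    //         executeCounter = 0;
    //     else
    //         executeCounter = counterValueForOptimizeAfterWarmUp();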
727
728     handleExitCounts(jit, exit);
729
730     // Reify inlined call frames.
731
732     reifyInlinedCallFrames(jit, exit);
733
734     // And finish.
735     adjustAndJumpToTarget(vm, jit, exit);
736 }
737
738 void JIT_OPERATION OSRExit::debugOperationPrintSpeculationFailure(ExecState* exec, void* debugInfoRaw, void* scratch)
739 {
740     VM* vm = &exec->vm();
741     NativeCallFrameTracer tracer(vm, exec);
742
743     SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
744     CodeBlock* codeBlock = debugInfo->codeBlock;
745     CodeBlock* alternative = codeBlock->alternative();
746     dataLog("Speculation failure in ", *codeBlock);
747     dataLog(" @ exit #", vm->osrExitIndex, " (bc#", debugInfo->bytecodeOffset, ", ", exitKindToString(debugInfo->kind), ") with ");
748     if (alternative) {
749         dataLog(
750             "executeCounter = ", alternative->jitExecuteCounter(),
751             ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
752             ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
753     } else
754         dataLog("no alternative code block (i.e. we've been jettisoned)");
755     dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
756     dataLog("    GPRs at time of exit:");
757     char* scratchPointer = static_cast<char*>(scratch);
758     for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
759         GPRReg gpr = GPRInfo::toRegister(i);
760         dataLog(" ", GPRInfo::debugName(gpr), ":", RawPointer(*reinterpret_cast_ptr<void**>(scratchPointer)));
761         scratchPointer += sizeof(EncodedJSValue);
762     }
763     dataLog("\n");
764     dataLog("    FPRs at time of exit:");
765     for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
766         FPRReg fpr = FPRInfo::toRegister(i);
767         dataLog(" ", FPRInfo::debugName(fpr), ":");
768         uint64_t bits = *reinterpret_cast_ptr<uint64_t*>(scratchPointer);
769         double value = *reinterpret_cast_ptr<double*>(scratchPointer);
770         dataLogF("%llx:%lf", static_cast<long long>(bits), value);
771         scratchPointer += sizeof(EncodedJSValue);
772     }
773     dataLog("\n");
774 }
775
776 } } // namespace JSC::DFG
777
778 #endif // ENABLE(DFG_JIT)