Source/JavaScriptCore/dfg/DFGOSRExit.cpp
1 /*
2  * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGOSRExit.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "AssemblyHelpers.h"
32 #include "ClonedArguments.h"
33 #include "DFGGraph.h"
34 #include "DFGMayExit.h"
35 #include "DFGOSRExitCompilerCommon.h"
36 #include "DFGOSRExitPreparation.h"
37 #include "DFGOperations.h"
38 #include "DFGSpeculativeJIT.h"
39 #include "DirectArguments.h"
40 #include "FrameTracers.h"
41 #include "InlineCallFrame.h"
42 #include "JSCInlines.h"
43 #include "JSCJSValue.h"
44 #include "OperandsInlines.h"
45 #include "ProbeContext.h"
46 #include "ProbeFrame.h"
47
48 namespace JSC { namespace DFG {
49
50 // Probe based OSR Exit.
51
52 using CPUState = Probe::CPUState;
53 using Context = Probe::Context;
54 using Frame = Probe::Frame;
55
56 static void reifyInlinedCallFrames(Probe::Context&, CodeBlock* baselineCodeBlock, const OSRExitBase&);
57 static void adjustAndJumpToTarget(Probe::Context&, VM&, CodeBlock*, CodeBlock* baselineCodeBlock, OSRExit&);
58 static void printOSRExit(Context&, uint32_t osrExitIndex, const OSRExit&);
59
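// Reads the JSValue described by |source| out of the probe-captured CPU state: either from
// memory at base register + offset, or directly from the register(s) holding the value.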
60 static JSValue jsValueFor(CPUState& cpu, JSValueSource source)
61 {
62     if (source.isAddress()) {
63         JSValue result;
64         std::memcpy(&result, cpu.gpr<uint8_t*>(source.base()) + source.offset(), sizeof(JSValue));
65         return result;
66     }
67 #if USE(JSVALUE64)
68     return JSValue::decode(cpu.gpr<EncodedJSValue>(source.gpr()));
69 #else
70     if (source.hasKnownTag())
71         return JSValue(source.tag(), cpu.gpr<int32_t>(source.payloadGPR()));
72     return JSValue(cpu.gpr<int32_t>(source.tagGPR()), cpu.gpr<int32_t>(source.payloadGPR()));
73 #endif
74 }
75
76 #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
77
78 static_assert(is64Bit(), "we only support callee save registers on 64-bit");
79
80 // Based on AssemblyHelpers::emitRestoreCalleeSavesFor().
81 static void restoreCalleeSavesFor(Context& context, CodeBlock* codeBlock)
82 {
83     ASSERT(codeBlock);
84
85     RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
86     RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
87     unsigned registerCount = calleeSaves->size();
88
89     uintptr_t* physicalStackFrame = context.fp<uintptr_t*>();
90     for (unsigned i = 0; i < registerCount; i++) {
91         RegisterAtOffset entry = calleeSaves->at(i);
92         if (dontRestoreRegisters.get(entry.reg()))
93             continue;
94         // The callee saved values come from the original stack, not the recovered stack.
95         // Hence, we read the values directly from the physical stack memory instead of
96         // going through context.stack().
97         ASSERT(!(entry.offset() % sizeof(uintptr_t)));
98         context.gpr(entry.reg().gpr()) = physicalStackFrame[entry.offset() / sizeof(uintptr_t)];
99     }
100 }
101
102 // Based on AssemblyHelpers::emitSaveCalleeSavesFor().
103 static void saveCalleeSavesFor(Context& context, CodeBlock* codeBlock)
104 {
105     auto& stack = context.stack();
106     ASSERT(codeBlock);
107
108     RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
109     RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
110     unsigned registerCount = calleeSaves->size();
111
112     for (unsigned i = 0; i < registerCount; i++) {
113         RegisterAtOffset entry = calleeSaves->at(i);
114         if (dontSaveRegisters.get(entry.reg()))
115             continue;
116         stack.set(context.fp(), entry.offset(), context.gpr<uintptr_t>(entry.reg().gpr()));
117     }
118 }
119
120 // Based on AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer().
121 static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context& context)
122 {
123     VM& vm = *context.arg<VM*>();
124
125     RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
126     RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
127     unsigned registerCount = allCalleeSaves->size();
128
129     VMEntryRecord* entryRecord = vmEntryRecord(vm.topEntryFrame);
130     uintptr_t* calleeSaveBuffer = reinterpret_cast<uintptr_t*>(entryRecord->calleeSaveRegistersBuffer);
131
132     // Restore all callee saves.
133     for (unsigned i = 0; i < registerCount; i++) {
134         RegisterAtOffset entry = allCalleeSaves->at(i);
135         if (dontRestoreRegisters.get(entry.reg()))
136             continue;
137         size_t uintptrOffset = entry.offset() / sizeof(uintptr_t);
138         if (entry.reg().isGPR())
139             context.gpr(entry.reg().gpr()) = calleeSaveBuffer[uintptrOffset];
140         else
141             context.fpr(entry.reg().fpr()) = bitwise_cast<double>(calleeSaveBuffer[uintptrOffset]);
142     }
143 }
144
145 // Based on AssemblyHelpers::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer().
146 static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context& context)
147 {
148     VM& vm = *context.arg<VM*>();
149     auto& stack = context.stack();
150
151     VMEntryRecord* entryRecord = vmEntryRecord(vm.topEntryFrame);
152     void* calleeSaveBuffer = entryRecord->calleeSaveRegistersBuffer;
153
154     RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
155     RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
156     unsigned registerCount = allCalleeSaves->size();
157
158     for (unsigned i = 0; i < registerCount; i++) {
159         RegisterAtOffset entry = allCalleeSaves->at(i);
160         if (dontCopyRegisters.get(entry.reg()))
161             continue;
162         if (entry.reg().isGPR())
163             stack.set(calleeSaveBuffer, entry.offset(), context.gpr<uintptr_t>(entry.reg().gpr()));
164         else
165             stack.set(calleeSaveBuffer, entry.offset(), context.fpr<uintptr_t>(entry.reg().fpr()));
166     }
167 }
168
169 // Based on AssemblyHelpers::emitSaveOrCopyCalleeSavesFor().
170 static void saveOrCopyCalleeSavesFor(Context& context, CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, bool wasCalledViaTailCall)
171 {
172     Frame frame(context.fp(), context.stack());
173     ASSERT(codeBlock);
174
175     RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
176     RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
177     unsigned registerCount = calleeSaves->size();
178
179     RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();
180
181     for (unsigned i = 0; i < registerCount; i++) {
182         RegisterAtOffset entry = calleeSaves->at(i);
183         if (dontSaveRegisters.get(entry.reg()))
184             continue;
185
186         uintptr_t savedRegisterValue;
187
188         if (wasCalledViaTailCall && baselineCalleeSaves.get(entry.reg()))
189             savedRegisterValue = frame.get<uintptr_t>(entry.offset());
190         else
191             savedRegisterValue = context.gpr(entry.reg().gpr());
192
193         frame.set(offsetVirtualRegister.offsetInBytes() + entry.offset(), savedRegisterValue);
194     }
195 }
196 #else // not NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
197
198 static void restoreCalleeSavesFor(Context&, CodeBlock*) { }
199 static void saveCalleeSavesFor(Context&, CodeBlock*) { }
200 static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context&) { }
201 static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context&) { }
202 static void saveOrCopyCalleeSavesFor(Context&, CodeBlock*, VirtualRegister, bool) { }
203
204 #endif // NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
205
206 static JSCell* createDirectArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
207 {
208     VM& vm = *context.arg<VM*>();
209
210     ASSERT(vm.heap.isDeferred());
211
212     if (inlineCallFrame)
213         codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
214
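    // Note: argumentCount comes from the call frame's argument count slot (or from
    // argumentCountIncludingThis), so it counts |this|; the user-visible arguments length
    // therefore excludes it.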
215     unsigned length = argumentCount - 1;
216     unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
217     DirectArguments* result = DirectArguments::create(
218         vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);
219
220     result->callee().set(vm, result, callee);
221
222     void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
223     Frame frame(frameBase, context.stack());
224     for (unsigned i = length; i--;)
225         result->setIndexQuickly(vm, i, frame.argument(i));
226
227     return result;
228 }
229
230 static JSCell* createClonedArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
231 {
232     VM& vm = *context.arg<VM*>();
233     ExecState* exec = context.fp<ExecState*>();
234
235     ASSERT(vm.heap.isDeferred());
236
237     if (inlineCallFrame)
238         codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
239
240     unsigned length = argumentCount - 1;
241     ClonedArguments* result = ClonedArguments::createEmpty(
242         vm, codeBlock->globalObject()->clonedArgumentsStructure(), callee, length);
243
244     void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
245     Frame frame(frameBase, context.stack());
246     for (unsigned i = length; i--;)
247         result->putDirectIndex(exec, i, frame.argument(i));
248     return result;
249 }
250
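// Probe-based counterpart of OSRExit::emitRestoreArguments() below: rather than emitting JIT
// code to materialize phantom arguments objects, it materializes them directly against the
// probe frame during the exit ramp.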
251 static void emitRestoreArguments(Context& context, CodeBlock* codeBlock, DFG::JITCode* dfgJITCode, const Operands<ValueRecovery>& operands)
252 {
253     Frame frame(context.fp(), context.stack());
254
255     HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
256     for (size_t index = 0; index < operands.size(); ++index) {
257         const ValueRecovery& recovery = operands[index];
258         int operand = operands.operandForIndex(index);
259
260         if (recovery.technique() != DirectArgumentsThatWereNotCreated
261             && recovery.technique() != ClonedArgumentsThatWereNotCreated)
262             continue;
263
264         MinifiedID id = recovery.nodeID();
265         auto iter = alreadyAllocatedArguments.find(id);
266         if (iter != alreadyAllocatedArguments.end()) {
267             frame.setOperand(operand, frame.operand(iter->value));
268             continue;
269         }
270
271         InlineCallFrame* inlineCallFrame =
272             dfgJITCode->minifiedDFG.at(id)->inlineCallFrame();
273
274         int stackOffset;
275         if (inlineCallFrame)
276             stackOffset = inlineCallFrame->stackOffset;
277         else
278             stackOffset = 0;
279
280         JSFunction* callee;
281         if (!inlineCallFrame || inlineCallFrame->isClosureCall)
282             callee = jsCast<JSFunction*>(frame.operand(stackOffset + CallFrameSlot::callee).asCell());
283         else
284             callee = jsCast<JSFunction*>(inlineCallFrame->calleeRecovery.constant().asCell());
285
286         int32_t argumentCount;
287         if (!inlineCallFrame || inlineCallFrame->isVarargs())
288             argumentCount = frame.operand<int32_t>(stackOffset + CallFrameSlot::argumentCount, PayloadOffset);
289         else
290             argumentCount = inlineCallFrame->argumentCountIncludingThis;
291
292         JSCell* argumentsObject;
293         switch (recovery.technique()) {
294         case DirectArgumentsThatWereNotCreated:
295             argumentsObject = createDirectArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
296             break;
297         case ClonedArgumentsThatWereNotCreated:
298             argumentsObject = createClonedArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
299             break;
300         default:
301             RELEASE_ASSERT_NOT_REACHED();
302             break;
303         }
304         frame.setOperand(operand, JSValue(argumentsObject));
305
306         alreadyAllocatedArguments.add(id, operand);
307     }
308 }
309
310 // The following is a list of extra initializations that need to be done, ordered from
311 // most likely needed (lower enum value) to least likely needed (higher enum value).
312 // Each initialization level includes all of the levels below it (see the use of the
313 // extraInitializationLevel value below).
314 enum class ExtraInitializationLevel {
315     None,
316     SpeculationRecovery,
317     ValueProfileUpdate,
318     ArrayProfileUpdate,
319     Other
320 };
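// For example, an exit whose cached level is ValueProfileUpdate runs the SpeculationRecovery
// and ValueProfileUpdate steps of the exit ramp in executeOSRExit() and then breaks out before
// the ArrayProfileUpdate and Other steps, following this pattern:
//
//     ... work for level N ...
//     if (extraInitializationLevel <= ExtraInitializationLevel::N)
//         break;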
321
322 void OSRExit::executeOSRExit(Context& context)
323 {
324     VM& vm = *context.arg<VM*>();
325     auto scope = DECLARE_THROW_SCOPE(vm);
326
327     ExecState* exec = context.fp<ExecState*>();
328     ASSERT(&exec->vm() == &vm);
329     auto& cpu = context.cpu;
330
331     if (vm.callFrameForCatch) {
332         exec = vm.callFrameForCatch;
333         context.fp() = exec;
334     }
335
336     CodeBlock* codeBlock = exec->codeBlock();
337     ASSERT(codeBlock);
338     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
339
340     // It's sort of preferable that we don't GC while in here. Anyway, doing so wouldn't
341     // really be profitable.
342     DeferGCForAWhile deferGC(vm.heap);
343
344     uint32_t exitIndex = vm.osrExitIndex;
345     DFG::JITCode* dfgJITCode = codeBlock->jitCode()->dfg();
346     OSRExit& exit = dfgJITCode->osrExit[exitIndex];
347
348     ASSERT(!vm.callFrameForCatch || exit.m_kind == GenericUnwind);
349     EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());
350
351     if (UNLIKELY(!exit.exitState)) {
352         ExtraInitializationLevel extraInitializationLevel = ExtraInitializationLevel::None;
353
354         // We only need to execute this block once for each OSRExit record. The computed
355         // results will be cached in the OSRExitState record for use by the rest of the
356         // exit ramp code.
357
358         // Ensure we have baseline codeBlocks to OSR exit to.
359         prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
360
361         CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative();
362         ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);
363
364         SpeculationRecovery* recovery = nullptr;
365         if (exit.m_recoveryIndex != UINT_MAX) {
366             recovery = &dfgJITCode->speculationRecovery[exit.m_recoveryIndex];
367             extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::SpeculationRecovery);
368         }
369
370         if (UNLIKELY(exit.m_kind == GenericUnwind))
371             extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
372
373         ArrayProfile* arrayProfile = nullptr;
374         if (!!exit.m_jsValueSource) {
375             if (exit.m_valueProfile)
376                 extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::ValueProfileUpdate);
377             if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
378                 CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
379                 CodeBlock* profiledCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
380                 arrayProfile = profiledCodeBlock->getArrayProfile(codeOrigin.bytecodeIndex);
381                 if (arrayProfile)
382                     extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::ArrayProfileUpdate);
383             }
384         }
385
386         int32_t activeThreshold = baselineCodeBlock->adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp());
387         double adjustedThreshold = applyMemoryUsageHeuristicsAndConvertToInt(activeThreshold, baselineCodeBlock);
388         ASSERT(adjustedThreshold > 0);
389         adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold);
390
391         CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
392         Vector<BytecodeAndMachineOffset> decodedCodeMap;
393         codeBlockForExit->jitCodeMap()->decode(decodedCodeMap);
394
395         BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
396
397         ASSERT(mapping);
398         ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
399
400         void* jumpTarget = codeBlockForExit->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
401
402         // Compute the value recoveries.
403         Operands<ValueRecovery> operands;
404         Vector<UndefinedOperandSpan> undefinedOperandSpans;
405         unsigned numVariables = dfgJITCode->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, dfgJITCode->minifiedDFG, exit.m_streamIndex, operands, &undefinedOperandSpans);
406         ptrdiff_t stackPointerOffset = -static_cast<ptrdiff_t>(numVariables) * sizeof(Register);
407
408         exit.exitState = adoptRef(new OSRExitState(exit, codeBlock, baselineCodeBlock, operands, WTFMove(undefinedOperandSpans), recovery, stackPointerOffset, activeThreshold, adjustedThreshold, jumpTarget, arrayProfile));
409
410         if (UNLIKELY(vm.m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
411             Profiler::Database& database = *vm.m_perBytecodeProfiler;
412             Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
413
414             Profiler::OSRExit* profilerExit = compilation->addOSRExit(
415                 exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
416                 exit.m_kind, exit.m_kind == UncountableInvalidation);
417             exit.exitState->profilerExit = profilerExit;
418             extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
419         }
420
421         if (UNLIKELY(Options::printEachOSRExit()))
422             extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
423
424         exit.exitState->extraInitializationLevel = extraInitializationLevel;
425
426         if (UNLIKELY(Options::verboseOSR() || Options::verboseDFGOSRExit())) {
427             dataLogF("DFG OSR exit #%u (%s, %s) from %s, with operands = %s\n",
428                 exitIndex, toCString(exit.m_codeOrigin).data(),
429                 exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
430                 toCString(ignoringContext<DumpContext>(operands)).data());
431         }
432     }
433
434     OSRExitState& exitState = *exit.exitState.get();
435     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
436     ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);
437
438     Operands<ValueRecovery>& operands = exitState.operands;
439     Vector<UndefinedOperandSpan>& undefinedOperandSpans = exitState.undefinedOperandSpans;
440
441     context.sp() = context.fp<uint8_t*>() + exitState.stackPointerOffset;
442
443     // The only reason for using this do-while loop is so we can break out midway when appropriate.
444     do {
445         auto extraInitializationLevel = static_cast<ExtraInitializationLevel>(exitState.extraInitializationLevel);
446
447         if (extraInitializationLevel == ExtraInitializationLevel::None) {
448             context.sp() = context.fp<uint8_t*>() + exitState.stackPointerOffset;
449             break;
450         }
451
452         // Begin extra initialization level: SpeculationRecovery
453
454         // We need to do speculation recovery first because array profiling and value profiling
455         // may rely on a value that it recovers. However, most exits are unlikely to have a
456         // recovery value, so we decorate this path as UNLIKELY.
457         SpeculationRecovery* recovery = exitState.recovery;
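        // For example, SpeculativeAdd recovery below undoes a speculatively performed in-place
        // add by subtracting the source back out of the destination register and, on 64-bit,
        // re-boxing the result as an int32 JSValue.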
458         if (UNLIKELY(recovery)) {
459             switch (recovery->type()) {
460             case SpeculativeAdd:
461                 cpu.gpr(recovery->dest()) = cpu.gpr<uint32_t>(recovery->dest()) - cpu.gpr<uint32_t>(recovery->src());
462 #if USE(JSVALUE64)
463                 ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
464                 cpu.gpr(recovery->dest()) |= TagTypeNumber;
465 #endif
466                 break;
467
468             case SpeculativeAddImmediate:
469                 cpu.gpr(recovery->dest()) = (cpu.gpr<uint32_t>(recovery->dest()) - recovery->immediate());
470 #if USE(JSVALUE64)
471                 ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
472                 cpu.gpr(recovery->dest()) |= TagTypeNumber;
473 #endif
474                 break;
475
476             case BooleanSpeculationCheck:
477 #if USE(JSVALUE64)
478                 cpu.gpr(recovery->dest()) = cpu.gpr(recovery->dest()) ^ ValueFalse;
479 #endif
480                 break;
481
482             default:
483                 break;
484             }
485         }
486         if (extraInitializationLevel <= ExtraInitializationLevel::SpeculationRecovery)
487             break;
488
489         // Begin extra initialization level: ValueProfileUpdate
490         JSValue profiledValue;
491         if (!!exit.m_jsValueSource) {
492             profiledValue = jsValueFor(cpu, exit.m_jsValueSource);
493             if (MethodOfGettingAValueProfile profile = exit.m_valueProfile)
494                 profile.reportValue(profiledValue);
495         }
496         if (extraInitializationLevel <= ExtraInitializationLevel::ValueProfileUpdate)
497             break;
498
499         // Begin extra initialization level: ArrayProfileUpdate
500         ArrayProfile* arrayProfile = exitState.arrayProfile;
501         if (arrayProfile) {
502             ASSERT(!!exit.m_jsValueSource);
503             ASSERT(exit.m_kind == BadCache || exit.m_kind == BadIndexingType);
504             Structure* structure = profiledValue.asCell()->structure(vm);
505             arrayProfile->observeStructure(structure);
506             arrayProfile->observeArrayMode(asArrayModes(structure->indexingType()));
507         }
508         if (extraInitializationLevel <= ExtraInitializationLevel::ArrayProfileUpdate)
509             break;
510
511         // Begin extra initialization level: Other
512         if (UNLIKELY(exit.m_kind == GenericUnwind)) {
513             // We are acting as a de facto op_catch because we arrive here from genericUnwind().
514             // So, we must restore our call frame and stack pointer.
515             restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(context);
516             ASSERT(context.fp() == vm.callFrameForCatch);
517         }
518
519         if (exitState.profilerExit)
520             exitState.profilerExit->incCount();
521
522         if (UNLIKELY(Options::printEachOSRExit()))
523             printOSRExit(context, vm.osrExitIndex, exit);
524
525     } while (false); // End extra initialization.
526
527     Frame frame(cpu.fp(), context.stack());
528     ASSERT(!(context.fp<uintptr_t>() & 0x7));
529
530 #if USE(JSVALUE64)
531     ASSERT(cpu.gpr(GPRInfo::tagTypeNumberRegister) == TagTypeNumber);
532     ASSERT(cpu.gpr(GPRInfo::tagMaskRegister) == TagMask);
533 #endif
534
535     // Do all data format conversions and store the results into the stack.
536     // Note: we need to recover values before restoring the callee save registers below
537     // because the recovery may rely on values in some of the callee save registers.
538
539     int calleeSaveSpaceAsVirtualRegisters = static_cast<int>(baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters());
540     size_t numberOfOperands = operands.size();
541     size_t numUndefinedOperandSpans = undefinedOperandSpans.size();
542
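    // Operands that were reconstructed as jsUndefined() are described by spans. We bulk-fill
    // those spans with undefined below, and the main recovery loop then skips over them.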
543     size_t nextUndefinedSpanIndex = 0;
544     size_t nextUndefinedOperandIndex = numberOfOperands;
545     if (numUndefinedOperandSpans)
546         nextUndefinedOperandIndex = undefinedOperandSpans[nextUndefinedSpanIndex].firstIndex;
547
548     JSValue undefined = jsUndefined();
549     for (size_t spanIndex = 0; spanIndex < numUndefinedOperandSpans; ++spanIndex) {
550         auto& span = undefinedOperandSpans[spanIndex];
551         int firstOffset = span.minOffset;
552         int lastOffset = firstOffset + span.numberOfRegisters;
553
554         for (int offset = firstOffset; offset < lastOffset; ++offset)
555             frame.setOperand(offset, undefined);
556     }
557
558     for (size_t index = 0; index < numberOfOperands; ++index) {
559         const ValueRecovery& recovery = operands[index];
560         VirtualRegister reg = operands.virtualRegisterForIndex(index);
561
562         if (UNLIKELY(index == nextUndefinedOperandIndex)) {
563             index += undefinedOperandSpans[nextUndefinedSpanIndex++].numberOfRegisters - 1;
564             if (nextUndefinedSpanIndex < numUndefinedOperandSpans)
565                 nextUndefinedOperandIndex = undefinedOperandSpans[nextUndefinedSpanIndex].firstIndex;
566             else
567                 nextUndefinedOperandIndex = numberOfOperands;
568             continue;
569         }
570
571         if (reg.isLocal() && reg.toLocal() < calleeSaveSpaceAsVirtualRegisters)
572             continue;
573
574         int operand = reg.offset();
575
576         switch (recovery.technique()) {
577         case DisplacedInJSStack:
578             frame.setOperand(operand, exec->r(recovery.virtualRegister()).jsValue());
579             break;
580
581         case InFPR:
582             frame.setOperand(operand, cpu.fpr<JSValue>(recovery.fpr()));
583             break;
584
585 #if USE(JSVALUE64)
586         case InGPR:
587             frame.setOperand(operand, cpu.gpr<JSValue>(recovery.gpr()));
588             break;
589 #else
590         case InPair:
591             frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.tagGPR()), cpu.gpr<int32_t>(recovery.payloadGPR())));
592             break;
593 #endif
594
595         case UnboxedCellInGPR:
596             frame.setOperand(operand, JSValue(cpu.gpr<JSCell*>(recovery.gpr())));
597             break;
598
599         case CellDisplacedInJSStack:
600             frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedCell()));
601             break;
602
603 #if USE(JSVALUE32_64)
604         case UnboxedBooleanInGPR:
605             frame.setOperand(operand, jsBoolean(cpu.gpr<bool>(recovery.gpr())));
606             break;
607 #endif
608
609         case BooleanDisplacedInJSStack:
610 #if USE(JSVALUE64)
611             frame.setOperand(operand, exec->r(recovery.virtualRegister()).jsValue());
612 #else
613             frame.setOperand(operand, jsBoolean(exec->r(recovery.virtualRegister()).jsValue().payload()));
614 #endif
615             break;
616
617         case UnboxedInt32InGPR:
618             frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.gpr())));
619             break;
620
621         case Int32DisplacedInJSStack:
622             frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedInt32()));
623             break;
624
625 #if USE(JSVALUE64)
626         case UnboxedInt52InGPR:
627             frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr()) >> JSValue::int52ShiftAmount));
628             break;
629
630         case Int52DisplacedInJSStack:
631             frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedInt52()));
632             break;
633
634         case UnboxedStrictInt52InGPR:
635             frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr())));
636             break;
637
638         case StrictInt52DisplacedInJSStack:
639             frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedStrictInt52()));
640             break;
641 #endif
642
643         case UnboxedDoubleInFPR:
644             frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(cpu.fpr(recovery.fpr()))));
645             break;
646
647         case DoubleDisplacedInJSStack:
648             frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(exec->r(recovery.virtualRegister()).unboxedDouble())));
649             break;
650
651         case Constant:
652             frame.setOperand(operand, recovery.constant());
653             break;
654
655         case DirectArgumentsThatWereNotCreated:
656         case ClonedArgumentsThatWereNotCreated:
657             // Don't do this, yet.
658             break;
659
660         default:
661             RELEASE_ASSERT_NOT_REACHED();
662             break;
663         }
664     }
665
666     // Restore the DFG callee saves and then save the ones the baseline JIT uses.
667     restoreCalleeSavesFor(context, codeBlock);
668     saveCalleeSavesFor(context, baselineCodeBlock);
669
670 #if USE(JSVALUE64)
671     cpu.gpr(GPRInfo::tagTypeNumberRegister) = static_cast<uintptr_t>(TagTypeNumber);
672     cpu.gpr(GPRInfo::tagMaskRegister) = static_cast<uintptr_t>(TagTypeNumber | TagBitTypeOther);
673 #endif
674
675     if (exit.isExceptionHandler())
676         copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(context);
677
678     // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
679     // recoveries don't recursively refer to each other. But we don't assume that they only
680     // refer to certain ranges of locals. Hence we need to do this here, once the stack is sensible.
681     // Note that we also roughly assume that the arguments might still be materialized outside of their
682     // inline call frame scope - but for now the DFG doesn't do that.
683
684     DFG::emitRestoreArguments(context, codeBlock, dfgJITCode, operands);
685
686     // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
687     // that all new calls into this code will go to the new JIT, so the execute
688     // counter only affects call frames that performed OSR exit and call frames
689     // that were still executing the old JIT at the time of another call frame's
690     // OSR exit. We want to ensure that the following is true:
691     //
692     // (a) Code that performs an OSR exit gets a chance to reenter optimized
693     //     code eventually, since optimized code is faster. But we don't
694     //     want to do such reentry too aggressively (see (c) below).
695     //
696     // (b) If there is code on the call stack that is still running the old
697     //     JIT's code and has never OSR'd, then it should get a chance to
698     //     perform OSR entry despite the fact that we've exited.
699     //
700     // (c) Code that performs an OSR exit should not immediately retry OSR
701     //     entry, since both forms of OSR are expensive. OSR entry is
702     //     particularly expensive.
703     //
704     // (d) Frequent OSR failures, even those that do not result in the code
705     //     running in a hot loop, result in recompilation getting triggered.
706     //
707     // To ensure (c), we'd like to set the execute counter to
708     // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
709     // (a) and (b), since then every OSR exit would delay the opportunity for
710     // every call frame to perform OSR entry. Essentially, if OSR exit happens
711     // frequently and the function has few loops, then the counter will never
712     // become non-negative and OSR entry will never be triggered. OSR entry
713     // will only happen if a loop gets hot in the old JIT, which does a pretty
714     // good job of ensuring (a) and (b). But that doesn't take care of (d),
715     // since each speculation failure would reset the execute counter.
716     // So we check here if the number of speculation failures is significantly
717     // larger than the number of successes (we want a 90% success rate), and if
718     // there have been a large enough number of failures. If so, we set the
719     // counter to 0; otherwise we set the counter to
720     // counterValueForOptimizeAfterWarmUp().
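    // As a rough sketch of that policy (the authoritative logic lives in
    // CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize() and may differ in detail):
    //
    //     if (thereHaveBeenEnoughFailures && failureRateExceedsOurRoughlyTenPercentBudget)
    //         executeCounter = 0;                                    // recompile soon, per (d)
    //     else
    //         executeCounter = counterValueForOptimizeAfterWarmUp(); // back off, per (c)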
721
722     if (UNLIKELY(codeBlock->updateOSRExitCounterAndCheckIfNeedToReoptimize(exitState) == CodeBlock::OptimizeAction::ReoptimizeNow))
723         triggerReoptimizationNow(baselineCodeBlock, &exit);
724
725     reifyInlinedCallFrames(context, baselineCodeBlock, exit);
726     adjustAndJumpToTarget(context, vm, codeBlock, baselineCodeBlock, exit);
727 }
728
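// Walks the exit's inline call frame chain and materializes each inlined frame the way the
// baseline JIT expects it on the stack: code block slot, return PC, caller frame pointer,
// argument count, callee, and callee save registers.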
729 static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselineCodeBlock, const OSRExitBase& exit)
730 {
731     auto& cpu = context.cpu;
732     Frame frame(cpu.fp(), context.stack());
733
734     // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
735     // in the presence of inlined tail calls.
736     // https://bugs.webkit.org/show_bug.cgi?id=147511
737     ASSERT(outermostBaselineCodeBlock->jitType() == JITCode::BaselineJIT);
738     frame.setOperand<CodeBlock*>(CallFrameSlot::codeBlock, outermostBaselineCodeBlock);
739
740     const CodeOrigin* codeOrigin;
741     for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingTailCalls()) {
742         InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;
743         CodeBlock* baselineCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(*codeOrigin, outermostBaselineCodeBlock);
744         InlineCallFrame::Kind trueCallerCallKind;
745         CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
746         void* callerFrame = cpu.fp();
747
748         if (!trueCaller) {
749             ASSERT(inlineCallFrame->isTail());
750             void* returnPC = frame.get<void*>(CallFrame::returnPCOffset());
751             frame.set<void*>(inlineCallFrame->returnPCOffset(), returnPC);
752             callerFrame = frame.get<void*>(CallFrame::callerFrameOffset());
753         } else {
754             CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
755             unsigned callBytecodeIndex = trueCaller->bytecodeIndex;
756             void* jumpTarget = nullptr;
757
758             switch (trueCallerCallKind) {
759             case InlineCallFrame::Call:
760             case InlineCallFrame::Construct:
761             case InlineCallFrame::CallVarargs:
762             case InlineCallFrame::ConstructVarargs:
763             case InlineCallFrame::TailCall:
764             case InlineCallFrame::TailCallVarargs: {
765                 CallLinkInfo* callLinkInfo =
766                     baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
767                 RELEASE_ASSERT(callLinkInfo);
768
769                 jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
770                 break;
771             }
772
773             case InlineCallFrame::GetterCall:
774             case InlineCallFrame::SetterCall: {
775                 StructureStubInfo* stubInfo =
776                     baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
777                 RELEASE_ASSERT(stubInfo);
778
779                 jumpTarget = stubInfo->doneLocation().executableAddress();
780                 break;
781             }
782
783             default:
784                 RELEASE_ASSERT_NOT_REACHED();
785             }
786
787             if (trueCaller->inlineCallFrame)
788                 callerFrame = cpu.fp<uint8_t*>() + trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue);
789
790             frame.set<void*>(inlineCallFrame->returnPCOffset(), jumpTarget);
791         }
792
793         frame.setOperand<void*>(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock, baselineCodeBlock);
794
795         // Restore the inline call frame's callee save registers.
796         // If this inlined frame is a tail call that will return back to the original caller, we need to
797         // copy the prior contents of the tag registers already saved for the outer frame to this frame.
798         saveOrCopyCalleeSavesFor(context, baselineCodeBlock, VirtualRegister(inlineCallFrame->stackOffset), !trueCaller);
799
800         if (!inlineCallFrame->isVarargs())
801             frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, PayloadOffset, inlineCallFrame->argumentCountIncludingThis);
802         ASSERT(callerFrame);
803         frame.set<void*>(inlineCallFrame->callerFrameOffset(), callerFrame);
804 #if USE(JSVALUE64)
805         uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
806         frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
807         if (!inlineCallFrame->isClosureCall)
808             frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, JSValue(inlineCallFrame->calleeConstant()));
809 #else // USE(JSVALUE64) // so this is the 32-bit part
810         Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
811         uint32_t locationBits = CallSiteIndex(instruction).bits();
812         frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
813         frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::callee, TagOffset, static_cast<uint32_t>(JSValue::CellTag));
814         if (!inlineCallFrame->isClosureCall)
815             frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, PayloadOffset, inlineCallFrame->calleeConstant());
816 #endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
817     }
818
819     // We don't need to set the top-level code origin if we only did inline tail calls.
820     if (codeOrigin) {
821 #if USE(JSVALUE64)
822         uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
823 #else
824         Instruction* instruction = outermostBaselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
825         uint32_t locationBits = CallSiteIndex(instruction).bits();
826 #endif
827         frame.setOperand<uint32_t>(CallFrameSlot::argumentCount, TagOffset, locationBits);
828     }
829 }
830
831 static void adjustAndJumpToTarget(Context& context, VM& vm, CodeBlock* codeBlock, CodeBlock* baselineCodeBlock, OSRExit& exit)
832 {
833     OSRExitState* exitState = exit.exitState.get();
834
835     WTF::storeLoadFence(); // The optimizing compiler expects that the OSR exit mechanism will execute this fence.
836     vm.heap.writeBarrier(baselineCodeBlock);
837
838     // We barrier all inlined frames -- and not just the current inline stack --
839     // because we don't know which inlined function owns the value profile that
840     // we'll update when we exit. In the case of "f() { a(); b(); }", if both
841     // a and b are inlined, we might exit inside b due to a bad value loaded
842     // from a.
843     // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
844     // the value profile.
845     InlineCallFrameSet* inlineCallFrames = codeBlock->jitCode()->dfgCommon()->inlineCallFrames.get();
846     if (inlineCallFrames) {
847         for (InlineCallFrame* inlineCallFrame : *inlineCallFrames)
848             vm.heap.writeBarrier(inlineCallFrame->baselineCodeBlock.get());
849     }
850
851     if (exit.m_codeOrigin.inlineCallFrame)
852         context.fp() = context.fp<uint8_t*>() + exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue);
853
854     void* jumpTarget = exitState->jumpTarget;
855     ASSERT(jumpTarget);
856
857     if (exit.isExceptionHandler()) {
858         // Since we're jumping to op_catch, we need to set callFrameForCatch.
859         vm.callFrameForCatch = context.fp<ExecState*>();
860     }
861
862     vm.topCallFrame = context.fp<ExecState*>();
863     context.pc() = jumpTarget;
864 }
865
866 static void printOSRExit(Context& context, uint32_t osrExitIndex, const OSRExit& exit)
867 {
868     ExecState* exec = context.fp<ExecState*>();
869     CodeBlock* codeBlock = exec->codeBlock();
870     CodeBlock* alternative = codeBlock->alternative();
871     ExitKind kind = exit.m_kind;
872     unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
873
874     dataLog("Speculation failure in ", *codeBlock);
875     dataLog(" @ exit #", osrExitIndex, " (bc#", bytecodeOffset, ", ", exitKindToString(kind), ") with ");
876     if (alternative) {
877         dataLog(
878             "executeCounter = ", alternative->jitExecuteCounter(),
879             ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
880             ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
881     } else
882         dataLog("no alternative code block (i.e. we've been jettisoned)");
883     dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
884     dataLog("    GPRs at time of exit:");
885     for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
886         GPRReg gpr = GPRInfo::toRegister(i);
887         dataLog(" ", context.gprName(gpr), ":", RawPointer(context.gpr<void*>(gpr)));
888     }
889     dataLog("\n");
890     dataLog("    FPRs at time of exit:");
891     for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
892         FPRReg fpr = FPRInfo::toRegister(i);
893         dataLog(" ", context.fprName(fpr), ":");
894         uint64_t bits = context.fpr<uint64_t>(fpr);
895         double value = context.fpr(fpr);
896         dataLogF("%llx:%lf", static_cast<long long>(bits), value);
897     }
898     dataLog("\n");
899 }
900
901 // JIT based OSR Exit.
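// Unlike the probe-based exit ramp above, which interprets the exit through a Probe::Context,
// this path compiles a dedicated exit stub with CCallHelpers the first time the exit is taken,
// and then repatches the OSR exit jump to point at the stub.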
902
903 OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex)
904     : OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic, jit->m_origin.wasHoisted)
905     , m_jsValueSource(jsValueSource)
906     , m_valueProfile(valueProfile)
907     , m_recoveryIndex(recoveryIndex)
908     , m_streamIndex(streamIndex)
909 {
910     bool canExit = jit->m_origin.exitOK;
911     if (!canExit && jit->m_currentNode) {
912         ExitMode exitMode = mayExit(jit->m_jit.graph(), jit->m_currentNode);
913         canExit = exitMode == ExitMode::Exits || exitMode == ExitMode::ExitsForExceptions;
914     }
915     DFG_ASSERT(jit->m_jit.graph(), jit->m_currentNode, canExit);
916 }
917
918 void OSRExit::setPatchableCodeOffset(MacroAssembler::PatchableJump check)
919 {
920     m_patchableCodeOffset = check.m_jump.m_label.m_offset;
921 }
922
923 MacroAssembler::Jump OSRExit::getPatchableCodeOffsetAsJump() const
924 {
925     return MacroAssembler::Jump(AssemblerLabel(m_patchableCodeOffset));
926 }
927
928 CodeLocationJump OSRExit::codeLocationForRepatch(CodeBlock* dfgCodeBlock) const
929 {
930     return CodeLocationJump(dfgCodeBlock->jitCode()->dataAddressAtOffset(m_patchableCodeOffset));
931 }
932
933 void OSRExit::correctJump(LinkBuffer& linkBuffer)
934 {
935     MacroAssembler::Label label;
936     label.m_label.m_offset = m_patchableCodeOffset;
937     m_patchableCodeOffset = linkBuffer.offsetOf(label);
938 }
939
940 void OSRExit::emitRestoreArguments(CCallHelpers& jit, const Operands<ValueRecovery>& operands)
941 {
942     HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
943     for (size_t index = 0; index < operands.size(); ++index) {
944         const ValueRecovery& recovery = operands[index];
945         int operand = operands.operandForIndex(index);
946
947         if (recovery.technique() != DirectArgumentsThatWereNotCreated
948             && recovery.technique() != ClonedArgumentsThatWereNotCreated)
949             continue;
950
951         MinifiedID id = recovery.nodeID();
952         auto iter = alreadyAllocatedArguments.find(id);
953         if (iter != alreadyAllocatedArguments.end()) {
954             JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
955             jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
956             jit.storeValue(regs, CCallHelpers::addressFor(operand));
957             continue;
958         }
959
960         InlineCallFrame* inlineCallFrame =
961             jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();
962
963         int stackOffset;
964         if (inlineCallFrame)
965             stackOffset = inlineCallFrame->stackOffset;
966         else
967             stackOffset = 0;
968
969         if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
970             jit.loadPtr(
971                 AssemblyHelpers::addressFor(stackOffset + CallFrameSlot::callee),
972                 GPRInfo::regT0);
973         } else {
974             jit.move(
975                 AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
976                 GPRInfo::regT0);
977         }
978
979         if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
980             jit.load32(
981                 AssemblyHelpers::payloadFor(stackOffset + CallFrameSlot::argumentCount),
982                 GPRInfo::regT1);
983         } else {
984             jit.move(
985                 AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis),
986                 GPRInfo::regT1);
987         }
988
989         jit.setupArgumentsWithExecState(
990             AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
991         switch (recovery.technique()) {
992         case DirectArgumentsThatWereNotCreated:
993             jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
994             break;
995         case ClonedArgumentsThatWereNotCreated:
996             jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
997             break;
998         default:
999             RELEASE_ASSERT_NOT_REACHED();
1000             break;
1001         }
1002         jit.call(GPRInfo::nonArgGPR0);
1003         jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
1004
1005         alreadyAllocatedArguments.add(id, operand);
1006     }
1007 }
1008
1009 void JIT_OPERATION OSRExit::compileOSRExit(ExecState* exec)
1010 {
1011     VM* vm = &exec->vm();
1012     auto scope = DECLARE_THROW_SCOPE(*vm);
1013
1014     if (vm->callFrameForCatch)
1015         RELEASE_ASSERT(vm->callFrameForCatch == exec);
1016
1017     CodeBlock* codeBlock = exec->codeBlock();
1018     ASSERT(codeBlock);
1019     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
1020
1021     // It's sort of preferable that we don't GC while in here. Anyway, doing so wouldn't
1022     // really be profitable.
1023     DeferGCForAWhile deferGC(vm->heap);
1024
1025     uint32_t exitIndex = vm->osrExitIndex;
1026     OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];
1027
1028     ASSERT(!vm->callFrameForCatch || exit.m_kind == GenericUnwind);
1029     EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());
1030     
1031     prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
1032
1033     // Compute the value recoveries.
1034     Operands<ValueRecovery> operands;
1035     codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
1036
1037     SpeculationRecovery* recovery = 0;
1038     if (exit.m_recoveryIndex != UINT_MAX)
1039         recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];
1040
1041     {
1042         CCallHelpers jit(codeBlock);
1043
1044         if (exit.m_kind == GenericUnwind) {
1045             // We are acting as a de facto op_catch because we arrive here from genericUnwind().
1046             // So, we must restore our call frame and stack pointer.
1047             jit.restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
1048             jit.loadPtr(vm->addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1049         }
1050         jit.addPtr(
1051             CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
1052             GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1053
1054         jit.jitAssertHasValidCallFrame();
1055
1056         if (UNLIKELY(vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
1057             Profiler::Database& database = *vm->m_perBytecodeProfiler;
1058             Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
1059
1060             Profiler::OSRExit* profilerExit = compilation->addOSRExit(
1061                 exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
1062                 exit.m_kind, exit.m_kind == UncountableInvalidation);
1063             jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
1064         }
1065
1066         compileExit(jit, *vm, exit, operands, recovery);
1067
1068         LinkBuffer patchBuffer(jit, codeBlock);
1069         exit.m_code = FINALIZE_CODE_IF(
1070             shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseDFGOSRExit(),
1071             patchBuffer,
1072             ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
1073                 exitIndex, toCString(exit.m_codeOrigin).data(),
1074                 exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
1075                 toCString(ignoringContext<DumpContext>(operands)).data()));
1076     }
1077
1078     MacroAssembler::repatchJump(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
1079
1080     vm->osrExitJumpDestination = exit.m_code.code().executableAddress();
1081 }
1082
1083 void OSRExit::compileExit(CCallHelpers& jit, VM& vm, const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
1084 {
1085     jit.jitAssertTagsInPlace();
1086
1087     // Pro-forma stuff.
1088     if (Options::printEachOSRExit()) {
1089         SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
1090         debugInfo->codeBlock = jit.codeBlock();
1091         debugInfo->kind = exit.m_kind;
1092         debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
1093
1094         jit.debugCall(vm, debugOperationPrintSpeculationFailure, debugInfo);
1095     }
1096
1097     // Perform speculation recovery. This only comes into play when an operation
1098     // starts mutating state before verifying the speculation it has already made.
1099
1100     if (recovery) {
1101         switch (recovery->type()) {
1102         case SpeculativeAdd:
1103             jit.sub32(recovery->src(), recovery->dest());
1104 #if USE(JSVALUE64)
1105             jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
1106 #endif
1107             break;
1108
1109         case SpeculativeAddImmediate:
1110             jit.sub32(AssemblyHelpers::Imm32(recovery->immediate()), recovery->dest());
1111 #if USE(JSVALUE64)
1112             jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
1113 #endif
1114             break;
1115
1116         case BooleanSpeculationCheck:
1117 #if USE(JSVALUE64)
1118             jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
1119 #endif
1120             break;
1121
1122         default:
1123             break;
1124         }
1125     }
1126
1127     // Refine some array and/or value profile, if appropriate.
1128
1129     if (!!exit.m_jsValueSource) {
1130         if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
1131             // If the instruction that this originated from has an array profile, then
1132             // refine it. If it doesn't, then do nothing. The latter could happen for
1133             // hoisted checks, or checks emitted for operations that didn't have array
1134             // profiling - either ops that aren't array accesses at all, or weren't
1135             // known to be array accesses in the bytecode. The latter case is a FIXME
1136             // while the former case is an outcome of a CheckStructure not knowing why
1137             // it was emitted (could be either due to an inline cache of a property
1138             // access, or due to an array profile).
1139
1140             CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
1141             if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
1142 #if USE(JSVALUE64)
1143                 GPRReg usedRegister;
1144                 if (exit.m_jsValueSource.isAddress())
1145                     usedRegister = exit.m_jsValueSource.base();
1146                 else
1147                     usedRegister = exit.m_jsValueSource.gpr();
1148 #else
1149                 GPRReg usedRegister1;
1150                 GPRReg usedRegister2;
1151                 if (exit.m_jsValueSource.isAddress()) {
1152                     usedRegister1 = exit.m_jsValueSource.base();
1153                     usedRegister2 = InvalidGPRReg;
1154                 } else {
1155                     usedRegister1 = exit.m_jsValueSource.payloadGPR();
1156                     if (exit.m_jsValueSource.hasKnownTag())
1157                         usedRegister2 = InvalidGPRReg;
1158                     else
1159                         usedRegister2 = exit.m_jsValueSource.tagGPR();
1160                 }
1161 #endif
1162
1163                 GPRReg scratch1;
1164                 GPRReg scratch2;
1165 #if USE(JSVALUE64)
1166                 scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
1167                 scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
1168 #else
1169                 scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
1170                 scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
1171 #endif
1172
1173                 if (isARM64()) {
1174                     jit.pushToSave(scratch1);
1175                     jit.pushToSave(scratch2);
1176                 } else {
1177                     jit.push(scratch1);
1178                     jit.push(scratch2);
1179                 }
1180
1181                 GPRReg value;
1182                 if (exit.m_jsValueSource.isAddress()) {
1183                     value = scratch1;
1184                     jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
1185                 } else
1186                     value = exit.m_jsValueSource.payloadGPR();
1187
1188                 jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
1189                 jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID());
1190 #if USE(JSVALUE64)
1191                 jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeAndMiscOffset()), scratch1);
1192 #else
1193                 jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeIncludingHistoryOffset()), scratch1);
1194 #endif
1195                 jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
1196                 jit.lshift32(scratch1, scratch2);
1197                 jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
1198
1199                 if (isARM64()) {
1200                     jit.popToRestore(scratch2);
1201                     jit.popToRestore(scratch1);
1202                 } else {
1203                     jit.pop(scratch2);
1204                     jit.pop(scratch1);
1205                 }
1206             }
1207         }
1208
1209         if (MethodOfGettingAValueProfile profile = exit.m_valueProfile) {
1210 #if USE(JSVALUE64)
1211             if (exit.m_jsValueSource.isAddress()) {
1212                 // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
1213                 // since we know how to restore it.
1214                 jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
1215                 profile.emitReportValue(jit, JSValueRegs(GPRInfo::tagTypeNumberRegister));
1216                 jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
1217             } else
1218                 profile.emitReportValue(jit, JSValueRegs(exit.m_jsValueSource.gpr()));
1219 #else // not USE(JSVALUE64)
1220             if (exit.m_jsValueSource.isAddress()) {
1221                 // Save a register so we can use it.
1222                 GPRReg scratchPayload = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
1223                 GPRReg scratchTag = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base(), scratchPayload);
1224                 jit.pushToSave(scratchPayload);
1225                 jit.pushToSave(scratchTag);
1226
1227                 JSValueRegs scratch(scratchTag, scratchPayload);
1228                 
1229                 jit.loadValue(exit.m_jsValueSource.asAddress(), scratch);
1230                 profile.emitReportValue(jit, scratch);
1231                 
1232                 jit.popToRestore(scratchTag);
1233                 jit.popToRestore(scratchPayload);
1234             } else if (exit.m_jsValueSource.hasKnownTag()) {
1235                 GPRReg scratchTag = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.payloadGPR());
1236                 jit.pushToSave(scratchTag);
1237                 jit.move(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), scratchTag);
1238                 JSValueRegs value(scratchTag, exit.m_jsValueSource.payloadGPR());
1239                 profile.emitReportValue(jit, value);
1240                 jit.popToRestore(scratchTag);
1241             } else
1242                 profile.emitReportValue(jit, exit.m_jsValueSource.regs());
1243 #endif // USE(JSVALUE64)
1244         }
1245     }
1246
1247     // What follows is an intentionally simple OSR exit implementation that generates
1248     // fairly poor code but is very easy to hack. In particular, it dumps all state that
1249     // needs conversion into a scratch buffer so that in step 6, where we actually do the
1250     // conversions, we know that all temp registers are free to use and the variable is
1251     // definitely in a well-known spot in the scratch buffer regardless of whether it had
1252     // originally been in a register or spilled. This allows us to decouple "where was
1253     // the variable" from "how was it represented". Consider the
1254     // Int32DisplacedInJSStack recovery: it tells us that the value is in a
1255     // particular place and that that place holds an unboxed int32. We have two different
1256     // places that a value could be (displaced, register) and a bunch of different
1257     // ways of representing a value. The number of recoveries is two * a bunch. The code
1258     // below means that we have to have two + a bunch cases rather than two * a bunch.
1259     // Once we have loaded the value from wherever it was, the reboxing is the same
1260     // regardless of its location. Likewise, before we do the reboxing, the way we get to
1261     // the value (i.e. where we load it from) is the same regardless of its type. Because
1262     // the code below always dumps everything into a scratch buffer first, the two
1263     // questions become orthogonal, which simplifies adding new types and adding new
1264     // locations.
1265     //
1266     // This raises the question: does using such a suboptimal implementation of OSR exit,
1267     // where we always emit code to dump all state into a scratch buffer only to then
1268     // dump it right back into the stack, hurt us in any way? The answer is that OSR exits
1269     // are rare. Our tiering strategy ensures this. This is because if an OSR exit is
1270     // taken more than ~100 times, we jettison the DFG code block along with all of its
1271     // exits. It is impossible for an OSR exit - i.e. the code we compile below - to
1272     // execute frequently enough for the codegen to matter that much. It probably matters
1273     // enough that we don't want to turn this into some super-slow function call, but so
1274     // long as we're generating straight-line code, that code can be pretty bad. Also
1275     // because we tend to exit only along one OSR exit from any DFG code block - that's an
1276     // empirical result that we're extremely confident about - the code size of this
1277     // doesn't matter much. Hence any attempt to optimize the codegen here is just purely
1278     // harmful to the system: it probably won't reduce either net memory usage or net
1279     // execution time. It will only prevent us from cleanly decoupling "where was the
1280     // variable" from "how was it represented", which will make it more difficult to add
1281     // features in the future and it will make it harder to reason about bugs.
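    //
    // In other words, the code below is structured roughly as:
    //
    //     for each operand: copy its raw bits (from a GPR, an FPR, or a stack slot) into scratch[index]
    //     for each operand: rebox scratch[index] according to its recovery format and store it into
    //                       the operand's baseline stack slot
    //
    // so adding a new location only touches the first phase and adding a new format only touches
    // the second.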
1282
1283     // Save all state from GPRs into the scratch buffer.
1284
1285     ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
1286     EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : nullptr;
1287
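    // For most recovery techniques, scratch[index] will hold the raw (not yet reboxed) bits
    // for operands[index].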
1288     for (size_t index = 0; index < operands.size(); ++index) {
1289         const ValueRecovery& recovery = operands[index];
1290
1291         switch (recovery.technique()) {
1292         case UnboxedInt32InGPR:
1293         case UnboxedCellInGPR:
1294 #if USE(JSVALUE64)
1295         case InGPR:
1296         case UnboxedInt52InGPR:
1297         case UnboxedStrictInt52InGPR:
1298             jit.store64(recovery.gpr(), scratch + index);
1299             break;
1300 #else
1301         case UnboxedBooleanInGPR:
1302             jit.store32(
1303                 recovery.gpr(),
1304                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
1305             break;
1306             
1307         case InPair:
1308             jit.store32(
1309                 recovery.tagGPR(),
1310                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
1311             jit.store32(
1312                 recovery.payloadGPR(),
1313                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
1314             break;
1315 #endif
1316
1317         default:
1318             break;
1319         }
1320     }
1321
1322     // And voila, all GPRs are free to reuse.
1323
1324     // Save all state from FPRs into the scratch buffer.
1325
1326     for (size_t index = 0; index < operands.size(); ++index) {
1327         const ValueRecovery& recovery = operands[index];
1328
1329         switch (recovery.technique()) {
1330         case UnboxedDoubleInFPR:
1331         case InFPR:
1332             jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
1333             jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
1334             break;
1335
1336         default:
1337             break;
1338         }
1339     }
1340
1341     // Now, all FPRs are also free.
1342
1343     // Save all state from the stack into the scratch buffer. For simplicity we
1344     // do this even for state that's already in the right place on the stack.
1345     // It makes things simpler later.
1346
1347     for (size_t index = 0; index < operands.size(); ++index) {
1348         const ValueRecovery& recovery = operands[index];
1349
1350         switch (recovery.technique()) {
1351         case DisplacedInJSStack:
1352         case CellDisplacedInJSStack:
1353         case BooleanDisplacedInJSStack:
1354         case Int32DisplacedInJSStack:
1355         case DoubleDisplacedInJSStack:
1356 #if USE(JSVALUE64)
1357         case Int52DisplacedInJSStack:
1358         case StrictInt52DisplacedInJSStack:
1359             jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
1360             jit.store64(GPRInfo::regT0, scratch + index);
1361             break;
1362 #else
1363             jit.load32(
1364                 AssemblyHelpers::tagFor(recovery.virtualRegister()),
1365                 GPRInfo::regT0);
1366             jit.load32(
1367                 AssemblyHelpers::payloadFor(recovery.virtualRegister()),
1368                 GPRInfo::regT1);
1369             jit.store32(
1370                 GPRInfo::regT0,
1371                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
1372             jit.store32(
1373                 GPRInfo::regT1,
1374                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
1375             break;
1376 #endif
1377
1378         default:
1379             break;
1380         }
1381     }
1382
1383     // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
1384     // could toast some stack that the DFG used. We need to do it before storing to stack offsets
1385     // used by baseline.
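    // In effect: sp = fp - requiredRegisterCountForExit * sizeof(Register).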
1386     jit.addPtr(
1387         CCallHelpers::TrustedImm32(
1388             -jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
1389         CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
1390
1391     // Restore the DFG callee saves and then save the ones the baseline JIT uses.
1392     jit.emitRestoreCalleeSaves();
1393     jit.emitSaveCalleeSavesFor(jit.baselineCodeBlock());
1394
1395     // The tag registers are needed to materialize recoveries below.
1396     jit.emitMaterializeTagCheckRegisters();
1397
1398     if (exit.isExceptionHandler())
1399         jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
1400
1401     // Do all data format conversions and store the results into the stack.
1402
1403     for (size_t index = 0; index < operands.size(); ++index) {
1404         const ValueRecovery& recovery = operands[index];
1405         VirtualRegister reg = operands.virtualRegisterForIndex(index);
1406
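        // Skip locals that alias the baseline callee-save slots: those slots were just written
        // by emitSaveCalleeSavesFor() above and must not be overwritten with recovered values.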
1407         if (reg.isLocal() && reg.toLocal() < static_cast<int>(jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters()))
1408             continue;
1409
1410         int operand = reg.offset();
1411
1412         switch (recovery.technique()) {
1413         case DisplacedInJSStack:
1414         case InFPR:
1415 #if USE(JSVALUE64)
1416         case InGPR:
1417         case UnboxedCellInGPR:
1418         case CellDisplacedInJSStack:
1419         case BooleanDisplacedInJSStack:
1420             jit.load64(scratch + index, GPRInfo::regT0);
1421             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
1422             break;
1423 #else // not USE(JSVALUE64)
1424         case InPair:
1425             jit.load32(
1426                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
1427                 GPRInfo::regT0);
1428             jit.load32(
1429                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
1430                 GPRInfo::regT1);
1431             jit.store32(
1432                 GPRInfo::regT0,
1433                 AssemblyHelpers::tagFor(operand));
1434             jit.store32(
1435                 GPRInfo::regT1,
1436                 AssemblyHelpers::payloadFor(operand));
1437             break;
1438
1439         case UnboxedCellInGPR:
1440         case CellDisplacedInJSStack:
1441             jit.load32(
1442                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
1443                 GPRInfo::regT0);
1444             jit.store32(
1445                 AssemblyHelpers::TrustedImm32(JSValue::CellTag),
1446                 AssemblyHelpers::tagFor(operand));
1447             jit.store32(
1448                 GPRInfo::regT0,
1449                 AssemblyHelpers::payloadFor(operand));
1450             break;
1451
1452         case UnboxedBooleanInGPR:
1453         case BooleanDisplacedInJSStack:
1454             jit.load32(
1455                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
1456                 GPRInfo::regT0);
1457             jit.store32(
1458                 AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
1459                 AssemblyHelpers::tagFor(operand));
1460             jit.store32(
1461                 GPRInfo::regT0,
1462                 AssemblyHelpers::payloadFor(operand));
1463             break;
1464 #endif // USE(JSVALUE64)
1465
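        // Rebox an unboxed int32: on 64-bit, zero-extend and or in TagTypeNumber; on 32-bit,
        // store the payload under an Int32Tag.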
1466         case UnboxedInt32InGPR:
1467         case Int32DisplacedInJSStack:
1468 #if USE(JSVALUE64)
1469             jit.load64(scratch + index, GPRInfo::regT0);
1470             jit.zeroExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0);
1471             jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
1472             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
1473 #else
1474             jit.load32(
1475                 &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
1476                 GPRInfo::regT0);
1477             jit.store32(
1478                 AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
1479                 AssemblyHelpers::tagFor(operand));
1480             jit.store32(
1481                 GPRInfo::regT0,
1482                 AssemblyHelpers::payloadFor(operand));
1483 #endif
1484             break;
1485
1486 #if USE(JSVALUE64)
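        // The non-strict Int52 format keeps the value shifted left by JSValue::int52ShiftAmount,
        // so it is shifted back down before boxing; the strict form is already unshifted.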
1487         case UnboxedInt52InGPR:
1488         case Int52DisplacedInJSStack:
1489             jit.load64(scratch + index, GPRInfo::regT0);
1490             jit.rshift64(
1491                 AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
1492             jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
1493             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
1494             break;
1495
1496         case UnboxedStrictInt52InGPR:
1497         case StrictInt52DisplacedInJSStack:
1498             jit.load64(scratch + index, GPRInfo::regT0);
1499             jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
1500             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
1501             break;
1502 #endif
1503
1504         case UnboxedDoubleInFPR:
1505         case DoubleDisplacedInJSStack:
1506             jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
1507             jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
1508             jit.purifyNaN(FPRInfo::fpRegT0);
1509 #if USE(JSVALUE64)
1510             jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
1511             jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
1512 #else
1513             jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
1514 #endif
1515             break;
1516
1517         case Constant:
1518 #if USE(JSVALUE64)
1519             jit.store64(
1520                 AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())),
1521                 AssemblyHelpers::addressFor(operand));
1522 #else
1523             jit.store32(
1524                 AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
1525                 AssemblyHelpers::tagFor(operand));
1526             jit.store32(
1527                 AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
1528                 AssemblyHelpers::payloadFor(operand));
1529 #endif
1530             break;
1531
1532         case DirectArgumentsThatWereNotCreated:
1533         case ClonedArgumentsThatWereNotCreated:
1534             // Don't do this, yet.
1535             break;
1536
1537         default:
1538             RELEASE_ASSERT_NOT_REACHED();
1539             break;
1540         }
1541     }
1542
1543     // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
1544     // recoveries don't recursively refer to each other. But, we don't try to assume that they only
1545     // refer to certain ranges of locals. Hence we need to do this here, once the stack is sensible.
1546     // Note that we also roughly assume that the arguments might still be materialized outside of their
1547     // inline call frame scope - but for now the DFG doesn't do that.
1548
1549     emitRestoreArguments(jit, operands);
1550
1551     // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
1552     // that all new calls into this code will go to the new JIT, so the execute
1553     // counter only affects call frames that performed OSR exit and call frames
1554     // that were still executing the old JIT at the time of another call frame's
1555     // OSR exit. We want to ensure that the following is true:
1556     //
1557     // (a) Code that performs an OSR exit gets a chance to reenter optimized
1558     //     code eventually, since optimized code is faster. But we don't
1559     //     want to do such reentry too aggressively (see (c) below).
1560     //
1561     // (b) If there is code on the call stack that is still running the old
1562     //     JIT's code and has never OSR'd, then it should get a chance to
1563     //     perform OSR entry despite the fact that we've exited.
1564     //
1565     // (c) Code that performs an OSR exit should not immediately retry OSR
1566     //     entry, since both forms of OSR are expensive. OSR entry is
1567     //     particularly expensive.
1568     //
1569     // (d) Frequent OSR failures, even those that do not result in the code
1570     //     running in a hot loop, result in recompilation getting triggered.
1571     //
1572     // To ensure (c), we'd like to set the execute counter to
1573     // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
1574     // (a) and (b), since then every OSR exit would delay the opportunity for
1575     // every call frame to perform OSR entry. Essentially, if OSR exit happens
1576     // frequently and the function has few loops, then the counter will never
1577     // become non-negative and OSR entry will never be triggered. OSR entry
1578     // will only happen if a loop gets hot in the old JIT, which does a pretty
1579     // good job of ensuring (a) and (b). But that doesn't take care of (d),
1580     // since each speculation failure would reset the execute counter.
1581     // So we check here if the number of speculation failures is significant
1582     // relative to the number of successes (we want a ~90% success rate), and if
1583     // there have been a large enough number of failures. If so, we set the
1584     // counter to 0; otherwise we set the counter to
1585     // counterValueForOptimizeAfterWarmUp().
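    //
    // Schematically (a sketch of the policy just described, with illustrative names; the exact
    // logic lives in handleExitCounts()):
    //
    //     if (enough failures have accumulated && the failure rate is too high)
    //         executeCounter = 0;
    //     else
    //         executeCounter = counterValueForOptimizeAfterWarmUp();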
1586
1587     handleExitCounts(jit, exit);
1588
1589     // Reify inlined call frames.
1590
1591     reifyInlinedCallFrames(jit, exit);
1592
1593     // And finish.
1594     adjustAndJumpToTarget(vm, jit, exit);
1595 }
1596
1597 void JIT_OPERATION OSRExit::debugOperationPrintSpeculationFailure(ExecState* exec, void* debugInfoRaw, void* scratch)
1598 {
1599     VM* vm = &exec->vm();
1600     NativeCallFrameTracer tracer(vm, exec);
1601
1602     SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
1603     CodeBlock* codeBlock = debugInfo->codeBlock;
1604     CodeBlock* alternative = codeBlock->alternative();
1605     dataLog("Speculation failure in ", *codeBlock);
1606     dataLog(" @ exit #", vm->osrExitIndex, " (bc#", debugInfo->bytecodeOffset, ", ", exitKindToString(debugInfo->kind), ") with ");
1607     if (alternative) {
1608         dataLog(
1609             "executeCounter = ", alternative->jitExecuteCounter(),
1610             ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
1611             ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
1612     } else
1613         dataLog("no alternative code block (i.e. we've been jettisoned)");
1614     dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
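    // The scratch buffer is expected to be laid out as one EncodedJSValue-sized slot per GPR,
    // followed by one per FPR, in register-index order.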
1615     dataLog("    GPRs at time of exit:");
1616     char* scratchPointer = static_cast<char*>(scratch);
1617     for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
1618         GPRReg gpr = GPRInfo::toRegister(i);
1619         dataLog(" ", GPRInfo::debugName(gpr), ":", RawPointer(*reinterpret_cast_ptr<void**>(scratchPointer)));
1620         scratchPointer += sizeof(EncodedJSValue);
1621     }
1622     dataLog("\n");
1623     dataLog("    FPRs at time of exit:");
1624     for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
1625         FPRReg fpr = FPRInfo::toRegister(i);
1626         dataLog(" ", FPRInfo::debugName(fpr), ":");
1627         uint64_t bits = *reinterpret_cast_ptr<uint64_t*>(scratchPointer);
1628         double value = *reinterpret_cast_ptr<double*>(scratchPointer);
1629         dataLogF("%llx:%lf", static_cast<unsigned long long>(bits), value);
1630         scratchPointer += sizeof(EncodedJSValue);
1631     }
1632     dataLog("\n");
1633 }
1634
1635 } } // namespace JSC::DFG
1636
1637 #endif // ENABLE(DFG_JIT)