[Re-landing] Use JIT probes for DFG OSR exit.
Source/JavaScriptCore/dfg/DFGOSRExit.cpp
/*
 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExit.h"

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "ClonedArguments.h"
#include "DFGGraph.h"
#include "DFGMayExit.h"
#include "DFGOSRExitPreparation.h"
#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
#include "DirectArguments.h"
#include "InlineCallFrame.h"
#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "OperandsInlines.h"
#include "ProbeContext.h"
#include "ProbeFrame.h"

namespace JSC { namespace DFG {

using CPUState = Probe::CPUState;
using Context = Probe::Context;
using Frame = Probe::Frame;

static void reifyInlinedCallFrames(Probe::Context&, CodeBlock* baselineCodeBlock, const OSRExitBase&);
static void adjustAndJumpToTarget(Probe::Context&, VM&, CodeBlock*, CodeBlock* baselineCodeBlock, OSRExit&);
static void printOSRExit(Context&, uint32_t osrExitIndex, const OSRExit&);

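// Reads the JSValue that |source| describes out of the probe-captured CPU state:
// either from memory at [base + offset], or from the register(s) holding the value.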
static JSValue jsValueFor(CPUState& cpu, JSValueSource source)
{
    if (source.isAddress()) {
        JSValue result;
        std::memcpy(&result, cpu.gpr<uint8_t*>(source.base()) + source.offset(), sizeof(JSValue));
        return result;
    }
#if USE(JSVALUE64)
    return JSValue::decode(cpu.gpr<EncodedJSValue>(source.gpr()));
#else
    if (source.hasKnownTag())
        return JSValue(source.tag(), cpu.gpr<int32_t>(source.payloadGPR()));
    return JSValue(cpu.gpr<int32_t>(source.tagGPR()), cpu.gpr<int32_t>(source.payloadGPR()));
#endif
}

#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0

static_assert(is64Bit(), "we only support callee save registers on 64-bit");

// Based on AssemblyHelpers::emitRestoreCalleeSavesFor().
static void restoreCalleeSavesFor(Context& context, CodeBlock* codeBlock)
{
    ASSERT(codeBlock);

    RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
    RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
    unsigned registerCount = calleeSaves->size();

    uintptr_t* physicalStackFrame = context.fp<uintptr_t*>();
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        // The callee saved values come from the original stack, not the recovered stack.
        // Hence, we read the values directly from the physical stack memory instead of
        // going through context.stack().
        ASSERT(!(entry.offset() % sizeof(uintptr_t)));
        context.gpr(entry.reg().gpr()) = physicalStackFrame[entry.offset() / sizeof(uintptr_t)];
    }
}

// Based on AssemblyHelpers::emitSaveCalleeSavesFor().
static void saveCalleeSavesFor(Context& context, CodeBlock* codeBlock)
{
    auto& stack = context.stack();
    ASSERT(codeBlock);

    RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
    unsigned registerCount = calleeSaves->size();

    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontSaveRegisters.get(entry.reg()))
            continue;
        stack.set(context.fp(), entry.offset(), context.gpr<uintptr_t>(entry.reg().gpr()));
    }
}

// Based on AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer().
static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context& context)
{
    VM& vm = *context.arg<VM*>();

    RegisterAtOffsetList* allCalleeSaves = VM::getAllCalleeSaveRegisterOffsets();
    RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->size();

    VMEntryRecord* entryRecord = vmEntryRecord(vm.topVMEntryFrame);
    uintptr_t* calleeSaveBuffer = reinterpret_cast<uintptr_t*>(entryRecord->calleeSaveRegistersBuffer);

    // Restore all callee saves.
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        size_t uintptrOffset = entry.offset() / sizeof(uintptr_t);
        if (entry.reg().isGPR())
            context.gpr(entry.reg().gpr()) = calleeSaveBuffer[uintptrOffset];
        else
            context.fpr(entry.reg().fpr()) = bitwise_cast<double>(calleeSaveBuffer[uintptrOffset]);
    }
}

// Based on AssemblyHelpers::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer().
static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context& context)
{
    VM& vm = *context.arg<VM*>();
    auto& stack = context.stack();

    VMEntryRecord* entryRecord = vmEntryRecord(vm.topVMEntryFrame);
    void* calleeSaveBuffer = entryRecord->calleeSaveRegistersBuffer;

    RegisterAtOffsetList* allCalleeSaves = VM::getAllCalleeSaveRegisterOffsets();
    RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->size();

    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontCopyRegisters.get(entry.reg()))
            continue;
        if (entry.reg().isGPR())
            stack.set(calleeSaveBuffer, entry.offset(), context.gpr<uintptr_t>(entry.reg().gpr()));
        else
            stack.set(calleeSaveBuffer, entry.offset(), context.fpr<uintptr_t>(entry.reg().fpr()));
    }
}

// Based on AssemblyHelpers::emitSaveOrCopyCalleeSavesFor().
static void saveOrCopyCalleeSavesFor(Context& context, CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, bool wasCalledViaTailCall)
{
    Frame frame(context.fp(), context.stack());
    ASSERT(codeBlock);

    RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
    unsigned registerCount = calleeSaves->size();

    RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();

    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontSaveRegisters.get(entry.reg()))
            continue;

        uintptr_t savedRegisterValue;

        if (wasCalledViaTailCall && baselineCalleeSaves.get(entry.reg()))
            savedRegisterValue = frame.get<uintptr_t>(entry.offset());
        else
            savedRegisterValue = context.gpr(entry.reg().gpr());

        frame.set(offsetVirtualRegister.offsetInBytes() + entry.offset(), savedRegisterValue);
    }
}
#else // not NUMBER_OF_CALLEE_SAVES_REGISTERS > 0

static void restoreCalleeSavesFor(Context&, CodeBlock*) { }
static void saveCalleeSavesFor(Context&, CodeBlock*) { }
static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context&) { }
static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context&) { }
static void saveOrCopyCalleeSavesFor(Context&, CodeBlock*, VirtualRegister, bool) { }

#endif // NUMBER_OF_CALLEE_SAVES_REGISTERS > 0

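// Materializes a DirectArguments object for a frame (possibly inlined) whose arguments
// object was elided by the DFG, copying the argument values out of the recovered frame.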
static JSCell* createDirectArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
{
    VM& vm = *context.arg<VM*>();

    ASSERT(vm.heap.isDeferred());

    if (inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);

    unsigned length = argumentCount - 1;
    unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
    DirectArguments* result = DirectArguments::create(
        vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);

    result->callee().set(vm, result, callee);

    void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
    Frame frame(frameBase, context.stack());
    for (unsigned i = length; i--;)
        result->setIndexQuickly(vm, i, frame.argument(i));

    return result;
}

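// Same as above, but materializes a ClonedArguments object instead.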
static JSCell* createClonedArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
{
    VM& vm = *context.arg<VM*>();
    ExecState* exec = context.fp<ExecState*>();

    ASSERT(vm.heap.isDeferred());

    if (inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);

    unsigned length = argumentCount - 1;
    ClonedArguments* result = ClonedArguments::createEmpty(
        vm, codeBlock->globalObject()->clonedArgumentsStructure(), callee, length);

    void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
    Frame frame(frameBase, context.stack());
    for (unsigned i = length; i--;)
        result->putDirectIndex(exec, i, frame.argument(i));
    return result;
}

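// Note: the assertion below checks that it is legal to exit at the node currently being
// compiled; an OSRExit must not be created at a point where exiting is not allowed.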
OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex)
    : OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic, jit->m_origin.wasHoisted)
    , m_jsValueSource(jsValueSource)
    , m_valueProfile(valueProfile)
    , m_recoveryIndex(recoveryIndex)
    , m_streamIndex(streamIndex)
{
    bool canExit = jit->m_origin.exitOK;
    if (!canExit && jit->m_currentNode) {
        ExitMode exitMode = mayExit(jit->m_jit.graph(), jit->m_currentNode);
        canExit = exitMode == ExitMode::Exits || exitMode == ExitMode::ExitsForExceptions;
    }
    DFG_ASSERT(jit->m_jit.graph(), jit->m_currentNode, canExit);
}

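// Materializes any phantom arguments objects (direct or cloned) that the value recoveries
// refer to, reusing a previously allocated object when several operands name the same node.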
static void emitRestoreArguments(Context& context, CodeBlock* codeBlock, DFG::JITCode* dfgJITCode, const Operands<ValueRecovery>& operands)
{
    Frame frame(context.fp(), context.stack());

    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;

        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            frame.setOperand(operand, frame.operand(iter->value));
            continue;
        }

        InlineCallFrame* inlineCallFrame =
            dfgJITCode->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;

        JSFunction* callee;
        if (!inlineCallFrame || inlineCallFrame->isClosureCall)
            callee = jsCast<JSFunction*>(frame.operand(stackOffset + CallFrameSlot::callee).asCell());
        else
            callee = jsCast<JSFunction*>(inlineCallFrame->calleeRecovery.constant().asCell());

        int32_t argumentCount;
        if (!inlineCallFrame || inlineCallFrame->isVarargs())
            argumentCount = frame.operand<int32_t>(stackOffset + CallFrameSlot::argumentCount, PayloadOffset);
        else
            argumentCount = inlineCallFrame->argumentCountIncludingThis;

        JSCell* argumentsObject;
        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            argumentsObject = createDirectArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
            break;
        case ClonedArgumentsThatWereNotCreated:
            argumentsObject = createClonedArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        frame.setOperand(operand, JSValue(argumentsObject));

        alreadyAllocatedArguments.add(id, operand);
    }
}

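// The OSR exit ramp, run from a JIT probe (see Probe::Context). The probe captures the
// register and stack state at the exit site; this function uses that state to reconstruct
// the baseline frame in C++ and then redirects execution to the baseline code.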
void OSRExit::executeOSRExit(Context& context)
{
    VM& vm = *context.arg<VM*>();
    auto scope = DECLARE_THROW_SCOPE(vm);

    ExecState* exec = context.fp<ExecState*>();
    ASSERT(&exec->vm() == &vm);

    if (vm.callFrameForCatch) {
        exec = vm.callFrameForCatch;
        context.fp() = exec;
    }

    CodeBlock* codeBlock = exec->codeBlock();
    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);

    // It's sort of preferable that we don't GC while in here. Anyway, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm.heap);

    uint32_t exitIndex = vm.osrExitIndex;
    DFG::JITCode* dfgJITCode = codeBlock->jitCode()->dfg();
    OSRExit& exit = dfgJITCode->osrExit[exitIndex];

    ASSERT(!vm.callFrameForCatch || exit.m_kind == GenericUnwind);
    ASSERT_UNUSED(scope, !exit.isExceptionHandler() || !!scope.exception());

    if (UNLIKELY(!exit.exitState)) {
        // We only need to execute this block once for each OSRExit record. The computed
        // results will be cached in the OSRExitState record for use by the rest of the
        // exit ramp code.

        // Ensure we have baseline codeBlocks to OSR exit to.
        prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);

        CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative();
        ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);

        // Compute the value recoveries.
        Operands<ValueRecovery> operands;
        dfgJITCode->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, dfgJITCode->minifiedDFG, exit.m_streamIndex, operands);

        SpeculationRecovery* recovery = nullptr;
        if (exit.m_recoveryIndex != UINT_MAX)
            recovery = &dfgJITCode->speculationRecovery[exit.m_recoveryIndex];

        int32_t activeThreshold = baselineCodeBlock->adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp());
        double adjustedThreshold = applyMemoryUsageHeuristicsAndConvertToInt(activeThreshold, baselineCodeBlock);
        ASSERT(adjustedThreshold > 0);
        adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold);

        CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
        Vector<BytecodeAndMachineOffset> decodedCodeMap;
        codeBlockForExit->jitCodeMap()->decode(decodedCodeMap);

        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

        ptrdiff_t finalStackPointerOffset = codeBlockForExit->stackPointerOffset() * sizeof(Register);

        void* jumpTarget = codeBlockForExit->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

        exit.exitState = adoptRef(new OSRExitState(exit, codeBlock, baselineCodeBlock, operands, recovery, finalStackPointerOffset, activeThreshold, adjustedThreshold, jumpTarget));

        if (UNLIKELY(vm.m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
            Profiler::Database& database = *vm.m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();

            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind, exit.m_kind == UncountableInvalidation);
            exit.exitState->profilerExit = profilerExit;
        }

        if (UNLIKELY(Options::verboseOSR() || Options::verboseDFGOSRExit())) {
            dataLogF("DFG OSR exit #%u (%s, %s) from %s, with operands = %s\n",
                exitIndex, toCString(exit.m_codeOrigin).data(),
                exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
                toCString(ignoringContext<DumpContext>(operands)).data());
        }
    }

    OSRExitState& exitState = *exit.exitState.get();
    CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
    ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);

    Operands<ValueRecovery>& operands = exitState.operands;
    SpeculationRecovery* recovery = exitState.recovery;

    if (exit.m_kind == GenericUnwind) {
        // We are acting as a de facto op_catch because we arrive here from genericUnwind().
        // So, we must restore our call frame and stack pointer.
        restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(context);
        ASSERT(context.fp() == vm.callFrameForCatch);
    }
    context.sp() = context.fp<uint8_t*>() + (codeBlock->stackPointerOffset() * sizeof(Register));

    ASSERT(!(context.fp<uintptr_t>() & 0x7));

    if (exitState.profilerExit)
        exitState.profilerExit->incCount();

    auto& cpu = context.cpu;
    Frame frame(cpu.fp(), context.stack());

#if USE(JSVALUE64)
    ASSERT(cpu.gpr(GPRInfo::tagTypeNumberRegister) == TagTypeNumber);
    ASSERT(cpu.gpr(GPRInfo::tagMaskRegister) == TagMask);
#endif

    if (UNLIKELY(Options::printEachOSRExit()))
        printOSRExit(context, vm.osrExitIndex, exit);

    // Perform speculation recovery. This only comes into play when an operation
    // starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            cpu.gpr(recovery->dest()) = cpu.gpr<uint32_t>(recovery->dest()) - cpu.gpr<uint32_t>(recovery->src());
#if USE(JSVALUE64)
            ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
            cpu.gpr(recovery->dest()) |= TagTypeNumber;
#endif
            break;

        case SpeculativeAddImmediate:
            cpu.gpr(recovery->dest()) = (cpu.gpr<uint32_t>(recovery->dest()) - recovery->immediate());
#if USE(JSVALUE64)
            ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
            cpu.gpr(recovery->dest()) |= TagTypeNumber;
#endif
            break;

        case BooleanSpeculationCheck:
#if USE(JSVALUE64)
            cpu.gpr(recovery->dest()) = cpu.gpr(recovery->dest()) ^ ValueFalse;
#endif
            break;

        default:
            break;
        }
    }

    // Refine some array and/or value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or that weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            CodeBlock* profiledCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
            if (ArrayProfile* arrayProfile = profiledCodeBlock->getArrayProfile(codeOrigin.bytecodeIndex)) {
                Structure* structure = jsValueFor(cpu, exit.m_jsValueSource).asCell()->structure(vm);
                arrayProfile->observeStructure(structure);
                // FIXME: We should be able to use arrayModeFromStructure() to determine the observed ArrayMode here.
                // However, currently, doing so would result in a pdfjs performance regression.
                // https://bugs.webkit.org/show_bug.cgi?id=176473
                arrayProfile->observeArrayMode(asArrayModes(structure->indexingType()));
            }
        }

        if (MethodOfGettingAValueProfile profile = exit.m_valueProfile)
            profile.reportValue(jsValueFor(cpu, exit.m_jsValueSource));
    }

    // Do all data format conversions and store the results into the stack.
    // Note: we need to recover values before restoring callee save registers below,
    // because the recovery may rely on values in some of the callee save registers.

    int calleeSaveSpaceAsVirtualRegisters = static_cast<int>(baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters());
    size_t numberOfOperands = operands.size();
    for (size_t index = 0; index < numberOfOperands; ++index) {
        const ValueRecovery& recovery = operands[index];
        VirtualRegister reg = operands.virtualRegisterForIndex(index);

        if (reg.isLocal() && reg.toLocal() < calleeSaveSpaceAsVirtualRegisters)
            continue;

        int operand = reg.offset();

        switch (recovery.technique()) {
        case DisplacedInJSStack:
            frame.setOperand(operand, exec->r(recovery.virtualRegister()).jsValue());
            break;

        case InFPR:
            frame.setOperand(operand, cpu.fpr<JSValue>(recovery.fpr()));
            break;

#if USE(JSVALUE64)
        case InGPR:
            frame.setOperand(operand, cpu.gpr<JSValue>(recovery.gpr()));
            break;
#else
        case InPair:
            frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.tagGPR()), cpu.gpr<int32_t>(recovery.payloadGPR())));
            break;
#endif

        case UnboxedCellInGPR:
            frame.setOperand(operand, JSValue(cpu.gpr<JSCell*>(recovery.gpr())));
            break;

        case CellDisplacedInJSStack:
            frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedCell()));
            break;

#if USE(JSVALUE32_64)
        case UnboxedBooleanInGPR:
            frame.setOperand(operand, jsBoolean(cpu.gpr<bool>(recovery.gpr())));
            break;
#endif

        case BooleanDisplacedInJSStack:
#if USE(JSVALUE64)
            frame.setOperand(operand, exec->r(recovery.virtualRegister()).jsValue());
#else
            frame.setOperand(operand, jsBoolean(exec->r(recovery.virtualRegister()).jsValue().payload()));
#endif
            break;

        case UnboxedInt32InGPR:
            frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.gpr())));
            break;

        case Int32DisplacedInJSStack:
            frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedInt32()));
            break;

#if USE(JSVALUE64)
        case UnboxedInt52InGPR:
            frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr()) >> JSValue::int52ShiftAmount));
            break;

        case Int52DisplacedInJSStack:
            frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedInt52()));
            break;

        case UnboxedStrictInt52InGPR:
            frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr())));
            break;

        case StrictInt52DisplacedInJSStack:
            frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedStrictInt52()));
            break;
#endif

        case UnboxedDoubleInFPR:
            frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(cpu.fpr(recovery.fpr()))));
            break;

        case DoubleDisplacedInJSStack:
            frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(exec->r(recovery.virtualRegister()).unboxedDouble())));
            break;

        case Constant:
            frame.setOperand(operand, recovery.constant());
            break;

        case DirectArgumentsThatWereNotCreated:
        case ClonedArgumentsThatWereNotCreated:
            // Don't do this, yet.
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
    // could toast some stack that the DFG used. We need to do it before storing to stack offsets
    // used by baseline.
    cpu.sp() = cpu.fp<uint8_t*>() - (codeBlock->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register));

    // Restore the DFG callee saves and then save the ones the baseline JIT uses.
    restoreCalleeSavesFor(context, codeBlock);
    saveCalleeSavesFor(context, baselineCodeBlock);

    // The tag registers are needed to materialize recoveries below.
#if USE(JSVALUE64)
    cpu.gpr(GPRInfo::tagTypeNumberRegister) = TagTypeNumber;
    cpu.gpr(GPRInfo::tagMaskRegister) = TagTypeNumber | TagBitTypeOther;
#endif

    if (exit.isExceptionHandler())
        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(context);

    // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
    // recoveries don't recursively refer to each other. But, we don't try to assume that they only
    // refer to certain ranges of locals. That is why we need to do this here, once the stack is sensible.
    // Note that we also roughly assume that the arguments might still be materialized outside of their
    // inline call frame scope - but for now the DFG wouldn't do that.

    emitRestoreArguments(context, codeBlock, dfgJITCode, operands);

    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    // that all new calls into this code will go to the new JIT, so the execute
    // counter only affects call frames that performed OSR exit and call frames
    // that were still executing the old JIT at the time of another call frame's
    // OSR exit. We want to ensure that the following is true:
    //
    // (a) Code that performs an OSR exit gets a chance to reenter optimized
    //     code eventually, since optimized code is faster. But we don't
    //     want to do such reentry too aggressively (see (c) below).
    //
    // (b) If there is code on the call stack that is still running the old
    //     JIT's code and has never OSR'd, then it should get a chance to
    //     perform OSR entry despite the fact that we've exited.
    //
    // (c) Code that performs an OSR exit should not immediately retry OSR
    //     entry, since both forms of OSR are expensive. OSR entry is
    //     particularly expensive.
    //
    // (d) Frequent OSR failures, even those that do not result in the code
    //     running in a hot loop, result in recompilation getting triggered.
    //
    // To ensure (c), we'd like to set the execute counter to
    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    // (a) and (b), since then every OSR exit would delay the opportunity for
    // every call frame to perform OSR entry. Essentially, if OSR exit happens
    // frequently and the function has few loops, then the counter will never
    // become non-negative and OSR entry will never be triggered. OSR entry
    // will only happen if a loop gets hot in the old JIT, which does a pretty
    // good job of ensuring (a) and (b). But that doesn't take care of (d),
    // since each speculation failure would reset the execute counter.
    // So we check here if the number of speculation failures is significantly
    // larger than the number of successes (we want a 90% success rate), and if
    // there have been a large enough number of failures. If so, we set the
    // counter to 0; otherwise we set the counter to
    // counterValueForOptimizeAfterWarmUp().

    if (UNLIKELY(codeBlock->updateOSRExitCounterAndCheckIfNeedToReoptimize(exitState) == CodeBlock::OptimizeAction::ReoptimizeNow))
        triggerReoptimizationNow(baselineCodeBlock, &exit);

    reifyInlinedCallFrames(context, baselineCodeBlock, exit);
    adjustAndJumpToTarget(context, vm, codeBlock, baselineCodeBlock, exit);
}

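// Rebuilds the call frame headers (codeBlock, callee, argument count, return PC, caller frame)
// for every inlined frame on the path from the exit origin out to the outermost machine frame.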
static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselineCodeBlock, const OSRExitBase& exit)
{
    auto& cpu = context.cpu;
    Frame frame(cpu.fp(), context.stack());

    // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
    // in the presence of inlined tail calls.
    // https://bugs.webkit.org/show_bug.cgi?id=147511
    ASSERT(outermostBaselineCodeBlock->jitType() == JITCode::BaselineJIT);
    frame.setOperand<CodeBlock*>(CallFrameSlot::codeBlock, outermostBaselineCodeBlock);

    const CodeOrigin* codeOrigin;
    for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingTailCalls()) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;
        CodeBlock* baselineCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(*codeOrigin, outermostBaselineCodeBlock);
        InlineCallFrame::Kind trueCallerCallKind;
        CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
        void* callerFrame = cpu.fp();

        if (!trueCaller) {
            ASSERT(inlineCallFrame->isTail());
            void* returnPC = frame.get<void*>(CallFrame::returnPCOffset());
            frame.set<void*>(inlineCallFrame->returnPCOffset(), returnPC);
            callerFrame = frame.get<void*>(CallFrame::callerFrameOffset());
        } else {
            CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
            unsigned callBytecodeIndex = trueCaller->bytecodeIndex;
            void* jumpTarget = nullptr;

            switch (trueCallerCallKind) {
            case InlineCallFrame::Call:
            case InlineCallFrame::Construct:
            case InlineCallFrame::CallVarargs:
            case InlineCallFrame::ConstructVarargs:
            case InlineCallFrame::TailCall:
            case InlineCallFrame::TailCallVarargs: {
                CallLinkInfo* callLinkInfo =
                    baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
                RELEASE_ASSERT(callLinkInfo);

                jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
                break;
            }

            case InlineCallFrame::GetterCall:
            case InlineCallFrame::SetterCall: {
                StructureStubInfo* stubInfo =
                    baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
                RELEASE_ASSERT(stubInfo);

                jumpTarget = stubInfo->doneLocation().executableAddress();
                break;
            }

            default:
                RELEASE_ASSERT_NOT_REACHED();
            }

            if (trueCaller->inlineCallFrame)
                callerFrame = cpu.fp<uint8_t*>() + trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue);

            frame.set<void*>(inlineCallFrame->returnPCOffset(), jumpTarget);
        }

        frame.setOperand<void*>(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock, baselineCodeBlock);

        // Restore the inline call frame's callee save registers.
        // If this inlined frame is a tail call that will return back to the original caller, we need to
        // copy the prior contents of the tag registers already saved for the outer frame to this frame.
        saveOrCopyCalleeSavesFor(context, baselineCodeBlock, VirtualRegister(inlineCallFrame->stackOffset), !trueCaller);

        if (!inlineCallFrame->isVarargs())
            frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, PayloadOffset, inlineCallFrame->argumentCountIncludingThis);
        ASSERT(callerFrame);
        frame.set<void*>(inlineCallFrame->callerFrameOffset(), callerFrame);
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
        if (!inlineCallFrame->isClosureCall)
            frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, JSValue(inlineCallFrame->calleeConstant()));
#else // USE(JSVALUE64) // so this is the 32-bit part
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
        uint32_t locationBits = CallSiteIndex(instruction).bits();
        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::callee, TagOffset, static_cast<uint32_t>(JSValue::CellTag));
        if (!inlineCallFrame->isClosureCall)
            frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, PayloadOffset, inlineCallFrame->calleeConstant());
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
    }

    // We don't need to set the top-level code origin if we only did inline tail calls.
    if (codeOrigin) {
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
#else
        Instruction* instruction = outermostBaselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
        uint32_t locationBits = CallSiteIndex(instruction).bits();
#endif
        frame.setOperand<uint32_t>(CallFrameSlot::argumentCount, TagOffset, locationBits);
    }
}

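// Performs the final bookkeeping (write barriers, frame and stack pointer adjustment,
// vm.topCallFrame) and points the probe's PC at the baseline code we are exiting to.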
static void adjustAndJumpToTarget(Context& context, VM& vm, CodeBlock* codeBlock, CodeBlock* baselineCodeBlock, OSRExit& exit)
{
    OSRExitState* exitState = exit.exitState.get();

    WTF::storeLoadFence(); // The optimizing compiler expects that the OSR exit mechanism will execute this fence.
    vm.heap.writeBarrier(baselineCodeBlock);

    // We barrier all inlined frames -- and not just the current inline stack --
    // because we don't know which inlined function owns the value profile that
    // we'll update when we exit. In the case of "f() { a(); b(); }", if both
    // a and b are inlined, we might exit inside b due to a bad value loaded
    // from a.
    // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
    // the value profile.
    InlineCallFrameSet* inlineCallFrames = codeBlock->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames)
            vm.heap.writeBarrier(inlineCallFrame->baselineCodeBlock.get());
    }

    if (exit.m_codeOrigin.inlineCallFrame)
        context.fp() = context.fp<uint8_t*>() + exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue);

    void* jumpTarget = exitState->jumpTarget;
    ASSERT(jumpTarget);

    context.sp() = context.fp<uint8_t*>() + exitState->stackPointerOffset;
    if (exit.isExceptionHandler()) {
        // Since we're jumping to op_catch, we need to set callFrameForCatch.
        vm.callFrameForCatch = context.fp<ExecState*>();
    }

    vm.topCallFrame = context.fp<ExecState*>();
    context.pc() = jumpTarget;
}

static void printOSRExit(Context& context, uint32_t osrExitIndex, const OSRExit& exit)
{
    ExecState* exec = context.fp<ExecState*>();
    CodeBlock* codeBlock = exec->codeBlock();
    CodeBlock* alternative = codeBlock->alternative();
    ExitKind kind = exit.m_kind;
    unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;

    dataLog("Speculation failure in ", *codeBlock);
    dataLog(" @ exit #", osrExitIndex, " (bc#", bytecodeOffset, ", ", exitKindToString(kind), ") with ");
    if (alternative) {
        dataLog(
            "executeCounter = ", alternative->jitExecuteCounter(),
            ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
            ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
    } else
        dataLog("no alternative code block (i.e. we've been jettisoned)");
    dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
    dataLog("    GPRs at time of exit:");
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
        GPRReg gpr = GPRInfo::toRegister(i);
        dataLog(" ", context.gprName(gpr), ":", RawPointer(context.gpr<void*>(gpr)));
    }
    dataLog("\n");
    dataLog("    FPRs at time of exit:");
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        FPRReg fpr = FPRInfo::toRegister(i);
        dataLog(" ", context.fprName(fpr), ":");
        uint64_t bits = context.fpr<uint64_t>(fpr);
        double value = context.fpr(fpr);
        dataLogF("%llx:%lf", static_cast<long long>(bits), value);
    }
    dataLog("\n");
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)