dfg/DFGOSREntry.cpp
dfg/DFGOSREntrypointCreationPhase.cpp
dfg/DFGOSRExit.cpp
+ dfg/DFGOSRExit32_64.cpp
dfg/DFGOSRExitBase.cpp
- dfg/DFGOSRExitCompiler.cpp
- dfg/DFGOSRExitCompiler32_64.cpp
- dfg/DFGOSRExitCompiler64.cpp
dfg/DFGOSRExitCompilerCommon.cpp
dfg/DFGOSRExitFuzz.cpp
dfg/DFGOSRExitJumpPlaceholder.cpp
+2017-08-04 Mark Lam <mark.lam@apple.com>
+
+ Move DFG::OSRExitCompiler methods into DFG::OSRExit [step 2].
+ https://bugs.webkit.org/show_bug.cgi?id=175214
+ <rdar://problem/33733308>
+
+ Rubber-stamped by Michael Saboff.
+
+        Copied the 64-bit and common methods into DFGOSRExit.cpp, and deleted the
+        unused DFGOSRExitCompiler files.
+
+ Also renamed DFGOSRExitCompiler32_64.cpp to DFGOSRExit32_64.cpp.
+
+        Also moved debugOperationPrintSpeculationFailure() into DFGOSRExit.cpp. It's only
+ used by compileOSRExit(), and will be changed to not be a DFG operation function
+ when we use JIT probes for DFG OSR exits later in
+ https://bugs.webkit.org/show_bug.cgi?id=175144.
+
+ * CMakeLists.txt:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * dfg/DFGJITCompiler.cpp:
+ * dfg/DFGOSRExit.cpp:
+ (JSC::DFG::OSRExit::emitRestoreArguments):
+ (JSC::DFG::OSRExit::compileOSRExit):
+ (JSC::DFG::OSRExit::compileExit):
+ (JSC::DFG::OSRExit::debugOperationPrintSpeculationFailure):
+ * dfg/DFGOSRExit.h:
+ * dfg/DFGOSRExit32_64.cpp: Copied from Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp.
+ * dfg/DFGOSRExitCompiler.cpp: Removed.
+ * dfg/DFGOSRExitCompiler.h: Removed.
+ * dfg/DFGOSRExitCompiler32_64.cpp: Removed.
+ * dfg/DFGOSRExitCompiler64.cpp: Removed.
+ * dfg/DFGOperations.cpp:
+ * dfg/DFGOperations.h:
+ * dfg/DFGThunks.cpp:
+
2017-08-04 Matt Baker <mattbaker@apple.com>
Web Inspector: capture async stack trace when workers/main context posts a message
0FBF158C19B7A53100695DD0 /* DFGBlockSet.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FBF158A19B7A53100695DD0 /* DFGBlockSet.cpp */; };
0FBF158D19B7A53100695DD0 /* DFGBlockSetInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FBF158B19B7A53100695DD0 /* DFGBlockSetInlines.h */; };
0FC0976A1468A6F700CF2442 /* DFGOSRExit.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FC097681468A6EF00CF2442 /* DFGOSRExit.h */; };
- 0FC0977114693AF500CF2442 /* DFGOSRExitCompiler.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FC0976F14693AEF00CF2442 /* DFGOSRExitCompiler.h */; };
- 0FC0977214693AF900CF2442 /* DFGOSRExitCompiler64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC0977014693AEF00CF2442 /* DFGOSRExitCompiler64.cpp */; };
- 0FC09776146943B000CF2442 /* DFGOSRExitCompiler32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC09775146943AD00CF2442 /* DFGOSRExitCompiler32_64.cpp */; };
+ 0FC09776146943B000CF2442 /* DFGOSRExit32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC09775146943AD00CF2442 /* DFGOSRExit32_64.cpp */; };
0FC09791146A6F7100CF2442 /* DFGOSRExit.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC0978E146A6F6300CF2442 /* DFGOSRExit.cpp */; };
- 0FC09792146A6F7300CF2442 /* DFGOSRExitCompiler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC0978F146A6F6300CF2442 /* DFGOSRExitCompiler.cpp */; };
0FC097A1146B28CA00CF2442 /* DFGThunks.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC0979F146B28C700CF2442 /* DFGThunks.cpp */; };
0FC097A2146B28CC00CF2442 /* DFGThunks.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FC097A0146B28C700CF2442 /* DFGThunks.h */; };
0FC20CB51852E2C600C9E954 /* DFGStrengthReductionPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC20CB31852E2C600C9E954 /* DFGStrengthReductionPhase.cpp */; };
0FBF158A19B7A53100695DD0 /* DFGBlockSet.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGBlockSet.cpp; path = dfg/DFGBlockSet.cpp; sourceTree = "<group>"; };
0FBF158B19B7A53100695DD0 /* DFGBlockSetInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGBlockSetInlines.h; path = dfg/DFGBlockSetInlines.h; sourceTree = "<group>"; };
0FC097681468A6EF00CF2442 /* DFGOSRExit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGOSRExit.h; path = dfg/DFGOSRExit.h; sourceTree = "<group>"; };
- 0FC0976F14693AEF00CF2442 /* DFGOSRExitCompiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGOSRExitCompiler.h; path = dfg/DFGOSRExitCompiler.h; sourceTree = "<group>"; };
- 0FC0977014693AEF00CF2442 /* DFGOSRExitCompiler64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGOSRExitCompiler64.cpp; path = dfg/DFGOSRExitCompiler64.cpp; sourceTree = "<group>"; };
- 0FC09775146943AD00CF2442 /* DFGOSRExitCompiler32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGOSRExitCompiler32_64.cpp; path = dfg/DFGOSRExitCompiler32_64.cpp; sourceTree = "<group>"; };
+ 0FC09775146943AD00CF2442 /* DFGOSRExit32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGOSRExit32_64.cpp; path = dfg/DFGOSRExit32_64.cpp; sourceTree = "<group>"; };
0FC0977E1469EBC400CF2442 /* DFGCommon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCommon.h; path = dfg/DFGCommon.h; sourceTree = "<group>"; };
0FC0978E146A6F6300CF2442 /* DFGOSRExit.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGOSRExit.cpp; path = dfg/DFGOSRExit.cpp; sourceTree = "<group>"; };
- 0FC0978F146A6F6300CF2442 /* DFGOSRExitCompiler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGOSRExitCompiler.cpp; path = dfg/DFGOSRExitCompiler.cpp; sourceTree = "<group>"; };
0FC0979F146B28C700CF2442 /* DFGThunks.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGThunks.cpp; path = dfg/DFGThunks.cpp; sourceTree = "<group>"; };
0FC097A0146B28C700CF2442 /* DFGThunks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGThunks.h; path = dfg/DFGThunks.h; sourceTree = "<group>"; };
0FC20CB31852E2C600C9E954 /* DFGStrengthReductionPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGStrengthReductionPhase.cpp; path = dfg/DFGStrengthReductionPhase.cpp; sourceTree = "<group>"; };
0FD8A31E17D51F5700CA2C40 /* DFGOSREntrypointCreationPhase.h */,
0FC0978E146A6F6300CF2442 /* DFGOSRExit.cpp */,
0FC097681468A6EF00CF2442 /* DFGOSRExit.h */,
+ 0FC09775146943AD00CF2442 /* DFGOSRExit32_64.cpp */,
0F235BE717178E7300690C7F /* DFGOSRExitBase.cpp */,
0F235BE817178E7300690C7F /* DFGOSRExitBase.h */,
65987F2C167FE84B003C2F8D /* DFGOSRExitCompilationInfo.h */,
- 0FC0978F146A6F6300CF2442 /* DFGOSRExitCompiler.cpp */,
- 0FC0976F14693AEF00CF2442 /* DFGOSRExitCompiler.h */,
- 0FC09775146943AD00CF2442 /* DFGOSRExitCompiler32_64.cpp */,
- 0FC0977014693AEF00CF2442 /* DFGOSRExitCompiler64.cpp */,
0F7025A71714B0F800382C0E /* DFGOSRExitCompilerCommon.cpp */,
0F7025A81714B0F800382C0E /* DFGOSRExitCompilerCommon.h */,
0F392C871B46188400844728 /* DFGOSRExitFuzz.cpp */,
0FC0976A1468A6F700CF2442 /* DFGOSRExit.h in Headers */,
0F235BEC17178E7300690C7F /* DFGOSRExitBase.h in Headers */,
0FFB921C16D02F110055A5DB /* DFGOSRExitCompilationInfo.h in Headers */,
- 0FC0977114693AF500CF2442 /* DFGOSRExitCompiler.h in Headers */,
0F7025AA1714B0FC00382C0E /* DFGOSRExitCompilerCommon.h in Headers */,
0F392C8A1B46188400844728 /* DFGOSRExitFuzz.h in Headers */,
0FEFC9AB1681A3B600567F53 /* DFGOSRExitJumpPlaceholder.h in Headers */,
0FD8A32517D51F5700CA2C40 /* DFGOSREntrypointCreationPhase.cpp in Sources */,
0FC09791146A6F7100CF2442 /* DFGOSRExit.cpp in Sources */,
0F235BEB17178E7300690C7F /* DFGOSRExitBase.cpp in Sources */,
- 0FC09792146A6F7300CF2442 /* DFGOSRExitCompiler.cpp in Sources */,
- 0FC09776146943B000CF2442 /* DFGOSRExitCompiler32_64.cpp in Sources */,
- 0FC0977214693AF900CF2442 /* DFGOSRExitCompiler64.cpp in Sources */,
+ 0FC09776146943B000CF2442 /* DFGOSRExit32_64.cpp in Sources */,
0F7025A91714B0FA00382C0E /* DFGOSRExitCompilerCommon.cpp in Sources */,
0F392C891B46188400844728 /* DFGOSRExitFuzz.cpp in Sources */,
0FEFC9AA1681A3B300567F53 /* DFGOSRExitJumpPlaceholder.cpp in Sources */,
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
-#include "DFGOSRExitCompiler.h"
+#include "DFGOSRExit.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "AssemblyHelpers.h"
#include "DFGGraph.h"
#include "DFGMayExit.h"
+#include "DFGOSRExitCompilerCommon.h"
+#include "DFGOSRExitPreparation.h"
+#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
+#include "FrameTracers.h"
#include "JSCInlines.h"
+#include "OperandsInlines.h"
namespace JSC { namespace DFG {
m_patchableCodeOffset = linkBuffer.offsetOf(label);
}
+void OSRExit::emitRestoreArguments(CCallHelpers& jit, const Operands<ValueRecovery>& operands)
+{
+ HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+ int operand = operands.operandForIndex(index);
+
+ if (recovery.technique() != DirectArgumentsThatWereNotCreated
+ && recovery.technique() != ClonedArgumentsThatWereNotCreated)
+ continue;
+
+ MinifiedID id = recovery.nodeID();
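+        // If we already materialized this phantom arguments object for another
+        // operand, reuse it: just copy the previously recovered value into place.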
+ auto iter = alreadyAllocatedArguments.find(id);
+ if (iter != alreadyAllocatedArguments.end()) {
+ JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
+ jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
+ jit.storeValue(regs, CCallHelpers::addressFor(operand));
+ continue;
+ }
+
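+        // Find the (possibly inlined) call frame these phantom arguments belong
+        // to; we need its callee and argument count to materialize them.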
+ InlineCallFrame* inlineCallFrame =
+ jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();
+
+ int stackOffset;
+ if (inlineCallFrame)
+ stackOffset = inlineCallFrame->stackOffset;
+ else
+ stackOffset = 0;
+
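+        // Recover the callee: for closure calls (and the machine frame) it is in
+        // the frame's callee slot; otherwise it is a compile-time constant.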
+ if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
+ jit.loadPtr(
+ AssemblyHelpers::addressFor(stackOffset + CallFrameSlot::callee),
+ GPRInfo::regT0);
+ } else {
+ jit.move(
+ AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
+ GPRInfo::regT0);
+ }
+
+ if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
+ jit.load32(
+ AssemblyHelpers::payloadFor(stackOffset + CallFrameSlot::argumentCount),
+ GPRInfo::regT1);
+ } else {
+ jit.move(
+ AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
+ GPRInfo::regT1);
+ }
+
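+        // Call into the runtime to create the arguments object, passing the
+        // callee (regT0) and argument count (regT1) gathered above.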
+ jit.setupArgumentsWithExecState(
+ AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
+ switch (recovery.technique()) {
+ case DirectArgumentsThatWereNotCreated:
+ jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
+ break;
+ case ClonedArgumentsThatWereNotCreated:
+ jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ jit.call(GPRInfo::nonArgGPR0);
+ jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
+
+ alreadyAllocatedArguments.add(id, operand);
+ }
+}
+
+void JIT_OPERATION OSRExit::compileOSRExit(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
+ if (vm->callFrameForCatch)
+ RELEASE_ASSERT(vm->callFrameForCatch == exec);
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ ASSERT(codeBlock);
+ ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+
+    // It's preferable that we don't GC while in here. In any case, doing so
+    // wouldn't really be profitable.
+ DeferGCForAWhile deferGC(vm->heap);
+
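+    // The code that jumped here recorded which exit fired in vm->osrExitIndex;
+    // use it to look up this exit's metadata.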
+ uint32_t exitIndex = vm->osrExitIndex;
+ OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];
+
+ if (vm->callFrameForCatch)
+ ASSERT(exit.m_kind == GenericUnwind);
+ if (exit.isExceptionHandler())
+ ASSERT_UNUSED(scope, !!scope.exception());
+
+ prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
+
+ // Compute the value recoveries.
+ Operands<ValueRecovery> operands;
+ codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
+
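+    // Fetch the speculation recovery for this exit, if one was registered; see
+    // the SpeculativeAdd/BooleanSpeculationCheck handling in compileExit().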
+ SpeculationRecovery* recovery = 0;
+ if (exit.m_recoveryIndex != UINT_MAX)
+ recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];
+
+ {
+ CCallHelpers jit(codeBlock);
+
+ if (exit.m_kind == GenericUnwind) {
+            // We are acting as a de facto op_catch because we arrive here from genericUnwind().
+ // So, we must restore our call frame and stack pointer.
+ jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm);
+ jit.loadPtr(vm->addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
+ }
+ jit.addPtr(
+ CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
+ GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+ jit.jitAssertHasValidCallFrame();
+
+ if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
+ Profiler::Database& database = *vm->m_perBytecodeProfiler;
+ Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
+
+ Profiler::OSRExit* profilerExit = compilation->addOSRExit(
+ exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
+ exit.m_kind, exit.m_kind == UncountableInvalidation);
+ jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
+ }
+
+ compileExit(jit, *vm, exit, operands, recovery);
+
+ LinkBuffer patchBuffer(jit, codeBlock);
+ exit.m_code = FINALIZE_CODE_IF(
+ shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseDFGOSRExit(),
+ patchBuffer,
+ ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
+ exitIndex, toCString(exit.m_codeOrigin).data(),
+ exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
+ toCString(ignoringContext<DumpContext>(operands)).data()));
+ }
+
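+    // Repatch the exit's jump to point at the freshly compiled stub so future
+    // takings of this exit skip this one-time compilation, and record the
+    // destination so the exit thunk can jump there this time around.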
+ MacroAssembler::repatchJump(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
+
+ vm->osrExitJumpDestination = exit.m_code.code().executableAddress();
+}
+
+#if USE(JSVALUE64)
+
+void OSRExit::compileExit(CCallHelpers& jit, VM& vm, const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
+{
+ jit.jitAssertTagsInPlace();
+
+ // Pro-forma stuff.
+ if (Options::printEachOSRExit()) {
+ SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
+ debugInfo->codeBlock = jit.codeBlock();
+ debugInfo->kind = exit.m_kind;
+ debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
+
+ jit.debugCall(vm, debugOperationPrintSpeculationFailure, debugInfo);
+ }
+
+ // Perform speculation recovery. This only comes into play when an operation
+ // starts mutating state before verifying the speculation it has already made.
+
+ if (recovery) {
+ switch (recovery->type()) {
+ case SpeculativeAdd:
+ jit.sub32(recovery->src(), recovery->dest());
+ jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
+ break;
+
+ case SpeculativeAddImmediate:
+ jit.sub32(AssemblyHelpers::Imm32(recovery->immediate()), recovery->dest());
+ jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
+ break;
+
+ case BooleanSpeculationCheck:
+ jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // Refine some array and/or value profile, if appropriate.
+
+ if (!!exit.m_jsValueSource) {
+ if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+ // If the instruction that this originated from has an array profile, then
+ // refine it. If it doesn't, then do nothing. The latter could happen for
+ // hoisted checks, or checks emitted for operations that didn't have array
+ // profiling - either ops that aren't array accesses at all, or weren't
+            // known to be array accesses in the bytecode. The latter case is a FIXME
+            // while the former case is an outcome of a CheckStructure not knowing why
+            // it was emitted (could be either due to an inline cache of a property
+            // access, or due to an array profile).
+
+ CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+ if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
+ GPRReg usedRegister;
+ if (exit.m_jsValueSource.isAddress())
+ usedRegister = exit.m_jsValueSource.base();
+ else
+ usedRegister = exit.m_jsValueSource.gpr();
+
+ GPRReg scratch1;
+ GPRReg scratch2;
+ scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
+ scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
+
+ if (isARM64()) {
+ jit.pushToSave(scratch1);
+ jit.pushToSave(scratch2);
+ } else {
+ jit.push(scratch1);
+ jit.push(scratch2);
+ }
+
+ GPRReg value;
+ if (exit.m_jsValueSource.isAddress()) {
+ value = scratch1;
+ jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
+ } else
+ value = exit.m_jsValueSource.gpr();
+
+ jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
+ jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID());
+ jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeAndMiscOffset()), scratch1);
+ jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
+ jit.lshift32(scratch1, scratch2);
+ jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+
+ if (isARM64()) {
+ jit.popToRestore(scratch2);
+ jit.popToRestore(scratch1);
+ } else {
+ jit.pop(scratch2);
+ jit.pop(scratch1);
+ }
+ }
+ }
+
+ if (MethodOfGettingAValueProfile profile = exit.m_valueProfile) {
+ if (exit.m_jsValueSource.isAddress()) {
+ // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
+ // since we know how to restore it.
+ jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
+ profile.emitReportValue(jit, JSValueRegs(GPRInfo::tagTypeNumberRegister));
+ jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
+ } else
+ profile.emitReportValue(jit, JSValueRegs(exit.m_jsValueSource.gpr()));
+ }
+ }
+
+ // What follows is an intentionally simple OSR exit implementation that generates
+ // fairly poor code but is very easy to hack. In particular, it dumps all state that
+ // needs conversion into a scratch buffer so that in step 6, where we actually do the
+ // conversions, we know that all temp registers are free to use and the variable is
+ // definitely in a well-known spot in the scratch buffer regardless of whether it had
+ // originally been in a register or spilled. This allows us to decouple "where was
+ // the variable" from "how was it represented". Consider that the
+ // Int32DisplacedInJSStack recovery: it tells us that the value is in a
+ // particular place and that that place holds an unboxed int32. We have two different
+ // places that a value could be (displaced, register) and a bunch of different
+ // ways of representing a value. The number of recoveries is two * a bunch. The code
+ // below means that we have to have two + a bunch cases rather than two * a bunch.
+ // Once we have loaded the value from wherever it was, the reboxing is the same
+ // regardless of its location. Likewise, before we do the reboxing, the way we get to
+ // the value (i.e. where we load it from) is the same regardless of its type. Because
+ // the code below always dumps everything into a scratch buffer first, the two
+ // questions become orthogonal, which simplifies adding new types and adding new
+ // locations.
+ //
+ // This raises the question: does using such a suboptimal implementation of OSR exit,
+ // where we always emit code to dump all state into a scratch buffer only to then
+    // dump it right back into the stack, hurt us in any way? The answer is that OSR exits
+ // are rare. Our tiering strategy ensures this. This is because if an OSR exit is
+ // taken more than ~100 times, we jettison the DFG code block along with all of its
+ // exits. It is impossible for an OSR exit - i.e. the code we compile below - to
+ // execute frequently enough for the codegen to matter that much. It probably matters
+ // enough that we don't want to turn this into some super-slow function call, but so
+ // long as we're generating straight-line code, that code can be pretty bad. Also
+ // because we tend to exit only along one OSR exit from any DFG code block - that's an
+ // empirical result that we're extremely confident about - the code size of this
+ // doesn't matter much. Hence any attempt to optimize the codegen here is just purely
+ // harmful to the system: it probably won't reduce either net memory usage or net
+ // execution time. It will only prevent us from cleanly decoupling "where was the
+ // variable" from "how was it represented", which will make it more difficult to add
+ // features in the future and it will make it harder to reason about bugs.
+
+ // Save all state from GPRs into the scratch buffer.
+
+ ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
+ EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
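+    // scratchBufferForSize() returns null for a zero-byte request (i.e. when
+    // there are no operands), hence the null check above.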
+
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+
+ switch (recovery.technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UnboxedInt52InGPR:
+ case UnboxedStrictInt52InGPR:
+ case UnboxedCellInGPR:
+ jit.store64(recovery.gpr(), scratch + index);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // And voila, all GPRs are free to reuse.
+
+ // Save all state from FPRs into the scratch buffer.
+
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+
+ switch (recovery.technique()) {
+ case UnboxedDoubleInFPR:
+ case InFPR:
+ jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
+ jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // Now, all FPRs are also free.
+
+ // Save all state from the stack into the scratch buffer. For simplicity we
+ // do this even for state that's already in the right place on the stack.
+ // It makes things simpler later.
+
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+
+ switch (recovery.technique()) {
+ case DisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
+ case Int32DisplacedInJSStack:
+ case DoubleDisplacedInJSStack:
+ case Int52DisplacedInJSStack:
+ case StrictInt52DisplacedInJSStack:
+ jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+ jit.store64(GPRInfo::regT0, scratch + index);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
+ // could toast some stack that the DFG used. We need to do it before storing to stack offsets
+ // used by baseline.
+ jit.addPtr(
+ CCallHelpers::TrustedImm32(
+ -jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
+ CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
+
+ // Restore the DFG callee saves and then save the ones the baseline JIT uses.
+ jit.emitRestoreCalleeSaves();
+ jit.emitSaveCalleeSavesFor(jit.baselineCodeBlock());
+
+ // The tag registers are needed to materialize recoveries below.
+ jit.emitMaterializeTagCheckRegisters();
+
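+    // For exception handler exits, keep the VM entry frame's callee-save buffer
+    // in sync with the callee saves we just established.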
+ if (exit.isExceptionHandler())
+ jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm);
+
+ // Do all data format conversions and store the results into the stack.
+
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+ VirtualRegister reg = operands.virtualRegisterForIndex(index);
+
+ if (reg.isLocal() && reg.toLocal() < static_cast<int>(jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters()))
+ continue;
+
+ int operand = reg.offset();
+
+ switch (recovery.technique()) {
+ case InGPR:
+ case UnboxedCellInGPR:
+ case DisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
+ case InFPR:
+ jit.load64(scratch + index, GPRInfo::regT0);
+ jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ break;
+
+ case UnboxedInt32InGPR:
+ case Int32DisplacedInJSStack:
+ jit.load64(scratch + index, GPRInfo::regT0);
+ jit.zeroExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0);
+ jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+ jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ break;
+
+ case UnboxedInt52InGPR:
+ case Int52DisplacedInJSStack:
+ jit.load64(scratch + index, GPRInfo::regT0);
+ jit.rshift64(
+ AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
+ jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
+ jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ break;
+
+ case UnboxedStrictInt52InGPR:
+ case StrictInt52DisplacedInJSStack:
+ jit.load64(scratch + index, GPRInfo::regT0);
+ jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
+ jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ break;
+
+ case UnboxedDoubleInFPR:
+ case DoubleDisplacedInJSStack:
+ jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
+ jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
+ jit.purifyNaN(FPRInfo::fpRegT0);
+ jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
+ jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ break;
+
+ case Constant:
+ jit.store64(
+ AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())),
+ AssemblyHelpers::addressFor(operand));
+ break;
+
+ case DirectArgumentsThatWereNotCreated:
+ case ClonedArgumentsThatWereNotCreated:
+ // Don't do this, yet.
+ break;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
+ // recoveries don't recursively refer to each other. But, we don't try to assume that they only
+    // refer to certain ranges of locals. Hence we need to do this here, once the stack is sensible.
+    // Note that we also roughly assume that the arguments might still be materialized outside of their
+    // inline call frame scope - but for now the DFG wouldn't do that.
+
+ emitRestoreArguments(jit, operands);
+
+ // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
+ //
+    // (a) Code that performs an OSR exit gets a chance to reenter optimized
+    //     code eventually, since optimized code is faster. But we don't
+    //     want to do such reentry too aggressively (see (c) below).
+ //
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
+ //
+    // (c) Code that performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
+ //
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
+ //
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
+
+ handleExitCounts(jit, exit);
+
+ // Reify inlined call frames.
+
+ reifyInlinedCallFrames(jit, exit);
+
+ // And finish.
+ adjustAndJumpToTarget(vm, jit, exit);
+}
+#endif // USE(JSVALUE64)
+
+void JIT_OPERATION OSRExit::debugOperationPrintSpeculationFailure(ExecState* exec, void* debugInfoRaw, void* scratch)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
+ CodeBlock* codeBlock = debugInfo->codeBlock;
+ CodeBlock* alternative = codeBlock->alternative();
+ dataLog("Speculation failure in ", *codeBlock);
+ dataLog(" @ exit #", vm->osrExitIndex, " (bc#", debugInfo->bytecodeOffset, ", ", exitKindToString(debugInfo->kind), ") with ");
+ if (alternative) {
+ dataLog(
+ "executeCounter = ", alternative->jitExecuteCounter(),
+ ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
+ ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
+ } else
+ dataLog("no alternative code block (i.e. we've been jettisoned)");
+ dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
+ dataLog(" GPRs at time of exit:");
+ char* scratchPointer = static_cast<char*>(scratch);
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+ GPRReg gpr = GPRInfo::toRegister(i);
+ dataLog(" ", GPRInfo::debugName(gpr), ":", RawPointer(*reinterpret_cast_ptr<void**>(scratchPointer)));
+ scratchPointer += sizeof(EncodedJSValue);
+ }
+ dataLog("\n");
+ dataLog(" FPRs at time of exit:");
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ FPRReg fpr = FPRInfo::toRegister(i);
+ dataLog(" ", FPRInfo::debugName(fpr), ":");
+ uint64_t bits = *reinterpret_cast_ptr<uint64_t*>(scratchPointer);
+ double value = *reinterpret_cast_ptr<double*>(scratchPointer);
+ dataLogF("%llx:%lf", static_cast<long long>(bits), value);
+ scratchPointer += sizeof(EncodedJSValue);
+ }
+ dataLog("\n");
+}
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
private:
static void compileExit(CCallHelpers&, VM&, const OSRExit&, const Operands<ValueRecovery>&, SpeculationRecovery*);
static void emitRestoreArguments(CCallHelpers&, const Operands<ValueRecovery>&);
+ static void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void*, void*) WTF_INTERNAL;
};
struct SpeculationFailureDebugInfo {
*/
#include "config.h"
-#include "DFGOSRExitCompiler.h"
+#include "DFGOSRExit.h"
#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)
-#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
+#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
#include "JSCInlines.h"
#include <wtf/DataLog.h>
+++ /dev/null
-/*
- * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGOSRExitCompiler.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "CallFrame.h"
-#include "DFGCommon.h"
-#include "DFGJITCode.h"
-#include "DFGOSRExitPreparation.h"
-#include "DFGOperations.h"
-#include "LinkBuffer.h"
-#include "OperandsInlines.h"
-#include "JSCInlines.h"
-#include <wtf/StringPrintStream.h>
-
-namespace JSC { namespace DFG {
-
-void OSRExit::emitRestoreArguments(CCallHelpers& jit, const Operands<ValueRecovery>& operands)
-{
- HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
- for (size_t index = 0; index < operands.size(); ++index) {
- const ValueRecovery& recovery = operands[index];
- int operand = operands.operandForIndex(index);
-
- if (recovery.technique() != DirectArgumentsThatWereNotCreated
- && recovery.technique() != ClonedArgumentsThatWereNotCreated)
- continue;
-
- MinifiedID id = recovery.nodeID();
- auto iter = alreadyAllocatedArguments.find(id);
- if (iter != alreadyAllocatedArguments.end()) {
- JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
- jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
- jit.storeValue(regs, CCallHelpers::addressFor(operand));
- continue;
- }
-
- InlineCallFrame* inlineCallFrame =
- jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();
-
- int stackOffset;
- if (inlineCallFrame)
- stackOffset = inlineCallFrame->stackOffset;
- else
- stackOffset = 0;
-
- if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
- jit.loadPtr(
- AssemblyHelpers::addressFor(stackOffset + CallFrameSlot::callee),
- GPRInfo::regT0);
- } else {
- jit.move(
- AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
- GPRInfo::regT0);
- }
-
- if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
- jit.load32(
- AssemblyHelpers::payloadFor(stackOffset + CallFrameSlot::argumentCount),
- GPRInfo::regT1);
- } else {
- jit.move(
- AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
- GPRInfo::regT1);
- }
-
- jit.setupArgumentsWithExecState(
- AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
- switch (recovery.technique()) {
- case DirectArgumentsThatWereNotCreated:
- jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
- break;
- case ClonedArgumentsThatWereNotCreated:
- jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- jit.call(GPRInfo::nonArgGPR0);
- jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
-
- alreadyAllocatedArguments.add(id, operand);
- }
-}
-
-void OSRExit::compileOSRExit(ExecState* exec)
-{
- VM* vm = &exec->vm();
- auto scope = DECLARE_THROW_SCOPE(*vm);
-
- if (vm->callFrameForCatch)
- RELEASE_ASSERT(vm->callFrameForCatch == exec);
-
- CodeBlock* codeBlock = exec->codeBlock();
- ASSERT(codeBlock);
- ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
-
- // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
- // really be profitable.
- DeferGCForAWhile deferGC(vm->heap);
-
- uint32_t exitIndex = vm->osrExitIndex;
- OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];
-
- if (vm->callFrameForCatch)
- ASSERT(exit.m_kind == GenericUnwind);
- if (exit.isExceptionHandler())
- ASSERT_UNUSED(scope, !!scope.exception());
-
-
- prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
-
- // Compute the value recoveries.
- Operands<ValueRecovery> operands;
- codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
-
- SpeculationRecovery* recovery = 0;
- if (exit.m_recoveryIndex != UINT_MAX)
- recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];
-
- {
- CCallHelpers jit(codeBlock);
-
- if (exit.m_kind == GenericUnwind) {
- // We are acting as a defacto op_catch because we arrive here from genericUnwind().
- // So, we must restore our call frame and stack pointer.
- jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm);
- jit.loadPtr(vm->addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
- }
- jit.addPtr(
- CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
- GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
-
- jit.jitAssertHasValidCallFrame();
-
- if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
- Profiler::Database& database = *vm->m_perBytecodeProfiler;
- Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
-
- Profiler::OSRExit* profilerExit = compilation->addOSRExit(
- exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
- exit.m_kind, exit.m_kind == UncountableInvalidation);
- jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
- }
-
- compileExit(jit, *vm, exit, operands, recovery);
-
- LinkBuffer patchBuffer(jit, codeBlock);
- exit.m_code = FINALIZE_CODE_IF(
- shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseDFGOSRExit(),
- patchBuffer,
- ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
- exitIndex, toCString(exit.m_codeOrigin).data(),
- exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
- toCString(ignoringContext<DumpContext>(operands)).data()));
- }
-
- MacroAssembler::repatchJump(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
-
- vm->osrExitJumpDestination = exit.m_code.code().executableAddress();
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
+++ /dev/null
-/*
- * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#if ENABLE(DFG_JIT)
-
-#include "CCallHelpers.h"
-#include "DFGOSRExit.h"
-#include "Operands.h"
-
-#endif // ENABLE(DFG_JIT)
+++ /dev/null
-/*
- * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGOSRExitCompiler.h"
-
-#if ENABLE(DFG_JIT) && USE(JSVALUE64)
-
-#include "DFGOperations.h"
-#include "DFGOSRExitCompilerCommon.h"
-#include "DFGSpeculativeJIT.h"
-#include "JSCInlines.h"
-#include "VirtualRegister.h"
-
-#include <wtf/DataLog.h>
-
-namespace JSC { namespace DFG {
-
-void OSRExit::compileExit(CCallHelpers& jit, VM& vm, const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
-{
- jit.jitAssertTagsInPlace();
-
- // Pro-forma stuff.
- if (Options::printEachOSRExit()) {
- SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
- debugInfo->codeBlock = jit.codeBlock();
- debugInfo->kind = exit.m_kind;
- debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
-
- jit.debugCall(vm, debugOperationPrintSpeculationFailure, debugInfo);
- }
-
- // Perform speculation recovery. This only comes into play when an operation
- // starts mutating state before verifying the speculation it has already made.
-
- if (recovery) {
- switch (recovery->type()) {
- case SpeculativeAdd:
- jit.sub32(recovery->src(), recovery->dest());
- jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
- break;
-
- case SpeculativeAddImmediate:
- jit.sub32(AssemblyHelpers::Imm32(recovery->immediate()), recovery->dest());
- jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
- break;
-
- case BooleanSpeculationCheck:
- jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
- break;
-
- default:
- break;
- }
- }
-
- // Refine some array and/or value profile, if appropriate.
-
- if (!!exit.m_jsValueSource) {
- if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
- // If the instruction that this originated from has an array profile, then
- // refine it. If it doesn't, then do nothing. The latter could happen for
- // hoisted checks, or checks emitted for operations that didn't have array
- // profiling - either ops that aren't array accesses at all, or weren't
- // known to be array acceses in the bytecode. The latter case is a FIXME
- // while the former case is an outcome of a CheckStructure not knowing why
- // it was emitted (could be either due to an inline cache of a property
- // property access, or due to an array profile).
-
- CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
- if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
- GPRReg usedRegister;
- if (exit.m_jsValueSource.isAddress())
- usedRegister = exit.m_jsValueSource.base();
- else
- usedRegister = exit.m_jsValueSource.gpr();
-
- GPRReg scratch1;
- GPRReg scratch2;
- scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
- scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
-
- if (isARM64()) {
- jit.pushToSave(scratch1);
- jit.pushToSave(scratch2);
- } else {
- jit.push(scratch1);
- jit.push(scratch2);
- }
-
- GPRReg value;
- if (exit.m_jsValueSource.isAddress()) {
- value = scratch1;
- jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
- } else
- value = exit.m_jsValueSource.gpr();
-
- jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
- jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID());
- jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeAndMiscOffset()), scratch1);
- jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
- jit.lshift32(scratch1, scratch2);
- jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
-
- if (isARM64()) {
- jit.popToRestore(scratch2);
- jit.popToRestore(scratch1);
- } else {
- jit.pop(scratch2);
- jit.pop(scratch1);
- }
- }
- }
-
- if (MethodOfGettingAValueProfile profile = exit.m_valueProfile) {
- if (exit.m_jsValueSource.isAddress()) {
- // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
- // since we know how to restore it.
- jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
- profile.emitReportValue(jit, JSValueRegs(GPRInfo::tagTypeNumberRegister));
- jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
- } else
- profile.emitReportValue(jit, JSValueRegs(exit.m_jsValueSource.gpr()));
- }
- }
-
- // What follows is an intentionally simple OSR exit implementation that generates
- // fairly poor code but is very easy to hack. In particular, it dumps all state that
- // needs conversion into a scratch buffer so that in step 6, where we actually do the
- // conversions, we know that all temp registers are free to use and the variable is
- // definitely in a well-known spot in the scratch buffer regardless of whether it had
- // originally been in a register or spilled. This allows us to decouple "where was
- // the variable" from "how was it represented". Consider that the
- // Int32DisplacedInJSStack recovery: it tells us that the value is in a
- // particular place and that that place holds an unboxed int32. We have two different
- // places that a value could be (displaced, register) and a bunch of different
- // ways of representing a value. The number of recoveries is two * a bunch. The code
- // below means that we have to have two + a bunch cases rather than two * a bunch.
- // Once we have loaded the value from wherever it was, the reboxing is the same
- // regardless of its location. Likewise, before we do the reboxing, the way we get to
- // the value (i.e. where we load it from) is the same regardless of its type. Because
- // the code below always dumps everything into a scratch buffer first, the two
- // questions become orthogonal, which simplifies adding new types and adding new
- // locations.
- //
- // This raises the question: does using such a suboptimal implementation of OSR exit,
- // where we always emit code to dump all state into a scratch buffer only to then
- // dump it right back into the stack, hurt us in any way? The asnwer is that OSR exits
- // are rare. Our tiering strategy ensures this. This is because if an OSR exit is
- // taken more than ~100 times, we jettison the DFG code block along with all of its
- // exits. It is impossible for an OSR exit - i.e. the code we compile below - to
- // execute frequently enough for the codegen to matter that much. It probably matters
- // enough that we don't want to turn this into some super-slow function call, but so
- // long as we're generating straight-line code, that code can be pretty bad. Also
- // because we tend to exit only along one OSR exit from any DFG code block - that's an
- // empirical result that we're extremely confident about - the code size of this
- // doesn't matter much. Hence any attempt to optimize the codegen here is just purely
- // harmful to the system: it probably won't reduce either net memory usage or net
- // execution time. It will only prevent us from cleanly decoupling "where was the
- // variable" from "how was it represented", which will make it more difficult to add
- // features in the future and it will make it harder to reason about bugs.
-
- // Save all state from GPRs into the scratch buffer.
-
- ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
- EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
-
- for (size_t index = 0; index < operands.size(); ++index) {
- const ValueRecovery& recovery = operands[index];
-
- switch (recovery.technique()) {
- case InGPR:
- case UnboxedInt32InGPR:
- case UnboxedInt52InGPR:
- case UnboxedStrictInt52InGPR:
- case UnboxedCellInGPR:
- jit.store64(recovery.gpr(), scratch + index);
- break;
-
- default:
- break;
- }
- }
-
- // And voila, all GPRs are free to reuse.
-
- // Save all state from FPRs into the scratch buffer.
-
- for (size_t index = 0; index < operands.size(); ++index) {
- const ValueRecovery& recovery = operands[index];
-
- switch (recovery.technique()) {
- case UnboxedDoubleInFPR:
- case InFPR:
- jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
- jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
- break;
-
- default:
- break;
- }
- }
-
- // Now, all FPRs are also free.
-
- // Save all state from the stack into the scratch buffer. For simplicity we
- // do this even for state that's already in the right place on the stack.
- // It makes things simpler later.
-
- for (size_t index = 0; index < operands.size(); ++index) {
- const ValueRecovery& recovery = operands[index];
-
- switch (recovery.technique()) {
- case DisplacedInJSStack:
- case CellDisplacedInJSStack:
- case BooleanDisplacedInJSStack:
- case Int32DisplacedInJSStack:
- case DoubleDisplacedInJSStack:
- case Int52DisplacedInJSStack:
- case StrictInt52DisplacedInJSStack:
- jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
- jit.store64(GPRInfo::regT0, scratch + index);
- break;
-
- default:
- break;
- }
- }
-
- // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
- // could toast some stack that the DFG used. We need to do it before storing to stack offsets
- // used by baseline.
- jit.addPtr(
- CCallHelpers::TrustedImm32(
- -jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
- CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
-
- // Restore the DFG callee saves and then save the ones the baseline JIT uses.
- jit.emitRestoreCalleeSaves();
- jit.emitSaveCalleeSavesFor(jit.baselineCodeBlock());
-
- // The tag registers are needed to materialize recoveries below.
- jit.emitMaterializeTagCheckRegisters();
-
- if (exit.isExceptionHandler())
- jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm);
-
- // Do all data format conversions and store the results into the stack.
-
- for (size_t index = 0; index < operands.size(); ++index) {
- const ValueRecovery& recovery = operands[index];
- VirtualRegister reg = operands.virtualRegisterForIndex(index);
-
- if (reg.isLocal() && reg.toLocal() < static_cast<int>(jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters()))
- continue;
-
- int operand = reg.offset();
-
- switch (recovery.technique()) {
- case InGPR:
- case UnboxedCellInGPR:
- case DisplacedInJSStack:
- case CellDisplacedInJSStack:
- case BooleanDisplacedInJSStack:
- case InFPR:
- jit.load64(scratch + index, GPRInfo::regT0);
- jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
- break;
-
- case UnboxedInt32InGPR:
- case Int32DisplacedInJSStack:
- jit.load64(scratch + index, GPRInfo::regT0);
- jit.zeroExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0);
- jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
- break;
-
- case UnboxedInt52InGPR:
- case Int52DisplacedInJSStack:
- jit.load64(scratch + index, GPRInfo::regT0);
- jit.rshift64(
- AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
- jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
- jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
- break;
-
- case UnboxedStrictInt52InGPR:
- case StrictInt52DisplacedInJSStack:
- jit.load64(scratch + index, GPRInfo::regT0);
- jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
- jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
- break;
-
- case UnboxedDoubleInFPR:
- case DoubleDisplacedInJSStack:
- jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
- jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
- jit.purifyNaN(FPRInfo::fpRegT0);
- jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
- jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
- break;
-
- case Constant:
- jit.store64(
- AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())),
- AssemblyHelpers::addressFor(operand));
- break;
-
- case DirectArgumentsThatWereNotCreated:
- case ClonedArgumentsThatWereNotCreated:
- // Don't do this, yet.
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
- // recoveries don't recursively refer to each other. But, we don't try to assume that they only
- // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible.
- // Note that we also roughly assume that the arguments might still be materialized outside of its
- // inline call frame scope - but for now the DFG wouldn't do that.
-
- emitRestoreArguments(jit, operands);
-
- // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
- // that all new calls into this code will go to the new JIT, so the execute
- // counter only affects call frames that performed OSR exit and call frames
- // that were still executing the old JIT at the time of another call frame's
- // OSR exit. We want to ensure that the following is true:
- //
- // (a) Code the performs an OSR exit gets a chance to reenter optimized
- // code eventually, since optimized code is faster. But we don't
- // want to do such reentery too aggressively (see (c) below).
- //
- // (b) If there is code on the call stack that is still running the old
- // JIT's code and has never OSR'd, then it should get a chance to
- // perform OSR entry despite the fact that we've exited.
- //
- // (c) Code the performs an OSR exit should not immediately retry OSR
- // entry, since both forms of OSR are expensive. OSR entry is
- // particularly expensive.
- //
- // (d) Frequent OSR failures, even those that do not result in the code
- // running in a hot loop, result in recompilation getting triggered.
- //
- // To ensure (c), we'd like to set the execute counter to
- // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
- // (a) and (b), since then every OSR exit would delay the opportunity for
- // every call frame to perform OSR entry. Essentially, if OSR exit happens
- // frequently and the function has few loops, then the counter will never
- // become non-negative and OSR entry will never be triggered. OSR entry
- // will only happen if a loop gets hot in the old JIT, which does a pretty
- // good job of ensuring (a) and (b). But that doesn't take care of (d),
- // since each speculation failure would reset the execute counter.
- // So we check here if the number of speculation failures is significantly
- // larger than the number of successes (we want 90% success rate), and if
- // there have been a large enough number of failures. If so, we set the
- // counter to 0; otherwise we set the counter to
- // counterValueForOptimizeAfterWarmUp().
-
- handleExitCounts(jit, exit);
-
- // Reify inlined call frames.
-
- reifyInlinedCallFrames(jit, exit);
-
- // And finish.
- adjustAndJumpToTarget(vm, jit, exit);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)
vm.typeProfilerLog()->processLogEntries(ASCIILiteral("Log Full, called from inside DFG."));
}
-void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void* debugInfoRaw, void* scratch)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-
- SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
- CodeBlock* codeBlock = debugInfo->codeBlock;
- CodeBlock* alternative = codeBlock->alternative();
- dataLog("Speculation failure in ", *codeBlock);
- dataLog(" @ exit #", vm->osrExitIndex, " (bc#", debugInfo->bytecodeOffset, ", ", exitKindToString(debugInfo->kind), ") with ");
- if (alternative) {
- dataLog(
- "executeCounter = ", alternative->jitExecuteCounter(),
- ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
- ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
- } else
- dataLog("no alternative code block (i.e. we've been jettisoned)");
- dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
- dataLog(" GPRs at time of exit:");
- char* scratchPointer = static_cast<char*>(scratch);
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
- GPRReg gpr = GPRInfo::toRegister(i);
- dataLog(" ", GPRInfo::debugName(gpr), ":", RawPointer(*reinterpret_cast_ptr<void**>(scratchPointer)));
- scratchPointer += sizeof(EncodedJSValue);
- }
- dataLog("\n");
- dataLog(" FPRs at time of exit:");
- for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
- FPRReg fpr = FPRInfo::toRegister(i);
- dataLog(" ", FPRInfo::debugName(fpr), ":");
- uint64_t bits = *reinterpret_cast_ptr<uint64_t*>(scratchPointer);
- double value = *reinterpret_cast_ptr<double*>(scratchPointer);
- dataLogF("%llx:%lf", static_cast<long long>(bits), value);
- scratchPointer += sizeof(EncodedJSValue);
- }
- dataLog("\n");
-}
-
EncodedJSValue JIT_OPERATION operationResolveScopeForHoistingFuncDeclInEval(ExecState* exec, JSScope* scope, UniquedStringImpl* impl)
{
VM& vm = exec->vm();
/*
- * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
void JIT_OPERATION operationProcessTypeProfilerLogDFG(ExecState*) WTF_INTERNAL;
-void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void*, void*) WTF_INTERNAL;
-
void JIT_OPERATION triggerReoptimizationNow(CodeBlock*, OSRExitBase*) WTF_INTERNAL;
#if USE(JSVALUE32_64)
#if ENABLE(DFG_JIT)
#include "CCallHelpers.h"
-#include "DFGOSRExitCompiler.h"
#include "DFGJITCode.h"
+#include "DFGOSRExit.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "LinkBuffer.h"