Revert bytecode checkpoints since they break watch
author keith_miller@apple.com <keith_miller@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 15 Jan 2020 21:30:57 +0000 (21:30 +0000)
committer keith_miller@apple.com <keith_miller@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 15 Jan 2020 21:30:57 +0000 (21:30 +0000)
https://bugs.webkit.org/show_bug.cgi?id=206301

Unreviewed, revert.

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@254632 268f45cc-cd09-0410-ab3c-d52691b4dbfc

190 files changed:
JSTests/ChangeLog
JSTests/stress/apply-osr-exit-should-get-length-once-exceptions-occasionally.js [deleted file]
JSTests/stress/apply-osr-exit-should-get-length-once.js [deleted file]
JSTests/stress/load-varargs-then-inlined-call-and-exit-strict.js
JSTests/stress/recursive-tail-call-with-different-argument-count.js
JSTests/stress/rest-varargs-osr-exit-to-checkpoint.js [deleted file]
Source/JavaScriptCore/CMakeLists.txt
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/DerivedSources-input.xcfilelist
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
Source/JavaScriptCore/assembler/ProbeFrame.h
Source/JavaScriptCore/b3/testb3.h
Source/JavaScriptCore/bytecode/AccessCase.cpp
Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp
Source/JavaScriptCore/bytecode/BytecodeDumper.cpp
Source/JavaScriptCore/bytecode/BytecodeDumper.h
Source/JavaScriptCore/bytecode/BytecodeIndex.cpp
Source/JavaScriptCore/bytecode/BytecodeIndex.h
Source/JavaScriptCore/bytecode/BytecodeList.rb
Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
Source/JavaScriptCore/bytecode/CodeBlock.cpp
Source/JavaScriptCore/bytecode/CodeBlock.h
Source/JavaScriptCore/bytecode/CodeOrigin.h
Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
Source/JavaScriptCore/bytecode/InlineCallFrame.h
Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
Source/JavaScriptCore/bytecode/Operands.h
Source/JavaScriptCore/bytecode/OperandsInlines.h
Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
Source/JavaScriptCore/bytecode/ValueProfile.h
Source/JavaScriptCore/bytecode/ValueRecovery.cpp
Source/JavaScriptCore/bytecode/ValueRecovery.h
Source/JavaScriptCore/bytecode/VirtualRegister.h
Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
Source/JavaScriptCore/bytecompiler/RegisterID.h
Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp
Source/JavaScriptCore/dfg/DFGAbstractHeap.h
Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
Source/JavaScriptCore/dfg/DFGArgumentPosition.h
Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp
Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp
Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h
Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h
Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp
Source/JavaScriptCore/dfg/DFGAvailabilityMap.h
Source/JavaScriptCore/dfg/DFGBasicBlock.cpp
Source/JavaScriptCore/dfg/DFGBasicBlock.h
Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp
Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
Source/JavaScriptCore/dfg/DFGCapabilities.cpp
Source/JavaScriptCore/dfg/DFGClobberize.h
Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp
Source/JavaScriptCore/dfg/DFGCommonData.cpp
Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
Source/JavaScriptCore/dfg/DFGDoesGC.cpp
Source/JavaScriptCore/dfg/DFGDriver.cpp
Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
Source/JavaScriptCore/dfg/DFGForAllKills.h
Source/JavaScriptCore/dfg/DFGGraph.cpp
Source/JavaScriptCore/dfg/DFGGraph.h
Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp
Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
Source/JavaScriptCore/dfg/DFGJITCompiler.h
Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.cpp
Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp
Source/JavaScriptCore/dfg/DFGNode.h
Source/JavaScriptCore/dfg/DFGNodeType.h
Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp
Source/JavaScriptCore/dfg/DFGOSREntry.cpp
Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
Source/JavaScriptCore/dfg/DFGOSRExit.cpp
Source/JavaScriptCore/dfg/DFGOSRExit.h
Source/JavaScriptCore/dfg/DFGOSRExitBase.h
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
Source/JavaScriptCore/dfg/DFGOpInfo.h
Source/JavaScriptCore/dfg/DFGOperations.cpp
Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp
Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h
Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp
Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp
Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp
Source/JavaScriptCore/dfg/DFGSafeToExecute.h
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp
Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
Source/JavaScriptCore/dfg/DFGThunks.cpp
Source/JavaScriptCore/dfg/DFGThunks.h
Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
Source/JavaScriptCore/dfg/DFGValidate.cpp
Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp
Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp
Source/JavaScriptCore/dfg/DFGVariableAccessData.h
Source/JavaScriptCore/dfg/DFGVariableEvent.cpp
Source/JavaScriptCore/dfg/DFGVariableEvent.h
Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp
Source/JavaScriptCore/dfg/DFGVariableEventStream.h
Source/JavaScriptCore/ftl/FTLCapabilities.cpp
Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp
Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
Source/JavaScriptCore/ftl/FTLOSREntry.cpp
Source/JavaScriptCore/ftl/FTLOSRExit.cpp
Source/JavaScriptCore/ftl/FTLOSRExit.h
Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
Source/JavaScriptCore/ftl/FTLOperations.cpp
Source/JavaScriptCore/ftl/FTLOutput.cpp
Source/JavaScriptCore/ftl/FTLOutput.h
Source/JavaScriptCore/ftl/FTLSelectPredictability.h [deleted file]
Source/JavaScriptCore/ftl/FTLSlowPathCall.h
Source/JavaScriptCore/generator/Checkpoints.rb [deleted file]
Source/JavaScriptCore/generator/Opcode.rb
Source/JavaScriptCore/generator/Section.rb
Source/JavaScriptCore/heap/Heap.cpp
Source/JavaScriptCore/interpreter/CallFrame.cpp
Source/JavaScriptCore/interpreter/CallFrame.h
Source/JavaScriptCore/interpreter/CallFrameInlines.h
Source/JavaScriptCore/interpreter/CheckpointOSRExitSideState.h [deleted file]
Source/JavaScriptCore/interpreter/Interpreter.cpp
Source/JavaScriptCore/interpreter/Interpreter.h
Source/JavaScriptCore/interpreter/StackVisitor.cpp
Source/JavaScriptCore/jit/AssemblyHelpers.h
Source/JavaScriptCore/jit/CallFrameShuffler.cpp
Source/JavaScriptCore/jit/CallFrameShuffler.h
Source/JavaScriptCore/jit/JIT.h
Source/JavaScriptCore/jit/JITArithmetic.cpp
Source/JavaScriptCore/jit/JITCall.cpp
Source/JavaScriptCore/jit/JITExceptions.cpp
Source/JavaScriptCore/jit/JITInlines.h
Source/JavaScriptCore/jit/JITOpcodes.cpp
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
Source/JavaScriptCore/jit/JITOperations.cpp
Source/JavaScriptCore/jit/JITPropertyAccess.cpp
Source/JavaScriptCore/jit/JSInterfaceJIT.h
Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
Source/JavaScriptCore/jit/SpecializedThunkJIT.h
Source/JavaScriptCore/jit/ThunkGenerators.cpp
Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
Source/JavaScriptCore/llint/LLIntSlowPaths.h
Source/JavaScriptCore/llint/LowLevelInterpreter.asm
Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
Source/JavaScriptCore/runtime/ArgList.h
Source/JavaScriptCore/runtime/CachedTypes.cpp
Source/JavaScriptCore/runtime/CommonSlowPaths.cpp
Source/JavaScriptCore/runtime/ConstructData.cpp
Source/JavaScriptCore/runtime/ConstructData.h
Source/JavaScriptCore/runtime/DirectArguments.cpp
Source/JavaScriptCore/runtime/DirectArguments.h
Source/JavaScriptCore/runtime/GenericArguments.h
Source/JavaScriptCore/runtime/GenericArgumentsInlines.h
Source/JavaScriptCore/runtime/JSArray.cpp
Source/JavaScriptCore/runtime/JSArray.h
Source/JavaScriptCore/runtime/JSImmutableButterfly.cpp
Source/JavaScriptCore/runtime/JSImmutableButterfly.h
Source/JavaScriptCore/runtime/JSLock.cpp
Source/JavaScriptCore/runtime/ModuleProgramExecutable.cpp
Source/JavaScriptCore/runtime/Options.cpp
Source/JavaScriptCore/runtime/ScopedArguments.cpp
Source/JavaScriptCore/runtime/ScopedArguments.h
Source/JavaScriptCore/runtime/VM.cpp
Source/JavaScriptCore/runtime/VM.h
Source/JavaScriptCore/tools/VMInspector.cpp
Source/JavaScriptCore/wasm/WasmFunctionCodeBlock.h
Source/JavaScriptCore/wasm/WasmLLIntGenerator.cpp
Source/JavaScriptCore/wasm/WasmOperations.cpp
Source/JavaScriptCore/wasm/WasmSlowPaths.cpp
Source/WTF/ChangeLog
Source/WTF/WTF.xcodeproj/project.pbxproj
Source/WTF/wtf/Bitmap.h
Source/WTF/wtf/CMakeLists.txt
Source/WTF/wtf/EnumClassOperatorOverloads.h [deleted file]
Source/WTF/wtf/FastBitVector.h
Source/WTF/wtf/UnalignedAccess.h
Tools/ChangeLog
Tools/Scripts/run-jsc-stress-tests

diff --git a/JSTests/ChangeLog b/JSTests/ChangeLog
index 1b448d7..7c8d62d 100644 (file)
@@ -1,3 +1,10 @@
+2020-01-15  Keith Miller  <keith_miller@apple.com>
+
+        Revert bytecode checkpoints since they break watch
+        https://bugs.webkit.org/show_bug.cgi?id=206301
+
+        Unreviewed, revert.
+
 2020-01-15  Alexey Shvayka  <shvaikalesh@gmail.com>
 
         Object.preventExtensions should throw if not successful
diff --git a/JSTests/stress/apply-osr-exit-should-get-length-once-exceptions-occasionally.js b/JSTests/stress/apply-osr-exit-should-get-length-once-exceptions-occasionally.js
deleted file mode 100644 (file)
index f3f15a6..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-
-let currentArgCount;
-function expectedArgCount() {
-    return currentArgCount;
-}
-noInline(expectedArgCount);
-
-function callee() {
-    if (arguments.length != expectedArgCount())
-        throw new Error();
-}
-
-function test(array) {
-    callee.apply(undefined, array);
-}
-noInline(test);
-
-let lengthCalls = 0;
-currentArgCount = 2;
-let array = { 0: 1, 1: 2, get length() {
-    if (lengthCalls++ % 10 == 1)
-        throw new Error("throwing an exception in length");
-    return currentArgCount
-} }
-for (let i = 0; i < 1e6; i++) {
-    try {
-        test(array);
-    } catch { }
-}
-
-currentArgCount = 100;
-lengthCalls = 0;
-test(array);
-
-if (lengthCalls !== 1)
-    throw new Error(lengthCalls);
diff --git a/JSTests/stress/apply-osr-exit-should-get-length-once.js b/JSTests/stress/apply-osr-exit-should-get-length-once.js
deleted file mode 100644 (file)
index 8bee45c..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-
-let currentArgCount;
-function expectedArgCount() {
-    return currentArgCount;
-}
-noInline(expectedArgCount);
-
-function callee() {
-    if (arguments.length != expectedArgCount())
-        throw new Error();
-}
-
-function test(array) {
-    callee.apply(undefined, array);
-}
-noInline(test);
-
-let lengthCalls = 0;
-currentArgCount = 2;
-let array = { 0: 1, 1: 2, get length() { lengthCalls++; return currentArgCount } }
-for (let i = 0; i < 1e5; i++)
-    test(array);
-
-
-test(array);
-currentArgCount = 100;
-lengthCalls = 0;
-test(array);
-
-if (lengthCalls !== 1)
-    throw new Error(lengthCalls);
diff --git a/JSTests/stress/load-varargs-then-inlined-call-and-exit-strict.js b/JSTests/stress/load-varargs-then-inlined-call-and-exit-strict.js
index aa73231..3618f8c 100644 (file)
@@ -18,8 +18,8 @@ function checkEqual(a, b) {
         throw "Error: bad value of a: " + a.a + " versus " + b.a;
     if (a.b != b.b)
         throw "Error: bad value of b: " + a.b + " versus " + b.b;
-    if (a.c.length !== b.c.length)
-        throw "Error: bad value of c, length mismatch: " + a.c.length + " versus " + b.c.length;
+    if (a.c.length != b.c.length)
+        throw "Error: bad value of c, length mismatch: " + a.c + " versus " + b.c;
     for (var i = a.c.length; i--;) {
         if (a.c[i] != b.c[i])
             throw "Error: bad value of c, mismatch at i = " + i + ": " + a.c + " versus " + b.c;
diff --git a/JSTests/stress/recursive-tail-call-with-different-argument-count.js b/JSTests/stress/recursive-tail-call-with-different-argument-count.js
index 53d752d..047fb01 100644 (file)
@@ -18,8 +18,8 @@ noInline(bar);
 for (var i = 0; i < 10000; ++i) {
     var result = foo(40, 2);
     if (result !== 42)
-        throw Error("Wrong result for foo, expected 42, got " + result);
+        throw "Wrong result for foo, expected 42, got " + result;
     result = bar(40, 2);
     if (result !== 42)
-        throw Error("Wrong result for bar, expected 42, got " + result);
+        throw "Wrong result for bar, expected 42, got " + result;
 }
diff --git a/JSTests/stress/rest-varargs-osr-exit-to-checkpoint.js b/JSTests/stress/rest-varargs-osr-exit-to-checkpoint.js
deleted file mode 100644 (file)
index 87ba055..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-"use strict";
-
-function foo(a, b, ...rest) {
-    return rest.length;
-}
-
-function bar(a, b, ...rest) {
-    return foo.call(...rest);
-}
-noInline(bar);
-
-let array = new Array(10);
-for (let i = 0; i < 1e5; ++i) {
-    let result = bar(...array);
-    if (result !== array.length - bar.length - foo.length - 1)
-        throw new Error(i + " " + result);
-}
-
-array.length = 10000;
-if (bar(...array) !== array.length - bar.length - foo.length - 1)
-    throw new Error();
diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt
index e1cb0a5..190a66d 100644 (file)
@@ -525,7 +525,6 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS
     bytecode/ObjectPropertyCondition.h
     bytecode/Opcode.h
     bytecode/OpcodeSize.h
-    bytecode/Operands.h
     bytecode/PropertyCondition.h
     bytecode/PutByIdFlags.h
     bytecode/SpeculatedType.h
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index f8d8aaf..3496300 100644 (file)
@@ -1,3 +1,10 @@
+2020-01-15  Keith Miller  <keith_miller@apple.com>
+
+        Revert bytecode checkpoints since they break watch
+        https://bugs.webkit.org/show_bug.cgi?id=206301
+
+        Unreviewed, revert.
+
 2020-01-15  Alexey Shvayka  <shvaikalesh@gmail.com>
 
         Object.preventExtensions should throw if not successful
diff --git a/Source/JavaScriptCore/DerivedSources-input.xcfilelist b/Source/JavaScriptCore/DerivedSources-input.xcfilelist
index 9ead653..f8c1b4a 100644 (file)
@@ -64,7 +64,6 @@ $(PROJECT_DIR)/disassembler/udis86/optable.xml
 $(PROJECT_DIR)/disassembler/udis86/ud_itab.py
 $(PROJECT_DIR)/generator/Argument.rb
 $(PROJECT_DIR)/generator/Assertion.rb
-$(PROJECT_DIR)/generator/Checkpoints.rb
 $(PROJECT_DIR)/generator/DSL.rb
 $(PROJECT_DIR)/generator/Fits.rb
 $(PROJECT_DIR)/generator/GeneratedFile.rb
diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index 66f935d..3940f1d 100644 (file)
                5333BBDB2110F7D2007618EC /* DFGSpeculativeJIT32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86880F1B14328BB900B08D42 /* DFGSpeculativeJIT32_64.cpp */; };
                5333BBDC2110F7D9007618EC /* DFGSpeculativeJIT.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86EC9DC21328DF82002B2AD7 /* DFGSpeculativeJIT.cpp */; };
                5333BBDD2110F7E1007618EC /* DFGSpeculativeJIT64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86880F4C14353B2100B08D42 /* DFGSpeculativeJIT64.cpp */; };
-               5338E2A72396EFFB00C61BAD /* CheckpointOSRExitSideState.h in Headers */ = {isa = PBXBuildFile; fileRef = 5338E2A62396EFEC00C61BAD /* CheckpointOSRExitSideState.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               5338EBA323AB04B800382662 /* FTLSelectPredictability.h in Headers */ = {isa = PBXBuildFile; fileRef = 5338EBA223AB04A300382662 /* FTLSelectPredictability.h */; };
                5341FC721DAC343C00E7E4D7 /* B3WasmBoundsCheckValue.h in Headers */ = {isa = PBXBuildFile; fileRef = 5341FC711DAC343C00E7E4D7 /* B3WasmBoundsCheckValue.h */; };
                534638711E70CF3D00F12AC1 /* JSRunLoopTimer.h in Headers */ = {isa = PBXBuildFile; fileRef = 534638701E70CF3D00F12AC1 /* JSRunLoopTimer.h */; settings = {ATTRIBUTES = (Private, ); }; };
                534638751E70DDEC00F12AC1 /* PromiseTimer.h in Headers */ = {isa = PBXBuildFile; fileRef = 534638741E70DDEC00F12AC1 /* PromiseTimer.h */; settings = {ATTRIBUTES = (Private, ); }; };
                5318045D22EAAF0F004A7342 /* B3ExtractValue.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = B3ExtractValue.cpp; path = b3/B3ExtractValue.cpp; sourceTree = "<group>"; };
                531D4E191F59CDD200EC836C /* testapi.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = testapi.cpp; path = API/tests/testapi.cpp; sourceTree = "<group>"; };
                532631B3218777A5007B8191 /* JavaScriptCore.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = JavaScriptCore.modulemap; sourceTree = "<group>"; };
-               5338E2A62396EFEC00C61BAD /* CheckpointOSRExitSideState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CheckpointOSRExitSideState.h; sourceTree = "<group>"; };
-               5338EBA223AB04A300382662 /* FTLSelectPredictability.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FTLSelectPredictability.h; path = ftl/FTLSelectPredictability.h; sourceTree = "<group>"; };
                533B15DE1DC7F463004D500A /* WasmOps.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WasmOps.h; sourceTree = "<group>"; };
                5341FC6F1DAC33E500E7E4D7 /* B3WasmBoundsCheckValue.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = B3WasmBoundsCheckValue.cpp; path = b3/B3WasmBoundsCheckValue.cpp; sourceTree = "<group>"; };
                5341FC711DAC343C00E7E4D7 /* B3WasmBoundsCheckValue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = B3WasmBoundsCheckValue.h; path = b3/B3WasmBoundsCheckValue.h; sourceTree = "<group>"; };
                534C457A1BC703DC007476A7 /* TypedArrayConstructor.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = TypedArrayConstructor.js; sourceTree = "<group>"; };
                534C457B1BC72411007476A7 /* JSTypedArrayViewConstructor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSTypedArrayViewConstructor.h; sourceTree = "<group>"; };
                534C457D1BC72549007476A7 /* JSTypedArrayViewConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSTypedArrayViewConstructor.cpp; sourceTree = "<group>"; };
-               534D9BF82363C55D0054524D /* SetIteratorPrototype.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = SetIteratorPrototype.js; sourceTree = "<group>"; };
-               534D9BF92363C55D0054524D /* IteratorHelpers.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = IteratorHelpers.js; sourceTree = "<group>"; };
-               534D9BFA2363C55D0054524D /* MapIteratorPrototype.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = MapIteratorPrototype.js; sourceTree = "<group>"; };
                534E034D1E4D4B1600213F64 /* AccessCase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AccessCase.h; sourceTree = "<group>"; };
                534E034F1E4D95ED00213F64 /* AccessCase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AccessCase.cpp; sourceTree = "<group>"; };
                534E03531E53BD2900213F64 /* IntrinsicGetterAccessCase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntrinsicGetterAccessCase.h; sourceTree = "<group>"; };
                                0F485326187DFDEC0083B687 /* FTLRecoveryOpcode.h */,
                                0FCEFAA91804C13E00472CE4 /* FTLSaveRestore.cpp */,
                                0FCEFAAA1804C13E00472CE4 /* FTLSaveRestore.h */,
-                               5338EBA223AB04A300382662 /* FTLSelectPredictability.h */,
                                0F25F1AA181635F300522F39 /* FTLSlowPathCall.cpp */,
                                0F25F1AB181635F300522F39 /* FTLSlowPathCall.h */,
                                0F25F1AC181635F300522F39 /* FTLSlowPathCallKey.cpp */,
                                1429D8DC0ED2205B00B89619 /* CallFrame.h */,
                                A7F869EC0F95C2EC00558697 /* CallFrameClosure.h */,
                                FEA3BBA7212B655800E93AD1 /* CallFrameInlines.h */,
-                               5338E2A62396EFEC00C61BAD /* CheckpointOSRExitSideState.h */,
                                1429D85B0ED218E900B89619 /* CLoopStack.cpp */,
                                14D792640DAA03FB001A9F05 /* CLoopStack.h */,
                                A7C1EAEB17987AB600299DB2 /* CLoopStackInlines.h */,
                                A52704851D027C8800354C37 /* GlobalOperations.js */,
                                E35E03611B7AB4850073AD2A /* InspectorInstrumentationObject.js */,
                                E33F50881B844A1A00413856 /* InternalPromiseConstructor.js */,
-                               534D9BF92363C55D0054524D /* IteratorHelpers.js */,
                                7CF9BC5B1B65D9A3009DB1EF /* IteratorPrototype.js */,
-                               534D9BFA2363C55D0054524D /* MapIteratorPrototype.js */,
                                7035587C1C418419004BD7BF /* MapPrototype.js */,
                                E30677971B8BC6F5003F87F0 /* ModuleLoader.js */,
                                A52704861D027C8800354C37 /* NumberConstructor.js */,
                                7CF9BC5F1B65D9B1009DB1EF /* ReflectObject.js */,
                                654788421C937D2C000781A0 /* RegExpPrototype.js */,
                                84925A9C22B30CC800D1DFFF /* RegExpStringIteratorPrototype.js */,
-                               534D9BF82363C55D0054524D /* SetIteratorPrototype.js */,
                                7035587D1C418419004BD7BF /* SetPrototype.js */,
                                7CF9BC601B65D9B1009DB1EF /* StringConstructor.js */,
                                7CF9BC611B65D9B1009DB1EF /* StringIteratorPrototype.js */,
                                FE1BD0211E72027900134BC9 /* CellProfile.h in Headers */,
                                FEC160322339E9F900A04CB8 /* CellSize.h in Headers */,
                                0F1C3DDA1BBCE09E00E523E4 /* CellState.h in Headers */,
-                               5338E2A72396EFFB00C61BAD /* CheckpointOSRExitSideState.h in Headers */,
                                BC6AAAE50E1F426500AD87D8 /* ClassInfo.h in Headers */,
                                0FE050261AA9095600D33B33 /* ClonedArguments.h in Headers */,
                                BC18C45E0E16F5CD00B34460 /* CLoopStack.h in Headers */,
                                0F9D4C111C3E2C74006CD984 /* FTLPatchpointExceptionHandle.h in Headers */,
                                0F48532A187DFDEC0083B687 /* FTLRecoveryOpcode.h in Headers */,
                                0FCEFAAC1804C13E00472CE4 /* FTLSaveRestore.h in Headers */,
-                               5338EBA323AB04B800382662 /* FTLSelectPredictability.h in Headers */,
                                0F25F1B2181635F300522F39 /* FTLSlowPathCall.h in Headers */,
                                0F25F1B4181635F300522F39 /* FTLSlowPathCallKey.h in Headers */,
                                E322E5A71DA644A8006E7709 /* FTLSnippetParams.h in Headers */,
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 6884083..56d8a07 100644 (file)
@@ -239,6 +239,13 @@ public:
         ASSERT_VALID_CODE_POINTER(m_value);
     }
 
+    template<PtrTag tag>
+    explicit ReturnAddressPtr(FunctionPtr<tag> function)
+        : m_value(untagCodePtr<tag>(function.executableAddress()))
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
     const void* value() const
     {
         return m_value;
diff --git a/Source/JavaScriptCore/assembler/ProbeFrame.h b/Source/JavaScriptCore/assembler/ProbeFrame.h
index 4229a0d..cab368d 100644 (file)
@@ -46,14 +46,14 @@ public:
         return get<T>(CallFrame::argumentOffset(argument) * sizeof(Register));
     }
     template<typename T = JSValue>
-    T operand(VirtualRegister operand)
+    T operand(int operand)
     {
-        return get<T>(operand.offset() * sizeof(Register));
+        return get<T>(static_cast<VirtualRegister>(operand).offset() * sizeof(Register));
     }
     template<typename T = JSValue>
-    T operand(VirtualRegister operand, ptrdiff_t offset)
+    T operand(int operand, ptrdiff_t offset)
     {
-        return get<T>(operand.offset() * sizeof(Register) + offset);
+        return get<T>(static_cast<VirtualRegister>(operand).offset() * sizeof(Register) + offset);
     }
 
     template<typename T>
@@ -62,14 +62,14 @@ public:
         return set<T>(CallFrame::argumentOffset(argument) * sizeof(Register), value);
     }
     template<typename T>
-    void setOperand(VirtualRegister operand, T value)
+    void setOperand(int operand, T value)
     {
-        set<T>(operand.offset() * sizeof(Register), value);
+        set<T>(static_cast<VirtualRegister>(operand).offset() * sizeof(Register), value);
     }
     template<typename T>
-    void setOperand(VirtualRegister operand, ptrdiff_t offset, T value)
+    void setOperand(int operand, ptrdiff_t offset, T value)
     {
-        set<T>(operand.offset() * sizeof(Register) + offset, value);
+        set<T>(static_cast<VirtualRegister>(operand).offset() * sizeof(Register) + offset, value);
     }
 
     template<typename T = JSValue>
diff --git a/Source/JavaScriptCore/b3/testb3.h b/Source/JavaScriptCore/b3/testb3.h
index 41bc961..35d0175 100644 (file)
@@ -263,20 +263,20 @@ inline void checkDoesNotUseInstruction(Compilation& compilation, const char* tex
 }
 
 template<typename Type>
-struct B3Operand {
+struct Operand {
     const char* name;
     Type value;
 };
 
-typedef B3Operand<int64_t> Int64Operand;
-typedef B3Operand<int32_t> Int32Operand;
-typedef B3Operand<int16_t> Int16Operand;
-typedef B3Operand<int8_t> Int8Operand;
+typedef Operand<int64_t> Int64Operand;
+typedef Operand<int32_t> Int32Operand;
+typedef Operand<int16_t> Int16Operand;
+typedef Operand<int8_t> Int8Operand;
 
-#define MAKE_OPERAND(value) B3Operand<decltype(value)> { #value, value }
+#define MAKE_OPERAND(value) Operand<decltype(value)> { #value, value }
 
 template<typename FloatType>
-void populateWithInterestingValues(Vector<B3Operand<FloatType>>& operands)
+void populateWithInterestingValues(Vector<Operand<FloatType>>& operands)
 {
     operands.append({ "0.", static_cast<FloatType>(0.) });
     operands.append({ "-0.", static_cast<FloatType>(-0.) });
@@ -302,9 +302,9 @@ void populateWithInterestingValues(Vector<B3Operand<FloatType>>& operands)
 }
 
 template<typename FloatType>
-Vector<B3Operand<FloatType>> floatingPointOperands()
+Vector<Operand<FloatType>> floatingPointOperands()
 {
-    Vector<B3Operand<FloatType>> operands;
+    Vector<Operand<FloatType>> operands;
     populateWithInterestingValues(operands);
     return operands;
 };
diff --git a/Source/JavaScriptCore/bytecode/AccessCase.cpp b/Source/JavaScriptCore/bytecode/AccessCase.cpp
index 71da86d..c8b52b8 100644 (file)
@@ -1520,7 +1520,7 @@ void AccessCase::generateImpl(AccessGenerationState& state)
 
         jit.store32(
             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
-            CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis));
+            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCountIncludingThis)));
 
         if (m_type == Getter || m_type == Setter) {
             auto& access = this->as<GetterSetterAccessCase>();
@@ -1809,7 +1809,7 @@ void AccessCase::generateImpl(AccessGenerationState& state)
                 jit.store32(
                     CCallHelpers::TrustedImm32(
                         state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
-                    CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis));
+                    CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCountIncludingThis)));
                 
                 jit.makeSpaceOnStackForCCall();
                 
diff --git a/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp b/Source/JavaScriptCore/bytecode/AccessCaseSnippetParams.cpp
index 69b5ee1..b1d404a 100644 (file)
@@ -55,7 +55,7 @@ public:
 
         jit.store32(
             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
-            CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis));
+            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCountIncludingThis)));
 
         jit.makeSpaceOnStackForCCall();
 
diff --git a/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp b/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp
index 8365115..6058fb0 100644 (file)
@@ -63,7 +63,7 @@ void BytecodeDumperBase::printLocationAndOp(InstructionStream::Offset location,
 
 void BytecodeDumperBase::dumpValue(VirtualRegister reg)
 {
-    m_out.printf("%s", registerName(reg).data());
+    m_out.printf("%s", registerName(reg.offset()).data());
 }
 
 template<typename Traits>
@@ -83,12 +83,12 @@ template void BytecodeDumperBase::dumpValue(GenericBoundLabel<Wasm::GeneratorTra
 #endif // ENABLE(WEBASSEMBLY)
 
 template<class Block>
-CString BytecodeDumper<Block>::registerName(VirtualRegister r) const
+CString BytecodeDumper<Block>::registerName(int r) const
 {
-    if (r.isConstant())
+    if (isConstantRegisterIndex(r))
         return constantName(r);
 
-    return toCString(r);
+    return toCString(VirtualRegister(r));
 }
 
 template <class Block>
@@ -98,10 +98,10 @@ int BytecodeDumper<Block>::outOfLineJumpOffset(InstructionStream::Offset offset)
 }
 
 template<class Block>
-CString BytecodeDumper<Block>::constantName(VirtualRegister reg) const
+CString BytecodeDumper<Block>::constantName(int index) const
 {
-    auto value = block()->getConstant(reg);
-    return toCString(value, "(", reg, ")");
+    auto value = block()->getConstant(index);
+    return toCString(value, "(", VirtualRegister(index), ")");
 }
 
 template<class Block>
@@ -335,7 +335,7 @@ void BytecodeDumper::dumpConstants()
     }
 }
 
-CString BytecodeDumper::constantName(VirtualRegister index) const
+CString BytecodeDumper::constantName(int index) const
 {
     FunctionCodeBlock* block = this->block();
     auto value = formatConstant(block->getConstantType(index), block->getConstant(index));
diff --git a/Source/JavaScriptCore/bytecode/BytecodeDumper.h b/Source/JavaScriptCore/bytecode/BytecodeDumper.h
index edb1129..0d71675 100644 (file)
@@ -61,7 +61,7 @@ public:
     void dumpValue(T v) { m_out.print(v); }
 
 protected:
-    virtual CString registerName(VirtualRegister) const = 0;
+    virtual CString registerName(int) const = 0;
     virtual int outOfLineJumpOffset(InstructionStream::Offset) const = 0;
 
     BytecodeDumperBase(PrintStream& out)
@@ -91,11 +91,11 @@ protected:
 
     void dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&);
 
-    CString registerName(VirtualRegister) const override;
+    CString registerName(int r) const override;
     int outOfLineJumpOffset(InstructionStream::Offset offset) const override;
 
 private:
-    virtual CString constantName(VirtualRegister) const;
+    virtual CString constantName(int index) const;
 
     Block* m_block;
 };
@@ -135,7 +135,7 @@ private:
     using JSC::BytecodeDumper<FunctionCodeBlock>::BytecodeDumper;
 
     void dumpConstants();
-    CString constantName(VirtualRegister index) const override;
+    CString constantName(int index) const override;
     CString formatConstant(Type, uint64_t) const;
 };
 
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIndex.cpp b/Source/JavaScriptCore/bytecode/BytecodeIndex.cpp
index a2c1f41..15d61ba 100644 (file)
@@ -25,7 +25,6 @@
 
 #include "config.h"
 #include "BytecodeIndex.h"
-
 #include <wtf/PrintStream.h>
 
 namespace JSC {
@@ -33,8 +32,6 @@ namespace JSC {
 void BytecodeIndex::dump(WTF::PrintStream& out) const
 {
     out.print("bc#", offset());
-    if (checkpoint())
-        out.print("cp#", checkpoint());
 }
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIndex.h b/Source/JavaScriptCore/bytecode/BytecodeIndex.h
index 10fba44..5295c29 100644 (file)
@@ -26,7 +26,6 @@
 #pragma once
 
 #include <wtf/HashTraits.h>
-#include <wtf/MathExtras.h>
 
 namespace WTF {
 class PrintStream;
@@ -38,33 +37,24 @@ class BytecodeIndex {
 public:
     BytecodeIndex() = default;
     BytecodeIndex(WTF::HashTableDeletedValueType)
-        : m_packedBits(deletedValue().asBits())
+        : m_offset(deletedValue().asBits())
     {
     }
+    explicit BytecodeIndex(uint32_t bytecodeOffset)
+        : m_offset(bytecodeOffset)
+    { }
 
-    explicit BytecodeIndex(uint32_t bytecodeOffset, uint8_t checkpoint = 0)
-        : m_packedBits(pack(bytecodeOffset, checkpoint))
-    {
-        ASSERT(*this);
-    }
-
-    static constexpr uint32_t numberOfCheckpoints = 4;
-    static_assert(hasOneBitSet(numberOfCheckpoints), "numberOfCheckpoints should be a power of 2");
-    static constexpr uint32_t checkpointMask = numberOfCheckpoints - 1;
-    static constexpr uint32_t checkpointShift = WTF::getMSBSetConstexpr(numberOfCheckpoints);
-
-    uint32_t offset() const { return m_packedBits >> checkpointShift; }
-    uint8_t checkpoint() const { return m_packedBits & checkpointMask; }
-    uint32_t asBits() const { return m_packedBits; }
+    uint32_t offset() const { return m_offset; }
+    uint32_t asBits() const { return m_offset; }
 
-    unsigned hash() const { return WTF::intHash(m_packedBits); }
+    unsigned hash() const { return WTF::intHash(m_offset); }
     static BytecodeIndex deletedValue() { return fromBits(invalidOffset - 1); }
     bool isHashTableDeletedValue() const { return *this == deletedValue(); }
 
     static BytecodeIndex fromBits(uint32_t bits);
 
     // Comparison operators.
-    explicit operator bool() const { return m_packedBits != invalidOffset && m_packedBits != deletedValue().offset(); }
+    explicit operator bool() const { return m_offset != invalidOffset && m_offset != deletedValue().offset(); }
     bool operator ==(const BytecodeIndex& other) const { return asBits() == other.asBits(); }
     bool operator !=(const BytecodeIndex& other) const { return !(*this == other); }
 
@@ -79,22 +69,13 @@ public:
 private:
     static constexpr uint32_t invalidOffset = std::numeric_limits<uint32_t>::max();
 
-    static uint32_t pack(uint32_t bytecodeIndex, uint8_t checkpoint);
-
-    uint32_t m_packedBits { invalidOffset };
+    uint32_t m_offset { invalidOffset };
 };
 
-inline uint32_t BytecodeIndex::pack(uint32_t bytecodeIndex, uint8_t checkpoint)
-{
-    ASSERT(checkpoint < numberOfCheckpoints);
-    ASSERT((bytecodeIndex << checkpointShift) >> checkpointShift == bytecodeIndex);
-    return bytecodeIndex << checkpointShift | checkpoint;
-}
-
 inline BytecodeIndex BytecodeIndex::fromBits(uint32_t bits)
 {
     BytecodeIndex result;
-    result.m_packedBits = bits;
+    result.m_offset = bits;
     return result;
 }
 
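
For context, the scheme deleted above multiplexed a checkpoint into the low bits of BytecodeIndex. Below is a standalone sketch of that packing, assuming the constants shown in the removed lines (numberOfCheckpoints = 4, hence checkpointShift = 2); it is illustrative only, not JSC code:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t numberOfCheckpoints = 4;                  // power of two, per the removed static_assert
    constexpr uint32_t checkpointMask = numberOfCheckpoints - 1; // low 2 bits hold the checkpoint
    constexpr uint32_t checkpointShift = 2;                      // log2(numberOfCheckpoints)

    uint32_t pack(uint32_t bytecodeOffset, uint8_t checkpoint)
    {
        assert(checkpoint < numberOfCheckpoints);
        // The offset must survive the shift unchanged, as the removed ASSERT required.
        assert((bytecodeOffset << checkpointShift) >> checkpointShift == bytecodeOffset);
        return bytecodeOffset << checkpointShift | checkpoint;
    }

    int main()
    {
        uint32_t bits = pack(17, 2);
        assert(bits >> checkpointShift == 17); // offset()
        assert((bits & checkpointMask) == 2);  // checkpoint()
        return 0;
    }

After the revert, BytecodeIndex is again a plain 32-bit offset, which is why BytecodeIndex::dump above no longer prints a "cp#" component.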
diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.rb b/Source/JavaScriptCore/bytecode/BytecodeList.rb
index 60d98cf..a9bd483 100644 (file)
@@ -59,7 +59,7 @@ types [
     :WatchpointSet,
 
     :ValueProfile,
-    :ValueProfileAndVirtualRegisterBuffer,
+    :ValueProfileAndOperandBuffer,
     :UnaryArithProfile,
     :BinaryArithProfile,
     :ArrayProfile,
@@ -796,13 +796,6 @@ op :call_varargs,
     metadata: {
         arrayProfile: ArrayProfile,
         profile: ValueProfile,
-    },
-    tmps: {
-        argCountIncludingThis: unsigned,
-    },
-    checkpoints: {
-        determiningArgCount: nil,
-        makeCall: nil,
     }
 
 op :tail_call_varargs,
@@ -817,13 +810,6 @@ op :tail_call_varargs,
     metadata: {
         arrayProfile: ArrayProfile,
         profile: ValueProfile,
-    },
-    tmps: {
-        argCountIncludingThis: unsigned
-    },
-    checkpoints: {
-        determiningArgCount: nil,
-        makeCall: nil,
     }
 
 op :tail_call_forward_arguments,
@@ -864,13 +850,6 @@ op :construct_varargs,
     metadata: {
         arrayProfile: ArrayProfile,
         profile: ValueProfile,
-    },
-    tmps: {
-        argCountIncludingThis: unsigned
-    },
-    checkpoints: {
-        determiningArgCount: nil,
-        makeCall: nil,
     }
 
 op :ret,
@@ -1018,7 +997,7 @@ op :catch,
         thrownValue: VirtualRegister,
     },
     metadata: {
-        buffer: ValueProfileAndVirtualRegisterBuffer.*,
+        buffer: ValueProfileAndOperandBuffer.*,
     }
 
 op :throw,
@@ -1264,8 +1243,6 @@ op :llint_native_call_trampoline
 op :llint_native_construct_trampoline
 op :llint_internal_function_call_trampoline
 op :llint_internal_function_construct_trampoline
-op :checkpoint_osr_exit_from_inlined_call_trampoline
-op :checkpoint_osr_exit_trampoline
 op :handleUncaughtException
 op :op_call_return_location
 op :op_construct_return_location
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
index 7975fef..918e1d2 100644 (file)
@@ -201,39 +201,4 @@ void BytecodeLivenessAnalysis::dumpResults(CodeBlock* codeBlock)
     }
 }
 
-template<typename EnumType1, typename EnumType2>
-constexpr bool enumValuesEqualAsIntegral(EnumType1 v1, EnumType2 v2)
-{
-    using IntType1 = typename std::underlying_type<EnumType1>::type;
-    using IntType2 = typename std::underlying_type<EnumType2>::type;
-    if constexpr (sizeof(IntType1) > sizeof(IntType2))
-        return static_cast<IntType1>(v1) == static_cast<IntType1>(v2);
-    else
-        return static_cast<IntType2>(v1) == static_cast<IntType2>(v2);
-}
-
-Bitmap<maxNumCheckpointTmps> tmpLivenessForCheckpoint(const CodeBlock& codeBlock, BytecodeIndex bytecodeIndex)
-{
-    Bitmap<maxNumCheckpointTmps> result;
-    uint8_t checkpoint = bytecodeIndex.checkpoint();
-
-    if (!checkpoint)
-        return result;
-
-    switch (codeBlock.instructions().at(bytecodeIndex)->opcodeID()) {
-    case op_call_varargs:
-    case op_tail_call_varargs:
-    case op_construct_varargs: {
-        static_assert(enumValuesEqualAsIntegral(OpCallVarargs::makeCall, OpTailCallVarargs::makeCall) && enumValuesEqualAsIntegral(OpCallVarargs::argCountIncludingThis, OpTailCallVarargs::argCountIncludingThis));
-        static_assert(enumValuesEqualAsIntegral(OpCallVarargs::makeCall, OpConstructVarargs::makeCall) && enumValuesEqualAsIntegral(OpCallVarargs::argCountIncludingThis, OpConstructVarargs::argCountIncludingThis));
-        if (checkpoint == OpCallVarargs::makeCall)
-            result.set(OpCallVarargs::argCountIncludingThis);
-        return result;
-    }
-    default:
-        break;
-    }
-    RELEASE_ASSERT_NOT_REACHED();
-}
-
 } // namespace JSC
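
The removed tmpLivenessForCheckpoint answered which checkpoint tmps were live when exiting mid-instruction: only the makeCall checkpoint of the varargs call ops kept argCountIncludingThis alive. A standalone sketch of that logic follows, with std::bitset standing in for WTF::Bitmap and assumed values for the generated OpCallVarargs constants (the real values come from BytecodeList.rb):

    #include <bitset>
    #include <cassert>

    constexpr unsigned maxNumCheckpointTmps = 4;  // as in the removed Operands.h below
    // Assumed stand-ins for the generated constants:
    constexpr unsigned makeCall = 1;              // OpCallVarargs::makeCall
    constexpr unsigned argCountIncludingThis = 0; // OpCallVarargs::argCountIncludingThis

    std::bitset<maxNumCheckpointTmps> tmpLivenessForCheckpoint(unsigned checkpoint)
    {
        std::bitset<maxNumCheckpointTmps> live;
        if (!checkpoint)
            return live;                     // checkpoint 0: no tmps live yet
        if (checkpoint == makeCall)
            live.set(argCountIncludingThis); // the computed arg count survives into the call
        return live;
    }

    int main()
    {
        assert(tmpLivenessForCheckpoint(0).none());
        assert(tmpLivenessForCheckpoint(makeCall).test(argCountIncludingThis));
        return 0;
    }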
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
index 3e5edc0..7379b3c 100644 (file)
@@ -28,7 +28,6 @@
 #include "BytecodeBasicBlock.h"
 #include "BytecodeGraph.h"
 #include "CodeBlock.h"
-#include <wtf/Bitmap.h>
 #include <wtf/FastBitVector.h>
 
 namespace JSC {
@@ -98,8 +97,6 @@ private:
     BytecodeGraph m_graph;
 };
 
-Bitmap<maxNumCheckpointTmps> tmpLivenessForCheckpoint(const CodeBlock&, BytecodeIndex);
-
 inline bool operandIsAlwaysLive(int operand);
 inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand);
 inline bool operandIsLive(const FastBitVector& out, int operand);
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
index de6ca8d..3d12e19 100644 (file)
 
 namespace JSC {
 
-inline bool virtualRegisterIsAlwaysLive(VirtualRegister reg)
+inline bool operandIsAlwaysLive(int operand)
 {
-    return !reg.isLocal();
+    return !VirtualRegister(operand).isLocal();
 }
 
-inline bool virtualRegisterThatIsNotAlwaysLiveIsLive(const FastBitVector& out, VirtualRegister reg)
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand)
 {
-    unsigned local = reg.toLocal();
+    unsigned local = VirtualRegister(operand).toLocal();
     if (local >= out.numBits())
         return false;
     return out[local];
 }
 
-inline bool virtualRegisterIsLive(const FastBitVector& out, VirtualRegister operand)
+inline bool operandIsLive(const FastBitVector& out, int operand)
 {
-    return virtualRegisterIsAlwaysLive(operand) || virtualRegisterThatIsNotAlwaysLiveIsLive(out, operand);
+    return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand);
 }
 
 inline bool isValidRegisterForLiveness(VirtualRegister operand)
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 78eb4f7..3f32fe8 100644 (file)
@@ -38,7 +38,6 @@
 #include "BytecodeStructs.h"
 #include "BytecodeUseDef.h"
 #include "CallLinkStatus.h"
-#include "CheckpointOSRExitSideState.h"
 #include "CodeBlockInlines.h"
 #include "CodeBlockSet.h"
 #include "DFGCapabilities.h"
@@ -410,7 +409,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
             ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
             clonedSymbolTable->prepareForTypeProfiling(locker);
         }
-        replaceConstant(VirtualRegister(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset()), clonedSymbolTable);
+        replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
     }
 
     bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
@@ -644,7 +643,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
             if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
                 // Only do watching if the property we're putting to is not anonymous.
                 if (bytecode.m_var != UINT_MAX) {
-                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable()));
+                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
                     const Identifier& ident = identifier(bytecode.m_var);
                     ConcurrentJSLocker locker(symbolTable->m_lock);
                     auto iter = symbolTable->find(locker, ident.impl());
@@ -712,7 +711,8 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink
                 break;
             }
             case ProfileTypeBytecodeLocallyResolved: {
-                SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable()));
+                int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
+                SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
                 const Identifier& ident = identifier(bytecode.m_identifier);
                 ConcurrentJSLocker locker(symbolTable->m_lock);
                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
@@ -1088,15 +1088,6 @@ static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transitio
     
     return true;
 }
-
-BytecodeIndex CodeBlock::bytecodeIndexForExit(BytecodeIndex exitIndex) const
-{
-    if (exitIndex.checkpoint()) {
-        const auto& instruction = instructions().at(exitIndex);
-        exitIndex = instruction.next().index();
-    }
-    return exitIndex;
-}
 #endif // ENABLE(DFG_JIT)
 
 void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
@@ -1825,10 +1816,10 @@ void CodeBlock::ensureCatchLivenessIsComputedForBytecodeIndexSlow(const OpCatch&
     for (int i = 0; i < numParameters(); ++i)
         liveOperands.append(virtualRegisterForArgument(i));
 
-    auto profiles = makeUnique<ValueProfileAndVirtualRegisterBuffer>(liveOperands.size());
+    auto profiles = makeUnique<ValueProfileAndOperandBuffer>(liveOperands.size());
     RELEASE_ASSERT(profiles->m_size == liveOperands.size());
     for (unsigned i = 0; i < profiles->m_size; ++i)
-        profiles->m_buffer.get()[i].m_operand = liveOperands[i];
+        profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
 
     createRareDataIfNecessary();
 
@@ -2727,7 +2718,7 @@ void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numbe
 
     if (auto* rareData = m_rareData.get()) {
         for (auto& profileBucket : rareData->m_catchProfiles) {
-            profileBucket->forEach([&] (ValueProfileAndVirtualRegister& profile) {
+            profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
                 profile.computeUpdatedPrediction(locker);
             });
         }
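
Among the deletions above is CodeBlock::bytecodeIndexForExit, which mapped an exit index carrying a checkpoint to the start of the following bytecode, presumably because the remaining checkpoint work completes off to the side (see the deleted checkpoint_osr_exit trampolines in BytecodeList.rb above). A standalone sketch of that mapping, with a plain struct standing in for BytecodeIndex and the instruction length passed in explicitly (JSC read it from the instruction stream):

    #include <cassert>
    #include <cstdint>

    struct Index { uint32_t offset; uint8_t checkpoint; };

    Index bytecodeIndexForExit(Index exitIndex, uint32_t instructionLength)
    {
        if (exitIndex.checkpoint)
            return { exitIndex.offset + instructionLength, 0 }; // instruction.next().index()
        return exitIndex; // no checkpoint: exit lands on the instruction itself
    }

    int main()
    {
        Index resumed = bytecodeIndexForExit({ 40, 1 }, 6);
        assert(resumed.offset == 46 && resumed.checkpoint == 0);
        return 0;
    }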
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 68fd01f..0f9bbea 100644 (file)
@@ -163,7 +163,6 @@ public:
     int numCalleeLocals() const { return m_numCalleeLocals; }
 
     int numVars() const { return m_numVars; }
-    int numTmps() const { return m_unlinkedCode->hasCheckpoints() * maxNumCheckpointTmps; }
 
     int* addressOfNumParameters() { return &m_numParameters; }
     static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
@@ -232,20 +231,20 @@ public:
     bool hasInstalledVMTrapBreakpoints() const;
     bool installVMTrapBreakpoints();
 
-    inline bool isKnownNotImmediate(VirtualRegister reg)
+    inline bool isKnownNotImmediate(int index)
     {
-        if (reg == thisRegister() && !isStrictMode())
+        if (index == thisRegister().offset() && !isStrictMode())
             return true;
 
-        if (reg.isConstant())
-            return getConstant(reg).isCell();
+        if (isConstantRegisterIndex(index))
+            return getConstant(index).isCell();
 
         return false;
     }
 
-    ALWAYS_INLINE bool isTemporaryRegister(VirtualRegister reg)
+    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
     {
-        return reg.offset() >= m_numVars;
+        return index >= m_numVars;
     }
 
     HandlerInfo* handlerForBytecodeIndex(BytecodeIndex, RequiredHandler = RequiredHandler::AnyHandler);
@@ -584,9 +583,10 @@ public:
     }
 
     const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
-    WriteBarrier<Unknown>& constantRegister(VirtualRegister reg) { return m_constantRegisters[reg.toConstantIndex()]; }
-    ALWAYS_INLINE JSValue getConstant(VirtualRegister reg) const { return m_constantRegisters[reg.toConstantIndex()].get(); }
-    ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(VirtualRegister reg) const { return m_constantsSourceCodeRepresentation[reg.toConstantIndex()]; }
+    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
+    static ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
+    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+    ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }
 
     FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
     int numberOfFunctionDecls() { return m_functionDecls.size(); }
@@ -776,7 +776,6 @@ public:
 
     void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
     
-    BytecodeIndex bytecodeIndexForExit(BytecodeIndex) const;
     uint32_t osrExitCounter() const { return m_osrExitCounter; }
 
     void countOSRExit() { m_osrExitCounter++; }
@@ -877,7 +876,7 @@ public:
         Vector<SimpleJumpTable> m_switchJumpTables;
         Vector<StringJumpTable> m_stringSwitchJumpTables;
 
-        Vector<std::unique_ptr<ValueProfileAndVirtualRegisterBuffer>> m_catchProfiles;
+        Vector<std::unique_ptr<ValueProfileAndOperandBuffer>> m_catchProfiles;
 
         DirectEvalCodeCache m_directEvalCodeCache;
     };
@@ -944,10 +943,10 @@ private:
 
     void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable);
 
-    void replaceConstant(VirtualRegister reg, JSValue value)
+    void replaceConstant(int index, JSValue value)
     {
-        ASSERT(reg.isConstant() && static_cast<size_t>(reg.toConstantIndex()) < m_constantRegisters.size());
-        m_constantRegisters[reg.toConstantIndex()].set(*m_vm, this, value);
+        ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
+        m_constantRegisters[index - FirstConstantRegisterIndex].set(*m_vm, this, value);
     }
 
     bool shouldVisitStrongly(const ConcurrentJSLocker&);
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.h b/Source/JavaScriptCore/bytecode/CodeOrigin.h
index f9829a2..2106813 100644 (file)
@@ -160,9 +160,7 @@ public:
     unsigned approximateHash(InlineCallFrame* terminal = nullptr) const;
 
     template <typename Function>
-    void walkUpInlineStack(const Function&) const;
-
-    inline bool inlineStackContainsActiveCheckpoint() const;
+    void walkUpInlineStack(const Function&);
     
     // Get the inline stack. This is slow, and is intended for debugging only.
     Vector<CodeOrigin> inlineStack() const;
diff --git a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
index 47c657c..e4a575b 100644 (file)
 #pragma once
 
 #include "BytecodeLivenessAnalysis.h"
-#include "Operands.h"
 #include <wtf/FastBitVector.h>
 
 namespace JSC {
 
 class BytecodeLivenessAnalysis;
-class CodeBlock;
 
-// Note: Full bytecode liveness does not track any information about the liveness of temps.
-// If you want tmp liveness for a checkpoint ask tmpLivenessForCheckpoint.
 class FullBytecodeLiveness {
     WTF_MAKE_FAST_ALLOCATED;
 public:
@@ -51,15 +47,15 @@ public:
         RELEASE_ASSERT_NOT_REACHED();
     }
     
-    bool virtualRegisterIsLive(VirtualRegister reg, BytecodeIndex bytecodeIndex, LivenessCalculationPoint point) const
+    bool operandIsLive(int operand, BytecodeIndex bytecodeIndex, LivenessCalculationPoint point) const
     {
-        return virtualRegisterIsAlwaysLive(reg) || virtualRegisterThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex, point), reg);
+        return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex, point), operand);
     }
     
 private:
     friend class BytecodeLivenessAnalysis;
     
-    // FIXME: Use FastBitVector's view mechanism to make them compact.
+    // FIXME: Use FastBitVector's view mechansim to make them compact.
     // https://bugs.webkit.org/show_bug.cgi?id=204427<Paste>
     Vector<FastBitVector, 0, UnsafeVectorOverflow> m_beforeUseVector;
     Vector<FastBitVector, 0, UnsafeVectorOverflow> m_afterUseVector;
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.h b/Source/JavaScriptCore/bytecode/InlineCallFrame.h
index e29f384..97508bb 100644 (file)
@@ -179,8 +179,7 @@ struct InlineCallFrame {
     WriteBarrier<CodeBlock> baselineCodeBlock;
     CodeOrigin directCaller;
 
-    unsigned argumentCountIncludingThis : 22; // Do not include fixups.
-    unsigned tmpOffset : 10;
+    unsigned argumentCountIncludingThis { 0 }; // Do not include fixups.
     signed stackOffset : 28;
     unsigned kind : 3; // real type is Kind
     bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually.
@@ -192,9 +191,7 @@ struct InlineCallFrame {
     // InlineCallFrame's fields. This constructor is here just to reduce confusion if
     // we forgot to initialize explicitly.
     InlineCallFrame()
-        : argumentCountIncludingThis(0)
-        , tmpOffset(0)
-        , stackOffset(0)
+        : stackOffset(0)
         , kind(Call)
         , isClosureCall(false)
     {
@@ -222,12 +219,6 @@ struct InlineCallFrame {
         RELEASE_ASSERT(static_cast<signed>(stackOffset) == offset);
     }
 
-    void setTmpOffset(unsigned offset)
-    {
-        tmpOffset = offset;
-        RELEASE_ASSERT(static_cast<unsigned>(tmpOffset) == offset);
-    }
-
     ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); }
     ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); }
 
@@ -256,9 +247,9 @@ inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigi
     return baselineCodeBlock;
 }
 
-// These function is defined here and not in CodeOrigin because it needs access to the directCaller field in InlineCallFrame
+// This function is defined here and not in CodeOrigin because it needs access to the directCaller field in InlineCallFrame
 template <typename Function>
-inline void CodeOrigin::walkUpInlineStack(const Function& function) const
+inline void CodeOrigin::walkUpInlineStack(const Function& function)
 {
     CodeOrigin codeOrigin = *this;
     while (true) {
@@ -270,38 +261,11 @@ inline void CodeOrigin::walkUpInlineStack(const Function& function) const
     }
 }
 
-inline bool CodeOrigin::inlineStackContainsActiveCheckpoint() const
-{
-    bool result = false;
-    walkUpInlineStack([&] (CodeOrigin origin) {
-        if (origin.bytecodeIndex().checkpoint())
-            result = true;
-    });
-    return result;
-}
-
-ALWAYS_INLINE Operand remapOperand(InlineCallFrame* inlineCallFrame, Operand operand)
-{
-    if (inlineCallFrame)
-        return operand.isTmp() ? Operand::tmp(operand.value() + inlineCallFrame->tmpOffset) : operand.virtualRegister() + inlineCallFrame->stackOffset;
-    return operand;
-}
-
-ALWAYS_INLINE Operand remapOperand(InlineCallFrame* inlineCallFrame, VirtualRegister reg)
-{
-    return remapOperand(inlineCallFrame, Operand(reg));
-}
-
-ALWAYS_INLINE Operand unmapOperand(InlineCallFrame* inlineCallFrame, Operand operand)
+ALWAYS_INLINE VirtualRegister remapOperand(InlineCallFrame* inlineCallFrame, VirtualRegister reg)
 {
     if (inlineCallFrame)
-        return operand.isTmp() ? Operand::tmp(operand.value() - inlineCallFrame->tmpOffset) : Operand(operand.virtualRegister() - inlineCallFrame->stackOffset);
-    return operand;
-}
-
-ALWAYS_INLINE Operand unmapOperand(InlineCallFrame* inlineCallFrame, VirtualRegister reg)
-{
-    return unmapOperand(inlineCallFrame, Operand(reg));
+        return VirtualRegister(reg.offset() + inlineCallFrame->stackOffset);
+    return reg;
 }
 
 } // namespace JSC
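
The removed remapOperand/unmapOperand overloads above translated an Operand between an inlinee's frame and the machine frame: tmps shift by the inline frame's tmpOffset, virtual registers by its stackOffset. A standalone sketch with minimal stand-in types (not JSC code):

    #include <cassert>

    struct InlineFrame { int stackOffset; int tmpOffset; };
    struct Op { bool isTmp; int value; }; // stand-in for JSC's Operand

    Op remapOperand(const InlineFrame* frame, Op op)
    {
        if (!frame)
            return op; // no inlining: operand unchanged
        op.value += op.isTmp ? frame->tmpOffset : frame->stackOffset;
        return op;
    }

    int main()
    {
        InlineFrame frame { /* stackOffset */ -10, /* tmpOffset */ 4 };
        assert(remapOperand(&frame, { true, 1 }).value == 5);   // tmp path
        assert(remapOperand(&frame, { false, 3 }).value == -7); // virtual register path
        assert(remapOperand(nullptr, { false, 3 }).value == 3);
        return 0;
    }

After the revert, remapOperand deals only in VirtualRegisters and shifts by stackOffset, as the restored version shows.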
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
index 81f5a0c..bfb94b3 100644 (file)
@@ -26,7 +26,6 @@
 #pragma once
 
 #include "ConcurrentJSLock.h"
-#include "Operands.h"
 #include "ValueProfile.h"
 #include "VirtualRegister.h"
 #include <wtf/HashMap.h>
@@ -50,7 +49,7 @@ public:
     {
     }
     
-    LazyOperandValueProfileKey(BytecodeIndex bytecodeIndex, Operand operand)
+    LazyOperandValueProfileKey(BytecodeIndex bytecodeIndex, VirtualRegister operand)
         : m_bytecodeIndex(bytecodeIndex)
         , m_operand(operand)
     {
@@ -70,7 +69,7 @@ public:
     
     unsigned hash() const
     {
-        return m_bytecodeIndex.hash() + m_operand.value() + static_cast<unsigned>(m_operand.kind());
+        return m_bytecodeIndex.hash() + m_operand.offset();
     }
     
     BytecodeIndex bytecodeIndex() const
@@ -79,7 +78,7 @@ public:
         return m_bytecodeIndex;
     }
 
-    Operand operand() const
+    VirtualRegister operand() const
     {
         ASSERT(!!*this);
         return m_operand;
@@ -91,7 +90,7 @@ public:
     }
 private: 
     BytecodeIndex m_bytecodeIndex;
-    Operand m_operand;
+    VirtualRegister m_operand;
 };
 
 struct LazyOperandValueProfileKeyHash {
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
index 701c330..2b69f36 100644 (file)
@@ -42,7 +42,7 @@ MethodOfGettingAValueProfile MethodOfGettingAValueProfile::fromLazyOperand(
     result.m_kind = LazyOperand;
     result.u.lazyOperand.codeBlock = codeBlock;
     result.u.lazyOperand.bytecodeOffset = key.bytecodeIndex();
-    result.u.lazyOperand.operand = key.operand();
+    result.u.lazyOperand.operand = key.operand().offset();
     return result;
 }
 
@@ -57,7 +57,7 @@ void MethodOfGettingAValueProfile::emitReportValue(CCallHelpers& jit, JSValueReg
         return;
         
     case LazyOperand: {
-        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, u.lazyOperand.operand);
+        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));
         
         ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
         LazyOperandValueProfile* profile =
@@ -91,7 +91,7 @@ void MethodOfGettingAValueProfile::reportValue(JSValue value)
         return;
 
     case LazyOperand: {
-        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, u.lazyOperand.operand);
+        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));
 
         ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
         LazyOperandValueProfile* profile =
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
index e8b65ea..57b4b17 100644 (file)
@@ -107,7 +107,7 @@ private:
         struct {
             CodeBlock* codeBlock;
             BytecodeIndex bytecodeOffset;
-            Operand operand;
+            int operand;
         } lazyOperand;
     } u;
 };
diff --git a/Source/JavaScriptCore/bytecode/Operands.h b/Source/JavaScriptCore/bytecode/Operands.h
index d47a68f..f29ff9f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -35,227 +35,111 @@ namespace JSC {
 
 template<typename T> struct OperandValueTraits;
 
-constexpr unsigned maxNumCheckpointTmps = 4;
-
-// A OperandKind::Tmp is one that exists for exiting to a checkpoint but does not exist between bytecodes.
-enum class OperandKind { Argument, Local, Tmp };
-
-class Operand {
-public:
-    Operand() = default;
-    Operand(const Operand&) = default;
-
-    Operand(VirtualRegister operand)
-        : m_kind(operand.isLocal() ? OperandKind::Local : OperandKind::Argument)
-        , m_operand(operand.offset())
-    { }
-
-    Operand(OperandKind kind, int operand)
-        : m_kind(kind)
-        , m_operand(operand)
-    { 
-        ASSERT(kind == OperandKind::Tmp || VirtualRegister(operand).isLocal() == (kind == OperandKind::Local));
-    }
-    static Operand tmp(uint32_t index) { return Operand(OperandKind::Tmp, index); }
-
-    OperandKind kind() const { return m_kind; }
-    int value() const { return m_operand; }
-    VirtualRegister virtualRegister() const
-    {
-        ASSERT(m_kind != OperandKind::Tmp);
-        return VirtualRegister(m_operand);
-    }
-    uint64_t asBits() const { return bitwise_cast<uint64_t>(*this); }
-    static Operand fromBits(uint64_t value);
-
-    bool isTmp() const { return kind() == OperandKind::Tmp; }
-    bool isArgument() const { return kind() == OperandKind::Argument; }
-    bool isLocal() const { return kind() == OperandKind::Local && virtualRegister().isLocal(); }
-    bool isHeader() const { return kind() != OperandKind::Tmp && virtualRegister().isHeader(); }
-    bool isConstant() const { return kind() != OperandKind::Tmp && virtualRegister().isConstant(); }
-
-    int toArgument() const { ASSERT(isArgument()); return virtualRegister().toArgument(); }
-    int toLocal() const { ASSERT(isLocal()); return virtualRegister().toLocal(); }
-
-    inline bool isValid() const;
-
-    inline bool operator==(const Operand&) const;
-
-    void dump(PrintStream&) const;
-
-private:
-    OperandKind m_kind { OperandKind::Argument };
-    int m_operand { VirtualRegister::invalidVirtualRegister };
-};
-
-ALWAYS_INLINE bool Operand::operator==(const Operand& other) const
-{
-    if (kind() != other.kind())
-        return false;
-    if (isTmp())
-        return value() == other.value();
-    return virtualRegister() == other.virtualRegister();
-}
-
-inline bool Operand::isValid() const
-{
-    if (isTmp())
-        return value() >= 0;
-    return virtualRegister().isValid();
-}
-
-inline Operand Operand::fromBits(uint64_t value)
-{
-    Operand result = bitwise_cast<Operand>(value);
-    ASSERT(result.isValid());
-    return result;
-}
-
-static_assert(sizeof(Operand) == sizeof(uint64_t), "Operand::asBits() relies on this.");
+enum OperandKind { ArgumentOperand, LocalOperand };
 
 enum OperandsLikeTag { OperandsLike };
 
 template<typename T>
 class Operands {
 public:
-    using Storage = std::conditional_t<std::is_same_v<T, bool>, FastBitVector, Vector<T, 0, UnsafeVectorOverflow>>;
-    using RefType = std::conditional_t<std::is_same_v<T, bool>, FastBitReference, T&>;
-    using ConstRefType = std::conditional_t<std::is_same_v<T, bool>, bool, const T&>;
-
-    Operands() = default;
-
-    explicit Operands(size_t numArguments, size_t numLocals, size_t numTmps)
+    Operands()
+        : m_numArguments(0) { }
+    
+    explicit Operands(size_t numArguments, size_t numLocals)
         : m_numArguments(numArguments)
-        , m_numLocals(numLocals)
     {
-        size_t size = numArguments + numLocals + numTmps;
-        m_values.grow(size);
-        if (!WTF::VectorTraits<T>::needsInitialization)
-            m_values.fill(T());
+        if (WTF::VectorTraits<T>::needsInitialization) {
+            m_values.resize(numArguments + numLocals);
+        } else {
+            m_values.fill(T(), numArguments + numLocals);
+        }
     }
 
-    explicit Operands(size_t numArguments, size_t numLocals, size_t numTmps, const T& initialValue)
+    explicit Operands(size_t numArguments, size_t numLocals, const T& initialValue)
         : m_numArguments(numArguments)
-        , m_numLocals(numLocals)
     {
-        m_values.grow(numArguments + numLocals + numTmps);
-        m_values.fill(initialValue);
+        m_values.fill(initialValue, numArguments + numLocals);
     }
     
     template<typename U>
-    explicit Operands(OperandsLikeTag, const Operands<U>& other, const T& initialValue = T())
+    explicit Operands(OperandsLikeTag, const Operands<U>& other)
         : m_numArguments(other.numberOfArguments())
-        , m_numLocals(other.numberOfLocals())
     {
-        m_values.grow(other.size());
-        m_values.fill(initialValue);
+        m_values.fill(T(), other.numberOfArguments() + other.numberOfLocals());
     }
-
+    
     size_t numberOfArguments() const { return m_numArguments; }
-    size_t numberOfLocals() const { return m_numLocals; }
-    size_t numberOfTmps() const { return m_values.size() - numberOfArguments() - numberOfLocals(); }
-
-    size_t tmpIndex(size_t idx) const
-    {
-        ASSERT(idx < numberOfTmps());
-        return idx + numberOfArguments() + numberOfLocals();
-    }
+    size_t numberOfLocals() const { return m_values.size() - m_numArguments; }
+    
     size_t argumentIndex(size_t idx) const
     {
-        ASSERT(idx < numberOfArguments());
+        ASSERT(idx < m_numArguments);
         return idx;
     }
     
     size_t localIndex(size_t idx) const
     {
-        ASSERT(idx < numberOfLocals());
-        return numberOfArguments() + idx;
+        return m_numArguments + idx;
     }
-
-    RefType tmp(size_t idx) { return m_values[tmpIndex(idx)]; }
-    ConstRefType tmp(size_t idx) const { return m_values[tmpIndex(idx)]; }
     
-    RefType argument(size_t idx) { return m_values[argumentIndex(idx)]; }
-    ConstRefType argument(size_t idx) const { return m_values[argumentIndex(idx)]; }
+    T& argument(size_t idx)
+    {
+        return m_values[argumentIndex(idx)];
+    }
+    const T& argument(size_t idx) const
+    {
+        return m_values[argumentIndex(idx)];
+    }
     
-    RefType local(size_t idx) { return m_values[localIndex(idx)]; }
-    ConstRefType local(size_t idx) const { return m_values[localIndex(idx)]; }
+    T& local(size_t idx) { return m_values[localIndex(idx)]; }
+    const T& local(size_t idx) const { return m_values[localIndex(idx)]; }
     
     template<OperandKind operandKind>
     size_t sizeFor() const
     {
-        switch (operandKind) {
-        case OperandKind::Tmp:
-            return numberOfTmps();
-        case OperandKind::Argument:
+        if (operandKind == ArgumentOperand)
             return numberOfArguments();
-        case OperandKind::Local:
-            return numberOfLocals();
-        }
-        RELEASE_ASSERT_NOT_REACHED();
-        return 0;
+        return numberOfLocals();
     }
     template<OperandKind operandKind>
-    RefType atFor(size_t idx)
+    T& atFor(size_t idx)
     {
-        switch (operandKind) {
-        case OperandKind::Tmp:
-            return tmp(idx);
-        case OperandKind::Argument:
+        if (operandKind == ArgumentOperand)
             return argument(idx);
-        case OperandKind::Local:
-            return local(idx);
-        }
-        RELEASE_ASSERT_NOT_REACHED();
-        return tmp(0);
+        return local(idx);
     }
     template<OperandKind operandKind>
-    ConstRefType atFor(size_t idx) const
+    const T& atFor(size_t idx) const
     {
-        switch (operandKind) {
-        case OperandKind::Tmp:
-            return tmp(idx);
-        case OperandKind::Argument:
+        if (operandKind == ArgumentOperand)
             return argument(idx);
-        case OperandKind::Local:
-            return local(idx);
-        }
-        RELEASE_ASSERT_NOT_REACHED();
-        return tmp(0);
+        return local(idx);
     }
-
-    void ensureLocals(size_t size, const T& ensuredValue = T())
+    
+    void ensureLocals(size_t size)
     {
-        if (size <= numberOfLocals())
+        size_t oldSize = m_values.size();
+        size_t newSize = m_numArguments + size;
+        if (newSize <= oldSize)
             return;
 
-        size_t newSize = numberOfArguments() + numberOfTmps() + size;
-        size_t oldNumLocals = numberOfLocals();
-        size_t oldNumTmps = numberOfTmps();
         m_values.grow(newSize);
-        for (size_t i = 0; i < oldNumTmps; ++i)
-            m_values[newSize - 1 - i] = m_values[tmpIndex(oldNumTmps - 1 - i)];
-
-        m_numLocals = size;
-        if (ensuredValue != T() || !WTF::VectorTraits<T>::needsInitialization) {
-            for (size_t i = 0; i < size - oldNumLocals; ++i)
-                m_values[localIndex(oldNumLocals + i)] = ensuredValue;
+        if (!WTF::VectorTraits<T>::needsInitialization) {
+            for (size_t i = oldSize; i < m_values.size(); ++i)
+                m_values[i] = T();
         }
     }
 
-    void ensureTmps(size_t size, const T& ensuredValue = T())
+    void ensureLocals(size_t size, const T& ensuredValue)
     {
-        if (size <= numberOfTmps())
+        size_t oldSize = m_values.size();
+        size_t newSize = m_numArguments + size;
+        if (newSize <= oldSize)
             return;
 
-        size_t oldSize = m_values.size();
-        size_t newSize = numberOfArguments() + numberOfLocals() + size;
         m_values.grow(newSize);
-
-        if (ensuredValue != T() || !WTF::VectorTraits<T>::needsInitialization) {
-            for (size_t i = oldSize; i < newSize; ++i)
-                m_values[i] = ensuredValue;
-        }
+        for (size_t i = oldSize; i < m_values.size(); ++i)
+            m_values[i] = ensuredValue;
     }
     
     void setLocal(size_t idx, const T& value)
@@ -280,76 +164,84 @@ public:
         ASSERT(idx >= numberOfLocals() || local(idx) == T());
         setLocal(idx, value);
     }
-
-    RefType getForOperandIndex(size_t index) { return m_values[index]; }
-    ConstRefType getForOperandIndex(size_t index) const { return const_cast<Operands*>(this)->getForOperandIndex(index); }
-
-    size_t operandIndex(VirtualRegister operand) const
+    
+    size_t operandIndex(int operand) const
     {
-        if (operand.isArgument())
-            return argumentIndex(operand.toArgument());
-        return localIndex(operand.toLocal());
+        if (operandIsArgument(operand))
+            return argumentIndex(VirtualRegister(operand).toArgument());
+        return localIndex(VirtualRegister(operand).toLocal());
     }
     
-    size_t operandIndex(Operand op) const
+    size_t operandIndex(VirtualRegister virtualRegister) const
     {
-        if (!op.isTmp())
-            return operandIndex(op.virtualRegister());
-        return tmpIndex(op.value());
+        return operandIndex(virtualRegister.offset());
     }
     
-    RefType operand(VirtualRegister operand)
+    T& operand(int operand)
     {
-        if (operand.isArgument())
-            return argument(operand.toArgument());
-        return local(operand.toLocal());
+        if (operandIsArgument(operand))
+            return argument(VirtualRegister(operand).toArgument());
+        return local(VirtualRegister(operand).toLocal());
     }
 
-    RefType operand(Operand op)
+    T& operand(VirtualRegister virtualRegister)
     {
-        if (!op.isTmp())
-            return operand(op.virtualRegister());
-        return tmp(op.value());
+        return operand(virtualRegister.offset());
     }
 
-    ConstRefType operand(VirtualRegister operand) const { return const_cast<Operands*>(this)->operand(operand); }
-    ConstRefType operand(Operand operand) const { return const_cast<Operands*>(this)->operand(operand); }
+    const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
+    const T& operand(VirtualRegister operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
     
-    bool hasOperand(VirtualRegister operand) const
+    bool hasOperand(int operand) const
     {
-        if (operand.isArgument())
+        if (operandIsArgument(operand))
             return true;
-        return static_cast<size_t>(operand.toLocal()) < numberOfLocals();
+        return static_cast<size_t>(VirtualRegister(operand).toLocal()) < numberOfLocals();
     }
-    bool hasOperand(Operand op) const
+    bool hasOperand(VirtualRegister reg) const
     {
-        if (op.isTmp()) {
-            ASSERT(op.value() >= 0);
-            return static_cast<size_t>(op.value()) < numberOfTmps();
-        }
-        return hasOperand(op.virtualRegister());
+        return hasOperand(reg.offset());
     }
     
-    void setOperand(Operand operand, const T& value)
+    void setOperand(int operand, const T& value)
     {
         this->operand(operand) = value;
     }
+    
+    void setOperand(VirtualRegister virtualRegister, const T& value)
+    {
+        setOperand(virtualRegister.offset(), value);
+    }
 
     size_t size() const { return m_values.size(); }
-    ConstRefType at(size_t index) const { return m_values[index]; }
-    RefType at(size_t index) { return m_values[index]; }
-    ConstRefType operator[](size_t index) const { return at(index); }
-    RefType operator[](size_t index) { return at(index); }
-
-    Operand operandForIndex(size_t index) const
+    const T& at(size_t index) const { return m_values[index]; }
+    T& at(size_t index) { return m_values[index]; }
+    const T& operator[](size_t index) const { return at(index); }
+    T& operator[](size_t index) { return at(index); }
+
+    bool isArgument(size_t index) const { return index < m_numArguments; }
+    bool isLocal(size_t index) const { return !isArgument(index); }
+    int operandForIndex(size_t index) const
     {
         if (index < numberOfArguments())
-            return virtualRegisterForArgument(index);
-        else if (index < numberOfLocals() + numberOfArguments())
-            return virtualRegisterForLocal(index - numberOfArguments());
-        return Operand::tmp(index - (numberOfLocals() + numberOfArguments()));
+            return virtualRegisterForArgument(index).offset();
+        return virtualRegisterForLocal(index - numberOfArguments()).offset();
     }
-
+    VirtualRegister virtualRegisterForIndex(size_t index) const
+    {
+        return VirtualRegister(operandForIndex(index));
+    }
+    
+    void setOperandFirstTime(int operand, const T& value)
+    {
+        if (operandIsArgument(operand)) {
+            setArgumentFirstTime(VirtualRegister(operand).toArgument(), value);
+            return;
+        }
+        
+        setLocalFirstTime(VirtualRegister(operand).toLocal(), value);
+    }
+    
     void fill(T value)
     {
         for (size_t i = 0; i < m_values.size(); ++i)
@@ -365,7 +257,6 @@ public:
     {
         ASSERT(numberOfArguments() == other.numberOfArguments());
         ASSERT(numberOfLocals() == other.numberOfLocals());
-        ASSERT(numberOfTmps() == other.numberOfTmps());
         
         return m_values == other.m_values;
     }
@@ -374,10 +265,9 @@ public:
     void dump(PrintStream& out) const;
     
 private:
-    // The first m_numArguments of m_values are arguments, the next m_numLocals are locals, and the rest are tmps.
-    Storage m_values;
-    unsigned m_numArguments { 0 };
-    unsigned m_numLocals { 0 };
+    // The first m_numArguments of m_values are arguments, the rest are locals.
+    Vector<T, 0, UnsafeVectorOverflow> m_values;
+    unsigned m_numArguments;
 };
 
 } // namespace JSC
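
A layout sketch of the reverted Operands<T>, under the stated assumptions that std::vector stands in for WTF::Vector and T is default-constructible: arguments occupy the front of m_values, locals occupy the rest, and the checkpoint "tmp" tail no longer exists.

    #include <cstddef>
    #include <vector>

    // Hypothetical sketch, not the real class: it omits VirtualRegister
    // translation, growth, and the bool specialization.
    template<typename T>
    class OperandsSketch {
    public:
        OperandsSketch(std::size_t numArguments, std::size_t numLocals)
            : m_numArguments(numArguments)
            , m_values(numArguments + numLocals)
        { }

        T& argument(std::size_t idx) { return m_values[idx]; }
        T& local(std::size_t idx) { return m_values[m_numArguments + idx]; }

    private:
        std::size_t m_numArguments;
        std::vector<T> m_values;
    };
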
index 0b1cc1f..65fedda 100644
 
 namespace JSC {
 
-inline void Operand::dump(PrintStream& out) const
-{
-    if (isTmp())
-        out.print("tmp", value());
-    else
-        out.print(virtualRegister());
-}
-
 template<typename T>
 void Operands<T>::dumpInContext(PrintStream& out, DumpContext* context) const
 {
@@ -52,11 +44,6 @@ void Operands<T>::dumpInContext(PrintStream& out, DumpContext* context) const
             continue;
         out.print(comma, "loc", localIndex, ":", inContext(local(localIndex), context));
     }
-    for (size_t tmpIndex = 0; tmpIndex < numberOfTmps(); ++tmpIndex) {
-        if (!tmp(tmpIndex))
-            continue;
-        out.print(comma, "tmp", tmpIndex, ":", inContext(tmp(tmpIndex), context));
-    }
 }
 
 template<typename T>
@@ -73,11 +60,6 @@ void Operands<T>::dump(PrintStream& out) const
             continue;
         out.print(comma, "loc", localIndex, ":", local(localIndex));
     }
-    for (size_t tmpIndex = 0; tmpIndex < numberOfTmps(); ++tmpIndex) {
-        if (!tmp(tmpIndex))
-            continue;
-        out.print(comma, "tmp", tmpIndex, ":", tmp(tmpIndex));
-    }
 }
 
 } // namespace JSC
index 7e93262..d5ddee8 100644
@@ -72,7 +72,6 @@ UnlinkedCodeBlock::UnlinkedCodeBlock(VM& vm, Structure* structure, CodeType code
     , m_codeType(static_cast<unsigned>(codeType))
     , m_didOptimize(static_cast<unsigned>(MixedTriState))
     , m_age(0)
-    , m_hasCheckpoints(false)
     , m_parseMode(info.parseMode())
     , m_codeGenerationMode(codeGenerationMode)
     , m_metadata(UnlinkedMetadataTable::create())
index 3082b0a..ad1f90b 100644
@@ -141,9 +141,6 @@ public:
     bool hasExpressionInfo() { return m_expressionInfo.size(); }
     const Vector<ExpressionRangeInfo>& expressionInfo() { return m_expressionInfo; }
 
-    bool hasCheckpoints() const { return m_hasCheckpoints; }
-    void setHasCheckpoints() { m_hasCheckpoints = true; }
-
     // Special registers
     void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
     void setScopeRegister(VirtualRegister scopeRegister) { m_scopeRegister = scopeRegister; }
@@ -201,8 +198,9 @@ public:
     }
 
     const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
-    const WriteBarrier<Unknown>& constantRegister(VirtualRegister reg) const { return m_constantRegisters[reg.toConstantIndex()]; }
-    ALWAYS_INLINE JSValue getConstant(VirtualRegister reg) const { return m_constantRegisters[reg.toConstantIndex()].get(); }
+    const WriteBarrier<Unknown>& constantRegister(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
+    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
     const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
 
     unsigned numberOfConstantIdentifierSets() const { return m_rareData ? m_rareData->m_constantIdentifierSets.size() : 0; }
@@ -421,7 +419,6 @@ private:
     unsigned m_didOptimize : 2;
     unsigned m_age : 3;
     static_assert(((1U << 3) - 1) >= maxAge);
-    bool m_hasCheckpoints : 1;
 public:
     ConcurrentJSLock m_lock;
 private:
index 90f93dd..dfa0776 100644
@@ -176,28 +176,28 @@ inline BytecodeIndex getRareCaseProfileBytecodeIndex(RareCaseProfile* rareCasePr
     return rareCaseProfile->m_bytecodeIndex;
 }
 
-struct ValueProfileAndVirtualRegister : public ValueProfile {
-    VirtualRegister m_operand;
+struct ValueProfileAndOperand : public ValueProfile {
+    int m_operand;
 };
 
-struct ValueProfileAndVirtualRegisterBuffer {
+struct ValueProfileAndOperandBuffer {
     WTF_MAKE_STRUCT_FAST_ALLOCATED;
 
-    ValueProfileAndVirtualRegisterBuffer(unsigned size)
+    ValueProfileAndOperandBuffer(unsigned size)
         : m_size(size)
     {
         // FIXME: ValueProfile has more stuff than we need. We could optimize these value profiles
         // to be more space efficient.
         // https://bugs.webkit.org/show_bug.cgi?id=175413
-        m_buffer = MallocPtr<ValueProfileAndVirtualRegister, VMMalloc>::malloc(m_size * sizeof(ValueProfileAndVirtualRegister));
+        m_buffer = MallocPtr<ValueProfileAndOperand, VMMalloc>::malloc(m_size * sizeof(ValueProfileAndOperand));
         for (unsigned i = 0; i < m_size; ++i)
-            new (&m_buffer.get()[i]) ValueProfileAndVirtualRegister();
+            new (&m_buffer.get()[i]) ValueProfileAndOperand();
     }
 
-    ~ValueProfileAndVirtualRegisterBuffer()
+    ~ValueProfileAndOperandBuffer()
     {
         for (unsigned i = 0; i < m_size; ++i)
-            m_buffer.get()[i].~ValueProfileAndVirtualRegister();
+            m_buffer.get()[i].~ValueProfileAndOperand();
     }
 
     template <typename Function>
@@ -208,7 +208,7 @@ struct ValueProfileAndVirtualRegisterBuffer {
     }
 
     unsigned m_size;
-    MallocPtr<ValueProfileAndVirtualRegister, VMMalloc> m_buffer;
+    MallocPtr<ValueProfileAndOperand, VMMalloc> m_buffer;
 };
 
 } // namespace JSC
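
A lifetime sketch of the pattern the buffer above uses: raw allocation, placement-new per element, and explicit destructor calls on teardown. Plain malloc/free stand in here for JSC's MallocPtr with VMMalloc; this is an illustration, not the real type.

    #include <cstdlib>
    #include <new>

    // Hypothetical RawBufferSketch: allocate uninitialized storage,
    // construct each element in place, destroy each element by hand.
    template<typename T>
    struct RawBufferSketch {
        explicit RawBufferSketch(unsigned size)
            : m_size(size)
            , m_buffer(static_cast<T*>(std::malloc(size * sizeof(T))))
        {
            for (unsigned i = 0; i < m_size; ++i)
                new (&m_buffer[i]) T();
        }

        ~RawBufferSketch()
        {
            for (unsigned i = 0; i < m_size; ++i)
                m_buffer[i].~T();
            std::free(m_buffer);
        }

        unsigned m_size;
        T* m_buffer;
    };
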
index b13947f..c5ca4de 100644
@@ -35,22 +35,22 @@ JSValue ValueRecovery::recover(CallFrame* callFrame) const
 {
     switch (technique()) {
     case DisplacedInJSStack:
-        return callFrame->r(virtualRegister()).jsValue();
+        return callFrame->r(virtualRegister().offset()).jsValue();
     case Int32DisplacedInJSStack:
-        return jsNumber(callFrame->r(virtualRegister()).unboxedInt32());
+        return jsNumber(callFrame->r(virtualRegister().offset()).unboxedInt32());
     case Int52DisplacedInJSStack:
-        return jsNumber(callFrame->r(virtualRegister()).unboxedInt52());
+        return jsNumber(callFrame->r(virtualRegister().offset()).unboxedInt52());
     case StrictInt52DisplacedInJSStack:
-        return jsNumber(callFrame->r(virtualRegister()).unboxedStrictInt52());
+        return jsNumber(callFrame->r(virtualRegister().offset()).unboxedStrictInt52());
     case DoubleDisplacedInJSStack:
-        return jsNumber(purifyNaN(callFrame->r(virtualRegister()).unboxedDouble()));
+        return jsNumber(purifyNaN(callFrame->r(virtualRegister().offset()).unboxedDouble()));
     case CellDisplacedInJSStack:
-        return callFrame->r(virtualRegister()).unboxedCell();
+        return callFrame->r(virtualRegister().offset()).unboxedCell();
     case BooleanDisplacedInJSStack:
 #if USE(JSVALUE64)
-        return callFrame->r(virtualRegister()).jsValue();
+        return callFrame->r(virtualRegister().offset()).jsValue();
 #else
-        return jsBoolean(callFrame->r(virtualRegister()).unboxedBoolean());
+        return jsBoolean(callFrame->r(virtualRegister().offset()).unboxedBoolean());
 #endif
     case Constant:
         return constant();
index 5e43bc7..d8f18c3 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
index fd17007..3619529 100644
 
 namespace JSC {
 
-inline bool virtualRegisterIsLocal(int operand)
+inline bool operandIsLocal(int operand)
 {
     return operand < 0;
 }
 
-inline bool virtualRegisterIsArgument(int operand)
+inline bool operandIsArgument(int operand)
 {
     return operand >= 0;
 }
@@ -49,32 +49,25 @@ public:
     friend VirtualRegister virtualRegisterForLocal(int);
     friend VirtualRegister virtualRegisterForArgument(int, int);
 
-    static constexpr int invalidVirtualRegister = 0x3fffffff;
-    static constexpr int firstConstantRegisterIndex = FirstConstantRegisterIndex;
-
     VirtualRegister(RegisterID*);
     VirtualRegister(RefPtr<RegisterID>);
 
     VirtualRegister()
-        : m_virtualRegister(invalidVirtualRegister)
+        : m_virtualRegister(s_invalidVirtualRegister)
     { }
 
     explicit VirtualRegister(int virtualRegister)
         : m_virtualRegister(virtualRegister)
     { }
 
-    VirtualRegister(CallFrameSlot slot)
-        : m_virtualRegister(static_cast<int>(slot))
-    { }
-
-    bool isValid() const { return (m_virtualRegister != invalidVirtualRegister); }
-    bool isLocal() const { return virtualRegisterIsLocal(m_virtualRegister); }
-    bool isArgument() const { return virtualRegisterIsArgument(m_virtualRegister); }
+    bool isValid() const { return (m_virtualRegister != s_invalidVirtualRegister); }
+    bool isLocal() const { return operandIsLocal(m_virtualRegister); }
+    bool isArgument() const { return operandIsArgument(m_virtualRegister); }
     bool isHeader() const { return m_virtualRegister >= 0 && m_virtualRegister < CallFrameSlot::thisArgument; }
-    bool isConstant() const { return m_virtualRegister >= firstConstantRegisterIndex; }
+    bool isConstant() const { return m_virtualRegister >= s_firstConstantRegisterIndex; }
     int toLocal() const { ASSERT(isLocal()); return operandToLocal(m_virtualRegister); }
     int toArgument() const { ASSERT(isArgument()); return operandToArgument(m_virtualRegister); }
-    int toConstantIndex() const { ASSERT(isConstant()); return m_virtualRegister - firstConstantRegisterIndex; }
+    int toConstantIndex() const { ASSERT(isConstant()); return m_virtualRegister - s_firstConstantRegisterIndex; }
     int offset() const { return m_virtualRegister; }
     int offsetInBytes() const { return m_virtualRegister * sizeof(Register); }
 
@@ -113,6 +106,9 @@ public:
     void dump(PrintStream& out) const;
 
 private:
+    static constexpr int s_invalidVirtualRegister = 0x3fffffff;
+    static constexpr int s_firstConstantRegisterIndex = FirstConstantRegisterIndex;
+
     static int localToOperand(int local) { return -1 - local; }
     static int operandToLocal(int operand) { return -1 - operand; }
     static int operandToArgument(int operand) { return operand - CallFrame::thisArgumentOffset(); }
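
The encoding these helpers implement is worth spelling out: a single int carries both register kinds, with locals folded into the negative range so local N sits at offset -1 - N while every argument offset stays non-negative.

    // Taken in spirit from the helpers above: local 0 <-> -1,
    // local 1 <-> -2, and operandIsLocal() is just a sign test.
    inline int localToOperand(int local) { return -1 - local; }
    inline int operandToLocal(int operand) { return -1 - operand; }
    inline bool operandIsLocal(int operand) { return operand < 0; }
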
index cc8143e..b52fc62 100644
@@ -1187,7 +1187,7 @@ RegisterID* BytecodeGenerator::initializeNextParameter()
     VirtualRegister reg = virtualRegisterForArgument(m_codeBlock->numParameters());
     m_parameters.grow(m_parameters.size() + 1);
     auto& parameter = registerFor(reg);
-    parameter.setIndex(reg);
+    parameter.setIndex(reg.offset());
     m_codeBlock->addParameter();
     return &parameter;
 }
@@ -1196,7 +1196,7 @@ void BytecodeGenerator::initializeParameters(FunctionParameters& parameters)
 {
     // Make sure the code block knows about all of our parameters, and make sure that parameters
     // needing destructuring are noted.
-    m_thisRegister.setIndex(VirtualRegister(initializeNextParameter()->index())); // this
+    m_thisRegister.setIndex(initializeNextParameter()->index()); // this
 
     bool nonSimpleArguments = false;
     for (unsigned i = 0; i < parameters.size(); ++i) {
@@ -1637,11 +1637,11 @@ bool BytecodeGenerator::emitEqualityOpImpl(RegisterID* dst, RegisterID* src1, Re
 
     if (m_lastInstruction->is<OpTypeof>()) {
         auto op = m_lastInstruction->as<OpTypeof>();
-        if (src1->virtualRegister() == op.m_dst
+        if (src1->index() == op.m_dst.offset()
             && src1->isTemporary()
-            && src2->virtualRegister().isConstant()
-            && m_codeBlock->constantRegister(src2->virtualRegister()).get().isString()) {
-            const String& value = asString(m_codeBlock->constantRegister(src2->virtualRegister()).get())->tryGetValue();
+            && m_codeBlock->isConstantRegisterIndex(src2->index())
+            && m_codeBlock->constantRegister(src2->index()).get().isString()) {
+            const String& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue();
             if (value == "undefined") {
                 rewind();
                 OpIsUndefined::emit(this, dst, op.m_value);
@@ -3235,8 +3235,6 @@ RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func
     // Emit call.
     ASSERT(dst != ignoredResult());
     VarargsOp::emit(this, dst, func, thisRegister, arguments ? arguments : VirtualRegister(0), firstFreeRegister, firstVarArgOffset);
-    if (VarargsOp::opcodeID != op_tail_call_forward_arguments)
-        ASSERT(m_codeBlock->hasCheckpoints());
     return dst;
 }
 
index a5f366b..a82afdf 100644
@@ -1010,7 +1010,6 @@ namespace JSC {
         bool shouldEmitControlFlowProfilerHooks() const { return m_codeGenerationMode.contains(CodeGenerationMode::ControlFlowProfiler); }
         
         bool isStrictMode() const { return m_codeBlock->isStrictMode(); }
-        void setUsesCheckpoints() { m_codeBlock->setHasCheckpoints(); }
 
         SourceParseMode parseMode() const { return m_codeBlock->parseMode(); }
         
index 73b4320..da3fe12 100644
@@ -69,12 +69,12 @@ namespace JSC {
         {
         }
 
-        void setIndex(VirtualRegister index)
+        void setIndex(int index)
         {
 #if ASSERT_ENABLED
             m_didSetIndex = true;
 #endif
-            m_virtualRegister = index;
+            m_virtualRegister = VirtualRegister(index);
         }
 
         void setTemporary()
index e64071b..b83fee0 100644
@@ -40,14 +40,6 @@ void AbstractHeap::Payload::dump(PrintStream& out) const
         out.print(value());
 }
 
-void AbstractHeap::Payload::dumpAsOperand(PrintStream& out) const
-{
-    if (isTop())
-        out.print("TOP");
-    else
-        out.print(Operand::fromBits(value()));
-}
-
 void AbstractHeap::dump(PrintStream& out) const
 {
     out.print(kind());
@@ -57,13 +49,6 @@ void AbstractHeap::dump(PrintStream& out) const
         out.print("(", DOMJIT::HeapRange::fromRaw(payload().value32()), ")");
         return;
     }
-    if (kind() == Stack) {
-        out.print("(");
-        payload().dumpAsOperand(out);
-        out.print(")");
-        return;
-    }
-
     out.print("(", payload(), ")");
 }
 
index 3c4ce3d..bf99377 100644
@@ -28,7 +28,6 @@
 #if ENABLE(DFG_JIT)
 
 #include "DOMJITHeapRange.h"
-#include "OperandsInlines.h"
 #include "VirtualRegister.h"
 #include <wtf/HashMap.h>
 #include <wtf/PrintStream.h>
@@ -124,15 +123,10 @@ public:
             , m_value(bitwise_cast<intptr_t>(pointer))
         {
         }
-
-        Payload(Operand operand)
-            : m_isTop(false)
-            , m_value(operand.asBits())
-        {
-        }
-
+        
         Payload(VirtualRegister operand)
-            : Payload(Operand(operand))
+            : m_isTop(false)
+            , m_value(operand.offset())
         {
         }
         
@@ -189,7 +183,6 @@ public:
         }
         
         void dump(PrintStream&) const;
-        void dumpAsOperand(PrintStream&) const;
         
     private:
         bool m_isTop;
@@ -211,7 +204,6 @@ public:
     {
         ASSERT(kind != InvalidAbstractHeap && kind != World && kind != Heap && kind != SideState);
         m_value = encode(kind, payload);
-        ASSERT(this->kind() == kind && this->payload() == payload);
     }
     
     AbstractHeap(WTF::HashTableDeletedValueType)
@@ -227,11 +219,6 @@ public:
         ASSERT(kind() != World && kind() != InvalidAbstractHeap);
         return payloadImpl();
     }
-    Operand operand() const
-    {
-        ASSERT(kind() == Stack && !payload().isTop());
-        return Operand::fromBits(payload().value());
-    }
     
     AbstractHeap supertype() const
     {
index 5bb278d..c26c24a 100644
@@ -388,7 +388,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
             
     case GetLocal: {
         VariableAccessData* variableAccessData = node->variableAccessData();
-        AbstractValue value = m_state.operand(variableAccessData->operand());
+        AbstractValue value = m_state.operand(variableAccessData->local().offset());
         // The value in the local should already be checked.
         DFG_ASSERT(m_graph, node, value.isType(typeFilterFor(variableAccessData->flushFormat())));
         if (value.value())
@@ -399,7 +399,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
         
     case GetStack: {
         StackAccessData* data = node->stackAccessData();
-        AbstractValue value = m_state.operand(data->operand);
+        AbstractValue value = m_state.operand(data->local);
         // The value in the local should already be checked.
         DFG_ASSERT(m_graph, node, value.isType(typeFilterFor(data->format)));
         if (value.value())
@@ -409,12 +409,12 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
     }
         
     case SetLocal: {
-        m_state.operand(node->operand()) = forNode(node->child1());
+        m_state.operand(node->local()) = forNode(node->child1());
         break;
     }
         
     case PutStack: {
-        m_state.operand(node->stackAccessData()->operand) = forNode(node->child1());
+        m_state.operand(node->stackAccessData()->local) = forNode(node->child1());
         break;
     }
         
@@ -436,7 +436,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
         // Assert that the state of arguments has been set. SetArgumentDefinitely/SetArgumentMaybe means
         // that someone set the argument values out-of-band, and currently this always means setting to a
         // non-clear value.
-        ASSERT(!m_state.operand(node->operand()).isClear());
+        ASSERT(!m_state.operand(node->local()).isClear());
         break;
 
     case InitializeEntrypointArguments: {
@@ -465,12 +465,6 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
         break;
     }
 
-    case VarargsLength: {
-        clobberWorld();
-        setTypeForNode(node, SpecInt32Only);
-        break;
-    }
-
     case LoadVarargs:
     case ForwardVarargs: {
         // FIXME: ForwardVarargs should check if the count becomes known, and if it does, it should turn
@@ -489,7 +483,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
         LoadVarargsData* data = node->loadVarargsData();
         m_state.operand(data->count).setNonCellType(SpecInt32Only);
         for (unsigned i = data->limit - 1; i--;)
-            m_state.operand(data->start + i).makeHeapTop();
+            m_state.operand(data->start.offset() + i).makeHeapTop();
         break;
     }
 
@@ -2371,7 +2365,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
             unsigned argumentIndex;
             if (argumentIndexChecked.safeGet(argumentIndex) != CheckedState::DidOverflow) {
                 if (inlineCallFrame) {
-                    if (argumentIndex < static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1)) {
+                    if (argumentIndex < inlineCallFrame->argumentCountIncludingThis - 1) {
                         setForNode(node, m_state.operand(
                             virtualRegisterForArgument(argumentIndex + 1) + inlineCallFrame->stackOffset));
                         m_state.setShouldTryConstantFolding(true);
index 89c08fa..d73247b 100644
@@ -120,7 +120,7 @@ public:
     {
         for (unsigned i = 0; i < m_variables.size(); ++i) {
             VariableAccessData* variable = m_variables[i]->find();
-            Operand operand = variable->operand();
+            VirtualRegister operand = variable->local();
 
             if (i)
                 out.print(" ");
index 1fe20fe..713e799 100644
@@ -358,12 +358,9 @@ private:
                 case NewArrayBuffer:
                     break;
                     
-                case VarargsLength:
-                    break;
-
                 case LoadVarargs:
-                    if (node->loadVarargsData()->offset && (node->argumentsChild()->op() == NewArrayWithSpread || node->argumentsChild()->op() == Spread || node->argumentsChild()->op() == NewArrayBuffer))
-                        escape(node->argumentsChild(), node);
+                    if (node->loadVarargsData()->offset && (node->child1()->op() == NewArrayWithSpread || node->child1()->op() == Spread || node->child1()->op() == NewArrayBuffer))
+                        escape(node->child1(), node);
                     break;
                     
                 case CallVarargs:
@@ -496,10 +493,10 @@ private:
                             return;
                         }
                         ASSERT(!heap.payload().isTop());
-                        Operand operand = heap.operand();
+                        VirtualRegister reg(heap.payload().value32());
                         // The register may not point to an argument or local, for example if we are looking at SetArgumentCountIncludingThis.
-                        if (!operand.isHeader())
-                            clobberedByThisBlock.operand(operand) = true;
+                        if (!reg.isHeader())
+                            clobberedByThisBlock.operand(reg) = true;
                     },
                     NoOpClobberize());
             }
@@ -563,16 +560,16 @@ private:
                         if (inlineCallFrame) {
                             if (inlineCallFrame->isVarargs()) {
                                 isClobberedByBlock |= clobberedByThisBlock.operand(
-                                    VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis));
+                                    inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis);
                             }
 
                             if (!isClobberedByBlock || inlineCallFrame->isClosureCall) {
                                 isClobberedByBlock |= clobberedByThisBlock.operand(
-                                    VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::callee));
+                                    inlineCallFrame->stackOffset + CallFrameSlot::callee);
                             }
 
                             if (!isClobberedByBlock) {
-                                for (unsigned i = 0; i < static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1); ++i) {
+                                for (unsigned i = 0; i < inlineCallFrame->argumentCountIncludingThis - 1; ++i) {
                                     VirtualRegister reg =
                                         VirtualRegister(inlineCallFrame->stackOffset) +
                                         CallFrame::argumentOffset(i);
@@ -630,7 +627,7 @@ private:
                                 m_graph, node, NoOpClobberize(),
                                 [&] (AbstractHeap heap) {
                                     if (heap.kind() == Stack && !heap.payload().isTop()) {
-                                        if (argumentsInvolveStackSlot(inlineCallFrame, heap.operand()))
+                                        if (argumentsInvolveStackSlot(inlineCallFrame, VirtualRegister(heap.payload().value32())))
                                             found = true;
                                         return;
                                     }
@@ -809,7 +806,7 @@ private:
                         
                         bool safeToGetStack = index >= numberOfArgumentsToSkip;
                         if (inlineCallFrame)
-                            safeToGetStack &= index < static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1);
+                            safeToGetStack &= index < inlineCallFrame->argumentCountIncludingThis - 1;
                         else {
                             safeToGetStack &=
                                 index < static_cast<unsigned>(codeBlock()->numParameters()) - 1;
@@ -848,23 +845,9 @@ private:
                     node->convertToIdentityOn(result);
                     break;
                 }
-                
-                case VarargsLength: {
-                    Node* candidate = node->argumentsChild().node();
-                    if (!isEliminatedAllocation(candidate))
-                        break;
-
-                    // VarargsLength can exit, so it better be exitOK.
-                    DFG_ASSERT(m_graph, node, node->origin.exitOK);
-                    NodeOrigin origin = node->origin.withExitOK(true);
-
-
-                    node->convertToIdentityOn(emitCodeToGetArgumentsArrayLength(insertionSet, candidate, nodeIndex, origin, /* addThis = */ true));
-                    break;
-                }
-
+                    
                 case LoadVarargs: {
-                    Node* candidate = node->argumentsChild().node();
+                    Node* candidate = node->child1().node();
                     if (!isEliminatedAllocation(candidate))
                         break;
                     
@@ -879,10 +862,10 @@ private:
                             jsNumber(argumentCountIncludingThis));
                         insertionSet.insertNode(
                             nodeIndex, SpecNone, KillStack, node->origin.takeValidExit(canExit),
-                            OpInfo(varargsData->count));
+                            OpInfo(varargsData->count.offset()));
                         insertionSet.insertNode(
                             nodeIndex, SpecNone, MovHint, node->origin.takeValidExit(canExit),
-                            OpInfo(varargsData->count), Edge(argumentCountIncludingThisNode));
+                            OpInfo(varargsData->count.offset()), Edge(argumentCountIncludingThisNode));
                         insertionSet.insertNode(
                             nodeIndex, SpecNone, PutStack, node->origin.withExitOK(canExit),
                             OpInfo(m_graph.m_stackAccessData.add(varargsData->count, FlushedInt32)),
@@ -891,15 +874,14 @@ private:
 
                     auto storeValue = [&] (Node* value, unsigned storeIndex) {
                         VirtualRegister reg = varargsData->start + storeIndex;
-                        ASSERT(reg.isLocal());
                         StackAccessData* data =
                             m_graph.m_stackAccessData.add(reg, FlushedJSValue);
                         
                         insertionSet.insertNode(
-                            nodeIndex, SpecNone, KillStack, node->origin.takeValidExit(canExit), OpInfo(reg));
+                            nodeIndex, SpecNone, KillStack, node->origin.takeValidExit(canExit), OpInfo(reg.offset()));
                         insertionSet.insertNode(
                             nodeIndex, SpecNone, MovHint, node->origin.takeValidExit(canExit),
-                            OpInfo(reg), Edge(value));
+                            OpInfo(reg.offset()), Edge(value));
                         insertionSet.insertNode(
                             nodeIndex, SpecNone, PutStack, node->origin.withExitOK(canExit),
                             OpInfo(data), Edge(value));
@@ -953,7 +935,7 @@ private:
                                 ASSERT(candidate->op() == PhantomCreateRest);
                                 unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                                 InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
-                                unsigned frameArgumentCount = static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1);
+                                unsigned frameArgumentCount = inlineCallFrame->argumentCountIncludingThis - 1;
                                 if (frameArgumentCount >= numberOfArgumentsToSkip)
                                     return frameArgumentCount - numberOfArgumentsToSkip;
                                 return 0;
@@ -1001,7 +983,7 @@ private:
                                     ASSERT(candidate->op() == PhantomCreateRest);
                                     unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                                     InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
-                                    unsigned frameArgumentCount = static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1);
+                                    unsigned frameArgumentCount = inlineCallFrame->argumentCountIncludingThis - 1;
                                     for (unsigned loadIndex = numberOfArgumentsToSkip; loadIndex < frameArgumentCount; ++loadIndex) {
                                         VirtualRegister reg = virtualRegisterForArgument(loadIndex + 1) + inlineCallFrame->stackOffset;
                                         StackAccessData* data = m_graph.m_stackAccessData.add(reg, FlushedJSValue);
@@ -1037,7 +1019,9 @@ private:
 
                         InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
 
-                        if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
+                        if (inlineCallFrame
+                            && !inlineCallFrame->isVarargs()) {
+
                             unsigned argumentCountIncludingThis = inlineCallFrame->argumentCountIncludingThis;
                             if (argumentCountIncludingThis > varargsData->offset)
                                 argumentCountIncludingThis -= varargsData->offset;
@@ -1046,6 +1030,7 @@ private:
                             RELEASE_ASSERT(argumentCountIncludingThis >= 1);
 
                             if (argumentCountIncludingThis <= varargsData->limit) {
+                                
                                 storeArgumentCountIncludingThis(argumentCountIncludingThis);
 
                                 DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum, varargsData->limit, varargsData->mandatoryMinimum);
index 1fdc309..ff1d568 100644
 
 namespace JSC { namespace DFG {
 
-bool argumentsInvolveStackSlot(InlineCallFrame* inlineCallFrame, Operand operand)
+bool argumentsInvolveStackSlot(InlineCallFrame* inlineCallFrame, VirtualRegister reg)
 {
-    if (operand.isTmp())
-        return false;
-
-    VirtualRegister reg = operand.virtualRegister();
     if (!inlineCallFrame)
         return (reg.isArgument() && reg.toArgument()) || reg.isHeader();
-
+    
     if (inlineCallFrame->isClosureCall
         && reg == VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::callee))
         return true;
@@ -50,19 +46,19 @@ bool argumentsInvolveStackSlot(InlineCallFrame* inlineCallFrame, Operand operand
         return true;
     
     // We do not include fixups here since it is not related to |arguments|, rest parameters, and varargs.
-    unsigned numArguments = static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1);
+    unsigned numArguments = inlineCallFrame->argumentCountIncludingThis - 1;
     VirtualRegister argumentStart =
         VirtualRegister(inlineCallFrame->stackOffset) + CallFrame::argumentOffset(0);
     return reg >= argumentStart && reg < argumentStart + numArguments;
 }
 
-bool argumentsInvolveStackSlot(Node* candidate, Operand operand)
+bool argumentsInvolveStackSlot(Node* candidate, VirtualRegister reg)
 {
-    return argumentsInvolveStackSlot(candidate->origin.semantic.inlineCallFrame(), operand);
+    return argumentsInvolveStackSlot(candidate->origin.semantic.inlineCallFrame(), reg);
 }
 
 Node* emitCodeToGetArgumentsArrayLength(
-    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin, bool addThis)
+    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
 {
     Graph& graph = insertionSet.graph();
 
@@ -73,14 +69,11 @@ Node* emitCodeToGetArgumentsArrayLength(
         || arguments->op() == NewArrayBuffer
         || arguments->op() == PhantomDirectArguments || arguments->op() == PhantomClonedArguments
         || arguments->op() == PhantomCreateRest || arguments->op() == PhantomNewArrayBuffer
-        || arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread,
+        || arguments->op() == PhantomNewArrayWithSpread,
         arguments->op());
 
-    if (arguments->op() == PhantomSpread)
-        return emitCodeToGetArgumentsArrayLength(insertionSet, arguments->child1().node(), nodeIndex, origin, addThis);
-
     if (arguments->op() == PhantomNewArrayWithSpread) {
-        unsigned numberOfNonSpreadArguments = addThis;
+        unsigned numberOfNonSpreadArguments = 0;
         BitVector* bitVector = arguments->bitVector();
         Node* currentSum = nullptr;
         for (unsigned i = 0; i < arguments->numChildren(); i++) {
@@ -110,7 +103,7 @@ Node* emitCodeToGetArgumentsArrayLength(
 
     if (arguments->op() == NewArrayBuffer || arguments->op() == PhantomNewArrayBuffer) {
         return insertionSet.insertConstant(
-            nodeIndex, origin, jsNumber(arguments->castOperand<JSImmutableButterfly*>()->length() + addThis));
+            nodeIndex, origin, jsNumber(arguments->castOperand<JSImmutableButterfly*>()->length()));
     }
     
     InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame();
@@ -120,7 +113,7 @@ Node* emitCodeToGetArgumentsArrayLength(
         numberOfArgumentsToSkip = arguments->numberOfArgumentsToSkip();
     
     if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
-        unsigned argumentsSize = inlineCallFrame->argumentCountIncludingThis - !addThis;
+        unsigned argumentsSize = inlineCallFrame->argumentCountIncludingThis - 1;
         if (argumentsSize >= numberOfArgumentsToSkip)
             argumentsSize -= numberOfArgumentsToSkip;
         else
@@ -136,14 +129,14 @@ Node* emitCodeToGetArgumentsArrayLength(
         nodeIndex, SpecInt32Only, ArithSub, origin, OpInfo(Arith::Unchecked),
         Edge(argumentCount, Int32Use),
         insertionSet.insertConstantForUse(
-            nodeIndex, origin, jsNumber(numberOfArgumentsToSkip + !addThis), Int32Use));
+            nodeIndex, origin, jsNumber(1 + numberOfArgumentsToSkip), Int32Use));
 
     if (numberOfArgumentsToSkip) {
         // The above subtraction may produce a negative number if this number is non-zero. We correct that here.
         result = insertionSet.insertNode(
             nodeIndex, SpecInt32Only, ArithMax, origin, 
             Edge(result, Int32Use), 
-            insertionSet.insertConstantForUse(nodeIndex, origin, jsNumber(static_cast<unsigned>(addThis)), Int32Use));
+            insertionSet.insertConstantForUse(nodeIndex, origin, jsNumber(0), Int32Use));
         result->setResult(NodeResultInt32);
     }
 
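
An arithmetic sketch of the length computation the hunk restores: drop the |this| slot plus any skipped parameters from argumentCountIncludingThis, then clamp at zero, mirroring the ArithSub/ArithMax nodes emitted above. The function name is hypothetical.

    #include <algorithm>

    // Clamped length of the materialized arguments array for a
    // non-varargs inline frame, per the nodes inserted above.
    inline int argumentsArrayLengthSketch(int argumentCountIncludingThis, int numberOfArgumentsToSkip)
    {
        return std::max(argumentCountIncludingThis - 1 - numberOfArgumentsToSkip, 0);
    }
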
index 74f939e..9a7718f 100644
 
 namespace JSC { namespace DFG {
 
-bool argumentsInvolveStackSlot(InlineCallFrame*, Operand);
-bool argumentsInvolveStackSlot(Node* candidate, Operand);
+bool argumentsInvolveStackSlot(InlineCallFrame*, VirtualRegister);
+bool argumentsInvolveStackSlot(Node* candidate, VirtualRegister);
 
 Node* emitCodeToGetArgumentsArrayLength(
-    InsertionSet&, Node* arguments, unsigned nodeIndex, NodeOrigin, bool addThis = false);
+    InsertionSet&, Node* arguments, unsigned nodeIndex, NodeOrigin);
 
 } } // namespace JSC::DFG
 
index 897c70d..96dac39 100644
@@ -139,7 +139,8 @@ public:
     
     unsigned numberOfArguments() const { return m_block->valuesAtTail.numberOfArguments(); }
     unsigned numberOfLocals() const { return m_block->valuesAtTail.numberOfLocals(); }
-    AbstractValue& operand(Operand operand) { return m_block->valuesAtTail.operand(operand); }
+    AbstractValue& operand(int operand) { return m_block->valuesAtTail.operand(operand); }
+    AbstractValue& operand(VirtualRegister operand) { return m_block->valuesAtTail.operand(operand); }
     AbstractValue& local(size_t index) { return m_block->valuesAtTail.local(index); }
     AbstractValue& argument(size_t index) { return m_block->valuesAtTail.argument(index); }
     
index c2b55aa..7743e7d 100644
@@ -65,10 +65,10 @@ void AvailabilityMap::pruneHeap()
 
 void AvailabilityMap::pruneByLiveness(Graph& graph, CodeOrigin where)
 {
-    Operands<Availability> localsCopy(OperandsLike, m_locals, Availability::unavailable());
+    Operands<Availability> localsCopy(m_locals.numberOfArguments(), m_locals.numberOfLocals(), Availability::unavailable());
     graph.forAllLiveInBytecode(
         where,
-        [&] (Operand reg) {
+        [&] (VirtualRegister reg) {
             localsCopy.operand(reg) = m_locals.operand(reg);
         });
     m_locals = WTFMove(localsCopy);
index 80c12bf..5355256 100644
@@ -66,9 +66,9 @@ struct AvailabilityMap {
     }
     
     template<typename HasFunctor, typename AddFunctor>
-    void closeStartingWithLocal(Operand op, const HasFunctor& has, const AddFunctor& add)
+    void closeStartingWithLocal(VirtualRegister reg, const HasFunctor& has, const AddFunctor& add)
     {
-        Availability availability = m_locals.operand(op);
+        Availability availability = m_locals.operand(reg);
         if (!availability.hasNode())
             return;
         
index e48e0c2..c8d1d17 100644
@@ -34,7 +34,7 @@ namespace JSC { namespace DFG {
 
 DEFINE_ALLOCATOR_WITH_HEAP_IDENTIFIER(BasicBlock);
 
-BasicBlock::BasicBlock(BytecodeIndex bytecodeBegin, unsigned numArguments, unsigned numLocals, unsigned numTmps, float executionCount)
+BasicBlock::BasicBlock(BytecodeIndex bytecodeBegin, unsigned numArguments, unsigned numLocals, float executionCount)
     : bytecodeBegin(bytecodeBegin)
     , index(NoBlock)
     , cfaStructureClobberStateAtHead(StructuresAreWatched)
@@ -50,11 +50,11 @@ BasicBlock::BasicBlock(BytecodeIndex bytecodeBegin, unsigned numArguments, unsig
     , isLinked(false)
 #endif
     , isReachable(false)
-    , variablesAtHead(numArguments, numLocals, numTmps)
-    , variablesAtTail(numArguments, numLocals, numTmps)
-    , valuesAtHead(numArguments, numLocals, numTmps)
-    , valuesAtTail(numArguments, numLocals, numTmps)
-    , intersectionOfPastValuesAtHead(numArguments, numLocals, numTmps, AbstractValue::fullTop())
+    , variablesAtHead(numArguments, numLocals)
+    , variablesAtTail(numArguments, numLocals)
+    , valuesAtHead(numArguments, numLocals)
+    , valuesAtTail(numArguments, numLocals)
+    , intersectionOfPastValuesAtHead(numArguments, numLocals, AbstractValue::fullTop())
     , executionCount(executionCount)
 {
 }
@@ -72,15 +72,6 @@ void BasicBlock::ensureLocals(unsigned newNumLocals)
     intersectionOfPastValuesAtHead.ensureLocals(newNumLocals, AbstractValue::fullTop());
 }
 
-void BasicBlock::ensureTmps(unsigned newNumTmps)
-{
-    variablesAtHead.ensureTmps(newNumTmps);
-    variablesAtTail.ensureTmps(newNumTmps);
-    valuesAtHead.ensureTmps(newNumTmps);
-    valuesAtTail.ensureTmps(newNumTmps);
-    intersectionOfPastValuesAtHead.ensureTmps(newNumTmps, AbstractValue::fullTop());
-}
-
 void BasicBlock::replaceTerminal(Graph& graph, Node* node)
 {
     NodeAndIndex result = findTerminal();
index d2c5ae9..d457175 100644
@@ -49,12 +49,11 @@ DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(BasicBlock);
 struct BasicBlock : RefCounted<BasicBlock> {
     WTF_MAKE_STRUCT_FAST_ALLOCATED_WITH_HEAP_IDENTIFIER(BasicBlock);
     BasicBlock(
-        BytecodeIndex bytecodeBegin, unsigned numArguments, unsigned numLocals, unsigned numTmps,
+        BytecodeIndex bytecodeBegin, unsigned numArguments, unsigned numLocals,
         float executionCount);
     ~BasicBlock();
     
     void ensureLocals(unsigned newNumLocals);
-    void ensureTmps(unsigned newNumTmps);
     
     size_t size() const { return m_nodes.size(); }
     bool isEmpty() const { return !size(); }
index 008cf52..7508e21 100644
@@ -51,7 +51,7 @@ void BlockInsertionSet::insert(size_t index, Ref<BasicBlock>&& block)
 
 BasicBlock* BlockInsertionSet::insert(size_t index, float executionCount)
 {
-    Ref<BasicBlock> block = adoptRef(*new BasicBlock(BytecodeIndex(), m_graph.block(0)->variablesAtHead.numberOfArguments(), m_graph.block(0)->variablesAtHead.numberOfLocals(), m_graph.block(0)->variablesAtHead.numberOfTmps(), executionCount));
+    Ref<BasicBlock> block = adoptRef(*new BasicBlock(BytecodeIndex(), m_graph.block(0)->variablesAtHead.numberOfArguments(), m_graph.block(0)->variablesAtHead.numberOfLocals(), executionCount));
     block->isReachable = true;
     auto* result = block.ptr();
     insert(index, WTFMove(block));
index 93be0ed..7effad0 100644
@@ -113,7 +113,6 @@ public:
         , m_constantOne(graph.freeze(jsNumber(1)))
         , m_numArguments(m_codeBlock->numParameters())
         , m_numLocals(m_codeBlock->numCalleeLocals())
-        , m_numTmps(m_codeBlock->numTmps())
         , m_parameterSlots(0)
         , m_numPassedVarArgs(0)
         , m_inlineStackTop(0)
@@ -142,17 +141,6 @@ private:
             m_graph.block(i)->ensureLocals(newNumLocals);
     }
 
-    void ensureTmps(unsigned newNumTmps)
-    {
-        VERBOSE_LOG("   ensureTmps: trying to raise m_numTmps from ", m_numTmps, " to ", newNumTmps, "\n");
-        if (newNumTmps <= m_numTmps)
-            return;
-        m_numTmps = newNumTmps;
-        for (size_t i = 0; i < m_graph.numBlocks(); ++i)
-            m_graph.block(i)->ensureTmps(newNumTmps);
-    }
-
-
     // Helper for min and max.
     template<typename ChecksFunctor>
     bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
@@ -284,17 +272,7 @@ private:
     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
     void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
     
-    void progressToNextCheckpoint()
-    {
-        m_currentIndex = BytecodeIndex(m_currentIndex.offset(), m_currentIndex.checkpoint() + 1);
-        // At this point, it's again OK to OSR exit.
-        m_exitOK = true;
-        addToGraph(ExitOK);
-
-        processSetLocalQueue();
-    }
-
-    VariableAccessData* newVariableAccessData(Operand operand)
+    VariableAccessData* newVariableAccessData(VirtualRegister operand)
     {
         ASSERT(!operand.isConstant());
         
@@ -303,14 +281,16 @@ private:
     }
     
     // Get/Set the operands/result of a bytecode instruction.
-    Node* getDirect(Operand operand)
+    Node* getDirect(VirtualRegister operand)
     {
         ASSERT(!operand.isConstant());
 
+        // Is this an argument?
         if (operand.isArgument())
-            return getArgument(operand.virtualRegister());
+            return getArgument(operand);
 
-        return getLocalOrTmp(operand);
+        // Must be a local.
+        return getLocal(operand);
     }
 
     Node* get(VirtualRegister operand)
@@ -320,8 +300,8 @@ private:
             unsigned oldSize = m_constants.size();
             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
-                JSValue value = codeBlock.getConstant(operand);
-                SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand);
+                JSValue value = codeBlock.getConstant(operand.offset());
+                SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
                 if (constantIndex >= oldSize) {
                     m_constants.grow(constantIndex + 1);
                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
@@ -380,10 +360,9 @@ private:
         // initializing locals at the top of a function.
         ImmediateNakedSet
     };
-
-    Node* setDirect(Operand operand, Node* value, SetMode setMode = NormalSet)
+    Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
     {
-        addToGraph(MovHint, OpInfo(operand), value);
+        addToGraph(MovHint, OpInfo(operand.offset()), value);
 
         // We can't exit anymore because our OSR exit state has changed.
         m_exitOK = false;
@@ -397,7 +376,7 @@ private:
         
         return delayed.execute(this);
     }
-
+    
     void processSetLocalQueue()
     {
         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
@@ -415,17 +394,18 @@ private:
         ASSERT(node->op() == GetLocal);
         ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
         ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-        LazyOperandValueProfileKey key(m_currentIndex, node->operand());
+        LazyOperandValueProfileKey key(m_currentIndex, node->local());
         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
         node->variableAccessData()->predict(prediction);
         return node;
     }
 
     // Used in implementing get/set, above, where the operand is a local variable.
-    Node* getLocalOrTmp(Operand operand)
+    Node* getLocal(VirtualRegister operand)
     {
-        ASSERT(operand.isTmp() || operand.isLocal());
-        Node*& node = m_currentBlock->variablesAtTail.operand(operand);
+        unsigned local = operand.toLocal();
+
+        Node* node = m_currentBlock->variablesAtTail.local(local);
         
         // This has two goals: 1) link together variable access datas, and 2)
         // try to avoid creating redundant GetLocals. (1) is required for
@@ -450,26 +430,20 @@ private:
             variable = newVariableAccessData(operand);
         
         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
+        m_currentBlock->variablesAtTail.local(local) = node;
         return node;
     }
-    Node* setLocalOrTmp(const CodeOrigin& semanticOrigin, Operand operand, Node* value, SetMode setMode = NormalSet)
+    Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
     {
-        ASSERT(operand.isTmp() || operand.isLocal());
         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
 
-        if (operand.isTmp() && static_cast<unsigned>(operand.value()) >= m_numTmps) {
-            if (inlineCallFrame())
-                dataLogLn(*inlineCallFrame());
-            dataLogLn("Bad operand: ", operand, " but current number of tmps is: ", m_numTmps, " code block has: ", m_profiledBlock->numTmps(), " tmps.");
-            CRASH();
-        }
-
-        if (setMode != ImmediateNakedSet && !operand.isTmp()) {
-            VirtualRegister reg = operand.virtualRegister();
-            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(reg);
+        unsigned local = operand.toLocal();
+        
+        if (setMode != ImmediateNakedSet) {
+            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
             if (argumentPosition)
                 flushDirect(operand, argumentPosition);
-            else if (m_graph.needsScopeRegister() && reg == m_codeBlock->scopeRegister())
+            else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
                 flush(operand);
         }
 
@@ -479,7 +453,7 @@ private:
         variableAccessData->mergeCheckArrayHoistingFailed(
             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
-        m_currentBlock->variablesAtTail.operand(operand) = node;
+        m_currentBlock->variablesAtTail.local(local) = node;
         return node;
     }
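
getLocal and setLocal both funnel through variablesAtTail: a set records which node now holds the local at the end of the block, and a get reuses that record instead of emitting a redundant GetLocal. A hedged sketch of the caching rule, with a plain hash map and a caller-supplied emitter standing in for the parser's real machinery:

    #include <unordered_map>

    struct NodeSketch { };  // stand-in for DFG::Node

    template<typename EmitGetLocal>
    NodeSketch* getLocalSketch(std::unordered_map<unsigned, NodeSketch*>& variablesAtTail,
                               unsigned local, const EmitGetLocal& emitGetLocal)
    {
        // Reuse the value already known to be in this local at the block tail.
        if (NodeSketch* cached = variablesAtTail[local])
            return cached;
        // Otherwise load it once and record it so later gets in the block hit the cache.
        NodeSketch* fresh = emitGetLocal(local);
        variablesAtTail[local] = fresh;
        return fresh;
    }
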
 
@@ -511,21 +485,20 @@ private:
         m_currentBlock->variablesAtTail.argument(argument) = node;
         return node;
     }
-    Node* setArgument(const CodeOrigin& semanticOrigin, Operand operand, Node* value, SetMode setMode = NormalSet)
+    Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
     {
         SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
 
-        VirtualRegister reg = operand.virtualRegister();
-        unsigned argument = reg.toArgument();
+        unsigned argument = operand.toArgument();
         ASSERT(argument < m_numArguments);
         
-        VariableAccessData* variableAccessData = newVariableAccessData(reg);
+        VariableAccessData* variableAccessData = newVariableAccessData(operand);
 
         // Always flush arguments, except for 'this'. If 'this' is created by us,
         // then make sure that it's never unboxed.
         if (argument || m_graph.needsFlushedThis()) {
             if (setMode != ImmediateNakedSet)
-                flushDirect(reg);
+                flushDirect(operand);
         }
         
         if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
@@ -561,16 +534,14 @@ private:
             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
             return stack->m_argumentPositions[argument];
         }
-        return nullptr;
+        return 0;
     }
     
-    ArgumentPosition* findArgumentPosition(Operand operand)
+    ArgumentPosition* findArgumentPosition(VirtualRegister operand)
     {
-        if (operand.isTmp())
-            return nullptr;
         if (operand.isArgument())
             return findArgumentPositionForArgument(operand.toArgument());
-        return findArgumentPositionForLocal(operand.virtualRegister());
+        return findArgumentPositionForLocal(operand);
     }
 
     template<typename AddFlushDirectFunc>
@@ -581,9 +552,9 @@ private:
             ASSERT(!m_graph.hasDebuggerEnabled());
             numArguments = inlineCallFrame->argumentsWithFixup.size();
             if (inlineCallFrame->isClosureCall)
-                addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, CallFrameSlot::callee));
+                addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
             if (inlineCallFrame->isVarargs())
-                addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, CallFrameSlot::argumentCountIncludingThis));
+                addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));
         } else
             numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
 
@@ -606,7 +577,6 @@ private:
 
                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
                 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
-                // Note: We don't need to handle tmps here because tmps are not required to be flushed to the stack.
                 const auto& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex, m_graph.appropriateLivenessCalculationPoint(origin, isCallerOrigin));
                 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
                     if (livenessAtBytecode[local])
@@ -616,27 +586,27 @@ private:
             });
     }
 
-    void flush(Operand operand)
+    void flush(VirtualRegister operand)
     {
         flushDirect(m_inlineStackTop->remapOperand(operand));
     }
     
-    void flushDirect(Operand operand)
+    void flushDirect(VirtualRegister operand)
     {
         flushDirect(operand, findArgumentPosition(operand));
     }
 
-    void flushDirect(Operand operand, ArgumentPosition* argumentPosition)
+    void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
     {
         addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
     }
 
     template<NodeType nodeType>
-    void addFlushOrPhantomLocal(Operand operand, ArgumentPosition* argumentPosition)
+    void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
     {
         ASSERT(!operand.isConstant());
         
-        Node*& node = m_currentBlock->variablesAtTail.operand(operand);
+        Node* node = m_currentBlock->variablesAtTail.operand(operand);
         
         VariableAccessData* variable;
         
@@ -646,25 +616,26 @@ private:
             variable = newVariableAccessData(operand);
         
         node = addToGraph(nodeType, OpInfo(variable));
+        m_currentBlock->variablesAtTail.operand(operand) = node;
         if (argumentPosition)
             argumentPosition->addVariable(variable);
     }
 
-    void phantomLocalDirect(Operand operand)
+    void phantomLocalDirect(VirtualRegister operand)
     {
         addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
     }
 
     void flush(InlineStackEntry* inlineStackEntry)
     {
-        auto addFlushDirect = [&] (InlineCallFrame*, Operand operand) { flushDirect(operand); };
+        auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
         flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
     }
 
     void flushForTerminal()
     {
-        auto addFlushDirect = [&] (InlineCallFrame*, Operand operand) { flushDirect(operand); };
-        auto addPhantomLocalDirect = [&] (InlineCallFrame*, Operand operand) { phantomLocalDirect(operand); };
+        auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
+        auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
         flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
     }
 
@@ -792,11 +763,6 @@ private:
             Edge(child1), Edge(child2), Edge(child3));
         return addToGraph(result);
     }
-    Node* addToGraph(NodeType op, Operand operand, Node* child1)
-    {
-        ASSERT(op == MovHint);
-        return addToGraph(op, OpInfo(operand.kind()), OpInfo(operand.value()), child1);
-    }
     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
     {
         Node* result = m_graph.addNode(
@@ -1127,10 +1093,8 @@ private:
 
     // The number of arguments passed to the function.
     unsigned m_numArguments;
-    // The number of locals (vars + temporaries) used by the bytecode for the function.
+    // The number of locals (vars + temporaries) used in the function.
     unsigned m_numLocals;
-    // The max number of temps used for forwarding data to an OSR exit checkpoint.
-    unsigned m_numTmps;
     // The number of slots (in units of sizeof(Register)) that we need to
     // preallocate for arguments to outgoing calls from this frame. This
     // number includes the CallFrame slots that we initialize for the callee
@@ -1197,17 +1161,14 @@ private:
         
         ~InlineStackEntry();
         
-        Operand remapOperand(Operand operand) const
+        VirtualRegister remapOperand(VirtualRegister operand) const
         {
             if (!m_inlineCallFrame)
                 return operand;
-
-            if (operand.isTmp())
-                return Operand::tmp(operand.value() + m_inlineCallFrame->tmpOffset);
             
-            ASSERT(!operand.virtualRegister().isConstant());
+            ASSERT(!operand.isConstant());
 
-            return operand.virtualRegister() + m_inlineCallFrame->stackOffset;
+            return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
         }
     };
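
remapOperand above is the whole story of how an inlinee's registers live inside the caller's frame: shift by the inline call frame's stack offset, identity at the top level, and constants are never remapped. A compact sketch under those assumptions, with a nullable pointer standing in for "no inline call frame":

    #include <cassert>

    // stackOffset is null for the machine (non-inlined) frame.
    inline int remapOperandSketch(int operandOffset, const int* stackOffset, bool isConstant)
    {
        assert(!isConstant);                  // constants are not frame-relative
        if (!stackOffset)
            return operandOffset;             // top-level frame: identity mapping
        return operandOffset + *stackOffset;  // inlinee slot shifted into the caller frame
    }
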
     
@@ -1216,8 +1177,13 @@ private:
     ICStatusContextStack m_icContextStack;
     
     struct DelayedSetLocal {
+        CodeOrigin m_origin;
+        VirtualRegister m_operand;
+        Node* m_value;
+        SetMode m_setMode;
+        
         DelayedSetLocal() { }
-        DelayedSetLocal(const CodeOrigin& origin, Operand operand, Node* value, SetMode setMode)
+        DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
             : m_origin(origin)
             , m_operand(operand)
             , m_value(value)
@@ -1230,13 +1196,8 @@ private:
         {
             if (m_operand.isArgument())
                 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
-            return parser->setLocalOrTmp(m_origin, m_operand, m_value, m_setMode);
+            return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
         }
-
-        CodeOrigin m_origin;
-        Operand m_operand;
-        Node* m_value { nullptr };
-        SetMode m_setMode;
     };
     
     Vector<DelayedSetLocal, 2> m_setLocalQueue;
@@ -1249,7 +1210,7 @@ private:
 BasicBlock* ByteCodeParser::allocateTargetableBlock(BytecodeIndex bytecodeIndex)
 {
     ASSERT(bytecodeIndex);
-    Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, m_numTmps, 1));
+    Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
     BasicBlock* blockPtr = block.ptr();
     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
     if (m_inlineStackTop->m_blockLinkingTargets.size())
@@ -1261,7 +1222,7 @@ BasicBlock* ByteCodeParser::allocateTargetableBlock(BytecodeIndex bytecodeIndex)
 
 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
 {
-    Ref<BasicBlock> block = adoptRef(*new BasicBlock(BytecodeIndex(), m_numArguments, m_numLocals, m_numTmps, 1));
+    Ref<BasicBlock> block = adoptRef(*new BasicBlock(BytecodeIndex(), m_numArguments, m_numLocals, 1));
     BasicBlock* blockPtr = block.ptr();
     m_graph.appendBlock(WTFMove(block));
     return blockPtr;
@@ -1464,7 +1425,7 @@ bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant c
             if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
                 continue;
             // If the target InlineCallFrame is Varargs, we do not know how many arguments are actually filled by LoadVarargs. Varargs InlineCallFrame's
-            // argumentCountIncludingThis is maximum number of potentially filled arguments by xkLoadVarargs. We "continue" to the upper frame which may be
+            // argumentCountIncludingThis is the maximum number of arguments potentially filled by LoadVarargs. We "continue" to the upper frame which may be
             // a good target to jump into.
             if (callFrame->isVarargs())
                 continue;
@@ -1490,7 +1451,7 @@ bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant c
         // We must set the callee to the right value
         if (stackEntry->m_inlineCallFrame) {
             if (stackEntry->m_inlineCallFrame->isClosureCall)
-                setDirect(remapOperand(stackEntry->m_inlineCallFrame, CallFrameSlot::callee), callTargetNode, NormalSet);
+                setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
         } else
             addToGraph(SetCallee, callTargetNode);
 
@@ -1655,18 +1616,16 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, Ca
     ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
     int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
     
-    Operand inlineCallFrameStart = VirtualRegister(m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).value() + CallFrame::headerSizeInRegisters);
+    int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
     
     ensureLocals(
-        inlineCallFrameStart.toLocal() + 1 +
+        VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
         CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
     
-    ensureTmps((m_inlineStackTop->m_inlineCallFrame ? m_inlineStackTop->m_inlineCallFrame->tmpOffset : 0) + m_inlineStackTop->m_codeBlock->numTmps() + codeBlock->numTmps());
-
     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
 
     if (result.isValid())
-        result = m_inlineStackTop->remapOperand(result).virtualRegister();
+        result = m_inlineStackTop->remapOperand(result);
 
     VariableAccessData* calleeVariable = nullptr;
     if (callee.isClosureCall()) {
@@ -1679,7 +1638,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, Ca
 
     InlineStackEntry* callerStackTop = m_inlineStackTop;
     InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
-        inlineCallFrameStart.virtualRegister(), argumentCountIncludingThis, kind, continuationBlock);
+        (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
 
     // This is where the actual inlining really happens.
     BytecodeIndex oldIndex = m_currentIndex;
@@ -1714,9 +1673,9 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, Ca
         // However, when we begin executing the callee, we need OSR exit to be aware of where it can recover the arguments to the setter, loc9 and loc10. The MovHints in the inlined
         // callee make it so that if we exit at <HERE>, we can recover loc9 and loc10.
         for (int index = 0; index < argumentCountIncludingThis; ++index) {
-            Operand argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
+            VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
             Node* value = getDirect(argumentToGet);
-            addToGraph(MovHint, OpInfo(argumentToGet), value);
+            addToGraph(MovHint, OpInfo(argumentToGet.offset()), value);
             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToGet, value, ImmediateNakedSet });
         }
         break;
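
The loc9/loc10 commentary above states the OSR-exit invariant this loop maintains: a MovHint ties a bytecode operand to the DFG node currently holding its value, so an exit taken at <HERE> can rebuild the baseline frame. A toy sketch of that bookkeeping, with a plain map as a stand-in for the real exit-state tracking:

    #include <map>

    struct NodeSketch { };  // stand-in for DFG::Node

    // For each frame slot (by offset), remember the node whose value the
    // OSR exit ramp must materialize back into that slot.
    inline void recordMovHintSketch(std::map<int, NodeSketch*>& exitState,
                                    int operandOffset, NodeSketch* value)
    {
        exitState[operandOffset] = value;
    }
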
@@ -1760,16 +1719,16 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, Ca
         // In such cases, we do not need to move frames.
         if (registerOffsetAfterFixup != registerOffset) {
             for (int index = 0; index < argumentCountIncludingThis; ++index) {
-                Operand argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
+                VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
                 Node* value = getDirect(argumentToGet);
-                Operand argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
-                addToGraph(MovHint, OpInfo(argumentToSet), value);
+                VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
+                addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
                 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
             }
         }
         for (int index = 0; index < arityFixupCount; ++index) {
-            Operand argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
-            addToGraph(MovHint, OpInfo(argumentToSet), undefined);
+            VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
+            addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
             m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
         }
 
@@ -1936,12 +1895,12 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister
         emitFunctionChecks(callVariant, callTargetNode, thisArgument);
         
         int remappedRegisterOffset =
-        m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).virtualRegister().offset();
+        m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
         
         ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
         
         int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
-        int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).virtualRegister().offset();
+        int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
         
         LoadVarargsData* data = m_graph.m_loadVarargsData.add();
         data->start = VirtualRegister(remappedArgumentStart + 1);
@@ -1949,24 +1908,11 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister
         data->offset = argumentsOffset;
         data->limit = maxArgumentCountIncludingThis;
         data->mandatoryMinimum = mandatoryMinimum;
-
-        if (callOp == TailCallForwardVarargs) {
-            Node* argumentCount;
-            if (!inlineCallFrame())
-                argumentCount = addToGraph(GetArgumentCountIncludingThis);
-            else if (inlineCallFrame()->isVarargs())
-                argumentCount = getDirect(remapOperand(inlineCallFrame(), CallFrameSlot::argumentCountIncludingThis));
-            else 
-                argumentCount = addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(inlineCallFrame()->argumentCountIncludingThis))));
-            addToGraph(ForwardVarargs, OpInfo(data), argumentCount);
-        } else {
-            Node* arguments = get(argumentsArgument);
-            auto argCountTmp = m_inlineStackTop->remapOperand(Operand::tmp(OpCallVarargs::argCountIncludingThis));
-            setDirect(argCountTmp, addToGraph(VarargsLength, OpInfo(data), arguments));
-            progressToNextCheckpoint();
-
-            addToGraph(LoadVarargs, OpInfo(data), getLocalOrTmp(argCountTmp), arguments);
-        }
+        
+        if (callOp == TailCallForwardVarargs)
+            addToGraph(ForwardVarargs, OpInfo(data));
+        else
+            addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
         
         // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
         // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
@@ -1978,14 +1924,14 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister
         // SSA. Fortunately, we also have other reasons for not inserting control flow
         // before SSA.
         
-        VariableAccessData* countVariable = newVariableAccessData(data->count);
+        VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCountIncludingThis));
         // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
         // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
         // mostly just a formality.
         countVariable->predict(SpecInt32Only);
         countVariable->mergeIsProfitableToUnbox(true);
         Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
-        m_currentBlock->variablesAtTail.setOperand(countVariable->operand(), setArgumentCount);
+        m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
         
         set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
         unsigned numSetArguments = 0;
@@ -2009,7 +1955,7 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister
             }
             
             Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
-            m_currentBlock->variablesAtTail.setOperand(variable->operand(), setArgument);
+            m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
             ++numSetArguments;
         }
     };
@@ -2109,7 +2055,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
     // yet.
     VERBOSE_LOG("Register offset: ", registerOffset);
     VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
-    calleeReg = m_inlineStackTop->remapOperand(calleeReg).virtualRegister();
+    calleeReg = m_inlineStackTop->remapOperand(calleeReg);
     VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
 
@@ -5212,7 +5158,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
         case op_new_regexp: {
             auto bytecode = currentInstruction->as<OpNewRegexp>();
             ASSERT(bytecode.m_regexp.isConstant());
-            FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_regexp));
+            FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_regexp.offset()));
             set(bytecode.m_dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));
             NEXT_OPCODE(op_new_regexp);
         }
@@ -6293,7 +6239,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
 
             RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution));
 
-            ValueProfileAndVirtualRegisterBuffer* buffer = bytecode.metadata(codeBlock).m_buffer;
+            ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).m_buffer;
 
             if (!buffer) {
                 NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread.
@@ -6310,7 +6256,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             {
                 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
 
-                buffer->forEach([&] (ValueProfileAndVirtualRegister& profile) {
+                buffer->forEach([&] (ValueProfileAndOperand& profile) {
                     VirtualRegister operand(profile.m_operand);
                     SpeculatedType prediction = profile.computeUpdatedPrediction(locker);
                     if (operand.isLocal())
@@ -6338,14 +6284,14 @@ void ByteCodeParser::parseBlock(unsigned limit)
             m_exitOK = false; 
 
             unsigned numberOfLocals = 0;
-            buffer->forEach([&] (ValueProfileAndVirtualRegister& profile) {
+            buffer->forEach([&] (ValueProfileAndOperand& profile) {
                 VirtualRegister operand(profile.m_operand);
                 if (operand.isArgument())
                     return;
                 ASSERT(operand.isLocal());
                 Node* value = addToGraph(ExtractCatchLocal, OpInfo(numberOfLocals), OpInfo(localPredictions[numberOfLocals]));
                 ++numberOfLocals;
-                addToGraph(MovHint, OpInfo(operand), value);
+                addToGraph(MovHint, OpInfo(profile.m_operand), value);
                 localsToSet.uncheckedAppend(std::make_pair(operand, value));
             });
             if (numberOfLocals)
@@ -6473,7 +6419,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             
         case op_jneq_ptr: {
             auto bytecode = currentInstruction->as<OpJneqPtr>();
-            FrozenValue* frozenPointer = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_specialPointer));
+            FrozenValue* frozenPointer = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_specialPointer.offset()));
             unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
             Node* child = get(bytecode.m_value);
             if (bytecode.metadata(codeBlock).m_hasJumped) {
@@ -6941,8 +6887,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
         case op_create_lexical_environment: {
             auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>();
             ASSERT(bytecode.m_symbolTable.isConstant() && bytecode.m_initialValue.isConstant());
-            FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_symbolTable));
-            FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_initialValue));
+            FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_symbolTable.offset()));
+            FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_initialValue.offset()));
             Node* scope = get(bytecode.m_scope);
             Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope);
             set(bytecode.m_dst, lexicalEnvironment);
@@ -6972,7 +6918,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             // loads from the scope register later, as that would prevent the DFG from tracking the
             // bytecode-level liveness of the scope register.
             auto bytecode = currentInstruction->as<OpGetScope>();
-            Node* callee = get(CallFrameSlot::callee);
+            Node* callee = get(VirtualRegister(CallFrameSlot::callee));
             Node* result;
             if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm))
                 result = weakJSConstant(function->scope());
@@ -7397,10 +7343,8 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
         // The owner is the machine code block, and we already have a barrier on that when the
         // plan finishes.
         m_inlineCallFrame->baselineCodeBlock.setWithoutWriteBarrier(codeBlock->baselineVersion());
-        m_inlineCallFrame->setTmpOffset((m_caller->m_inlineCallFrame ? m_caller->m_inlineCallFrame->tmpOffset : 0) + m_caller->m_codeBlock->numTmps());
         m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - CallFrame::headerSizeInRegisters);
         m_inlineCallFrame->argumentCountIncludingThis = argumentCountIncludingThis;
-        RELEASE_ASSERT(m_inlineCallFrame->argumentCountIncludingThis == argumentCountIncludingThis);
         if (callee) {
             m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
             m_inlineCallFrame->isClosureCall = false;
@@ -7762,7 +7706,7 @@ void ByteCodeParser::parse()
                     Node* node = block->at(nodeIndex);
 
                     if (node->hasVariableAccessData(m_graph))
-                        mapping.operand(node->operand()) = node->variableAccessData();
+                        mapping.operand(node->local()) = node->variableAccessData();
 
                     if (node->op() != ForceOSRExit)
                         continue;
@@ -7782,24 +7726,24 @@ void ByteCodeParser::parse()
                             RELEASE_ASSERT(successor->predecessors.isEmpty());
                     }
 
-                    auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, Operand operand) {
+                    auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, VirtualRegister operand) {
                         VariableAccessData* variable = mapping.operand(operand);
                         if (!variable) {
                             variable = newVariableAccessData(operand);
                             mapping.operand(operand) = variable;
                         }
 
-                        Operand argument = unmapOperand(inlineCallFrame, operand);
+                        VirtualRegister argument = operand - (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
                         if (argument.isArgument() && !argument.isHeader()) {
                             const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame);
                             arguments[argument.toArgument()]->addVariable(variable);
                         }
                         insertionSet.insertNode(nodeIndex, SpecNone, op, origin, OpInfo(variable));
                     };
-                    auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, Operand operand) {
+                    auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
                         insertLivenessPreservingOp(inlineCallFrame, Flush, operand);
                     };
-                    auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, Operand operand) {
+                    auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
                         insertLivenessPreservingOp(inlineCallFrame, PhantomLocal, operand);
                     };
                     flushForTerminalImpl(origin.semantic, addFlushDirect, addPhantomLocalDirect);
@@ -7864,7 +7808,6 @@ void ByteCodeParser::parse()
         ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
     }
 
-    m_graph.m_tmps = m_numTmps;
     m_graph.m_localVars = m_numLocals;
     m_graph.m_parameterSlots = m_parameterSlots;
 }
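
The progressToNextCheckpoint/m_numTmps machinery removed throughout this file hinged on a BytecodeIndex that carries a checkpoint sub-index next to the instruction offset, so a single op (a varargs call split into VarargsLength plus LoadVarargs, for instance) could expose several exit points. A rough sketch of that shape, assuming a simplified encoding rather than JSC's actual bit layout:

    #include <cstdint>

    struct BytecodeIndexSketch {
        uint32_t offset = 0;     // which instruction in the bytecode stream
        uint8_t checkpoint = 0;  // which side-effect checkpoint within that instruction

        BytecodeIndexSketch withNextCheckpoint() const
        {
            return { offset, static_cast<uint8_t>(checkpoint + 1) };
        }
    };
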
index 4bdb7ba..a240149 100644
@@ -170,22 +170,22 @@ private:
         bool changed = false;
         const Operands<Optional<JSValue>>& mustHandleValues = m_graph.m_plan.mustHandleValues();
         for (size_t i = mustHandleValues.size(); i--;) {
-            Operand operand = mustHandleValues.operandForIndex(i);
+            int operand = mustHandleValues.operandForIndex(i);
             Optional<JSValue> value = mustHandleValues[i];
             if (!value) {
                 if (m_verbose)
-                    dataLog("   Not live in bytecode: ", operand, "\n");
+                    dataLog("   Not live in bytecode: ", VirtualRegister(operand), "\n");
                 continue;
             }
             Node* node = block->variablesAtHead.operand(operand);
             if (!node) {
                 if (m_verbose)
-                    dataLog("   Not live: ", operand, "\n");
+                    dataLog("   Not live: ", VirtualRegister(operand), "\n");
                 continue;
             }
             
             if (m_verbose)
-                dataLog("   Widening ", operand, " with ", value.value(), "\n");
+                dataLog("   Widening ", VirtualRegister(operand), " with ", value.value(), "\n");
             
             AbstractValue& target = block->valuesAtHead.operand(operand);
             changed |= target.mergeOSREntryValue(m_graph, value.value(), node->variableAccessData(), node);
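
The loop above widens the abstract state at the block head with each concrete must-handle value; CFA then reruns until nothing changes, which is why mergeOSREntryValue reports whether it grew the value. A minimal sketch of that monotone merge, with a bitset of speculated types standing in for JSC's full AbstractValue:

    #include <cstdint>

    struct AbstractValueSketch {
        uint64_t speculatedTypes = 0;  // bitset of types; empty means "bottom"

        // Returns true if the merge widened the value, i.e. the fixpoint must rerun.
        bool mergeSpeculation(uint64_t typeFromOSRValue)
        {
            uint64_t old = speculatedTypes;
            speculatedTypes |= typeFromOSRValue;  // widening only ever grows the set
            return speculatedTypes != old;
        }
    };
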
index 873fa04..d98fd4a 100644
@@ -54,9 +54,8 @@ public:
         m_graph.clearReplacements();
         canonicalizeLocalsInBlocks();
         specialCaseArguments();
-        propagatePhis<OperandKind::Local>();
-        propagatePhis<OperandKind::Argument>();
-        propagatePhis<OperandKind::Tmp>();
+        propagatePhis<LocalOperand>();
+        propagatePhis<ArgumentOperand>();
         computeIsFlushed();
         
         m_graph.m_form = ThreadedCPS;
@@ -212,20 +211,10 @@ private:
     void canonicalizeGetLocal(Node* node)
     {
         VariableAccessData* variable = node->variableAccessData();
-        switch (variable->operand().kind()) {
-        case OperandKind::Argument: {
-            canonicalizeGetLocalFor<OperandKind::Argument>(node, variable, variable->operand().toArgument());
-            break;
-        }
-        case OperandKind::Local: {
-            canonicalizeGetLocalFor<OperandKind::Local>(node, variable, variable->operand().toLocal());
-            break;
-        }
-        case OperandKind::Tmp: {
-            canonicalizeGetLocalFor<OperandKind::Tmp>(node, variable, variable->operand().value());
-            break;
-        }
-        }
+        if (variable->local().isArgument())
+            canonicalizeGetLocalFor<ArgumentOperand>(node, variable, variable->local().toArgument());
+        else
+            canonicalizeGetLocalFor<LocalOperand>(node, variable, variable->local().toLocal());
     }
     
     template<NodeType nodeType, OperandKind operandKind>
@@ -240,7 +229,6 @@ private:
             case Flush:
             case PhantomLocal:
             case GetLocal:
-                ASSERT(otherNode->child1().node());
                 otherNode = otherNode->child1().node();
                 break;
             default:
@@ -282,25 +270,15 @@ private:
     void canonicalizeFlushOrPhantomLocal(Node* node)
     {
         VariableAccessData* variable = node->variableAccessData();
-        switch (variable->operand().kind()) {
-        case OperandKind::Argument: {
-            canonicalizeFlushOrPhantomLocalFor<nodeType, OperandKind::Argument>(node, variable, variable->operand().toArgument());
-            break;
-        }
-        case OperandKind::Local: {
-            canonicalizeFlushOrPhantomLocalFor<nodeType, OperandKind::Local>(node, variable, variable->operand().toLocal());
-            break;
-        }
-        case OperandKind::Tmp: {
-            canonicalizeFlushOrPhantomLocalFor<nodeType, OperandKind::Tmp>(node, variable, variable->operand().value());
-            break;
-        }
-        }
+        if (variable->local().isArgument())
+            canonicalizeFlushOrPhantomLocalFor<nodeType, ArgumentOperand>(node, variable, variable->local().toArgument());
+        else
+            canonicalizeFlushOrPhantomLocalFor<nodeType, LocalOperand>(node, variable, variable->local().toLocal());
     }
     
     void canonicalizeSet(Node* node)
     {
-        m_block->variablesAtTail.setOperand(node->operand(), node);
+        m_block->variablesAtTail.setOperand(node->local(), node);
     }
     
     void canonicalizeLocalsInBlock()
@@ -309,9 +287,8 @@ private:
             return;
         ASSERT(m_block->isReachable);
         
-        clearVariables<OperandKind::Argument>();
-        clearVariables<OperandKind::Local>();
-        clearVariables<OperandKind::Tmp>();
+        clearVariables<ArgumentOperand>();
+        clearVariables<LocalOperand>();
         
         // Assumes that all phi references have been removed. Assumes that things that
         // should be live have a non-zero ref count, but doesn't assume that the ref
@@ -411,7 +388,7 @@ private:
     template<OperandKind operandKind>
     void propagatePhis()
     {
-        Vector<PhiStackEntry, 128>& phiStack = phiStackFor<operandKind>();
+        Vector<PhiStackEntry, 128>& phiStack = operandKind == ArgumentOperand ? m_argumentPhiStack : m_localPhiStack;
         
         // Ensure that attempts to use this fail instantly.
         m_block = 0;
@@ -489,12 +466,9 @@ private:
     template<OperandKind operandKind>
     Vector<PhiStackEntry, 128>& phiStackFor()
     {
-        switch (operandKind) {
-        case OperandKind::Argument: return m_argumentPhiStack;
-        case OperandKind::Local: return m_localPhiStack;
-        case OperandKind::Tmp: return m_tmpPhiStack;
-        }
-        RELEASE_ASSERT_NOT_REACHED();
+        if (operandKind == ArgumentOperand)
+            return m_argumentPhiStack;
+        return m_localPhiStack;
     }
     
     void computeIsFlushed()
@@ -547,7 +521,6 @@ private:
     BasicBlock* m_block;
     Vector<PhiStackEntry, 128> m_argumentPhiStack;
     Vector<PhiStackEntry, 128> m_localPhiStack;
-    Vector<PhiStackEntry, 128> m_tmpPhiStack;
     Vector<Node*, 128> m_flushedLocalOpWorklist;
 };
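
phiStackFor selects at compile time which phi worklist an operand kind uses; the reverted code dispatched over three stacks (argument, local, tmp), the restored code over two. A sketch of the same dispatch written with if constexpr, assuming simplified types:

    #include <vector>

    enum class OperandKindSketch { Argument, Local };
    struct PhiStackEntrySketch { };

    template<OperandKindSketch kind>
    std::vector<PhiStackEntrySketch>& phiStackForSketch(
        std::vector<PhiStackEntrySketch>& argumentStack,
        std::vector<PhiStackEntrySketch>& localStack)
    {
        if constexpr (kind == OperandKindSketch::Argument)
            return argumentStack;
        else
            return localStack;  // the checkpoints patch added a third, tmp, stack here
    }
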
 
index 4cce174..2381c6b 100644
@@ -147,7 +147,8 @@ public:
             break;
         case Stack: {
             ASSERT(!heap.payload().isTop());
-            m_abstractHeapStackMap.remove(heap.payload().value());
+            ASSERT(heap.payload().value() == heap.payload().value32());
+            m_abstractHeapStackMap.remove(heap.payload().value32());
             if (clobberConservatively)
                 m_fallbackStackMap.clear();
             else
@@ -171,7 +172,7 @@ public:
                 if (!clobberConservatively)
                     break;
                 if (pair.key.heap().kind() == Stack) {
-                    auto iterator = m_abstractHeapStackMap.find(pair.key.heap().payload().value());
+                    auto iterator = m_abstractHeapStackMap.find(pair.key.heap().payload().value32());
                     if (iterator != m_abstractHeapStackMap.end() && iterator->value->key == pair.key)
                         return false;
                     return true;
@@ -225,7 +226,8 @@ private:
             AbstractHeap abstractHeap = location.heap();
             if (abstractHeap.payload().isTop())
                 return add(m_fallbackStackMap, location, node);
-            auto addResult = m_abstractHeapStackMap.add(abstractHeap.payload().value(), nullptr);
+            ASSERT(abstractHeap.payload().value() == abstractHeap.payload().value32());
+            auto addResult = m_abstractHeapStackMap.add(abstractHeap.payload().value32(), nullptr);
             if (addResult.isNewEntry) {
                 addResult.iterator->value.reset(new ImpureDataSlot {location, node, 0});
                 return nullptr;
@@ -247,7 +249,8 @@ private:
         case SideState:
             RELEASE_ASSERT_NOT_REACHED();
         case Stack: {
-            auto iterator = m_abstractHeapStackMap.find(location.heap().payload().value());
+            ASSERT(location.heap().payload().value() == location.heap().payload().value32());
+            auto iterator = m_abstractHeapStackMap.find(location.heap().payload().value32());
             if (iterator != m_abstractHeapStackMap.end()
                 && iterator->value->key == location)
                 return iterator->value->value;
@@ -295,7 +298,7 @@ private:
     // a duplicate in the past and now only live in m_fallbackStackMap.
     //
     // Obviously, TOP always goes into m_fallbackStackMap since it does not have a unique value.
-    HashMap<int64_t, std::unique_ptr<ImpureDataSlot>, DefaultHash<int64_t>::Hash, WTF::SignedWithZeroKeyHashTraits<int64_t>> m_abstractHeapStackMap;
+    HashMap<int32_t, std::unique_ptr<ImpureDataSlot>, DefaultHash<int32_t>::Hash, WTF::SignedWithZeroKeyHashTraits<int32_t>> m_abstractHeapStackMap;
     Map m_fallbackStackMap;
 
     Map m_heapMap;
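
The value32() calls restored above narrow the stack map's key from int64_t to int32_t; the paired asserts are what make that safe, checking that every stack payload round-trips through 32 bits before it is used as a key. A sketch of the guard:

    #include <cassert>
    #include <cstdint>

    inline int32_t narrowStackPayloadSketch(int64_t payload)
    {
        // Stack offsets are small; assert the payload fits before keying the map on it.
        assert(payload == static_cast<int64_t>(static_cast<int32_t>(payload)));
        return static_cast<int32_t>(payload);
    }
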
index aa222b8..2477af0 100644
@@ -307,8 +307,6 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, const I
     case llint_native_construct_trampoline:
     case llint_internal_function_call_trampoline:
     case llint_internal_function_construct_trampoline:
-    case checkpoint_osr_exit_from_inlined_call_trampoline:
-    case checkpoint_osr_exit_trampoline:
     case handleUncaughtException:
     case op_call_return_location:
     case op_construct_return_location:
index 362c478..bfb1d14 100644
@@ -111,9 +111,9 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
     // scan would read. That's what this does.
     for (InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
         if (inlineCallFrame->isClosureCall)
-            read(AbstractHeap(Stack, VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
+            read(AbstractHeap(Stack, inlineCallFrame->stackOffset + CallFrameSlot::callee));
         if (inlineCallFrame->isVarargs())
-            read(AbstractHeap(Stack, VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis)));
+            read(AbstractHeap(Stack, inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis));
     }
 
     // We don't want to specifically account which nodes can read from the scope
@@ -441,7 +441,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
         return;
 
     case KillStack:
-        write(AbstractHeap(Stack, node->unlinkedOperand()));
+        write(AbstractHeap(Stack, node->unlinkedLocal()));
         return;
          
     case MovHint:
@@ -497,7 +497,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
         return;
 
     case Flush:
-        read(AbstractHeap(Stack, node->operand()));
+        read(AbstractHeap(Stack, node->local()));
         write(SideState);
         return;
 
@@ -765,12 +765,12 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
         return;
         
     case GetCallee:
-        read(AbstractHeap(Stack, VirtualRegister(CallFrameSlot::callee)));
-        def(HeapLocation(StackLoc, AbstractHeap(Stack, VirtualRegister(CallFrameSlot::callee))), LazyNode(node));
+        read(AbstractHeap(Stack, CallFrameSlot::callee));
+        def(HeapLocation(StackLoc, AbstractHeap(Stack, CallFrameSlot::callee)), LazyNode(node));
         return;
 
     case SetCallee:
-        write(AbstractHeap(Stack, VirtualRegister(CallFrameSlot::callee)));
+        write(AbstractHeap(Stack, CallFrameSlot::callee));
         return;
         
     case GetArgumentCountIncludingThis: {
@@ -781,7 +781,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
     }
 
     case SetArgumentCountIncludingThis:
-        write(AbstractHeap(Stack, VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));
+        write(AbstractHeap(Stack, CallFrameSlot::argumentCountIncludingThis));
         return;
 
     case GetRestLength:
@@ -789,42 +789,36 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
         return;
         
     case GetLocal:
-        read(AbstractHeap(Stack, node->operand()));
-        def(HeapLocation(StackLoc, AbstractHeap(Stack, node->operand())), LazyNode(node));
+        read(AbstractHeap(Stack, node->local()));
+        def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node));
         return;
         
     case SetLocal:
-        write(AbstractHeap(Stack, node->operand()));
-        def(HeapLocation(StackLoc, AbstractHeap(Stack, node->operand())), LazyNode(node->child1().node()));
+        write(AbstractHeap(Stack, node->local()));
+        def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node->child1().node()));
         return;
         
     case GetStack: {
-        AbstractHeap heap(Stack, node->stackAccessData()->operand);
+        AbstractHeap heap(Stack, node->stackAccessData()->local);
         read(heap);
         def(HeapLocation(StackLoc, heap), LazyNode(node));
         return;
     }
         
     case PutStack: {
-        AbstractHeap heap(Stack, node->stackAccessData()->operand);
+        AbstractHeap heap(Stack, node->stackAccessData()->local);
         write(heap);
         def(HeapLocation(StackLoc, heap), LazyNode(node->child1().node()));
         return;
     }
         
-    case VarargsLength: {
-        read(World);
-        write(Heap);
-        return;  
-    }
-
     case LoadVarargs: {
         read(World);
         write(Heap);
         LoadVarargsData* data = node->loadVarargsData();
-        write(AbstractHeap(Stack, data->count));
+        write(AbstractHeap(Stack, data->count.offset()));
         for (unsigned i = data->limit; i--;)
-            write(AbstractHeap(Stack, data->start + static_cast<int>(i)));
+            write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i)));
         return;
     }
         
@@ -833,9 +827,9 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
         read(Stack);
         
         LoadVarargsData* data = node->loadVarargsData();
-        write(AbstractHeap(Stack, data->count));
+        write(AbstractHeap(Stack, data->count.offset()));
         for (unsigned i = data->limit; i--;)
-            write(AbstractHeap(Stack, data->start + static_cast<int>(i)));
+            write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i)));
         return;
     }
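
clobberize reports a node's effects by invoking read/write/def callbacks over abstract heaps rather than returning a side table; in the hunks above, LoadVarargs and ForwardVarargs write the count slot plus every argument slot up to their limit. A sketch of that reporting shape, keeping only the per-stack-slot writes and using plain int offsets in place of AbstractHeap:

    // The real clobberize for LoadVarargs also does read(World) and write(Heap);
    // this sketch shows only the stack-slot writes visible in the hunk.
    template<typename WriteFunctor>
    void clobberizeLoadVarargsSketch(int countSlot, int startSlot, unsigned limit,
                                     const WriteFunctor& write)
    {
        write(countSlot);                             // the stored argument count
        for (unsigned i = limit; i--;)
            write(startSlot + static_cast<int>(i));   // each forwarded argument slot
    }
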
         
index 15746c0..95866a7 100644
@@ -39,7 +39,7 @@ static void addBytecodeLiveness(Graph& graph, AvailabilityMap& availabilityMap,
 {
     graph.forAllLiveInBytecode(
         node->origin.forExit,
-        [&] (Operand reg) {
+        [&] (VirtualRegister reg) {
             availabilityMap.closeStartingWithLocal(
                 reg,
                 [&] (Node* node) -> bool {
index 0712ccb..b4fc8d4 100644
@@ -56,7 +56,7 @@ CallSiteIndex CommonData::addCodeOrigin(CodeOrigin codeOrigin)
         codeOrigins.append(codeOrigin);
     unsigned index = codeOrigins.size() - 1;
     ASSERT(codeOrigins[index] == codeOrigin);
-    return CallSiteIndex(index);
+    return CallSiteIndex(BytecodeIndex(index));
 }
 
 CallSiteIndex CommonData::addUniqueCallSiteIndex(CodeOrigin codeOrigin)
@@ -64,13 +64,13 @@ CallSiteIndex CommonData::addUniqueCallSiteIndex(CodeOrigin codeOrigin)
     codeOrigins.append(codeOrigin);
     unsigned index = codeOrigins.size() - 1;
     ASSERT(codeOrigins[index] == codeOrigin);
-    return CallSiteIndex(index);
+    return CallSiteIndex(BytecodeIndex(index));
 }
 
 CallSiteIndex CommonData::lastCallSite() const
 {
     RELEASE_ASSERT(codeOrigins.size());
-    return CallSiteIndex(codeOrigins.size() - 1);
+    return CallSiteIndex(BytecodeIndex(codeOrigins.size() - 1));
 }
 
 DisposableCallSiteIndex CommonData::addDisposableCallSiteIndex(CodeOrigin codeOrigin)
index 6d447d3..d37d298 100644
@@ -383,7 +383,7 @@ private:
                 // GetMyArgumentByVal in such statically-out-of-bounds accesses; we just lose CFA unless
                 // GCSE removes the access entirely.
                 if (inlineCallFrame) {
-                    if (index >= static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1))
+                    if (index >= inlineCallFrame->argumentCountIncludingThis - 1)
                         break;
                 } else {
                     if (index >= m_state.numberOfArguments() - 1)
@@ -404,7 +404,7 @@ private:
                         virtualRegisterForArgument(index + 1), FlushedJSValue);
                 }
                 
-                if (inlineCallFrame && !inlineCallFrame->isVarargs() && index < static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1)) {
+                if (inlineCallFrame && !inlineCallFrame->isVarargs() && index < inlineCallFrame->argumentCountIncludingThis - 1) {
                     node->convertToGetStack(data);
                     eliminated = true;
                     break;
index 1f30dff..a583993 100644
@@ -297,7 +297,6 @@ bool doesGC(Graph& graph, Node* node)
     case InByVal:
     case InstanceOf:
     case InstanceOfCustom:
-    case VarargsLength:
     case LoadVarargs:
     case NumberToStringWithRadix:
     case NumberToStringWithValidRadixConstant:
index 45fbc43..17b3707 100644
@@ -90,6 +90,7 @@ static CompilationResult compileImpl(
     // Make sure that any stubs that the DFG is going to use are initialized. We want to
     // make sure that all JIT code generation does finalization on the main thread.
     vm.getCTIStub(arityFixupGenerator);
+    vm.getCTIStub(osrExitThunkGenerator);
     vm.getCTIStub(osrExitGenerationThunkGenerator);
     vm.getCTIStub(throwExceptionFromCallSlowPathGenerator);
     vm.getCTIStub(linkCallThunkGenerator);
index 922d35b..374d526 100644
@@ -2439,12 +2439,6 @@ private:
             break;
         }
 
-        case ForwardVarargs:
-        case LoadVarargs: {
-            fixEdge<KnownInt32Use>(node->child1());
-            break;
-        }
-
 #if ASSERT_ENABLED
         // Have these no-op cases here to ensure that nobody forgets to add handlers for new opcodes.
         case SetArgumentDefinitely:
@@ -2477,7 +2471,8 @@ private:
         case ConstructForwardVarargs:
         case TailCallForwardVarargs:
         case TailCallForwardVarargsInlinedCaller:
-        case VarargsLength:
+        case LoadVarargs:
+        case ForwardVarargs:
         case ProfileControlFlow:
         case NewObject:
         case NewPromise:
index bf4350c..0bf1c3f 100644
 
 namespace JSC { namespace DFG {
 
-namespace ForAllKillsInternal {
-constexpr bool verbose = false;
-}
-
 // Utilities for finding the last points where a node is live in DFG SSA. This accounts for liveness due
 // to OSR exit. This is usually used for enumerating over all of the program points where a node is live,
 // by exploring all blocks where the node is live at tail and then exploring all program points where the
@@ -57,13 +53,13 @@ void forAllKilledOperands(Graph& graph, Node* nodeBefore, Node* nodeAfter, const
     
     CodeOrigin after = nodeAfter->origin.forExit;
     
-    Operand alreadyNoted;
+    VirtualRegister alreadyNoted;
     // If we MovHint something that is live at the time, then we kill the old value.
     if (nodeAfter->containsMovHint()) {
-        Operand operand = nodeAfter->unlinkedOperand();
-        if (graph.isLiveInBytecode(operand, after)) {
-            functor(operand);
-            alreadyNoted = operand;
+        VirtualRegister reg = nodeAfter->unlinkedLocal();
+        if (graph.isLiveInBytecode(reg, after)) {
+            functor(reg);
+            alreadyNoted = reg;
         }
     }
     
@@ -74,48 +70,29 @@ void forAllKilledOperands(Graph& graph, Node* nodeBefore, Node* nodeAfter, const
     // other loop, below.
     auto* beforeInlineCallFrame = before.inlineCallFrame();
     if (beforeInlineCallFrame == after.inlineCallFrame()) {
+        int stackOffset = beforeInlineCallFrame ? beforeInlineCallFrame->stackOffset : 0;
         CodeBlock* codeBlock = graph.baselineCodeBlockFor(beforeInlineCallFrame);
-        if (after.bytecodeIndex().checkpoint()) {
-            ASSERT(before.bytecodeIndex().checkpoint() != after.bytecodeIndex().checkpoint());
-            ASSERT_WITH_MESSAGE(before.bytecodeIndex().offset() == after.bytecodeIndex().offset(), "When the DFG does code motion it should change the forExit origin to match the surrounding bytecodes.");
-
-            auto liveBefore = tmpLivenessForCheckpoint(*codeBlock, before.bytecodeIndex());
-            auto liveAfter = tmpLivenessForCheckpoint(*codeBlock, after.bytecodeIndex());
-            liveAfter.invert();
-            liveBefore.filter(liveAfter);
-
-            liveBefore.forEachSetBit([&] (size_t tmp) {
-                functor(remapOperand(beforeInlineCallFrame, Operand::tmp(tmp)));
-            });
-            // No locals can die at a checkpoint.
-            return;
-        }
-
         FullBytecodeLiveness& fullLiveness = graph.livenessFor(codeBlock);
         const FastBitVector& liveBefore = fullLiveness.getLiveness(before.bytecodeIndex(), LivenessCalculationPoint::BeforeUse);
         const FastBitVector& liveAfter = fullLiveness.getLiveness(after.bytecodeIndex(), LivenessCalculationPoint::BeforeUse);
         
         (liveBefore & ~liveAfter).forEachSetBit(
             [&] (size_t relativeLocal) {
-                functor(remapOperand(beforeInlineCallFrame, virtualRegisterForLocal(relativeLocal)));
+                functor(virtualRegisterForLocal(relativeLocal) + stackOffset);
             });
         return;
     }
-
-    ASSERT_WITH_MESSAGE(!after.bytecodeIndex().checkpoint(), "Transitioning across a checkpoint but before and after don't share an inlineCallFrame.");
-
+    
     // Detect kills the super conservative way: it is killed if it was live before and dead after.
-    BitVector liveAfter = graph.localsAndTmpsLiveInBytecode(after);
-    unsigned numLocals = graph.block(0)->variablesAtHead.numberOfLocals();
-    graph.forAllLocalsAndTmpsLiveInBytecode(
+    BitVector liveAfter = graph.localsLiveInBytecode(after);
+    graph.forAllLocalsLiveInBytecode(
         before,
-        [&] (Operand operand) {
-            if (operand == alreadyNoted)
+        [&] (VirtualRegister reg) {
+            if (reg == alreadyNoted)
                 return;
-            unsigned offset = operand.isTmp() ? numLocals + operand.value() : operand.toLocal();
-            if (liveAfter.get(offset))
+            if (liveAfter.get(reg.toLocal()))
                 return;
-            functor(operand);
+            functor(reg);
         });
 }
     
@@ -128,8 +105,7 @@ void forAllKilledNodesAtNodeIndex(
     static constexpr unsigned seenInClosureFlag = 1;
     static constexpr unsigned calledFunctorFlag = 2;
     HashMap<Node*, unsigned> flags;
-
-    ASSERT(nodeIndex);
+    
     Node* node = block->at(nodeIndex);
     
     graph.doToChildren(
@@ -144,13 +120,15 @@ void forAllKilledNodesAtNodeIndex(
             }
         });
 
-    Node* before = block->at(nodeIndex - 1);
+    Node* before = nullptr;
+    if (nodeIndex)
+        before = block->at(nodeIndex - 1);
 
     forAllKilledOperands(
         graph, before, node,
-        [&] (Operand operand) {
+        [&] (VirtualRegister reg) {
             availabilityMap.closeStartingWithLocal(
-                operand,
+                reg,
                 [&] (Node* node) -> bool {
                     return flags.get(node) & seenInClosureFlag;
                 },
@@ -181,7 +159,6 @@ void forAllKillsInBlock(
     // Start at the second node, because the functor is expected to only inspect nodes from the start of
     // the block up to nodeIndex (exclusive), so if nodeIndex is zero then the functor has nothing to do.
     for (unsigned nodeIndex = 1; nodeIndex < block->size(); ++nodeIndex) {
-        dataLogLnIf(ForAllKillsInternal::verbose, "local availability at index: ", nodeIndex, " ", localAvailability.m_availability);
         forAllKilledNodesAtNodeIndex(
             graph, localAvailability.m_availability, block, nodeIndex,
             [&] (Node* node) {
index b9f8572..e51355c 100644
@@ -308,8 +308,8 @@ void Graph::dump(PrintStream& out, const char* prefixStr, Node* node, DumpContex
     if (node->hasVariableAccessData(*this)) {
         VariableAccessData* variableAccessData = node->tryGetVariableAccessData();
         if (variableAccessData) {
-            Operand operand = variableAccessData->operand();
-            out.print(comma, variableAccessData->operand(), "(", VariableAccessDataDump(*this, variableAccessData), ")");
+            VirtualRegister operand = variableAccessData->local();
+            out.print(comma, variableAccessData->local(), "(", VariableAccessDataDump(*this, variableAccessData), ")");
             operand = variableAccessData->machineLocal();
             if (operand.isValid())
                 out.print(comma, "machine:", operand);
@@ -317,13 +317,13 @@ void Graph::dump(PrintStream& out, const char* prefixStr, Node* node, DumpContex
     }
     if (node->hasStackAccessData()) {
         StackAccessData* data = node->stackAccessData();
-        out.print(comma, data->operand);
+        out.print(comma, data->local);
         if (data->machineLocal.isValid())
             out.print(comma, "machine:", data->machineLocal);
         out.print(comma, data->format);
     }
-    if (node->hasUnlinkedOperand())
-        out.print(comma, node->unlinkedOperand());
+    if (node->hasUnlinkedLocal()) 
+        out.print(comma, node->unlinkedLocal());
     if (node->hasVectorLengthHint())
         out.print(comma, "vectorLengthHint = ", node->vectorLengthHint());
     if (node->hasLazyJSValue())
@@ -515,10 +515,9 @@ void Graph::dumpBlockHeader(PrintStream& out, const char* prefixStr, BasicBlock*
         out.print(prefix, "  Phi Nodes:");
         for (size_t i = 0; i < block->phis.size(); ++i) {
             Node* phiNode = block->phis[i];
-            ASSERT(phiNode->op() == Phi);
             if (!phiNode->shouldGenerate() && phiNodeDumpMode == DumpLivePhisOnly)
                 continue;
-            out.print(" @", phiNode->index(), "<", phiNode->operand(), ",", phiNode->refCount(), ">->(");
+            out.print(" @", phiNode->index(), "<", phiNode->local(), ",", phiNode->refCount(), ">->(");
             if (phiNode->child1()) {
                 out.print("@", phiNode->child1()->index());
                 if (phiNode->child2()) {
@@ -885,7 +884,7 @@ void Graph::substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, Va
         bool shouldContinue = true;
         switch (node->op()) {
         case SetLocal: {
-            if (node->operand() == variableAccessData->operand())
+            if (node->local() == variableAccessData->local())
                 shouldContinue = false;
             break;
         }
@@ -894,9 +893,9 @@ void Graph::substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, Va
             if (node->variableAccessData() != variableAccessData)
                 continue;
             substitute(block, indexInBlock, node, newGetLocal);
-            Node* oldTailNode = block.variablesAtTail.operand(variableAccessData->operand());
+            Node* oldTailNode = block.variablesAtTail.operand(variableAccessData->local());
             if (oldTailNode == node)
-                block.variablesAtTail.operand(variableAccessData->operand()) = newGetLocal;
+                block.variablesAtTail.operand(variableAccessData->local()) = newGetLocal;
             shouldContinue = false;
             break;
         }
@@ -1134,56 +1133,36 @@ BytecodeKills& Graph::killsFor(InlineCallFrame* inlineCallFrame)
     return killsFor(baselineCodeBlockFor(inlineCallFrame));
 }
 
-bool Graph::isLiveInBytecode(Operand operand, CodeOrigin codeOrigin)
+bool Graph::isLiveInBytecode(VirtualRegister operand, CodeOrigin codeOrigin)
 {
     static constexpr bool verbose = false;
     
     if (verbose)
         dataLog("Checking of operand is live: ", operand, "\n");
     bool isCallerOrigin = false;
-
     CodeOrigin* codeOriginPtr = &codeOrigin;
-    auto* inlineCallFrame = codeOriginPtr->inlineCallFrame();
-    // We need to handle tail callers because we may decide to exit to
-    // the return bytecode following the tail call.
-    for (; codeOriginPtr; codeOriginPtr = inlineCallFrame ? &inlineCallFrame->directCaller : nullptr) {
-        inlineCallFrame = codeOriginPtr->inlineCallFrame();
-        if (operand.isTmp()) {
-            unsigned tmpOffset = inlineCallFrame ? inlineCallFrame->tmpOffset : 0;
-            unsigned operandIndex = static_cast<unsigned>(operand.value());
-
-            ASSERT(operand.value() >= 0);
-            // This tmp should have belonged to someone we inlined.
-            if (operandIndex > tmpOffset + maxNumCheckpointTmps)
-                return false;
-
-            CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
-            if (!codeBlock->numTmps() || operandIndex < tmpOffset)
-                continue;
-
-            auto bitMap = tmpLivenessForCheckpoint(*codeBlock, codeOriginPtr->bytecodeIndex());
-            return bitMap.get(operandIndex - tmpOffset);
-        }
-
-        VirtualRegister reg = operand.virtualRegister() - codeOriginPtr->stackOffset();
+    for (;;) {
+        VirtualRegister reg = VirtualRegister(
+            operand.offset() - codeOriginPtr->stackOffset());
         
         if (verbose)
             dataLog("reg = ", reg, "\n");
 
-        if (operand.virtualRegister().offset() < codeOriginPtr->stackOffset() + CallFrame::headerSizeInRegisters) {
+        auto* inlineCallFrame = codeOriginPtr->inlineCallFrame();
+        if (operand.offset() < codeOriginPtr->stackOffset() + CallFrame::headerSizeInRegisters) {
             if (reg.isArgument()) {
                 RELEASE_ASSERT(reg.offset() < CallFrame::headerSizeInRegisters);
 
 
                 if (inlineCallFrame->isClosureCall
-                    && reg == CallFrameSlot::callee) {
+                    && reg.offset() == CallFrameSlot::callee) {
                     if (verbose)
                         dataLog("Looks like a callee.\n");
                     return true;
                 }
                 
                 if (inlineCallFrame->isVarargs()
-                    && reg == CallFrameSlot::argumentCountIncludingThis) {
+                    && reg.offset() == CallFrameSlot::argumentCountIncludingThis) {
                     if (verbose)
                         dataLog("Looks like the argument count.\n");
                     return true;
@@ -1197,39 +1176,42 @@ bool Graph::isLiveInBytecode(Operand operand, CodeOrigin codeOrigin)
             CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
             FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
             BytecodeIndex bytecodeIndex = codeOriginPtr->bytecodeIndex();
-            return fullLiveness.virtualRegisterIsLive(reg, bytecodeIndex, appropriateLivenessCalculationPoint(*codeOriginPtr, isCallerOrigin));
+            return fullLiveness.operandIsLive(reg.offset(), bytecodeIndex, appropriateLivenessCalculationPoint(*codeOriginPtr, isCallerOrigin));
+        }
+
+        if (!inlineCallFrame) {
+            if (verbose)
+                dataLog("Ran out of stack, returning true.\n");
+            return true;
         }
 
         // Arguments are always live. This would be redundant if it wasn't for our
         // op_call_varargs inlining.
-        if (inlineCallFrame && reg.isArgument()
+        if (reg.isArgument()
             && static_cast<size_t>(reg.toArgument()) < inlineCallFrame->argumentsWithFixup.size()) {
             if (verbose)
                 dataLog("Argument is live.\n");
             return true;
         }
 
+        // We need to handle tail callers because we may decide to exit to
+        // the return bytecode following the tail call.
+        codeOriginPtr = &inlineCallFrame->directCaller;
         isCallerOrigin = true;
     }
-
-    if (operand.isTmp())
-        return false;
-
-    if (verbose)
-        dataLog("Ran out of stack, returning true.\n");
-    return true;    
+    
+    RELEASE_ASSERT_NOT_REACHED();
 }
 
-BitVector Graph::localsAndTmpsLiveInBytecode(CodeOrigin codeOrigin)
+BitVector Graph::localsLiveInBytecode(CodeOrigin codeOrigin)
 {
     BitVector result;
-    unsigned numLocals = block(0)->variablesAtHead.numberOfLocals();
-    result.ensureSize(numLocals + block(0)->variablesAtHead.numberOfTmps());
-    forAllLocalsAndTmpsLiveInBytecode(
+    result.ensureSize(block(0)->variablesAtHead.numberOfLocals());
+    forAllLocalsLiveInBytecode(
         codeOrigin,
-        [&] (Operand operand) {
-            unsigned offset = operand.isTmp() ? numLocals + operand.value() : operand.toLocal();
-            result.quickSet(offset);
+        [&] (VirtualRegister reg) {
+            ASSERT(reg.isLocal());
+            result.quickSet(reg.toLocal());
         });
     return result;
 }
@@ -1653,8 +1635,8 @@ MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* curren
 
     for (Node* node = operandNode; node;) {
         if (node->accessesStack(*this)) {
-            if (m_form != SSA && node->operand().isArgument()) {
-                int argument = node->operand().toArgument();
+            if (m_form != SSA && node->local().isArgument()) {
+                int argument = node->local().toArgument();
                 Node* argumentNode = m_rootToArguments.find(block(0))->value[argument];
                 // FIXME: We should match SetArgumentDefinitely nodes at other entrypoints as well:
                 // https://bugs.webkit.org/show_bug.cgi?id=175841
@@ -1674,7 +1656,7 @@ MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* curren
                     return MethodOfGettingAValueProfile::fromLazyOperand(
                         profiledBlock,
                         LazyOperandValueProfileKey(
-                            node->origin.semantic.bytecodeIndex(), node->operand()));
+                            node->origin.semantic.bytecodeIndex(), node->local()));
                 }
             }
 
index d433d85..ec4f22b 100644
@@ -840,13 +840,13 @@ public:
     // Quickly query if a single local is live at the given point. This is faster than calling
     // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
     // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
-    bool isLiveInBytecode(Operand, CodeOrigin);
+    bool isLiveInBytecode(VirtualRegister, CodeOrigin);
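+    // Usage sketch (illustrative): query one local at an exit origin:
+    //     bool live = isLiveInBytecode(virtualRegisterForLocal(5), node->origin.forExit);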
     
-    // Quickly get all of the non-argument locals and tmps live at the given point. This doesn't give you
+    // Quickly get all of the non-argument locals live at the given point. This doesn't give you
     // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
     // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
     template<typename Functor>
-    void forAllLocalsAndTmpsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
+    void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
     {
         // Support for not redundantly reporting arguments. Necessary because in case of a varargs
         // call, only the callee knows that arguments are live while in the case of a non-varargs
@@ -881,14 +881,6 @@ public:
                 if (livenessAtBytecode[relativeLocal])
                     functor(reg);
             }
-
-            if (codeOriginPtr->bytecodeIndex().checkpoint()) {
-                ASSERT(codeBlock->numTmps());
-                auto liveTmps = tmpLivenessForCheckpoint(*codeBlock, codeOriginPtr->bytecodeIndex());
-                liveTmps.forEachSetBit([&] (size_t tmp) {
-                    functor(remapOperand(inlineCallFrame, Operand::tmp(tmp)));
-                });
-            }
             
             if (!inlineCallFrame)
                 break;
@@ -912,9 +904,9 @@ public:
         }
     }
     
-    // Get a BitVector of all of the locals and tmps live right now. This is mostly useful if
+    // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
     // you want to compare two sets of live locals from two different CodeOrigins.
-    BitVector localsAndTmpsLiveInBytecode(CodeOrigin);
+    BitVector localsLiveInBytecode(CodeOrigin);
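+    // Usage sketch (illustrative): diff two origins to find locals that died:
+    //     BitVector killed = localsLiveInBytecode(before);
+    //     killed.exclude(localsLiveInBytecode(after));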
 
     LivenessCalculationPoint appropriateLivenessCalculationPoint(CodeOrigin origin, bool isCallerOrigin)
     {
@@ -946,12 +938,12 @@ public:
         return LivenessCalculationPoint::BeforeUse;
     }
     
-    // Tells you all of the operands live at the given CodeOrigin. This is a small
-    // extension to forAllLocalsAndTmpsLiveInBytecode(), since all arguments are always presumed live.
+    // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
+    // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
     template<typename Functor>
     void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
     {
-        forAllLocalsAndTmpsLiveInBytecode(codeOrigin, functor);
+        forAllLocalsLiveInBytecode(codeOrigin, functor);
         
         // Report all arguments as being live.
         for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
@@ -1122,7 +1114,6 @@ public:
     std::unique_ptr<BackwardsCFG> m_backwardsCFG;
     std::unique_ptr<BackwardsDominators> m_backwardsDominators;
     std::unique_ptr<ControlEquivalenceAnalysis> m_controlEquivalenceAnalysis;
-    unsigned m_tmps;
     unsigned m_localVars;
     unsigned m_nextMachineLocal;
     unsigned m_parameterSlots;
index 202d225..99aeffc 100644
@@ -45,8 +45,8 @@ static constexpr bool verbose = false;
 InPlaceAbstractState::InPlaceAbstractState(Graph& graph)
     : m_graph(graph)
     , m_abstractValues(*graph.m_abstractValuesCache)
-    , m_variables(OperandsLike, graph.block(0)->variablesAtHead)
-    , m_block(nullptr)
+    , m_variables(m_graph.m_codeBlock->numParameters(), graph.m_localVars)
+    , m_block(0)
 {
 }
 
index 1865e6a..bf5ead1 100644
@@ -167,11 +167,13 @@ public:
         return fastForward(m_variables[index]);
     }
 
-    AbstractValue& operand(Operand operand)
+    AbstractValue& operand(int operand)
     {
         return variableAt(m_variables.operandIndex(operand));
     }
     
+    AbstractValue& operand(VirtualRegister operand) { return this->operand(operand.offset()); }
+    
     AbstractValue& local(size_t index)
     {
         return variableAt(m_variables.localIndex(index));
index c5544bb..70eef88 100644
@@ -85,6 +85,8 @@ void JITCompiler::linkOSRExits()
         }
     }
     
+    MacroAssemblerCodeRef<JITThunkPtrTag> osrExitThunk = vm().getCTIStub(osrExitThunkGenerator);
+    auto osrExitThunkLabel = CodeLocationLabel<JITThunkPtrTag>(osrExitThunk.code());
     for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
         OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
         JumpList& failureJumps = info.m_failureJumps;
@@ -95,7 +97,13 @@ void JITCompiler::linkOSRExits()
 
         jitAssertHasValidCallFrame();
         store32(TrustedImm32(i), &vm().osrExitIndex);
-        info.m_patchableJump = patchableJump();
+        if (Options::useProbeOSRExit()) {
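+            // All probe-based exits share a single thunk, so a plain jump suffices;
+            // the patchable jump in the other branch exists so a per-exit ramp can
+            // be compiled lazily and patched in later.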
+            Jump target = jump();
+            addLinkTask([target, osrExitThunkLabel] (LinkBuffer& linkBuffer) {
+                linkBuffer.link(target, osrExitThunkLabel);
+            });
+        } else
+            info.m_patchableJump = patchableJump();
     }
 }
 
@@ -587,12 +595,11 @@ void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label bloc
             default:
                 break;
             }
-
-            ASSERT(!variable->operand().isTmp());
-            if (variable->operand().virtualRegister() != variable->machineLocal()) {
+            
+            if (variable->local() != variable->machineLocal()) {
                 entry->m_reshufflings.append(
                     OSREntryReshuffling(
-                        variable->operand().virtualRegister().offset(), variable->machineLocal().offset()));
+                        variable->local().offset(), variable->machineLocal().offset()));
             }
         }
     }
index 4880874..d1bd7da 100644
@@ -135,7 +135,7 @@ public:
 
     void emitStoreCallSiteIndex(CallSiteIndex callSite)
     {
-        store32(TrustedImm32(callSite.bits()), tagFor(CallFrameSlot::argumentCountIncludingThis));
+        store32(TrustedImm32(callSite.bits()), tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCountIncludingThis)));
     }
 
     // Add a call out from JIT code, without an exception check.
index 5c35e53..42f68ad 100644
@@ -63,7 +63,7 @@ public:
         return true;
     }
 
-    bool isValidFlushLocation(BasicBlock* startingBlock, unsigned index, Operand operand)
+    bool isValidFlushLocation(BasicBlock* startingBlock, unsigned index, VirtualRegister operand)
     {
         // This code is not meant to be fast. We just use it for assertions. If we got liveness wrong,
         // this function would return false for a Flush that we insert.
@@ -81,7 +81,7 @@ public:
         auto flushIsDefinitelyInvalid = [&] (BasicBlock* block, unsigned index) {
             bool allGood = false;
             for (unsigned i = index; i--; ) {
-                if (block->at(i)->accessesStack(m_graph) && block->at(i)->operand() == operand) {
+                if (block->at(i)->accessesStack(m_graph) && block->at(i)->local() == operand) {
                     allGood = true;
                     break;
                 }
@@ -115,7 +115,8 @@ public:
     void handleBlockForTryCatch(BasicBlock* block, InsertionSet& insertionSet)
     {
         HandlerInfo* currentExceptionHandler = nullptr;
-        Operands<bool> liveAtCatchHead(0, m_graph.block(0)->variablesAtTail.numberOfLocals(), m_graph.block(0)->variablesAtTail.numberOfTmps());
+        FastBitVector liveAtCatchHead;
+        liveAtCatchHead.resize(m_graph.block(0)->variablesAtTail.numberOfLocals());
 
         HandlerInfo* cachedHandlerResult;
         CodeOrigin cachedCodeOrigin;
@@ -132,11 +133,11 @@ public:
                 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
                 if (HandlerInfo* handler = codeBlock->handlerForBytecodeIndex(bytecodeIndexToCheck)) {
-                    liveAtCatchHead.fill(false);
+                    liveAtCatchHead.clearAll();
 
                     BytecodeIndex catchBytecodeIndex = BytecodeIndex(handler->target);
-                    m_graph.forAllLocalsAndTmpsLiveInBytecode(CodeOrigin(catchBytecodeIndex, inlineCallFrame), [&] (Operand operand) {
-                        liveAtCatchHead.operand(operand) = true;
+                    m_graph.forAllLocalsLiveInBytecode(CodeOrigin(catchBytecodeIndex, inlineCallFrame), [&] (VirtualRegister operand) {
+                        liveAtCatchHead[operand.toLocal()] = true;
                     });
 
                     cachedHandlerResult = handler;
@@ -155,12 +156,12 @@ public:
             return cachedHandlerResult;
         };
 
-        Operands<VariableAccessData*> currentBlockAccessData(OperandsLike, block->variablesAtTail, nullptr);
+        Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
 
         auto flushEverything = [&] (NodeOrigin origin, unsigned index) {
             RELEASE_ASSERT(currentExceptionHandler);
-            auto flush = [&] (Operand operand) {
-                if (operand.isArgument() || liveAtCatchHead.operand(operand)) {
+            auto flush = [&] (VirtualRegister operand) {
+                if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()]) || operand.isArgument()) {
 
                     ASSERT(isValidFlushLocation(block, index, operand));
 
@@ -177,8 +178,6 @@ public:
 
             for (unsigned local = 0; local < block->variablesAtTail.numberOfLocals(); local++)
                 flush(virtualRegisterForLocal(local));
-            for (unsigned tmp = 0; tmp < block->variablesAtTail.numberOfTmps(); ++tmp)
-                flush(Operand::tmp(tmp));
             flush(VirtualRegister(CallFrame::thisArgumentOffset()));
         };
 
@@ -193,8 +192,9 @@ public:
             }
 
             if (currentExceptionHandler && (node->op() == SetLocal || node->op() == SetArgumentDefinitely || node->op() == SetArgumentMaybe)) {
-                Operand operand = node->operand();
-                if (operand.isArgument() || liveAtCatchHead.operand(operand)) {
+                VirtualRegister operand = node->local();
+                if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()]) || operand.isArgument()) {
+
                     ASSERT(isValidFlushLocation(block, nodeIndex, operand));
 
                     VariableAccessData* variableAccessData = currentBlockAccessData.operand(operand);
@@ -207,7 +207,7 @@ public:
             }
 
             if (node->accessesStack(m_graph))
-                currentBlockAccessData.operand(node->operand()) = node->variableAccessData();
+                currentBlockAccessData.operand(node->local()) = node->variableAccessData();
         }
 
         if (currentExceptionHandler) {
@@ -216,7 +216,7 @@ public:
         }
     }
 
-    VariableAccessData* newVariableAccessData(Operand operand)
+    VariableAccessData* newVariableAccessData(VirtualRegister operand)
     {
         ASSERT(!operand.isConstant());
         
index fff96bf..73a3d75 100644
@@ -85,7 +85,7 @@ private:
         m_state.fill(Epoch());
         m_graph.forAllLiveInBytecode(
             block->terminal()->origin.forExit,
-            [&] (Operand reg) {
+            [&] (VirtualRegister reg) {
                 m_state.operand(reg) = currentEpoch;
             });
         
@@ -99,7 +99,7 @@ private:
             Node* node = block->at(nodeIndex);
             
             if (node->op() == MovHint) {
-                Epoch localEpoch = m_state.operand(node->unlinkedOperand());
+                Epoch localEpoch = m_state.operand(node->unlinkedLocal());
                 if (DFGMovHintRemovalPhaseInternal::verbose)
                     dataLog("    At ", node, ": current = ", currentEpoch, ", local = ", localEpoch, "\n");
                 if (!localEpoch || localEpoch == currentEpoch) {
@@ -107,7 +107,7 @@ private:
                     node->child1() = Edge();
                     m_changed = true;
                 }
-                m_state.operand(node->unlinkedOperand()) = Epoch();
+                m_state.operand(node->unlinkedLocal()) = Epoch();
             }
             
             if (mayExit(m_graph, node) != DoesNotExit)
@@ -116,15 +116,15 @@ private:
             if (nodeIndex) {
                 forAllKilledOperands(
                     m_graph, block->at(nodeIndex - 1), node,
-                    [&] (Operand operand) {
+                    [&] (VirtualRegister reg) {
                         // This function is a bit sloppy - it might claim to kill a local even if
                         // it's still live after. We need to protect against that.
-                        if (!!m_state.operand(operand))
+                        if (!!m_state.operand(reg))
                             return;
                         
                         if (DFGMovHintRemovalPhaseInternal::verbose)
-                            dataLog("    Killed operand at ", node, ": ", operand, "\n");
-                        m_state.operand(operand) = currentEpoch;
+                            dataLog("    Killed operand at ", node, ": ", reg, "\n");
+                        m_state.operand(reg) = currentEpoch;
                     });
             }
         }
index 517af10..4bfd3e5 100644
@@ -251,13 +251,13 @@ struct StackAccessData {
     {
     }
     
-    StackAccessData(Operand operand, FlushFormat format)
-        : operand(operand)
+    StackAccessData(VirtualRegister local, FlushFormat format)
+        : local(local)
         , format(format)
     {
     }
     
-    Operand operand;
+    VirtualRegister local;
     VirtualRegister machineLocal;
     FlushFormat format;
     
@@ -915,7 +915,6 @@ public:
         switch (op()) {
         case GetMyArgumentByVal:
         case GetMyArgumentByValOutOfBounds:
-        case VarargsLength:
         case LoadVarargs:
         case ForwardVarargs:
         case CallVarargs:
@@ -937,11 +936,9 @@ public:
         switch (op()) {
         case GetMyArgumentByVal:
         case GetMyArgumentByValOutOfBounds:
-        case VarargsLength:
-            return child1();
         case LoadVarargs:
         case ForwardVarargs:
-            return child2();
+            return child1();
         case CallVarargs:
         case CallForwardVarargs:
         case ConstructVarargs:
@@ -989,9 +986,9 @@ public:
         return m_opInfo.as<VariableAccessData*>()->find();
     }
     
-    Operand operand()
+    VirtualRegister local()
     {
-        return variableAccessData()->operand();
+        return variableAccessData()->local();
     }
     
     VirtualRegister machineLocal()
@@ -999,7 +996,7 @@ public:
         return variableAccessData()->machineLocal();
     }
     
-    bool hasUnlinkedOperand()
+    bool hasUnlinkedLocal()
     {
         switch (op()) {
         case ExtractOSREntryLocal:
@@ -1012,10 +1009,10 @@ public:
         }
     }
     
-    Operand unlinkedOperand()
+    VirtualRegister unlinkedLocal()
     {
-        ASSERT(hasUnlinkedOperand());
-        return Operand::fromBits(m_opInfo.as<uint64_t>());
+        ASSERT(hasUnlinkedLocal());
+        return VirtualRegister(m_opInfo.as<int32_t>());
     }
     
     bool hasStackAccessData()
@@ -1366,7 +1363,7 @@ public:
     
     bool hasLoadVarargsData()
     {
-        return op() == LoadVarargs || op() == ForwardVarargs || op() == VarargsLength;
+        return op() == LoadVarargs || op() == ForwardVarargs;
     }
     
     LoadVarargsData* loadVarargsData()
index 7c6d1b2..c047c6e 100644
@@ -74,7 +74,6 @@ namespace JSC { namespace DFG {
     macro(GetLocal, NodeResultJS | NodeMustGenerate) \
     macro(SetLocal, 0) \
     \
-    /* These are used in SSA form to represent to track */\
     macro(PutStack, NodeMustGenerate) \
     macro(KillStack, NodeMustGenerate) \
     macro(GetStack, NodeResultJS) \
@@ -203,7 +202,6 @@ namespace JSC { namespace DFG {
     macro(GetByValWithThis, NodeResultJS | NodeMustGenerate) \
     macro(GetMyArgumentByVal, NodeResultJS | NodeMustGenerate) \
     macro(GetMyArgumentByValOutOfBounds, NodeResultJS | NodeMustGenerate) \
-    macro(VarargsLength, NodeMustGenerate | NodeResultInt32) \
     macro(LoadVarargs, NodeMustGenerate) \
     macro(ForwardVarargs, NodeMustGenerate) \
     macro(PutByValDirect, NodeMustGenerate | NodeHasVarArgs) \
index d0a9406..79bc796 100644
 #include "JSCInlines.h"
 
 namespace JSC { namespace DFG {
+namespace DFGOSRAvailabilityAnalysisPhaseInternal {
+static constexpr bool verbose = false;
+}
 
 class OSRAvailabilityAnalysisPhase : public Phase {
-    static constexpr bool verbose = false;
 public:
     OSRAvailabilityAnalysisPhase(Graph& graph)
         : Phase(graph, "OSR availability analysis")
@@ -73,8 +75,8 @@ public:
             dataLog("Live: ");
             m_graph.forAllLiveInBytecode(
                 block->at(0)->origin.forExit,
-                [&] (Operand operand) {
-                    dataLog(operand, " ");
+                [&] (VirtualRegister reg) {
+                    dataLog(reg, " ");
                 });
             dataLogLn("");
         };
@@ -89,7 +91,7 @@ public:
                 if (!block)
                     continue;
                 
-                if (verbose) {
+                if (DFGOSRAvailabilityAnalysisPhaseInternal::verbose) {
                     dataLogLn("Before changing Block #", block->index);
                     dumpAvailability(block);
                 }
@@ -104,7 +106,7 @@ public:
                 block->ssa->availabilityAtTail = calculator.m_availability;
                 changed = true;
 
-                if (verbose) {
+                if (DFGOSRAvailabilityAnalysisPhaseInternal::verbose) {
                     dataLogLn("After changing Block #", block->index);
                     dumpAvailability(block);
                 }
@@ -118,7 +120,7 @@ public:
                     BasicBlock* successor = block->successor(successorIndex);
                     successor->ssa->availabilityAtHead.pruneByLiveness(
                         m_graph, successor->at(0)->origin.forExit);
-                    if (verbose) {
+                    if (DFGOSRAvailabilityAnalysisPhaseInternal::verbose) {
                         dataLogLn("After pruning Block #", successor->index);
                         dumpAvailability(successor);
                         dumpBytecodeLivenessAtHead(successor);
@@ -206,28 +208,28 @@ void LocalOSRAvailabilityCalculator::executeNode(Node* node)
     switch (node->op()) {
     case PutStack: {
         StackAccessData* data = node->stackAccessData();
-        m_availability.m_locals.operand(data->operand).setFlush(data->flushedAt());
+        m_availability.m_locals.operand(data->local).setFlush(data->flushedAt());
         break;
     }
         
     case KillStack: {
-        m_availability.m_locals.operand(node->unlinkedOperand()).setFlush(FlushedAt(ConflictingFlush));
+        m_availability.m_locals.operand(node->unlinkedLocal()).setFlush(FlushedAt(ConflictingFlush));
         break;
     }
 
     case GetStack: {
         StackAccessData* data = node->stackAccessData();
-        m_availability.m_locals.operand(data->operand) = Availability(node, data->flushedAt());
+        m_availability.m_locals.operand(data->local) = Availability(node, data->flushedAt());
         break;
     }
 
     case MovHint: {
-        m_availability.m_locals.operand(node->unlinkedOperand()).setNode(node->child1().node());
+        m_availability.m_locals.operand(node->unlinkedLocal()).setNode(node->child1().node());
         break;
     }
 
     case ZombieHint: {
-        m_availability.m_locals.operand(node->unlinkedOperand()).setNodeUnavailable();
+        m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable();
         break;
     }
 
@@ -240,18 +242,15 @@ void LocalOSRAvailabilityCalculator::executeNode(Node* node)
         }
         break;
     }
-
-    case VarargsLength: {
-        break;
-    }
-
+        
     case LoadVarargs:
     case ForwardVarargs: {
         LoadVarargsData* data = node->loadVarargsData();
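         // These nodes flush the argument count and every argument slot to known
         // stack locations, so a flushed Availability is recorded for each below.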
-        m_availability.m_locals.operand(data->count) = Availability(FlushedAt(FlushedInt32, data->machineCount));
+        m_availability.m_locals.operand(data->count) =
+            Availability(FlushedAt(FlushedInt32, data->machineCount));
         for (unsigned i = data->limit; i--;) {
-            m_availability.m_locals.operand(data->start + i) =
-                Availability(FlushedAt(FlushedJSValue, data->machineStart + i));
+            m_availability.m_locals.operand(VirtualRegister(data->start.offset() + i)) =
+                Availability(FlushedAt(FlushedJSValue, VirtualRegister(data->machineStart.offset() + i)));
         }
         break;
     }
@@ -273,20 +272,20 @@ void LocalOSRAvailabilityCalculator::executeNode(Node* node)
         if (inlineCallFrame->isVarargs()) {
             // Record how to read each argument and the argument count.
             Availability argumentCount =
-                m_availability.m_locals.operand(VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis));
+                m_availability.m_locals.operand(inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis);
             
             m_availability.m_heap.set(PromotedHeapLocation(ArgumentCountPLoc, node), argumentCount);
         }
         
         if (inlineCallFrame->isClosureCall) {
             Availability callee = m_availability.m_locals.operand(
-                VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::callee));
+                inlineCallFrame->stackOffset + CallFrameSlot::callee);
             m_availability.m_heap.set(PromotedHeapLocation(ArgumentsCalleePLoc, node), callee);
         }
         
-        for (unsigned i = numberOfArgumentsToSkip; i < static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1); ++i) {
+        for (unsigned i = numberOfArgumentsToSkip; i < inlineCallFrame->argumentCountIncludingThis - 1; ++i) {
             Availability argument = m_availability.m_locals.operand(
-                VirtualRegister(inlineCallFrame->stackOffset + CallFrame::argumentOffset(i)));
+                inlineCallFrame->stackOffset + CallFrame::argumentOffset(i));
             
             m_availability.m_heap.set(PromotedHeapLocation(ArgumentPLoc, node, i), argument);
         }
index ee5f7f3..4497308 100644
@@ -322,7 +322,7 @@ void* prepareOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, Byteco
     
     // 7) Fix the call frame to have the right code block.
     
-    *bitwise_cast<CodeBlock**>(pivot - (CallFrameSlot::codeBlock + 1)) = codeBlock;
+    *bitwise_cast<CodeBlock**>(pivot - 1 - CallFrameSlot::codeBlock) = codeBlock;
     
     if (Options::verboseOSR())
         dataLogF("    OSR returning data buffer %p.\n", scratch);
@@ -382,10 +382,10 @@ MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(VM& vm, CallF
 
     auto instruction = baselineCodeBlock->instructions().at(callFrame->bytecodeIndex());
     ASSERT(instruction->is<OpCatch>());
-    ValueProfileAndVirtualRegisterBuffer* buffer = instruction->as<OpCatch>().metadata(baselineCodeBlock).m_buffer;
+    ValueProfileAndOperandBuffer* buffer = instruction->as<OpCatch>().metadata(baselineCodeBlock).m_buffer;
     JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer());
     unsigned index = 0;
-    buffer->forEach([&] (ValueProfileAndVirtualRegister& profile) {
+    buffer->forEach([&] (ValueProfileAndOperand& profile) {
         if (!VirtualRegister(profile.m_operand).isLocal())
             return;
         dataBuffer[index] = callFrame->uncheckedR(profile.m_operand).jsValue();
index 6096866..dc76bcb 100644
@@ -102,10 +102,10 @@ public:
             VariableAccessData* variable = previousHead->variableAccessData();
             locals[local] = newRoot->appendNode(
                 m_graph, variable->prediction(), ExtractOSREntryLocal, origin,
-                OpInfo(variable->operand().virtualRegister()));
+                OpInfo(variable->local().offset()));
             
             newRoot->appendNode(
-                m_graph, SpecNone, MovHint, origin, OpInfo(variable->operand().virtualRegister()),
+                m_graph, SpecNone, MovHint, origin, OpInfo(variable->local().offset()),
                 Edge(locals[local]));
         }
 
index 239df52..1b8e278 100644
@@ -30,7 +30,6 @@
 
 #include "AssemblyHelpers.h"
 #include "BytecodeUseDef.h"
-#include "CheckpointOSRExitSideState.h"
 #include "ClonedArguments.h"
 #include "DFGGraph.h"
 #include "DFGMayExit.h"
 
 namespace JSC { namespace DFG {
 
+// Probe based OSR Exit.
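+//
+// (Sketch) When Options::useProbeOSRExit() is set, the exit branch jumps to a shared
+// thunk that captures the CPU state into a Probe::Context and calls
+// OSRExit::executeOSRExit() below, rather than running a per-exit compiled ramp.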
+
+using CPUState = Probe::CPUState;
+using Context = Probe::Context;
+using Frame = Probe::Frame;
+
+static void reifyInlinedCallFrames(Probe::Context&, CodeBlock* baselineCodeBlock, const OSRExitBase&);
+static void adjustAndJumpToTarget(Probe::Context&, VM&, CodeBlock*, CodeBlock* baselineCodeBlock, OSRExit&);
+static void printOSRExit(Context&, uint32_t osrExitIndex, const OSRExit&);
+
+static JSValue jsValueFor(CPUState& cpu, JSValueSource source)
+{
+    if (source.isAddress()) {
+        JSValue result;
+        std::memcpy(&result, cpu.gpr<uint8_t*>(source.base()) + source.offset(), sizeof(JSValue));
+        return result;
+    }
+#if USE(JSVALUE64)
+    return JSValue::decode(cpu.gpr<EncodedJSValue>(source.gpr()));
+#else
+    if (source.hasKnownTag())
+        return JSValue(source.tag(), cpu.gpr<int32_t>(source.payloadGPR()));
+    return JSValue(cpu.gpr<int32_t>(source.tagGPR()), cpu.gpr<int32_t>(source.payloadGPR()));
+#endif
+}
+
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+
+// Based on AssemblyHelpers::emitRestoreCalleeSavesFor().
+static void restoreCalleeSavesFor(Context& context, CodeBlock* codeBlock)
+{
+    ASSERT(codeBlock);
+
+    const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+    RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+    unsigned registerCount = calleeSaves->size();
+
+    UCPURegister* physicalStackFrame = context.fp<UCPURegister*>();
+    for (unsigned i = 0; i < registerCount; i++) {
+        RegisterAtOffset entry = calleeSaves->at(i);
+        if (dontRestoreRegisters.get(entry.reg()))
+            continue;
+        // The callee saved values come from the original stack, not the recovered stack.
+        // Hence, we read the values directly from the physical stack memory instead of
+        // going through context.stack().
+        ASSERT(!(entry.offset() % sizeof(UCPURegister)));
+        context.gpr(entry.reg().gpr()) = physicalStackFrame[entry.offset() / sizeof(UCPURegister)];
+    }
+}
+
+// Based on AssemblyHelpers::emitSaveCalleeSavesFor().
+static void saveCalleeSavesFor(Context& context, CodeBlock* codeBlock)
+{
+    auto& stack = context.stack();
+    ASSERT(codeBlock);
+
+    const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+    unsigned registerCount = calleeSaves->size();
+
+    for (unsigned i = 0; i < registerCount; i++) {
+        RegisterAtOffset entry = calleeSaves->at(i);
+        if (dontSaveRegisters.get(entry.reg()))
+            continue;
+        stack.set(context.fp(), entry.offset(), context.gpr<UCPURegister>(entry.reg().gpr()));
+    }
+}
+
+// Based on AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer().
+static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context& context)
+{
+    VM& vm = *context.arg<VM*>();
+
+    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
+    RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
+    unsigned registerCount = allCalleeSaves->size();
+
+    VMEntryRecord* entryRecord = vmEntryRecord(vm.topEntryFrame);
+    UCPURegister* calleeSaveBuffer = reinterpret_cast<UCPURegister*>(entryRecord->calleeSaveRegistersBuffer);
+
+    // Restore all callee saves.
+    for (unsigned i = 0; i < registerCount; i++) {
+        RegisterAtOffset entry = allCalleeSaves->at(i);
+        if (dontRestoreRegisters.get(entry.reg()))
+            continue;
+        size_t uintptrOffset = entry.offset() / sizeof(UCPURegister);
+        if (entry.reg().isGPR())
+            context.gpr(entry.reg().gpr()) = calleeSaveBuffer[uintptrOffset];
+        else {
+#if USE(JSVALUE64)
+            context.fpr(entry.reg().fpr()) = bitwise_cast<double>(calleeSaveBuffer[uintptrOffset]);
+#else
+            // FIXME: <https://webkit.org/b/193275> support callee-saved floating point registers on 32-bit architectures
+            RELEASE_ASSERT_NOT_REACHED();
+#endif
+        }
+    }
+}
+
+// Based on AssemblyHelpers::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer().
+static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context& context)
+{
+    VM& vm = *context.arg<VM*>();
+    auto& stack = context.stack();
+
+    VMEntryRecord* entryRecord = vmEntryRecord(vm.topEntryFrame);
+    void* calleeSaveBuffer = entryRecord->calleeSaveRegistersBuffer;
+
+    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
+    RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
+    unsigned registerCount = allCalleeSaves->size();
+
+    for (unsigned i = 0; i < registerCount; i++) {
+        RegisterAtOffset entry = allCalleeSaves->at(i);
+        if (dontCopyRegisters.get(entry.reg()))
+            continue;
+        if (entry.reg().isGPR())
+            stack.set(calleeSaveBuffer, entry.offset(), context.gpr<UCPURegister>(entry.reg().gpr()));
+        else {
+#if USE(JSVALUE64)
+            stack.set(calleeSaveBuffer, entry.offset(), context.fpr<UCPURegister>(entry.reg().fpr()));
+#else
+            // FIXME: <https://webkit.org/b/193275> support callee-saved floating point registers on 32-bit architectures
+            RELEASE_ASSERT_NOT_REACHED();
+#endif
+        }
+    }
+}
+
+// Based on AssemblyHelpers::emitSaveOrCopyCalleeSavesFor().
+static void saveOrCopyCalleeSavesFor(Context& context, CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, bool wasCalledViaTailCall)
+{
+    Frame frame(context.fp(), context.stack());
+    ASSERT(codeBlock);
+
+    const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+    unsigned registerCount = calleeSaves->size();
+
+    RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();
+
+    for (unsigned i = 0; i < registerCount; i++) {
+        RegisterAtOffset entry = calleeSaves->at(i);
+        if (dontSaveRegisters.get(entry.reg()))
+            continue;
+
+        uintptr_t savedRegisterValue;
+
+        if (wasCalledViaTailCall && baselineCalleeSaves.get(entry.reg()))
+            savedRegisterValue = frame.get<uintptr_t>(entry.offset());
+        else
+            savedRegisterValue = context.gpr(entry.reg().gpr());
+
+        frame.set(offsetVirtualRegister.offsetInBytes() + entry.offset(), savedRegisterValue);
+    }
+}
+#else // not NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+
+static void restoreCalleeSavesFor(Context&, CodeBlock*) { }
+static void saveCalleeSavesFor(Context&, CodeBlock*) { }
+static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context&) { }
+static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context&) { }
+static void saveOrCopyCalleeSavesFor(Context&, CodeBlock*, VirtualRegister, bool) { }
+
+#endif // NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+
+static JSCell* createDirectArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
+{
+    VM& vm = *context.arg<VM*>();
+
+    ASSERT(vm.heap.isDeferred());
+
+    if (inlineCallFrame)
+        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+
+    unsigned length = argumentCount - 1;
+    unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
+    DirectArguments* result = DirectArguments::create(
+        vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);
+
+    result->setCallee(vm, callee);
+
+    void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
+    Frame frame(frameBase, context.stack());
+    for (unsigned i = length; i--;)
+        result->setIndexQuickly(vm, i, frame.argument(i));
+
+    return result;
+}
+
+static JSCell* createClonedArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
+{
+    VM& vm = *context.arg<VM*>();
+
+    ASSERT(vm.heap.isDeferred());
+
+    if (inlineCallFrame)
+        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+
+    JSGlobalObject* globalObject = codeBlock->globalObject();
+    unsigned length = argumentCount - 1;
+    ClonedArguments* result = ClonedArguments::createEmpty(
+        vm, globalObject->clonedArgumentsStructure(), callee, length);
+
+    void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
+    Frame frame(frameBase, context.stack());
+    for (unsigned i = length; i--;)
+        result->putDirectIndex(globalObject, i, frame.argument(i));
+    return result;
+}
+
+static void emitRestoreArguments(Context& context, CodeBlock* codeBlock, DFG::JITCode* dfgJITCode, const Operands<ValueRecovery>& operands)
+{
+    Frame frame(context.fp(), context.stack());
+
+    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
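+    // Materialize each phantom arguments object only once; later operands that
+    // recover to the same MinifiedID reuse the object created for the first one.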
+    for (size_t index = 0; index < operands.size(); ++index) {
+        const ValueRecovery& recovery = operands[index];
+        int operand = operands.operandForIndex(index);
+
+        if (recovery.technique() != DirectArgumentsThatWereNotCreated
+            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
+            continue;
+
+        MinifiedID id = recovery.nodeID();
+        auto iter = alreadyAllocatedArguments.find(id);
+        if (iter != alreadyAllocatedArguments.end()) {
+            frame.setOperand(operand, frame.operand(iter->value));
+            continue;
+        }
+
+        InlineCallFrame* inlineCallFrame =
+            dfgJITCode->minifiedDFG.at(id)->inlineCallFrame();
+
+        int stackOffset;
+        if (inlineCallFrame)
+            stackOffset = inlineCallFrame->stackOffset;
+        else
+            stackOffset = 0;
+
+        JSFunction* callee;
+        if (!inlineCallFrame || inlineCallFrame->isClosureCall)
+            callee = jsCast<JSFunction*>(frame.operand(stackOffset + CallFrameSlot::callee).asCell());
+        else
+            callee = jsCast<JSFunction*>(inlineCallFrame->calleeRecovery.constant().asCell());
+
+        int32_t argumentCount;
+        if (!inlineCallFrame || inlineCallFrame->isVarargs())
+            argumentCount = frame.operand<int32_t>(stackOffset + CallFrameSlot::argumentCountIncludingThis, PayloadOffset);
+        else
+            argumentCount = inlineCallFrame->argumentCountIncludingThis;
+
+        JSCell* argumentsObject;
+        switch (recovery.technique()) {
+        case DirectArgumentsThatWereNotCreated:
+            argumentsObject = createDirectArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
+            break;
+        case ClonedArgumentsThatWereNotCreated:
+            argumentsObject = createClonedArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+        frame.setOperand(operand, JSValue(argumentsObject));
+
+        alreadyAllocatedArguments.add(id, operand);
+    }
+}
+
+// The following is a list of extra initializations that need to be done in order
+// of most likely needed (lower enum value) to least likely needed (higher enum value).
+// Each level's initialization also includes that of every lower enum value (see
+// the use of the extraInitializationLevel value below).
+enum class ExtraInitializationLevel {
+    None,
+    SpeculationRecovery,
+    ValueProfileUpdate,
+    ArrayProfileUpdate,
+    Other
+};
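+
+// Illustrative example: an exit that needs only an array-profile update still runs
+// the SpeculationRecovery and ValueProfileUpdate steps first, since each level
+// includes everything below it.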
+
+void OSRExit::executeOSRExit(Context& context)
+{
+    VM& vm = *context.arg<VM*>();
+    auto scope = DECLARE_THROW_SCOPE(vm);
+
+    CallFrame* callFrame = context.fp<CallFrame*>();
+    ASSERT(&callFrame->deprecatedVM() == &vm);
+    auto& cpu = context.cpu;
+
+    if (validateDFGDoesGC) {
+        // We're about to exit optimized code. So, there's no longer any optimized
+        // code running that expects no GC.
+        vm.heap.setExpectDoesGC(true);
+    }
+
+    if (vm.callFrameForCatch) {
+        callFrame = vm.callFrameForCatch;
+        context.fp() = callFrame;
+    }
+
+    CodeBlock* codeBlock = callFrame->codeBlock();
+    ASSERT(codeBlock);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
+
+    // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
+    // really be profitable.
+    DeferGCForAWhile deferGC(vm.heap);
+
+    uint32_t exitIndex = vm.osrExitIndex;
+    DFG::JITCode* dfgJITCode = codeBlock->jitCode()->dfg();
+    OSRExit& exit = dfgJITCode->osrExit[exitIndex];
+
+    ASSERT(!vm.callFrameForCatch || exit.m_kind == GenericUnwind);
+    EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());
+
+    if (UNLIKELY(!exit.exitState)) {
+        ExtraInitializationLevel extraInitializationLevel = ExtraInitializationLevel::None;
+
+        // We only need to execute this block once for each OSRExit record. The computed
+        // results will be cached in the OSRExitState record for use of the rest of the
+        // exit ramp code.
+
+        CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative();
+        ASSERT(JITCode::isBaselineCode(baselineCodeBlock->jitType()));
+
+        SpeculationRecovery* recovery = nullptr;
+        if (exit.m_recoveryIndex != UINT_MAX) {
+            recovery = &dfgJITCode->speculationRecovery[exit.m_recoveryIndex];
+            extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::SpeculationRecovery);
+        }
+
+        if (UNLIKELY(exit.m_kind == GenericUnwind))
+            extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
+
+        ArrayProfile* arrayProfile = nullptr;
+        if (!!exit.m_jsValueSource) {
+            if (exit.m_valueProfile)
+                extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::ValueProfileUpdate);
+            if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+                CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+                CodeBlock* profiledCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
+                arrayProfile = profiledCodeBlock->getArrayProfile(codeOrigin.bytecodeIndex());
+                if (arrayProfile)
+                    extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::ArrayProfileUpdate);
+            }
+        }
+
+        int32_t activeThreshold = baselineCodeBlock->adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp());
+        double adjustedThreshold = applyMemoryUsageHeuristicsAndConvertToInt(activeThreshold, baselineCodeBlock);
+        ASSERT(adjustedThreshold > 0);
+        adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold);
+
+        CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
+        bool exitToLLInt = Options::forceOSRExitToLLInt() || codeBlockForExit->jitType() == JITType::InterpreterThunk;
+        void* jumpTarget;
+        if (exitToLLInt) {
+            BytecodeIndex bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
+            const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeOffset).ptr();
+            MacroAssemblerCodePtr<JSEntryPtrTag> destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);
+            jumpTarget = destination.executableAddress();    
+        } else {
+            const JITCodeMap& codeMap = codeBlockForExit->jitCodeMap();
+            CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(exit.m_codeOrigin.bytecodeIndex());
+            ASSERT(codeLocation);
+            jumpTarget = codeLocation.executableAddress();
+        }
+
+        // Compute the value recoveries.
+        Operands<ValueRecovery> operands;
+        Vector<UndefinedOperandSpan> undefinedOperandSpans;
+        dfgJITCode->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, dfgJITCode->minifiedDFG, exit.m_streamIndex, operands, &undefinedOperandSpans);
+        ptrdiff_t stackPointerOffset = -static_cast<ptrdiff_t>(codeBlock->jitCode()->dfgCommon()->requiredRegisterCountForExit) * sizeof(Register);
+
+        exit.exitState = adoptRef(new OSRExitState(exit, codeBlock, baselineCodeBlock, operands, WTFMove(undefinedOperandSpans), recovery, stackPointerOffset, activeThreshold, adjustedThreshold, jumpTarget, arrayProfile, exitToLLInt));
+
+        if (UNLIKELY(vm.m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
+            Profiler::Database& database = *vm.m_perBytecodeProfiler;
+            Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
+
+            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
+                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
+                exit.m_kind, exit.m_kind == UncountableInvalidation);
+            exit.exitState->profilerExit = profilerExit;
+            extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
+        }
+
+        if (UNLIKELY(Options::printEachOSRExit()))
+            extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
+
+        exit.exitState->extraInitializationLevel = extraInitializationLevel;
+
+        if (UNLIKELY(Options::verboseOSR() || Options::verboseDFGOSRExit())) {
+            dataLogF("DFG OSR exit #%u (%s, %s) from %s, with operands = %s\n",
+                exitIndex, toCString(exit.m_codeOrigin).data(),
+                exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
+                toCString(ignoringContext<DumpContext>(operands)).data());
+        }
+    }
+
+    OSRExitState& exitState = *exit.exitState.get();
+    CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
+    ASSERT(JITCode::isBaselineCode(baselineCodeBlock->jitType()));
+
+    Operands<ValueRecovery>& operands = exitState.operands;
+    Vector<UndefinedOperandSpan>& undefinedOperandSpans = exitState.undefinedOperandSpans;
+
+    context.sp() = context.fp<uint8_t*>() + exitState.stackPointerOffset;
+
+    // The only reason for using this do-while loop is so we can break out midway when appropriate.
+    do {
+        auto extraInitializationLevel = static_cast<ExtraInitializationLevel>(exitState.extraInitializationLevel);
+
+        if (extraInitializationLevel == ExtraInitializationLevel::None)
+            break;
+
+        // Begin extra initialization level: SpeculationRecovery
+
+        // We need to do speculation recovery first because array profiling and value profiling
+        // may rely on a value that speculation recovery restores. However, most exits do not
+        // carry a recovery, so we decorate this path as UNLIKELY.
+        SpeculationRecovery* recovery = exitState.recovery;
+        if (UNLIKELY(recovery)) {
+            switch (recovery->type()) {
+            case SpeculativeAdd:
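+                // The speculative dest += src turned out to be wrong; subtract src to recover
+                // the original value and, on 64-bit, re-box it as an int32 JSValue.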
+                cpu.gpr(recovery->dest()) = cpu.gpr<uint32_t>(recovery->dest()) - cpu.gpr<uint32_t>(recovery->src());
+#if USE(JSVALUE64)
+                ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
+                cpu.gpr(recovery->dest()) |= JSValue::NumberTag;
+#endif
+                break;
+
+            case SpeculativeAddSelf:
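+                // The speculative dest += dest overflowed, leaving 2*v wrapped in the register;
+                // an arithmetic shift right halves it and the sign-bit XOR undoes the wrap.
+                // E.g. v = 0x60000000 doubles to 0xC0000000; >> 1 gives 0xE0000000;
+                // ^ 0x80000000 restores 0x60000000.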
+                cpu.gpr(recovery->dest()) = static_cast<uint32_t>(cpu.gpr<int32_t>(recovery->dest()) >> 1) ^ 0x80000000U;
+#if USE(JSVALUE64)
+                ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
+                cpu.gpr(recovery->dest()) |= JSValue::NumberTag;
+#endif
+                break;
+
+            case SpeculativeAddImmediate:
+                cpu.gpr(recovery->dest()) = (cpu.gpr<uint32_t>(recovery->dest()) - recovery->immediate());
+#if USE(JSVALUE64)
+                ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
+                cpu.gpr(recovery->dest()) |= JSValue::NumberTag;
+#endif
+                break;
+
+            case BooleanSpeculationCheck:
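+                // The boolean check XORed ValueFalse into the register to reduce true/false
+                // to 1/0; XOR again to restore the original JSValue (64-bit only).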
+#if USE(JSVALUE64)
+                cpu.gpr(recovery->dest()) = cpu.gpr(recovery->dest()) ^ JSValue::ValueFalse;
+#endif
+                break;
+
+            default:
+                break;
+            }
+        }
+        if (extraInitializationLevel <= ExtraInitializationLevel::SpeculationRecovery)
+            break;
+
+        // Begin extra initialization level: ValueProfileUpdate
+        JSValue profiledValue;
+        if (!!exit.m_jsValueSource) {
+            profiledValue = jsValueFor(cpu, exit.m_jsValueSource);
+            if (MethodOfGettingAValueProfile profile = exit.m_valueProfile)
+                profile.reportValue(profiledValue);
+        }
+        if (extraInitializationLevel <= ExtraInitializationLevel::ValueProfileUpdate)
+            break;
+
+        // Begin extra initialization level: ArrayProfileUpdate
+        if (ArrayProfile* arrayProfile = exitState.arrayProfile) {
+            ASSERT(!!exit.m_jsValueSource);
+            ASSERT(exit.m_kind == BadCache || exit.m_kind == BadIndexingType);
+            CodeBlock* profiledCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOriginForExitProfile, baselineCodeBlock);
+            const Instruction* instruction = profiledCodeBlock->instructions().at(exit.m_codeOriginForExitProfile.bytecodeIndex()).ptr();
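+            // Only feed the array profile for an op_get_by_id when it is acting as an
+            // array-length access; other get_by_id modes aren't array accesses.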
+            bool doProfile = !instruction->is<OpGetById>() || instruction->as<OpGetById>().metadata(profiledCodeBlock).m_modeMetadata.mode == GetByIdMode::ArrayLength;
+            if (doProfile) {
+                Structure* structure = profiledValue.asCell()->structure(vm);
+                arrayProfile->observeStructure(structure);
+                arrayProfile->observeArrayMode(arrayModesFromStructure(structure));
+            }
+        }
+        if (extraInitializationLevel <= ExtraInitializationLevel::ArrayProfileUpdate)
+            break;
+
+        // Begin extra initialization level: Other
+        if (UNLIKELY(exit.m_kind == GenericUnwind)) {
+            // We are acting as a de facto op_catch because we arrive here from genericUnwind().
+            // So, we must restore our call frame and stack pointer.
+            restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(context);
+            ASSERT(context.fp() == vm.callFrameForCatch);
+        }
+
+        if (exitState.profilerExit)
+            exitState.profilerExit->incCount();
+
+        if (UNLIKELY(Options::printEachOSRExit()))
+            printOSRExit(context, vm.osrExitIndex, exit);
+
+    } while (false); // End extra initialization.
+
+    Frame frame(cpu.fp(), context.stack());
+    ASSERT(!(context.fp<uintptr_t>() & 0x7));
+
+#if USE(JSVALUE64)
+    ASSERT(cpu.gpr<int64_t>(GPRInfo::numberTagRegister) == JSValue::NumberTag);
+    ASSERT(cpu.gpr<int64_t>(GPRInfo::notCellMaskRegister) == JSValue::NotCellMask);
+#endif
+
+    // Do all data format conversions and store the results into the stack.
+    // Note: we need to recover values before restoring callee save registers below
+    // because the recovery may rely on values in some of callee save registers.
+
+    int calleeSaveSpaceAsVirtualRegisters = static_cast<int>(baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters());
+    size_t numberOfOperands = operands.size();
+    size_t numUndefinedOperandSpans = undefinedOperandSpans.size();
+
+    size_t nextUndefinedSpanIndex = 0;
+    size_t nextUndefinedOperandIndex = numberOfOperands;
+    if (numUndefinedOperandSpans)
+        nextUndefinedOperandIndex = undefinedOperandSpans[nextUndefinedSpanIndex].firstIndex;
+
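+    // Bulk-fill every span of operands known to be undefined up front; the recovery loop
+    // below skips over these spans.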
+    JSValue undefined = jsUndefined();
+    for (size_t spanIndex = 0; spanIndex < numUndefinedOperandSpans; ++spanIndex) {
+        auto& span = undefinedOperandSpans[spanIndex];
+        int firstOffset = span.minOffset;
+        int lastOffset = firstOffset + span.numberOfRegisters;
+
+        for (int offset = firstOffset; offset < lastOffset; ++offset)
+            frame.setOperand(offset, undefined);
+    }
+
+    for (size_t index = 0; index < numberOfOperands; ++index) {
+        const ValueRecovery& recovery = operands[index];
+        VirtualRegister reg = operands.virtualRegisterForIndex(index);
+
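+        // Operands inside an undefined span were already filled above, so jump the index
+        // past the span.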
+        if (UNLIKELY(index == nextUndefinedOperandIndex)) {
+            index += undefinedOperandSpans[nextUndefinedSpanIndex++].numberOfRegisters - 1;
+            if (nextUndefinedSpanIndex < numUndefinedOperandSpans)
+                nextUndefinedOperandIndex = undefinedOperandSpans[nextUndefinedSpanIndex].firstIndex;
+            else
+                nextUndefinedOperandIndex = numberOfOperands;
+            continue;
+        }
+
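+        // The lowest locals shadow the callee save registers, which are restored separately
+        // below; don't clobber them here.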
+        if (reg.isLocal() && reg.toLocal() < calleeSaveSpaceAsVirtualRegisters)
+            continue;
+
+        int operand = reg.offset();
+
+        switch (recovery.technique()) {
+        case DisplacedInJSStack:
+            frame.setOperand(operand, callFrame->r(recovery.virtualRegister()).asanUnsafeJSValue());
+            break;
+
+        case InFPR:
+            frame.setOperand(operand, cpu.fpr<JSValue>(recovery.fpr()));
+            break;
+
+#if USE(JSVALUE64)
+        case InGPR:
+            frame.setOperand(operand, cpu.gpr<JSValue>(recovery.gpr()));
+            break;
+#else
+        case InPair:
+            frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.tagGPR()), cpu.gpr<int32_t>(recovery.payloadGPR())));
+            break;
+#endif
+
+        case UnboxedCellInGPR:
+            frame.setOperand(operand, JSValue(cpu.gpr<JSCell*>(recovery.gpr())));
+            break;
+
+        case CellDisplacedInJSStack:
+            frame.setOperand(operand, JSValue(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedCell()));
+            break;
+
+#if USE(JSVALUE32_64)
+        case UnboxedBooleanInGPR:
+            frame.setOperand(operand, jsBoolean(cpu.gpr<bool>(recovery.gpr())));
+            break;
+#endif
+
+        case BooleanDisplacedInJSStack:
+#if USE(JSVALUE64)
+            frame.setOperand(operand, callFrame->r(recovery.virtualRegister()).asanUnsafeJSValue());
+#else
+            frame.setOperand(operand, jsBoolean(callFrame->r(recovery.virtualRegister()).asanUnsafeJSValue().payload()));
+#endif
+            break;
+
+        case UnboxedInt32InGPR:
+            frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.gpr())));
+            break;
+
+        case Int32DisplacedInJSStack:
+            frame.setOperand(operand, JSValue(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedInt32()));
+            break;
+
+#if USE(JSVALUE64)
+        case UnboxedInt52InGPR:
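+            // Int52 values are kept shifted left by int52ShiftAmount in the register; shift
+            // right to recover the integer before boxing.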
+            frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr()) >> JSValue::int52ShiftAmount));
+            break;
+
+        case Int52DisplacedInJSStack:
+            frame.setOperand(operand, JSValue(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedInt52()));
+            break;
+
+        case UnboxedStrictInt52InGPR:
+            frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr())));
+            break;
+
+        case StrictInt52DisplacedInJSStack:
+            frame.setOperand(operand, JSValue(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedStrictInt52()));
+            break;
+#endif
+
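+        // purifyNaN canonicalizes impure NaN bit patterns so the stored double cannot be
+        // mistaken for a NaN-boxed cell or tag.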
+        case UnboxedDoubleInFPR:
+            frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(cpu.fpr(recovery.fpr()))));
+            break;
+
+        case DoubleDisplacedInJSStack:
+            frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedDouble())));
+            break;
+
+        case Constant:
+            frame.setOperand(operand, recovery.constant());
+            break;
+
+        case DirectArgumentsThatWereNotCreated:
+        case ClonedArgumentsThatWereNotCreated:
+            // Don't do this, yet.
+            break;
+
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+    }
+
+    // Restore the DFG callee saves and then save the ones the baseline JIT uses.
+    restoreCalleeSavesFor(context, codeBlock);
+    saveCalleeSavesFor(context, baselineCodeBlock);
+
+#if USE(JSVALUE64)
+    cpu.gpr(GPRInfo::numberTagRegister) = static_cast<JSC::UCPURegister>(JSValue::NumberTag);
+    cpu.gpr(GPRInfo::notCellMaskRegister) = static_cast<JSC::UCPURegister>(JSValue::NotCellMask);
+#endif
+
+    if (exit.isExceptionHandler())
+        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(context);
+
+    // Now that things on the stack are recovered, do the arguments recovery. We assume that argument
+    // recoveries don't recursively refer to each other, but we don't assume that they only refer to
+    // certain ranges of locals, which is why we need to do this here, once the stack is sensible.
+    // Note that we also roughly assume that the arguments might still be materialized outside of their
+    // inline call frame scope - but for now the DFG doesn't do that.
+
+    DFG::emitRestoreArguments(context, codeBlock, dfgJITCode, operands);
+
+    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+    // that all new calls into this code will go to the new JIT, so the execute
+    // counter only affects call frames that performed OSR exit and call frames
+    // that were still executing the old JIT at the time of another call frame's
+    // OSR exit. We want to ensure that the following is true:
+    //
+    // (a) Code that performs an OSR exit gets a chance to reenter optimized
+    //     code eventually, since optimized code is faster. But we don't
+    //     want to do such reentry too aggressively (see (c) below).
+    //
+    // (b) If there is code on the call stack that is still running the old
+    //     JIT's code and has never OSR'd, then it should get a chance to
+    //     perform OSR entry despite the fact that we've exited.
+    //
+    // (c) Code that performs an OSR exit should not immediately retry OSR
+    //     entry, since both forms of OSR are expensive. OSR entry is
+    //     particularly expensive.
+    //
+    // (d) Frequent OSR failures, even those that do not result in the code
+    //     running in a hot loop, result in recompilation getting triggered.
+    //
+    // To ensure (c), we'd like to set the execute counter to
+    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+    // (a) and (b), since then every OSR exit would delay the opportunity for
+    // every call frame to perform OSR entry. Essentially, if OSR exit happens
+    // frequently and the function has few loops, then the counter will never
+    // become non-negative and OSR entry will never be triggered. OSR entry
+    // will only happen if a loop gets hot in the old JIT, which does a pretty
+    // good job of ensuring (a) and (b). But that doesn't take care of (d),
+    // since each speculation failure would reset the execute counter.
+    // So we check here if the number of speculation failures is significantly
+    // larger than the number of successes (we want 90% success rate), and if
+    // there have been a large enough number of failures. If so, we set the
+    // counter to 0; otherwise we set the counter to
+    // counterValueForOptimizeAfterWarmUp().
+
+    if (UNLIKELY(codeBlock->updateOSRExitCounterAndCheckIfNeedToReoptimize(exitState) == CodeBlock::OptimizeAction::ReoptimizeNow))
+        triggerReoptimizationNow(baselineCodeBlock, codeBlock, &exit);
+
+    reifyInlinedCallFrames(context, baselineCodeBlock, exit);
+    adjustAndJumpToTarget(context, vm, codeBlock, baselineCodeBlock, exit);
+}
+
+static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselineCodeBlock, const OSRExitBase& exit)
+{
+    auto& cpu = context.cpu;
+    Frame frame(cpu.fp(), context.stack());
+
+    // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
+    // in presence of inlined tail calls.
+    // https://bugs.webkit.org/show_bug.cgi?id=147511
+    ASSERT(JITCode::isBaselineCode(outermostBaselineCodeBlock->jitType()));
+    frame.setOperand<CodeBlock*>(CallFrameSlot::codeBlock, outermostBaselineCodeBlock);
+
+    const CodeOrigin* codeOrigin;
+    for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame(); codeOrigin = codeOrigin->inlineCallFrame()->getCallerSkippingTailCalls()) {
+        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame();
+        CodeBlock* baselineCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(*codeOrigin, outermostBaselineCodeBlock);
+        InlineCallFrame::Kind trueCallerCallKind;
+        CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
+        void* callerFrame = cpu.fp();
+
+        bool callerIsLLInt = false;
+
+        if (!trueCaller) {
+            ASSERT(inlineCallFrame->isTail());
+            void* returnPC = frame.get<void*>(CallFrame::returnPCOffset());
+#if CPU(ARM64E)
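+            // On ARM64E, return PCs are signed with the stack pointer as a diversifier;
+            // re-sign the PC for the slot it is about to occupy.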
+            void* oldEntrySP = cpu.fp<uint8_t*>() + sizeof(CallerFrameAndPC);
+            void* newEntrySP = cpu.fp<uint8_t*>() + inlineCallFrame->returnPCOffset() + sizeof(void*);
+            returnPC = retagCodePtr(returnPC, bitwise_cast<PtrTag>(oldEntrySP), bitwise_cast<PtrTag>(newEntrySP));
+#endif
+            frame.set<void*>(inlineCallFrame->returnPCOffset(), returnPC);
+            callerFrame = frame.get<void*>(CallFrame::callerFrameOffset());
+        } else {
+            CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
+            BytecodeIndex callBytecodeIndex = trueCaller->bytecodeIndex();
+            void* jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind, callerIsLLInt).untaggedExecutableAddress();
+
+            if (trueCaller->inlineCallFrame())
+                callerFrame = cpu.fp<uint8_t*>() + trueCaller->inlineCallFrame()->stackOffset * sizeof(EncodedJSValue);
+
+#if CPU(ARM64E)
+            void* newEntrySP = cpu.fp<uint8_t*>() + inlineCallFrame->returnPCOffset() + sizeof(void*);
+            jumpTarget = tagCodePtr(jumpTarget, bitwise_cast<PtrTag>(newEntrySP));
+#endif
+            frame.set<void*>(inlineCallFrame->returnPCOffset(), jumpTarget);
+        }
+
+        frame.setOperand<void*>(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock, baselineCodeBlock);
+
+        // Restore the inline call frame's callee save registers.
+        // If this inlined frame is a tail call that will return back to the original caller, we need to
+        // copy the prior contents of the tag registers already saved for the outer frame to this frame.
+        saveOrCopyCalleeSavesFor(context, baselineCodeBlock, VirtualRegister(inlineCallFrame->stackOffset), !trueCaller);
+
+        if (callerIsLLInt) {
+            CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
+            frame.set<const void*>(calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::metadataTableGPR).offset, baselineCodeBlockForCaller->metadataTable());
+#if USE(JSVALUE64)
+            frame.set<const void*>(calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::pbGPR).offset, baselineCodeBlockForCaller->instructionsRawPointer());
+#endif
+        }
+
+        if (!inlineCallFrame->isVarargs())
+            frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis, PayloadOffset, inlineCallFrame->argumentCountIncludingThis);
+        ASSERT(callerFrame);
+        frame.set<void*>(inlineCallFrame->callerFrameOffset(), callerFrame);
+#if USE(JSVALUE64)
+        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex()).bits();
+        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis, TagOffset, locationBits);
+        if (!inlineCallFrame->isClosureCall)
+            frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, JSValue(inlineCallFrame->calleeConstant()));
+#else // USE(JSVALUE64) // so this is the 32-bit part
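+        // On 32-bit, the call site "location" stores the Instruction* itself rather than
+        // a bytecode index.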
+        const Instruction* instruction = baselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex()).ptr();
+        uint32_t locationBits = CallSiteIndex(BytecodeIndex(bitwise_cast<uint32_t>(instruction))).bits();
+        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCountIncludingThis, TagOffset, locationBits);
+        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::callee, TagOffset, static_cast<uint32_t>(JSValue::CellTag));
+        if (!inlineCallFrame->isClosureCall)
+            frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, PayloadOffset, inlineCallFrame->calleeConstant());
+#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
+    }
+
+    // We don't need to set the top-level code origin if we only did inline tail calls.
+    if (codeOrigin) {
+#if USE(JSVALUE64)
+        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex()).bits();
+#else
+        const Instruction* instruction = outermostBaselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex()).ptr();
+        uint32_t locationBits = CallSiteIndex(BytecodeIndex(bitwise_cast<uint32_t>(instruction))).bits();
+#endif
+        frame.setOperand<uint32_t>(CallFrameSlot::argumentCountIncludingThis, TagOffset, locationBits);
+    }
+}
+
+static void adjustAndJumpToTarget(Context& context, VM& vm, CodeBlock* codeBlock, CodeBlock* baselineCodeBlock, OSRExit& exit)
+{
+    OSRExitState* exitState = exit.exitState.get();
+
+    WTF::storeLoadFence(); // The optimizing compiler expects that the OSR exit mechanism will execute this fence.
+    vm.heap.writeBarrier(baselineCodeBlock);
+
+    // We barrier all inlined frames -- and not just the current inline stack --
+    // because we don't know which inlined function owns the value profile that
+    // we'll update when we exit. In the case of "f() { a(); b(); }", if both
+    // a and b are inlined, we might exit inside b due to a bad value loaded
+    // from a.
+    // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
+    // the value profile.
+    InlineCallFrameSet* inlineCallFrames = codeBlock->jitCode()->dfgCommon()->inlineCallFrames.get();
+    if (inlineCallFrames) {
+        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames)
+            vm.heap.writeBarrier(inlineCallFrame->baselineCodeBlock.get());
+    }
+
+    auto* exitInlineCallFrame = exit.m_codeOrigin.inlineCallFrame();
+    if (exitInlineCallFrame)
+        context.fp() = context.fp<uint8_t*>() + exitInlineCallFrame->stackOffset * sizeof(EncodedJSValue);
+
+    void* jumpTarget = exitState->jumpTarget;
+    ASSERT(jumpTarget);
+
+    if (exit.isExceptionHandler()) {
+        // Since we're jumping to op_catch, we need to set callFrameForCatch.
+        vm.callFrameForCatch = context.fp<CallFrame*>();
+    }
+
+    vm.topCallFrame = context.fp<CallFrame*>();
+
+    if (exitState->isJumpToLLInt) {
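+        // Re-entering the LLInt requires reseating its pinned registers: the metadata table
+        // and, on 64-bit, the bytecode base (PB) plus the bytecode offset in PC.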
+        CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
+        BytecodeIndex bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
+        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeIndex).ptr();
+
+        context.gpr(LLInt::Registers::metadataTableGPR) = bitwise_cast<uintptr_t>(codeBlockForExit->metadataTable());
+#if USE(JSVALUE64)
+        context.gpr(LLInt::Registers::pbGPR) = bitwise_cast<uintptr_t>(codeBlockForExit->instructionsRawPointer());
+        context.gpr(LLInt::Registers::pcGPR) = static_cast<uintptr_t>(exit.m_codeOrigin.bytecodeIndex().offset());
+#else
+        context.gpr(LLInt::Registers::pcGPR) = bitwise_cast<uintptr_t>(&currentInstruction);
+#endif
+
+        if (exit.isExceptionHandler())
+            vm.targetInterpreterPCForThrow = &currentInstruction;
+    }
+
+    context.pc() = untagCodePtr<JSEntryPtrTag>(jumpTarget);
+}
+
+static void printOSRExit(Context& context, uint32_t osrExitIndex, const OSRExit& exit)
+{
+    CallFrame* callFrame = context.fp<CallFrame*>();
+    CodeBlock* codeBlock = callFrame->codeBlock();
+    CodeBlock* alternative = codeBlock->alternative();
+    ExitKind kind = exit.m_kind;
+    BytecodeIndex bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
+
+    dataLog("Speculation failure in ", *codeBlock);
+    dataLog(" @ exit #", osrExitIndex, " (", bytecodeOffset, ", ", exitKindToString(kind), ") with ");
+    if (alternative) {
+        dataLog(
+            "executeCounter = ", alternative->jitExecuteCounter(),
+            ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
+            ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
+    } else
+        dataLog("no alternative code block (i.e. we've been jettisoned)");
+    dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
+    dataLog("    GPRs at time of exit:");
+    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+        GPRReg gpr = GPRInfo::toRegister(i);
+        dataLog(" ", context.gprName(gpr), ":", RawPointer(context.gpr<void*>(gpr)));
+    }
+    dataLog("\n");
+    dataLog("    FPRs at time of exit:");
+    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+        FPRReg fpr = FPRInfo::toRegister(i);
+        dataLog(" ", context.fprName(fpr), ":");
+        uint64_t bits = context.fpr<uint64_t>(fpr);
+        double value = context.fpr(fpr);
+        dataLogF("%llx:%lf", static_cast<long long>(bits), value);
+    }
+    dataLog("\n");
+}
+
+// JIT-based OSR exit.
+
 OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex)
     : OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic, jit->m_origin.wasHoisted)
     , m_jsValueSource(jsValueSource)
@@ -70,18 +958,15 @@ CodeLocationJump<JSInternalPtrTag> OSRExit::codeLocationForRepatch() const
 
 void OSRExit::emitRestoreArguments(CCallHelpers& jit, VM& vm, const Operands<ValueRecovery>& operands)
 {
-    HashMap<MinifiedID, VirtualRegister> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
+    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
     for (size_t index = 0; index < operands.size(); ++index) {
         const ValueRecovery& recovery = operands[index];
+        int operand = operands.operandForIndex(index);
 
         if (recovery.technique() != DirectArgumentsThatWereNotCreated
             && recovery.technique() != ClonedArgumentsThatWereNotCreated)
             continue;
 
-        Operand operand = operands.operandForIndex(index);
-        if (operand.isTmp())
-            continue;
-
         MinifiedID id = recovery.nodeID();
         auto iter = alreadyAllocatedArguments.find(id);
         if (iter != alreadyAllocatedArguments.end()) {
@@ -102,7 +987,7 @@ void OSRExit::emitRestoreArguments(CCallHelpers& jit, VM& vm, const Operands<Val
 
         if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
             jit.loadPtr(
-                AssemblyHelpers::addressFor(VirtualRegister(stackOffset + CallFrameSlot::callee)),
+                AssemblyHelpers::addressFor(stackOffset + CallFrameSlot::callee),
                 GPRInfo::regT0);
         } else {
             jit.move(
@@ -112,7 +997,7 @@ void OSRExit::emitRestoreArguments(CCallHelpers& jit, VM& vm, const Operands<Val
 
         if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
             jit.load32(
-                AssemblyHelpers::payloadFor(VirtualRegister(stackOffset + CallFrameSlot::argumentCountIncludingThis)),
+                AssemblyHelpers::payloadFor(stackOffset + CallFrameSlot::argumentCountIncludingThis),
                 GPRInfo::regT1);
         } else {
             jit.move(
@@ -138,7 +1023,7 @@ void OSRExit::emitRestoreArguments(CCallHelpers& jit, VM& vm, const Operands<Val
         jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
         jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
 
-        alreadyAllocatedArguments.add(id, operand.virtualRegister());
+        alreadyAllocatedArguments.add(id, operand);
     }
 }
 
@@ -581,80 +1466,17 @@ void OSRExit::compileExit(CCallHelpers& jit, VM& vm, const OSRExit& exit, const
     if (exit.isExceptionHandler())
         jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
 
-    if (exit.m_codeOrigin.inlineStackContainsActiveCheckpoint()) {
-        // FIXME: Maybe we shouldn't use a probe but filling all the side state objects is tricky otherwise...
-        Vector<ValueRecovery> values(operands.numberOfTmps());
-        for (size_t i = 0; i < operands.numberOfTmps(); ++i)
-            values[i] = operands.tmp(i);
-
-        VM* vmPtr = &vm;
-        auto* tmpScratch = scratch + operands.tmpIndex(0);
-        jit.probe([=, values = WTFMove(values)] (Probe::Context& context) {
-            auto addSideState = [&] (CallFrame* frame, BytecodeIndex index, size_t tmpOffset) {
-                std::unique_ptr<CheckpointOSRExitSideState> sideState = WTF::makeUnique<CheckpointOSRExitSideState>();
-
-                sideState->bytecodeIndex = index;
-                for (size_t i = 0; i < maxNumCheckpointTmps; ++i) {
-                    auto& recovery = values[i + tmpOffset];
-                    // FIXME: We should do what the FTL does and materialize all the JSValues into the scratch buffer.
-                    switch (recovery.technique()) {
-                    case Constant:
-                        sideState->tmps[i] = recovery.constant();
-                        break;
-
-                    case UnboxedInt32InGPR:
-                    case Int32DisplacedInJSStack: {
-                        sideState->tmps[i] = jsNumber(static_cast<int32_t>(tmpScratch[i + tmpOffset]));
-                        break;
-                    }