Update FTL to support UntypedUse operands for op_sub.
author mark.lam@apple.com <mark.lam@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 28 Oct 2015 18:36:02 +0000 (18:36 +0000)
committer mark.lam@apple.com <mark.lam@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 28 Oct 2015 18:36:02 +0000 (18:36 +0000)
https://bugs.webkit.org/show_bug.cgi?id=150562

Reviewed by Geoffrey Garen.

Source/JavaScriptCore:

* assembler/MacroAssemblerARM64.h:
- make the dataTempRegister and memoryTempRegister public so that we can
  move input registers out of them if needed.

* ftl/FTLCapabilities.cpp:
(JSC::FTL::canCompile):
- We can now compile ArithSub.

* ftl/FTLCompile.cpp:
- Added BinaryArithGenerationContext to shuffle registers into a state that is
  expected by the baseline snippet generator.  This includes:
  1. Making sure that the input and output registers are not in the tag or
     scratch registers.
  2. Loading the tag registers with expected values.
  3. Restoring the registers to their original value on return.
- Added code to implement the ArithSub inline cache.

* ftl/FTLInlineCacheDescriptor.h:
(JSC::FTL::ArithSubDescriptor::ArithSubDescriptor):
(JSC::FTL::ArithSubDescriptor::leftType):
(JSC::FTL::ArithSubDescriptor::rightType):

* ftl/FTLInlineCacheSize.cpp:
(JSC::FTL::sizeOfArithSub):
* ftl/FTLInlineCacheSize.h:

* ftl/FTLLowerDFGToLLVM.cpp:
(JSC::FTL::DFG::LowerDFGToLLVM::compileArithAddOrSub):
- Added handling for UntypedUse for the ArithSub case.

* ftl/FTLState.h:
* jit/GPRInfo.h:
(JSC::GPRInfo::reservedRegisters):

* jit/JITSubGenerator.h:
(JSC::JITSubGenerator::generateFastPath):
- When the result register is the same as one of the input registers, we'll end up
  corrupting the input in the fast path even if we determine that we need to go to
  the slow path.  We now move the input into the scratch register, operate
  on that instead, and only move the result into the result register after
  the fast path has succeeded.

* tests/stress/op_sub.js:
(o1.valueOf):
(runTest):
- Added some debugging tools: flags for verbose logging, and eager abort on fail.

LayoutTests:

* js/regress/ftl-object-sub-expected.txt: Added.
* js/regress/ftl-object-sub.html: Added.
* js/regress/script-tests/ftl-object-sub.js: Added.
(o1.valueOf):
(o2.valueOf):
(foo):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@191683 268f45cc-cd09-0410-ab3c-d52691b4dbfc

16 files changed:
LayoutTests/ChangeLog
LayoutTests/js/regress/ftl-object-sub-expected.txt [new file with mode: 0644]
LayoutTests/js/regress/ftl-object-sub.html [new file with mode: 0644]
LayoutTests/js/regress/script-tests/ftl-object-sub.js [new file with mode: 0644]
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
Source/JavaScriptCore/ftl/FTLCapabilities.cpp
Source/JavaScriptCore/ftl/FTLCompile.cpp
Source/JavaScriptCore/ftl/FTLInlineCacheDescriptor.h
Source/JavaScriptCore/ftl/FTLInlineCacheSize.cpp
Source/JavaScriptCore/ftl/FTLInlineCacheSize.h
Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
Source/JavaScriptCore/ftl/FTLState.h
Source/JavaScriptCore/jit/GPRInfo.h
Source/JavaScriptCore/jit/JITSubGenerator.h
Source/JavaScriptCore/tests/stress/op_sub.js

index ade4eabe98146ce3d61e3cd867269de41826fa04..3962cc61b8110013a4ae6c6bb6ee72024669ead2 100644 (file)
@@ -1,3 +1,17 @@
+2015-10-28  Mark Lam  <mark.lam@apple.com>
+
+        Update FTL to support UntypedUse operands for op_sub.
+        https://bugs.webkit.org/show_bug.cgi?id=150562
+
+        Reviewed by Geoffrey Garen.
+
+        * js/regress/ftl-object-sub-expected.txt: Added.
+        * js/regress/ftl-object-sub.html: Added.
+        * js/regress/script-tests/ftl-object-sub.js: Added.
+        (o1.valueOf):
+        (o2.valueOf):
+        (foo):
+
 2015-10-28  Hunseop Jeong  <hs85.jeong@samsung.com>
 
         Unreviewed. EFL gardening: rebaseline more tests after r191623.
diff --git a/LayoutTests/js/regress/ftl-object-sub-expected.txt b/LayoutTests/js/regress/ftl-object-sub-expected.txt
new file mode 100644 (file)
index 0000000..df13d3c
--- /dev/null
@@ -0,0 +1,10 @@
+JSRegress/ftl-object-sub
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+
+PASS no exception thrown
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/LayoutTests/js/regress/ftl-object-sub.html b/LayoutTests/js/regress/ftl-object-sub.html
new file mode 100644 (file)
index 0000000..13c0ac3
--- /dev/null
@@ -0,0 +1,12 @@
+<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
+<html>
+<head>
+<script src="../../resources/js-test-pre.js"></script>
+</head>
+<body>
+<script src="../../resources/regress-pre.js"></script>
+<script src="script-tests/ftl-object-sub.js"></script>
+<script src="../../resources/regress-post.js"></script>
+<script src="../../resources/js-test-post.js"></script>
+</body>
+</html>
diff --git a/LayoutTests/js/regress/script-tests/ftl-object-sub.js b/LayoutTests/js/regress/script-tests/ftl-object-sub.js
new file mode 100644 (file)
index 0000000..5fe3f4c
--- /dev/null
@@ -0,0 +1,32 @@
+//@ runDefault
+var o1 = {
+    i: 0,
+    valueOf: function() { return this.i; }
+};
+var o2 = {
+    i: 0,
+    valueOf: function() { return this.i; }
+};
+
+result = 0;
+function foo(a, b) {
+    var result = 0;
+    for (var j = 0; j < 10; j++) {
+        if (a > b)
+            result += a - b;
+        else
+            result += b - 1;
+    }
+    return result;
+}
+noInline(foo);
+
+for (var i = 0; i <= 100000; i++) {
+    o1.i = i + 2;
+    o2.i = i;
+    result += foo(o1, o2);
+}
+print(result);
+
+if (result != 2000020)
+    throw "Bad result: " + result;
index 56690eb7e03ea3a863ed18fd67c5ec021817458f..ebeca296cb97a50b221d880a5bec4915e80f78a0 100644 (file)
@@ -1,3 +1,57 @@
+2015-10-28  Mark Lam  <mark.lam@apple.com>
+
+        Update FTL to support UntypedUse operands for op_sub.
+        https://bugs.webkit.org/show_bug.cgi?id=150562
+
+        Reviewed by Geoffrey Garen.
+
+        * assembler/MacroAssemblerARM64.h:
+        - make the dataTempRegister and memoryTempRegister public so that we can
+          move input registers out of them if needed.
+
+        * ftl/FTLCapabilities.cpp:
+        (JSC::FTL::canCompile):
+        - We can now compile ArithSub.
+
+        * ftl/FTLCompile.cpp:
+        - Added BinaryArithGenerationContext to shuffle registers into a state that is
+          expected by the baseline snippet generator.  This includes:
+          1. Making sure that the input and output registers are not in the tag or
+             scratch registers.
+          2. Loading the tag registers with expected values.
+          3. Restoring the registers to their original value on return.
+        - Added code to implement the ArithSub inline cache.
+
+        * ftl/FTLInlineCacheDescriptor.h:
+        (JSC::FTL::ArithSubDescriptor::ArithSubDescriptor):
+        (JSC::FTL::ArithSubDescriptor::leftType):
+        (JSC::FTL::ArithSubDescriptor::rightType):
+
+        * ftl/FTLInlineCacheSize.cpp:
+        (JSC::FTL::sizeOfArithSub):
+        * ftl/FTLInlineCacheSize.h:
+
+        * ftl/FTLLowerDFGToLLVM.cpp:
+        (JSC::FTL::DFG::LowerDFGToLLVM::compileArithAddOrSub):
+        - Added handling for UntypedUse for the ArithSub case.
+
+        * ftl/FTLState.h:
+        * jit/GPRInfo.h:
+        (JSC::GPRInfo::reservedRegisters):
+
+        * jit/JITSubGenerator.h:
+        (JSC::JITSubGenerator::generateFastPath):
+        - When the result register is the same as one of the input registers, we'll end up
+          corrupting the input in the fast path even if we determine that we need to go to
+          the slow path.  We now move the input into the scratch register, operate
+          on that instead, and only move the result into the result register after
+          the fast path has succeeded.
+
+        * tests/stress/op_sub.js:
+        (o1.valueOf):
+        (runTest):
+        - Added some debugging tools: flags for verbose logging, and eager abort on fail.
+
 2015-10-28  Mark Lam  <mark.lam@apple.com>
 
         Fix a typo in ProbeContext::fpr().
index 09813954811129c2180743f6924b43279f97eda4..89543168a0fdfa0fbe360d373e80eb7299429e71 100644 (file)
 namespace JSC {
 
 class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
+public:
     static const RegisterID dataTempRegister = ARM64Registers::ip0;
     static const RegisterID memoryTempRegister = ARM64Registers::ip1;
+
+private:
     static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
     static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
     static const intptr_t maskHalfWord0 = 0xffffl;
index 5dd76325acfc4af9d71a5f0bea1a5cc4a2520203..e327f76967fbd5da470119b7f4f5d04adab554a5 100644 (file)
@@ -84,6 +84,7 @@ inline CapabilityLevel canCompile(Node* node)
     case StrCat:
     case ArithAdd:
     case ArithClz32:
+    case ArithSub:
     case ArithMul:
     case ArithDiv:
     case ArithMod:
@@ -211,10 +212,6 @@ inline CapabilityLevel canCompile(Node* node)
     case PutSetterByVal:
         // These are OK.
         break;
-    case ArithSub:
-        if (node->result() == NodeResultJS)
-            return CannotCompile;
-        break;
 
     case Identity:
         // No backend handles this because it will be optimized out. But we may check
index f8cae469f79b28ec3644ab08c3c5ea2ee1e3c428..cf2f2f150bd30b8d4c824b2b4775221c99c38c9c 100644 (file)
@@ -34,6 +34,7 @@
 #include "CCallHelpers.h"
 #include "DFGCommon.h"
 #include "DFGGraphSafepoint.h"
+#include "DFGOperations.h"
 #include "DataView.h"
 #include "Disassembler.h"
 #include "FTLExitThunkGenerator.h"
@@ -41,6 +42,7 @@
 #include "FTLJITCode.h"
 #include "FTLThunks.h"
 #include "FTLUnwindInfo.h"
+#include "JITSubGenerator.h"
 #include "LLVMAPI.h"
 #include "LinkBuffer.h"
 
@@ -48,6 +50,8 @@ namespace JSC { namespace FTL {
 
 using namespace DFG;
 
+static RegisterSet usedRegistersFor(const StackMaps::Record&);
+
 static uint8_t* mmAllocateCodeSection(
     void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
 {
@@ -301,6 +305,157 @@ static void generateCheckInICFastPath(
     }
 }
 
+class BinarySnippetRegisterContext {
+    // The purpose of this class is to shuffle registers to get them into the state
+    // that baseline code expects so that we can use the baseline snippet generators i.e.
+    //    1. ensure that the inputs and outputs are not in tag or scratch registers.
+    //    2. tag registers are loaded with the expected values.
+    //
+    // We also need to:
+    //    1. restore the input and tag registers to the values that LLVM put there originally.
+    //    2. that is except when one of the input registers is also the result register.
+    //       In this case, we don't want to trash the result, and hence, should not restore into it.
+
+public:
+    BinarySnippetRegisterContext(ScratchRegisterAllocator& allocator, GPRReg& result, GPRReg& left, GPRReg& right)
+        : m_allocator(allocator)
+        , m_result(result)
+        , m_left(left)
+        , m_right(right)
+        , m_origResult(result)
+        , m_origLeft(left)
+        , m_origRight(right)
+    {
+        m_allocator.lock(m_result);
+        m_allocator.lock(m_left);
+        m_allocator.lock(m_right);
+
+        RegisterSet inputRegisters = RegisterSet(m_left, m_right);
+        RegisterSet inputAndOutputRegisters = RegisterSet(inputRegisters, m_result);
+
+        RegisterSet reservedRegisters;
+        for (GPRReg reg : GPRInfo::reservedRegisters())
+            reservedRegisters.set(reg);
+
+        if (reservedRegisters.get(m_left))
+            m_left = m_allocator.allocateScratchGPR();
+        if (reservedRegisters.get(m_right))
+            m_right = m_allocator.allocateScratchGPR();
+        if (!inputRegisters.get(m_result) && reservedRegisters.get(m_result))
+            m_result = m_allocator.allocateScratchGPR();
+        
+        if (!inputAndOutputRegisters.get(GPRInfo::tagMaskRegister))
+            m_savedTagMaskRegister = m_allocator.allocateScratchGPR();
+        if (!inputAndOutputRegisters.get(GPRInfo::tagTypeNumberRegister))
+            m_savedTagTypeNumberRegister = m_allocator.allocateScratchGPR();
+    }
+
+    void initializeRegisters(CCallHelpers& jit)
+    {
+        if (m_left != m_origLeft)
+            jit.move(m_origLeft, m_left);
+        if (m_right != m_origRight)
+            jit.move(m_origRight, m_right);
+
+        if (m_savedTagMaskRegister != InvalidGPRReg)
+            jit.move(GPRInfo::tagMaskRegister, m_savedTagMaskRegister);
+        if (m_savedTagTypeNumberRegister != InvalidGPRReg)
+            jit.move(GPRInfo::tagTypeNumberRegister, m_savedTagTypeNumberRegister);
+
+        jit.emitMaterializeTagCheckRegisters();
+    }
+
+    void restoreRegisters(CCallHelpers& jit)
+    {
+        if (m_origLeft != m_left && m_origLeft != m_origResult)
+            jit.move(m_left, m_origLeft);
+        if (m_origRight != m_right && m_origRight != m_origResult)
+            jit.move(m_right, m_origRight);
+        
+        if (m_savedTagMaskRegister != InvalidGPRReg)
+            jit.move(m_savedTagMaskRegister, GPRInfo::tagMaskRegister);
+        if (m_savedTagTypeNumberRegister != InvalidGPRReg)
+            jit.move(m_savedTagTypeNumberRegister, GPRInfo::tagTypeNumberRegister);
+    }
+
+private:
+    ScratchRegisterAllocator& m_allocator;
+
+    GPRReg& m_result;
+    GPRReg& m_left;
+    GPRReg& m_right;
+
+    GPRReg m_origResult;
+    GPRReg m_origLeft;
+    GPRReg m_origRight;
+
+    GPRReg m_savedTagMaskRegister { InvalidGPRReg };
+    GPRReg m_savedTagTypeNumberRegister { InvalidGPRReg };
+};
+
+static void generateArithSubICFastPath(
+    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
+    StackMaps::RecordMap& recordMap, ArithSubDescriptor& ic)
+{
+    VM& vm = state.graph.m_vm;
+    size_t sizeOfIC = sizeOfArithSub();
+
+    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
+    if (iter == recordMap.end())
+        return; // It was optimized out.
+
+    Vector<StackMaps::RecordAndIndex>& records = iter->value;
+
+    RELEASE_ASSERT(records.size() == ic.m_slowPathStarts.size());
+
+    for (unsigned i = records.size(); i--;) {
+        StackMaps::Record& record = records[i].record;
+
+        CCallHelpers fastPathJIT(&vm, codeBlock);
+
+        GPRReg result = record.locations[0].directGPR();
+        GPRReg left = record.locations[1].directGPR();
+        GPRReg right = record.locations[2].directGPR();
+
+        RegisterSet usedRegisters = usedRegistersFor(record);
+        ScratchRegisterAllocator allocator(usedRegisters);
+
+        BinarySnippetRegisterContext context(allocator, result, left, right);
+
+        GPRReg scratchGPR = allocator.allocateScratchGPR();
+        FPRReg leftFPR = allocator.allocateScratchFPR();
+        FPRReg rightFPR = allocator.allocateScratchFPR();
+        FPRReg scratchFPR = InvalidFPRReg;
+
+        JITSubGenerator gen(JSValueRegs(result), JSValueRegs(left), JSValueRegs(right), ic.leftType(), ic.rightType(), leftFPR, rightFPR, scratchGPR, scratchFPR);
+
+        auto numberOfBytesUsedToPreserveReusedRegisters =
+            allocator.preserveReusedRegistersByPushing(fastPathJIT, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
+
+        context.initializeRegisters(fastPathJIT);
+        gen.generateFastPath(fastPathJIT);
+
+        gen.endJumpList().link(&fastPathJIT);
+        context.restoreRegisters(fastPathJIT);
+        allocator.restoreReusedRegistersByPopping(fastPathJIT, numberOfBytesUsedToPreserveReusedRegisters,
+            ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+        CCallHelpers::Jump done = fastPathJIT.jump();
+
+        gen.slowPathJumpList().link(&fastPathJIT);
+        context.restoreRegisters(fastPathJIT);
+        allocator.restoreReusedRegistersByPopping(fastPathJIT, numberOfBytesUsedToPreserveReusedRegisters,
+            ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+        CCallHelpers::Jump slowPathStart = fastPathJIT.jump();
+
+        char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
+        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "ArithSub inline cache fast path", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
+            linkBuffer.link(done, CodeLocationLabel(startOfIC + sizeOfIC));
+            state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
+            
+            linkBuffer.link(slowPathStart, state.finalizer->sideCodeLinkBuffer->locationOf(ic.m_slowPathStarts[i]));
+        });
+    }
+}
 
 static RegisterSet usedRegistersFor(const StackMaps::Record& record)
 {
@@ -460,6 +615,7 @@ static void fixFunctionBasedOnStackMaps(
     if (!state.getByIds.isEmpty()
         || !state.putByIds.isEmpty()
         || !state.checkIns.isEmpty()
+        || !state.arithSubs.isEmpty()
         || !state.lazySlowPaths.isEmpty()) {
         CCallHelpers slowPathJIT(&vm, codeBlock);
         
@@ -582,6 +738,34 @@ static void fixFunctionBasedOnStackMaps(
             }
         }
 
+        for (size_t i = state.arithSubs.size(); i--;) {
+            ArithSubDescriptor& arithSub = state.arithSubs[i];
+            
+            if (verboseCompilationEnabled())
+                dataLog("Handling ArithSub stackmap #", arithSub.stackmapID(), "\n");
+            
+            auto iter = recordMap.find(arithSub.stackmapID());
+            if (iter == recordMap.end())
+                continue; // It was optimized out.
+            
+            CodeOrigin codeOrigin = arithSub.codeOrigin();
+            for (unsigned i = 0; i < iter->value.size(); ++i) {
+                StackMaps::Record& record = iter->value[i].record;
+                RegisterSet usedRegisters = usedRegistersFor(record);
+
+                GPRReg result = record.locations[0].directGPR();
+                GPRReg left = record.locations[1].directGPR();
+                GPRReg right = record.locations[2].directGPR();
+
+                arithSub.m_slowPathStarts.append(slowPathJIT.label());
+
+                callOperation(state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
+                    operationValueSub, result, left, right).call();
+
+                arithSub.m_slowPathDone.append(slowPathJIT.jump());
+            }
+        }
+
         for (unsigned i = state.lazySlowPaths.size(); i--;) {
             LazySlowPathDescriptor& descriptor = state.lazySlowPaths[i];
 
@@ -650,6 +834,10 @@ static void fixFunctionBasedOnStackMaps(
                 state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                 sizeOfIn()); 
         }
+        for (unsigned i = state.arithSubs.size(); i--;) {
+            ArithSubDescriptor& arithSub = state.arithSubs[i];
+            generateArithSubICFastPath(state, codeBlock, generatedFunction, recordMap, arithSub);
+        }
         for (unsigned i = state.lazySlowPaths.size(); i--;) {
             LazySlowPathDescriptor& lazySlowPath = state.lazySlowPaths[i];
             for (auto& tuple : lazySlowPath.m_generators) {
index e798e87681c3642cb08598fe0747dfd05e22450d..b7cfa771e9c32f0c148820d40ea35cff05686f05 100644 (file)
@@ -123,6 +123,25 @@ public:
     Vector<CheckInGenerator> m_generators;
 };
 
+class ArithSubDescriptor : public InlineCacheDescriptor {
+public:
+    ArithSubDescriptor(unsigned stackmapID, CodeOrigin codeOrigin, ResultType leftType, ResultType rightType)
+        : InlineCacheDescriptor(stackmapID, codeOrigin, nullptr)
+        , m_leftType(leftType)
+        , m_rightType(rightType)
+    {
+    }
+
+    ResultType leftType() const { return m_leftType; }
+    ResultType rightType() const { return m_rightType; }
+    
+    Vector<MacroAssembler::Label> m_slowPathStarts;
+
+private:
+    ResultType m_leftType;
+    ResultType m_rightType;
+};
+
 // You can create a lazy slow path call in lowerDFGToLLVM by doing:
 // m_ftlState.lazySlowPaths.append(
 //     LazySlowPathDescriptor(
index 41244270e4970630ec65a8a7425b8aacda9e908e..ca6e238b6c8b0c519b1af10e78ba180fadcfbbf0 100644 (file)
@@ -128,6 +128,23 @@ size_t sizeOfIn()
 #endif
 }
 
+size_t sizeOfArithSub()
+{
+#if CPU(ARM64)
+#ifdef NDEBUG
+    return 192; // ARM64 release.
+#else
+    return 288; // ARM64 debug.
+#endif
+#else // CPU(X86_64)
+#ifdef NDEBUG
+    return 184; // X86_64 release.
+#else
+    return 259; // X86_64 debug.
+#endif
+#endif
+}
+
 size_t sizeOfICFor(Node* node)
 {
     switch (node->op()) {
index fed850bc7643ac20e25d5247ae5def2ca47fbd84..8a7b4612bd809c25feb42c918f3ddb4a0afb74bc 100644 (file)
@@ -46,6 +46,7 @@ size_t sizeOfTailCallForwardVarargs();
 size_t sizeOfConstructVarargs();
 size_t sizeOfConstructForwardVarargs();
 size_t sizeOfIn();
+size_t sizeOfArithSub();
 
 size_t sizeOfICFor(DFG::Node*);
 
index 12de9ee7be9b741bf2682f2d4b244299f7ccf989..2ac7b089ef446b305f2b6fdae7d364c6aa8be923 100644 (file)
@@ -1476,7 +1476,36 @@ private:
             setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
             break;
         }
+
+        case UntypedUse: {
+            if (!isSub) {
+                DFG_CRASH(m_graph, m_node, "Bad use kind");
+                break;
+            }
             
+            unsigned stackmapID = m_stackmapIDs++;
+
+            if (Options::verboseCompilation())
+                dataLog("    Emitting ArithSub patchpoint with stackmap #", stackmapID, "\n");
+
+            LValue left = lowJSValue(m_node->child1());
+            LValue right = lowJSValue(m_node->child2());
+
+            // Arguments: id, bytes, target, numArgs, args...
+            LValue call = m_out.call(
+                m_out.patchpointInt64Intrinsic(),
+                m_out.constInt64(stackmapID), m_out.constInt32(sizeOfArithSub()),
+                constNull(m_out.ref8), m_out.constInt32(2), left, right);
+            setInstructionCallingConvention(call, LLVMAnyRegCallConv);
+
+            m_ftlState.arithSubs.append(ArithSubDescriptor(stackmapID, m_node->origin.semantic,
+                abstractValue(m_node->child1()).resultType(),
+                abstractValue(m_node->child2()).resultType()));
+
+            setJSValue(call);
+            break;
+        }
+
         default:
             DFG_CRASH(m_graph, m_node, "Bad use kind");
             break;
index 3c7312b0cf7c58f851cf561854d1a941f82c2d7b..abc54603d1cfccbdeaf2eecac237eb1dd3067fbf 100644 (file)
@@ -78,6 +78,7 @@ public:
     SegmentedVector<GetByIdDescriptor> getByIds;
     SegmentedVector<PutByIdDescriptor> putByIds;
     SegmentedVector<CheckInDescriptor> checkIns;
+    SegmentedVector<ArithSubDescriptor> arithSubs;
     SegmentedVector<LazySlowPathDescriptor> lazySlowPaths;
     Vector<JSCall> jsCalls;
     Vector<JSCallVarargs> jsCallVarargses;
index 4cfdc68b264dc4d55264f711e01024cebfe5881d..f84b6aa3fb081587683cbdad166c1e0fe5d279c3 100644 (file)
@@ -27,6 +27,7 @@
 #define GPRInfo_h
 
 #include "MacroAssembler.h"
+#include <array>
 #include <wtf/PrintStream.h>
 
 namespace JSC {
@@ -398,6 +399,8 @@ public:
     static const GPRReg callFrameRegister = X86Registers::ebp;
     static const GPRReg tagTypeNumberRegister = X86Registers::r14;
     static const GPRReg tagMaskRegister = X86Registers::r15;
+    static const GPRReg scratchRegister = MacroAssembler::scratchRegister;
+
     // Temporary registers.
     static const GPRReg regT0 = X86Registers::eax;
 #if !OS(WINDOWS)
@@ -500,6 +503,16 @@ public:
         return nameForRegister[reg];
     }
 
+    static const std::array<GPRReg, 3>& reservedRegisters()
+    {
+        static const std::array<GPRReg, 3> reservedRegisters { {
+            scratchRegister,
+            tagTypeNumberRegister,
+            tagMaskRegister,
+        } };
+        return reservedRegisters;
+    }
+    
     static const unsigned InvalidIndex = 0xffffffff;
 };
 
@@ -603,6 +616,8 @@ public:
     static const GPRReg callFrameRegister = ARM64Registers::fp;
     static const GPRReg tagTypeNumberRegister = ARM64Registers::x27;
     static const GPRReg tagMaskRegister = ARM64Registers::x28;
+    static const GPRReg dataTempRegister = MacroAssembler::dataTempRegister;
+    static const GPRReg memoryTempRegister = MacroAssembler::memoryTempRegister;
     // Temporary registers.
     static const GPRReg regT0 = ARM64Registers::x0;
     static const GPRReg regT1 = ARM64Registers::x1;
@@ -695,6 +710,17 @@ public:
         return nameForRegister[reg];
     }
 
+    static const std::array<GPRReg, 4>& reservedRegisters()
+    {
+        static const std::array<GPRReg, 4> reservedRegisters { {
+            dataTempRegister,
+            memoryTempRegister,
+            tagTypeNumberRegister,
+            tagMaskRegister,
+        } };
+        return reservedRegisters;
+    }
+    
     static const unsigned InvalidIndex = 0xffffffff;
 };
 
index 03b7955903c5770dc1e375000f5b118b3bf9c0a5..5312d44f1972965eccdbf9f69b19e3fa41de4bec 100644 (file)
 
 #include "CCallHelpers.h"
 #include "ResultType.h"
+#include "ScratchRegisterAllocator.h"
 
 namespace JSC {
-    
+
 class JITSubGenerator {
 public:
 
@@ -61,11 +62,11 @@ public:
         CCallHelpers::Jump leftNotInt = jit.branchIfNotInt32(m_left);
         CCallHelpers::Jump rightNotInt = jit.branchIfNotInt32(m_right);
 
-        jit.move(m_left.payloadGPR(), m_result.payloadGPR());
+        jit.move(m_left.payloadGPR(), m_scratchGPR);
         m_slowPathJumpList.append(
-            jit.branchSub32(CCallHelpers::Overflow, m_right.payloadGPR(), m_result.payloadGPR()));
+            jit.branchSub32(CCallHelpers::Overflow, m_right.payloadGPR(), m_scratchGPR));
 
-        jit.boxInt32(m_result.payloadGPR(), m_result);
+        jit.boxInt32(m_scratchGPR, m_result);
 
         m_endJumpList.append(jit.jump());
 
@@ -74,7 +75,7 @@ public:
             m_slowPathJumpList.append(rightNotInt);
             return;
         }
-        
+
         leftNotInt.link(&jit);
         if (!m_leftType.definitelyIsNumber())
             m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
index 6374ecf35e1a40e07f1461107a974b2e6f8a6f55..3e60d74ae15ebb84918b67c490d0c305790cb878 100644 (file)
@@ -19,6 +19,9 @@
 // If all goes well, this test module will terminate silently. If not, it will print
 // errors.
 
+var verbose = false;
+var abortOnFirstFail = false;
+
 var o1 = {
     valueOf: function() { return 10; }
 };
@@ -275,6 +278,9 @@ function runTest(test) {
         for (var i = 0; i < 10000; i++) {
             for (var scenarioID = 0; scenarioID < scenarios.length; scenarioID++) {
                 var scenario = scenarios[scenarioID];
+                if (verbose)
+                    print("Testing " + test.name + ":" + scenario.name + " on iteration " + i + ": expecting " + scenario.expected); 
+
                 var result = testFunc(scenario.x, scenario.y);
                 if (result == scenario.expected)
                     continue;
@@ -282,11 +288,15 @@ function runTest(test) {
                     continue;
                 if (!failedScenario[scenarioID]) {
                     errorReport += "FAIL: " + test.name + ":" + scenario.name + " started failing on iteration " + i + ": expected " + scenario.expected + ", actual " + result + "\n";
+                    if (abortOnFirstFail)
+                        throw errorReport;
                     failedScenario[scenarioID] = scenario;
                 }
             }
         }
     } catch(e) {
+        if (abortOnFirstFail)
+            throw e; // Negate the catch by re-throwing.
         errorReport += "Unexpected exception: " + e + "\n";
     }
 }