Unreviewed, rolling out r252229.
author     jlewis3@apple.com <jlewis3@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>  Sat, 9 Nov 2019 00:11:28 +0000 (00:11 +0000)
committer  jlewis3@apple.com <jlewis3@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>  Sat, 9 Nov 2019 00:11:28 +0000 (00:11 +0000)
This caused internal failures.

Reverted changeset:

"Split ArithProfile into a Unary and a Binary version"
https://bugs.webkit.org/show_bug.cgi?id=202832
https://trac.webkit.org/changeset/252229

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@252273 268f45cc-cd09-0410-ab3c-d52691b4dbfc
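
For context: the reverted patch (r252229) had split ArithProfile into a template ArithProfile<BitfieldType> base with 16-bit UnaryArithProfile and BinaryArithProfile subclasses, and this rollout restores the single uint32_t-backed ArithProfile that also carries the lhs/rhs ResultType bits. Below is a minimal, self-contained sketch of the restored bit-packing scheme, reconstructed from the ArithProfile.h hunk further down; it is not WebKit code, and the 7-bit ResultType width is an assumption inferred from the 0x800000 static_assert in that hunk.

    // Hypothetical sketch of the restored ArithProfile bit layout (not WebKit code).
    // Low to high: 6 ObservedResults flag bits, rhs/lhs ResultType, rhs/lhs
    // ObservedType (3 bits each), then the special-fast-path bit.
    #include <cassert>
    #include <cstdint>

    namespace sketch {

    constexpr uint32_t numberOfFlagBits = 6;
    constexpr uint32_t resultTypeBits   = 7; // assumption: mirrors ResultType::numBitsNeeded
    constexpr uint32_t observedTypeBits = 3; // matches the static_assert in the hunk

    constexpr uint32_t rhsResultTypeShift   = numberOfFlagBits;
    constexpr uint32_t lhsResultTypeShift   = rhsResultTypeShift + resultTypeBits;
    constexpr uint32_t rhsObservedTypeShift = lhsResultTypeShift + resultTypeBits;
    constexpr uint32_t lhsObservedTypeShift = rhsObservedTypeShift + observedTypeBits;
    constexpr uint32_t specialFastPathBit   = 1u << (lhsObservedTypeShift + observedTypeBits);

    struct ArithProfile {
        uint32_t m_bits { 0 };

        // Clear the 3-bit lhs ObservedType field, then set it, finishing with a single
        // store to m_bits (the real class notes that m_bits is only updated in one operation).
        void setLhsObservedType(uint32_t type)
        {
            uint32_t bits = m_bits;
            bits &= ~(((1u << observedTypeBits) - 1) << lhsObservedTypeShift);
            bits |= type << lhsObservedTypeShift;
            m_bits = bits;
        }

        uint32_t lhsObservedType() const
        {
            return (m_bits >> lhsObservedTypeShift) & ((1u << observedTypeBits) - 1);
        }
    };

    } // namespace sketch

    int main()
    {
        sketch::ArithProfile p;
        p.setLhsObservedType(0b001); // "saw int32": yields 0x800000, as the hunk's static_assert expects
        assert(p.lhsObservedType() == 0b001);
        assert(!(p.m_bits & sketch::specialFastPathBit));
        return 0;
    }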

46 files changed:
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/bytecode/ArithProfile.cpp
Source/JavaScriptCore/bytecode/ArithProfile.h
Source/JavaScriptCore/bytecode/BytecodeList.rb
Source/JavaScriptCore/bytecode/CodeBlock.cpp
Source/JavaScriptCore/bytecode/CodeBlock.h
Source/JavaScriptCore/bytecode/Fits.h
Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Source/JavaScriptCore/dfg/DFGGraph.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
Source/JavaScriptCore/jit/JIT.h
Source/JavaScriptCore/jit/JITAddGenerator.cpp
Source/JavaScriptCore/jit/JITAddGenerator.h
Source/JavaScriptCore/jit/JITArithmetic.cpp
Source/JavaScriptCore/jit/JITDivGenerator.cpp
Source/JavaScriptCore/jit/JITDivGenerator.h
Source/JavaScriptCore/jit/JITInlines.h
Source/JavaScriptCore/jit/JITMathIC.h
Source/JavaScriptCore/jit/JITMulGenerator.cpp
Source/JavaScriptCore/jit/JITMulGenerator.h
Source/JavaScriptCore/jit/JITNegGenerator.cpp
Source/JavaScriptCore/jit/JITNegGenerator.h
Source/JavaScriptCore/jit/JITOperations.cpp
Source/JavaScriptCore/jit/JITOperations.h
Source/JavaScriptCore/jit/JITSubGenerator.cpp
Source/JavaScriptCore/jit/JITSubGenerator.h
Source/JavaScriptCore/llint/LLIntData.cpp
Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
Source/JavaScriptCore/llint/LowLevelInterpreter.asm
Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
Source/JavaScriptCore/offlineasm/arm.rb
Source/JavaScriptCore/offlineasm/arm64.rb
Source/JavaScriptCore/offlineasm/cloop.rb
Source/JavaScriptCore/offlineasm/instructions.rb
Source/JavaScriptCore/offlineasm/mips.rb
Source/JavaScriptCore/offlineasm/risc.rb
Source/JavaScriptCore/offlineasm/x86.rb
Source/JavaScriptCore/parser/ResultType.h
Source/JavaScriptCore/runtime/CommonSlowPaths.cpp

diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index 718c28e..501125f 100644
@@ -1,3 +1,15 @@
+2019-11-08  Matt Lewis  <jlewis3@apple.com>
+
+        Unreviewed, rolling out r252229.
+
+        This caused internal failures.
+
+        Reverted changeset:
+
+        "Split ArithProfile into a Unary and a Binary version"
+        https://bugs.webkit.org/show_bug.cgi?id=202832
+        https://trac.webkit.org/changeset/252229
+
 2019-11-08  Chris Dumez  <cdumez@apple.com>
 
         Make DeferredPromise behave nicely with regards to the back/forward cache
diff --git a/Source/JavaScriptCore/bytecode/ArithProfile.cpp b/Source/JavaScriptCore/bytecode/ArithProfile.cpp
index 7f46f73..999933b 100644
@@ -32,8 +32,7 @@
 namespace JSC {
 
 #if ENABLE(JIT)
-template<typename BitfieldType>
-void ArithProfile<BitfieldType>::emitObserveResult(CCallHelpers& jit, JSValueRegs regs, TagRegistersMode mode)
+void ArithProfile::emitObserveResult(CCallHelpers& jit, JSValueRegs regs, TagRegistersMode mode)
 {
     if (!shouldEmitSetDouble() && !shouldEmitSetNonNumeric() && !shouldEmitSetBigInt())
         return;
@@ -59,51 +58,41 @@ void ArithProfile<BitfieldType>::emitObserveResult(CCallHelpers& jit, JSValueReg
     done.link(&jit);
 }
 
-template<typename BitfieldType>
-bool ArithProfile<BitfieldType>::shouldEmitSetDouble() const
+bool ArithProfile::shouldEmitSetDouble() const
 {
-    uint32_t mask = Int32Overflow | Int52Overflow | NegZeroDouble | NonNegZeroDouble;
+    uint32_t mask = ArithProfile::Int32Overflow | ArithProfile::Int52Overflow | ArithProfile::NegZeroDouble | ArithProfile::NonNegZeroDouble;
     return (m_bits & mask) != mask;
 }
 
-template<typename BitfieldType>
-void ArithProfile<BitfieldType>::emitSetDouble(CCallHelpers& jit) const
+void ArithProfile::emitSetDouble(CCallHelpers& jit) const
 {
     if (shouldEmitSetDouble())
-        jit.or32(CCallHelpers::TrustedImm32(Int32Overflow | Int52Overflow | NegZeroDouble | NonNegZeroDouble), CCallHelpers::AbsoluteAddress(addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::Int32Overflow | ArithProfile::Int52Overflow | ArithProfile::NegZeroDouble | ArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(addressOfBits()));
 }
 
-template<typename BitfieldType>
-bool ArithProfile<BitfieldType>::shouldEmitSetNonNumeric() const
+bool ArithProfile::shouldEmitSetNonNumeric() const
 {
     uint32_t mask = ArithProfile::NonNumeric;
     return (m_bits & mask) != mask;
 }
 
-template<typename BitfieldType>
-bool ArithProfile<BitfieldType>::shouldEmitSetBigInt() const
+bool ArithProfile::shouldEmitSetBigInt() const
 {
     uint32_t mask = ArithProfile::BigInt;
     return (m_bits & mask) != mask;
 }
 
-template<typename BitfieldType>
-void ArithProfile<BitfieldType>::emitSetNonNumeric(CCallHelpers& jit) const
+void ArithProfile::emitSetNonNumeric(CCallHelpers& jit) const
 {
     if (shouldEmitSetNonNumeric())
-        jit.or32(CCallHelpers::TrustedImm32(NonNumeric), CCallHelpers::AbsoluteAddress(addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NonNumeric), CCallHelpers::AbsoluteAddress(addressOfBits()));
 }
 
-template<typename BitfieldType>
-void ArithProfile<BitfieldType>::emitSetBigInt(CCallHelpers& jit) const
+void ArithProfile::emitSetBigInt(CCallHelpers& jit) const
 {
     if (shouldEmitSetBigInt())
-        jit.or32(CCallHelpers::TrustedImm32(BigInt), CCallHelpers::AbsoluteAddress(addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::BigInt), CCallHelpers::AbsoluteAddress(addressOfBits()));
 }
-
-// Generate the implementations of the functions above for UnaryArithProfile/BinaryArithProfile
-// If changing the size of either, add the corresponding lines here.
-template class ArithProfile<uint16_t>;
 #endif // ENABLE(JIT)
 
 } // namespace JSC
@@ -112,8 +101,7 @@ namespace WTF {
     
 using namespace JSC;
 
-template <typename T>
-void printInternal(PrintStream& out, const ArithProfile<T>& profile)
+void printInternal(PrintStream& out, const ArithProfile& profile)
 {
     const char* separator = "";
 
@@ -147,30 +135,19 @@ void printInternal(PrintStream& out, const ArithProfile<T>& profile)
             separator = "|";
         }
     }
-    out.print(">");
-}
-
-void printInternal(PrintStream& out, const UnaryArithProfile& profile)
-{
-    printInternal(out, static_cast<ArithProfile<UnaryArithProfileBase>>(profile));
-
-    out.print(" Arg ObservedType:<");
-    out.print(profile.argObservedType());
-    out.print(">");
-}
-
-void printInternal(PrintStream& out, const BinaryArithProfile& profile)
-{
-    printInternal(out, static_cast<ArithProfile<UnaryArithProfileBase>>(profile));
-
     if (profile.tookSpecialFastPath())
-        out.print(" Took special fast path.");
+        out.print(separator, "Took special fast path.");
+    out.print(">");
 
     out.print(" LHS ObservedType:<");
     out.print(profile.lhsObservedType());
     out.print("> RHS ObservedType:<");
     out.print(profile.rhsObservedType());
     out.print(">");
+
+    out.print(" LHS ResultType:<", RawPointer(bitwise_cast<void*>(static_cast<uintptr_t>(profile.lhsResultType().bits()))));
+    out.print("> RHS ResultType:<", RawPointer(bitwise_cast<void*>(static_cast<uintptr_t>(profile.rhsResultType().bits()))));
+    out.print(">");
 }
 
 void printInternal(PrintStream& out, const JSC::ObservedType& observedType)
diff --git a/Source/JavaScriptCore/bytecode/ArithProfile.h b/Source/JavaScriptCore/bytecode/ArithProfile.h
index cad99f4..fcc4e28 100644
@@ -66,217 +66,119 @@ private:
     uint8_t m_bits { 0 };
 };
 
-template <typename BitfieldType>
-class ArithProfile {
-public:
-    enum ObservedResults {
-        NonNegZeroDouble = 1 << 0,
-        NegZeroDouble    = 1 << 1,
-        NonNumeric       = 1 << 2,
-        Int32Overflow    = 1 << 3,
-        Int52Overflow    = 1 << 4,
-        BigInt           = 1 << 5,
-    };
-    static constexpr uint32_t observedResultsNumBitsNeeded = 6;
-
-    bool didObserveNonInt32() const { return hasBits(NonNegZeroDouble | NegZeroDouble | NonNumeric | BigInt); }
-    bool didObserveDouble() const { return hasBits(NonNegZeroDouble | NegZeroDouble); }
-    bool didObserveNonNegZeroDouble() const { return hasBits(NonNegZeroDouble); }
-    bool didObserveNegZeroDouble() const { return hasBits(NegZeroDouble); }
-    bool didObserveNonNumeric() const { return hasBits(NonNumeric); }
-    bool didObserveBigInt() const { return hasBits(BigInt); }
-    bool didObserveInt32Overflow() const { return hasBits(Int32Overflow); }
-    bool didObserveInt52Overflow() const { return hasBits(Int52Overflow); }
-
-    void setObservedNonNegZeroDouble() { setBit(NonNegZeroDouble); }
-    void setObservedNegZeroDouble() { setBit(NegZeroDouble); }
-    void setObservedNonNumeric() { setBit(NonNumeric); }
-    void setObservedBigInt() { setBit(BigInt); }
-    void setObservedInt32Overflow() { setBit(Int32Overflow); }
-    void setObservedInt52Overflow() { setBit(Int52Overflow); }
-
-    void observeResult(JSValue value)
-    {
-        if (value.isInt32())
-            return;
-        if (value.isNumber()) {
-            m_bits |= Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble;
-            return;
-        }
-        if (value && value.isBigInt()) {
-            m_bits |= BigInt;
-            return;
-        }
-        m_bits |= NonNumeric;
-    }
-
-    const void* addressOfBits() const { return &m_bits; }
-
-#if ENABLE(JIT)
-    // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble) if it sees a
-    // double. Sets NonNumeric if it sees a non-numeric.
-    void emitObserveResult(CCallHelpers&, JSValueRegs, TagRegistersMode = HaveTagRegisters);
-
-    // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble).
-    bool shouldEmitSetDouble() const;
-    void emitSetDouble(CCallHelpers&) const;
-
-    // Sets NonNumber.
-    void emitSetNonNumeric(CCallHelpers&) const;
-    bool shouldEmitSetNonNumeric() const;
-
-    // Sets BigInt
-    void emitSetBigInt(CCallHelpers&) const;
-    bool shouldEmitSetBigInt() const;
-#endif // ENABLE(JIT)
-
-    constexpr uint32_t bits() const { return m_bits; }
-
-protected:
-    ArithProfile()
-    {
-    }
-
-    bool hasBits(int mask) const { return m_bits & mask; }
-    void setBit(int mask) { m_bits |= mask; }
-
-    BitfieldType m_bits { 0 }; // We take care to update m_bits only in a single operation. We don't ever store an inconsistent bit representation to it.
-};
-
-/* This class stores the following components in 16 bits:
- * - ObservedResults
- * - ObservedType for the argument
- */
-using UnaryArithProfileBase = uint16_t;
-class UnaryArithProfile : public ArithProfile<UnaryArithProfileBase> {
-    static constexpr unsigned argObservedTypeShift = observedResultsNumBitsNeeded;
+struct ArithProfile {
+private:
+    static constexpr uint32_t numberOfFlagBits = 6;
+    static constexpr uint32_t rhsResultTypeShift = numberOfFlagBits;
+    static constexpr uint32_t lhsResultTypeShift = rhsResultTypeShift + ResultType::numBitsNeeded;
+    static constexpr uint32_t rhsObservedTypeShift = lhsResultTypeShift + ResultType::numBitsNeeded;
+    static constexpr uint32_t lhsObservedTypeShift = rhsObservedTypeShift + ObservedType::numBitsNeeded;
 
-    static_assert(argObservedTypeShift + ObservedType::numBitsNeeded <= sizeof(UnaryArithProfileBase) * 8, "Should fit in a the type of the underlying bitfield.");
+    static_assert(ObservedType::numBitsNeeded == 3, "We make a hard assumption about that here.");
+    static constexpr uint32_t clearRhsObservedTypeBitMask = static_cast<uint32_t>(~((1 << rhsObservedTypeShift) | (1 << (rhsObservedTypeShift + 1)) | (1 << (rhsObservedTypeShift + 2))));
+    static constexpr uint32_t clearLhsObservedTypeBitMask = static_cast<uint32_t>(~((1 << lhsObservedTypeShift) | (1 << (lhsObservedTypeShift + 1)) | (1 << (lhsObservedTypeShift + 2))));
 
-    static constexpr UnaryArithProfileBase clearArgObservedTypeBitMask = static_cast<UnaryArithProfileBase>(~(0b111 << argObservedTypeShift));
+    static constexpr uint32_t resultTypeMask = (1 << ResultType::numBitsNeeded) - 1;
+    static constexpr uint32_t observedTypeMask = (1 << ObservedType::numBitsNeeded) - 1;
 
-    static constexpr UnaryArithProfileBase observedTypeMask = (1 << ObservedType::numBitsNeeded) - 1;
+    enum class ConstantTag { Constant };
 
 public:
-    UnaryArithProfile()
-        : ArithProfile<UnaryArithProfileBase>()
-    {
-        ASSERT(argObservedType().isEmpty());
-        ASSERT(argObservedType().isEmpty());
-    }
+    static constexpr uint32_t specialFastPathBit = 1 << (lhsObservedTypeShift + ObservedType::numBitsNeeded);
+    static_assert((lhsObservedTypeShift + ObservedType::numBitsNeeded) <= (sizeof(uint32_t) * 8) - 1, "Should fit in a uint32_t.");
+    static_assert(!(specialFastPathBit & ~clearLhsObservedTypeBitMask), "These bits should not intersect.");
+    static_assert(specialFastPathBit & clearLhsObservedTypeBitMask, "These bits should intersect.");
+    static_assert(specialFastPathBit > ~clearLhsObservedTypeBitMask, "These bits should not intersect and specialFastPathBit should be a higher bit.");
 
-    static constexpr UnaryArithProfileBase observedIntBits()
-    {
-        constexpr ObservedType observedInt32 { ObservedType().withInt32() };
-        constexpr UnaryArithProfileBase bits = observedInt32.bits() << argObservedTypeShift;
-        return bits;
-    }
-    static constexpr UnaryArithProfileBase observedNumberBits()
+    ArithProfile(ResultType arg)
+        : ArithProfile(ConstantTag::Constant, arg)
     {
-        constexpr ObservedType observedNumber { ObservedType().withNumber() };
-        constexpr UnaryArithProfileBase bits = observedNumber.bits() << argObservedTypeShift;
-        return bits;
+        ASSERT(lhsResultType().bits() == arg.bits());
+        ASSERT(lhsObservedType().isEmpty());
+        ASSERT(rhsObservedType().isEmpty());
     }
 
-    constexpr ObservedType argObservedType() const { return ObservedType((m_bits >> argObservedTypeShift) & observedTypeMask); }
-    void setArgObservedType(ObservedType type)
+    ArithProfile(ResultType lhs, ResultType rhs)
+        : ArithProfile(ConstantTag::Constant, lhs, rhs)
     {
-        UnaryArithProfileBase bits = m_bits;
-        bits &= clearArgObservedTypeBitMask;
-        bits |= type.bits() << argObservedTypeShift;
-        m_bits = bits;
-        ASSERT(argObservedType() == type);
+        ASSERT(lhsResultType().bits() == lhs.bits() && rhsResultType().bits() == rhs.bits());
+        ASSERT(lhsObservedType().isEmpty());
+        ASSERT(rhsObservedType().isEmpty());
     }
 
-    void argSawInt32() { setArgObservedType(argObservedType().withInt32()); }
-    void argSawNumber() { setArgObservedType(argObservedType().withNumber()); }
-    void argSawNonNumber() { setArgObservedType(argObservedType().withNonNumber()); }
+    ArithProfile(OperandTypes types)
+        : ArithProfile(types.first(), types.second())
+    { }
 
-    void observeArg(JSValue arg)
-    {
-        UnaryArithProfile newProfile = *this;
-        if (arg.isNumber()) {
-            if (arg.isInt32())
-                newProfile.argSawInt32();
-            else
-                newProfile.argSawNumber();
-        } else
-            newProfile.argSawNonNumber();
+    ArithProfile() = default;
 
-        m_bits = newProfile.bits();
+    static constexpr ArithProfile fromInt(uint32_t bits)
+    {
+        return ArithProfile { ConstantTag::Constant, bits };
     }
 
-    bool isObservedTypeEmpty()
+    static constexpr ArithProfile observedUnaryInt()
     {
-        return argObservedType().isEmpty();
+        constexpr ObservedType observedInt32 { ObservedType().withInt32() };
+        constexpr uint32_t bits = observedInt32.bits() << lhsObservedTypeShift;
+        static_assert(bits == 0x800000, "");
+        return fromInt(bits);
     }
-
-    friend class JSC::LLIntOffsetsExtractor;
-};
-
-/* This class stores the following components in 16 bits:
- * - ObservedResults
- * - ObservedType for right-hand-side
- * - ObservedType for left-hand-side
- * - a bit used by division to indicate whether a special fast path was taken
- */
-using BinaryArithProfileBase = uint16_t;
-class BinaryArithProfile : public ArithProfile<BinaryArithProfileBase> {
-    static constexpr uint32_t rhsObservedTypeShift = observedResultsNumBitsNeeded;
-    static constexpr uint32_t lhsObservedTypeShift = rhsObservedTypeShift + ObservedType::numBitsNeeded;
-
-    static_assert(ObservedType::numBitsNeeded == 3, "We make a hard assumption about that here.");
-    static constexpr BinaryArithProfileBase clearRhsObservedTypeBitMask = static_cast<BinaryArithProfileBase>(~(0b111 << rhsObservedTypeShift));
-    static constexpr BinaryArithProfileBase clearLhsObservedTypeBitMask = static_cast<BinaryArithProfileBase>(~(0b111 << lhsObservedTypeShift));
-
-    static constexpr BinaryArithProfileBase observedTypeMask = (1 << ObservedType::numBitsNeeded) - 1;
-
-public:
-    static constexpr BinaryArithProfileBase specialFastPathBit = 1 << (lhsObservedTypeShift + ObservedType::numBitsNeeded);
-    static_assert((lhsObservedTypeShift + ObservedType::numBitsNeeded + 1) <= sizeof(BinaryArithProfileBase) * 8, "Should fit in a uint32_t.");
-    static_assert(!(specialFastPathBit & ~clearLhsObservedTypeBitMask), "These bits should not intersect.");
-    static_assert(specialFastPathBit & clearLhsObservedTypeBitMask, "These bits should intersect.");
-    static_assert(specialFastPathBit > ~clearLhsObservedTypeBitMask, "These bits should not intersect and specialFastPathBit should be a higher bit.");
-
-    BinaryArithProfile()
-        : ArithProfile<BinaryArithProfileBase> ()
+    static constexpr ArithProfile observedUnaryNumber()
     {
-        ASSERT(lhsObservedType().isEmpty());
-        ASSERT(rhsObservedType().isEmpty());
+        constexpr ObservedType observedNumber { ObservedType().withNumber() };
+        constexpr uint32_t bits = observedNumber.bits() << lhsObservedTypeShift;
+        static_assert(bits == 0x1000000, "");
+        return fromInt(bits);
     }
-
-    static constexpr BinaryArithProfileBase observedIntIntBits()
+    static constexpr ArithProfile observedBinaryIntInt()
     {
         constexpr ObservedType observedInt32 { ObservedType().withInt32() };
-        constexpr BinaryArithProfileBase bits = (observedInt32.bits() << lhsObservedTypeShift) | (observedInt32.bits() << rhsObservedTypeShift);
-        return bits;
+        constexpr uint32_t bits = (observedInt32.bits() << lhsObservedTypeShift) | (observedInt32.bits() << rhsObservedTypeShift);
+        static_assert(bits == 0x900000, "");
+        return fromInt(bits);
     }
-    static constexpr BinaryArithProfileBase observedNumberIntBits()
+    static constexpr ArithProfile observedBinaryNumberInt()
     {
         constexpr ObservedType observedNumber { ObservedType().withNumber() };
         constexpr ObservedType observedInt32 { ObservedType().withInt32() };
-        constexpr BinaryArithProfileBase bits = (observedNumber.bits() << lhsObservedTypeShift) | (observedInt32.bits() << rhsObservedTypeShift);
-        return bits;
+        constexpr uint32_t bits = (observedNumber.bits() << lhsObservedTypeShift) | (observedInt32.bits() << rhsObservedTypeShift);
+        static_assert(bits == 0x1100000, "");
+        return fromInt(bits);
     }
-    static constexpr BinaryArithProfileBase observedIntNumberBits()
+    static constexpr ArithProfile observedBinaryIntNumber()
     {
         constexpr ObservedType observedNumber { ObservedType().withNumber() };
         constexpr ObservedType observedInt32 { ObservedType().withInt32() };
-        constexpr BinaryArithProfileBase bits = (observedInt32.bits() << lhsObservedTypeShift) | (observedNumber.bits() << rhsObservedTypeShift);
-        return bits;
+        constexpr uint32_t bits = (observedInt32.bits() << lhsObservedTypeShift) | (observedNumber.bits() << rhsObservedTypeShift);
+        static_assert(bits == 0xa00000, "");
+        return fromInt(bits);
     }
-    static constexpr BinaryArithProfileBase observedNumberNumberBits()
+    static constexpr ArithProfile observedBinaryNumberNumber()
     {
         constexpr ObservedType observedNumber { ObservedType().withNumber() };
-        constexpr BinaryArithProfileBase bits = (observedNumber.bits() << lhsObservedTypeShift) | (observedNumber.bits() << rhsObservedTypeShift);
-        return bits;
+        constexpr uint32_t bits = (observedNumber.bits() << lhsObservedTypeShift) | (observedNumber.bits() << rhsObservedTypeShift);
+        static_assert(bits == 0x1200000, "");
+        return fromInt(bits);
     }
 
+    enum ObservedResults {
+        NonNegZeroDouble = 1 << 0,
+        NegZeroDouble    = 1 << 1,
+        NonNumeric       = 1 << 2,
+        Int32Overflow    = 1 << 3,
+        Int52Overflow    = 1 << 4,
+        BigInt           = 1 << 5,
+    };
+
+    ResultType lhsResultType() const { return ResultType((m_bits >> lhsResultTypeShift) & resultTypeMask); }
+    ResultType rhsResultType() const { return ResultType((m_bits >> rhsResultTypeShift) & resultTypeMask); }
+
     constexpr ObservedType lhsObservedType() const { return ObservedType((m_bits >> lhsObservedTypeShift) & observedTypeMask); }
     constexpr ObservedType rhsObservedType() const { return ObservedType((m_bits >> rhsObservedTypeShift) & observedTypeMask); }
     void setLhsObservedType(ObservedType type)
     {
-        BinaryArithProfileBase bits = m_bits;
+        uint32_t bits = m_bits;
         bits &= clearLhsObservedTypeBitMask;
         bits |= type.bits() << lhsObservedTypeShift;
         m_bits = bits;
@@ -285,7 +187,7 @@ public:
 
     void setRhsObservedType(ObservedType type)
     { 
-        BinaryArithProfileBase bits = m_bits;
+        uint32_t bits = m_bits;
         bits &= clearRhsObservedTypeBitMask;
         bits |= type.bits() << rhsObservedTypeShift;
         m_bits = bits;
@@ -294,6 +196,39 @@ public:
 
     bool tookSpecialFastPath() const { return m_bits & specialFastPathBit; }
 
+    bool didObserveNonInt32() const { return hasBits(NonNegZeroDouble | NegZeroDouble | NonNumeric | BigInt); }
+    bool didObserveDouble() const { return hasBits(NonNegZeroDouble | NegZeroDouble); }
+    bool didObserveNonNegZeroDouble() const { return hasBits(NonNegZeroDouble); }
+    bool didObserveNegZeroDouble() const { return hasBits(NegZeroDouble); }
+    bool didObserveNonNumeric() const { return hasBits(NonNumeric); }
+    bool didObserveBigInt() const { return hasBits(BigInt); }
+    bool didObserveInt32Overflow() const { return hasBits(Int32Overflow); }
+    bool didObserveInt52Overflow() const { return hasBits(Int52Overflow); }
+
+    void setObservedNonNegZeroDouble() { setBit(NonNegZeroDouble); }
+    void setObservedNegZeroDouble() { setBit(NegZeroDouble); }
+    void setObservedNonNumeric() { setBit(NonNumeric); }
+    void setObservedBigInt() { setBit(BigInt); }
+    void setObservedInt32Overflow() { setBit(Int32Overflow); }
+    void setObservedInt52Overflow() { setBit(Int52Overflow); }
+
+    const void* addressOfBits() const { return &m_bits; }
+
+    void observeResult(JSValue value)
+    {
+        if (value.isInt32())
+            return;
+        if (value.isNumber()) {
+            m_bits |= Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble;
+            return;
+        }
+        if (value && value.isBigInt()) {
+            m_bits |= BigInt;
+            return;
+        }
+        m_bits |= NonNumeric;
+    }
+
     void lhsSawInt32() { setLhsObservedType(lhsObservedType().withInt32()); }
     void lhsSawNumber() { setLhsObservedType(lhsObservedType().withNumber()); }
     void lhsSawNonNumber() { setLhsObservedType(lhsObservedType().withNonNumber()); }
@@ -303,7 +238,7 @@ public:
 
     void observeLHS(JSValue lhs)
     {
-        BinaryArithProfile newProfile = *this;
+        ArithProfile newProfile = *this;
         if (lhs.isNumber()) {
             if (lhs.isInt32())
                 newProfile.lhsSawInt32();
@@ -319,7 +254,7 @@ public:
     {
         observeLHS(lhs);
 
-        BinaryArithProfile newProfile = *this;
+        ArithProfile newProfile = *this;
         if (rhs.isNumber()) {
             if (rhs.isInt32())
                 newProfile.rhsSawInt32();
@@ -331,11 +266,47 @@ public:
         m_bits = newProfile.bits();
     }
 
-    bool isObservedTypeEmpty()
+#if ENABLE(JIT)    
+    // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble) if it sees a
+    // double. Sets NonNumeric if it sees a non-numeric.
+    void emitObserveResult(CCallHelpers&, JSValueRegs, TagRegistersMode = HaveTagRegisters);
+    
+    // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble).
+    bool shouldEmitSetDouble() const;
+    void emitSetDouble(CCallHelpers&) const;
+    
+    // Sets NonNumber.
+    void emitSetNonNumeric(CCallHelpers&) const;
+    bool shouldEmitSetNonNumeric() const;
+
+    // Sets BigInt
+    void emitSetBigInt(CCallHelpers&) const;
+    bool shouldEmitSetBigInt() const;
+#endif // ENABLE(JIT)
+
+    constexpr uint32_t bits() const { return m_bits; }
+
+private:
+    constexpr explicit ArithProfile(ConstantTag, uint32_t bits)
+        : m_bits(bits)
+    {
+    }
+
+    constexpr ArithProfile(ConstantTag, ResultType arg)
+        : m_bits(arg.bits() << lhsResultTypeShift)
     {
-        return lhsObservedType().isEmpty() && rhsObservedType().isEmpty();
     }
 
+    constexpr ArithProfile(ConstantTag, ResultType lhs, ResultType rhs)
+        : m_bits((lhs.bits() << lhsResultTypeShift) | (rhs.bits() << rhsResultTypeShift))
+    {
+    }
+
+    bool hasBits(int mask) const { return m_bits & mask; }
+    void setBit(int mask) { m_bits |= mask; }
+
+    uint32_t m_bits { 0 }; // We take care to update m_bits only in a single operation. We don't ever store an inconsistent bit representation to it.
+
     friend class JSC::LLIntOffsetsExtractor;
 };
 
@@ -343,8 +314,7 @@ public:
 
 namespace WTF {
 
-void printInternal(PrintStream&, const JSC::UnaryArithProfile&);
-void printInternal(PrintStream&, const JSC::BinaryArithProfile&);
+void printInternal(PrintStream&, const JSC::ArithProfile&);
 void printInternal(PrintStream&, const JSC::ObservedType&);
 
 } // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.rb b/Source/JavaScriptCore/bytecode/BytecodeList.rb
index 72de609..c2ec9cf 100644
@@ -41,7 +41,6 @@ types [
     :JSType,
     :JSValue,
     :LLIntCallLinkInfo,
-    :ResultType,
     :OperandTypes,
     :ProfileTypeBytecodeFlag,
     :PropertyOffset,
@@ -59,8 +58,7 @@ types [
 
     :ValueProfile,
     :ValueProfileAndOperandBuffer,
-    :UnaryArithProfile,
-    :BinaryArithProfile,
+    :ArithProfile,
     :ArrayProfile,
     :ArrayAllocationProfile,
     :ObjectAllocationProfile,
@@ -281,7 +279,10 @@ op_group :ProfiledBinaryOp,
         operandTypes: OperandTypes,
     },
     metadata: {
-        arithProfile: BinaryArithProfile
+        arithProfile: ArithProfile
+    },
+    metadata_initializers: {
+        arithProfile: :operandTypes
     }
 
 op_group :ValueProfiledBinaryOp,
@@ -363,10 +364,13 @@ op :negate,
     args: {
         dst: VirtualRegister,
         operand: VirtualRegister,
-        resultType: ResultType,
+        operandTypes: OperandTypes,
     },
     metadata: {
-        arithProfile: UnaryArithProfile,
+        arithProfile: ArithProfile,
+    },
+    metadata_initializers: {
+        arithProfile: :operandTypes
     }
 
 op :not,
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 681dfc9..0f8e7f8 100644
@@ -1479,25 +1479,25 @@ StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
     return ensureJITData(locker).m_stubInfos.add(accessType);
 }
 
-JITAddIC* CodeBlock::addJITAddIC(BinaryArithProfile* arithProfile)
+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
 {
     ConcurrentJSLocker locker(m_lock);
     return ensureJITData(locker).m_addICs.add(arithProfile);
 }
 
-JITMulIC* CodeBlock::addJITMulIC(BinaryArithProfile* arithProfile)
+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
 {
     ConcurrentJSLocker locker(m_lock);
     return ensureJITData(locker).m_mulICs.add(arithProfile);
 }
 
-JITSubIC* CodeBlock::addJITSubIC(BinaryArithProfile* arithProfile)
+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
 {
     ConcurrentJSLocker locker(m_lock);
     return ensureJITData(locker).m_subICs.add(arithProfile);
 }
 
-JITNegIC* CodeBlock::addJITNegIC(UnaryArithProfile* arithProfile)
+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
 {
     ConcurrentJSLocker locker(m_lock);
     return ensureJITData(locker).m_negICs.add(arithProfile);
@@ -3084,19 +3084,16 @@ const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
     return instructions().at(offset + target).ptr();
 }
 
-BinaryArithProfile* CodeBlock::binaryArithProfileForBytecodeIndex(BytecodeIndex bytecodeIndex)
+ArithProfile* CodeBlock::arithProfileForBytecodeIndex(BytecodeIndex bytecodeIndex)
 {
-    return binaryArithProfileForPC(instructions().at(bytecodeIndex.offset()).ptr());
+    return arithProfileForPC(instructions().at(bytecodeIndex.offset()).ptr());
 }
 
-UnaryArithProfile* CodeBlock::unaryArithProfileForBytecodeIndex(BytecodeIndex bytecodeIndex)
-{
-    return unaryArithProfileForPC(instructions().at(bytecodeIndex.offset()).ptr());
-}
-
-BinaryArithProfile* CodeBlock::binaryArithProfileForPC(const Instruction* pc)
+ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
 {
     switch (pc->opcodeID()) {
+    case op_negate:
+        return &pc->as<OpNegate>().metadata(this).m_arithProfile;
     case op_add:
         return &pc->as<OpAdd>().metadata(this).m_arithProfile;
     case op_mul:
@@ -3112,23 +3109,11 @@ BinaryArithProfile* CodeBlock::binaryArithProfileForPC(const Instruction* pc)
     return nullptr;
 }
 
-UnaryArithProfile* CodeBlock::unaryArithProfileForPC(const Instruction* pc)
-{
-    switch (pc->opcodeID()) {
-    case op_negate:
-        return &pc->as<OpNegate>().metadata(this).m_arithProfile;
-    default:
-        break;
-    }
-
-    return nullptr;
-}
-
 bool CodeBlock::couldTakeSpecialArithFastCase(BytecodeIndex bytecodeIndex)
 {
     if (!hasBaselineJITProfiling())
         return false;
-    BinaryArithProfile* profile = binaryArithProfileForBytecodeIndex(bytecodeIndex);
+    ArithProfile* profile = arithProfileForBytecodeIndex(bytecodeIndex);
     if (!profile)
         return false;
     return profile->tookSpecialFastPath();
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 18e48c2..d2c0698 100644
@@ -83,8 +83,6 @@ struct OSRExitState;
 } // namespace DFG
 #endif
 
-class UnaryArithProfile;
-class BinaryArithProfile;
 class BytecodeLivenessAnalysis;
 class CodeBlockSet;
 class ExecutableToCodeBlockEdge;
@@ -98,6 +96,7 @@ class StructureStubInfo;
 
 enum class AccessType : int8_t;
 
+struct ArithProfile;
 struct OpCatch;
 
 enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
@@ -280,22 +279,22 @@ public:
     }
     JITData& ensureJITDataSlow(const ConcurrentJSLocker&);
 
-    JITAddIC* addJITAddIC(BinaryArithProfile*);
-    JITMulIC* addJITMulIC(BinaryArithProfile*);
-    JITNegIC* addJITNegIC(UnaryArithProfile*);
-    JITSubIC* addJITSubIC(BinaryArithProfile*);
+    JITAddIC* addJITAddIC(ArithProfile*);
+    JITMulIC* addJITMulIC(ArithProfile*);
+    JITNegIC* addJITNegIC(ArithProfile*);
+    JITSubIC* addJITSubIC(ArithProfile*);
 
     template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type>
-    JITAddIC* addMathIC(BinaryArithProfile* profile) { return addJITAddIC(profile); }
+    JITAddIC* addMathIC(ArithProfile* profile) { return addJITAddIC(profile); }
 
     template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type>
-    JITMulIC* addMathIC(BinaryArithProfile* profile) { return addJITMulIC(profile); }
+    JITMulIC* addMathIC(ArithProfile* profile) { return addJITMulIC(profile); }
 
     template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type>
-    JITNegIC* addMathIC(UnaryArithProfile* profile) { return addJITNegIC(profile); }
+    JITNegIC* addMathIC(ArithProfile* profile) { return addJITNegIC(profile); }
 
     template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type>
-    JITSubIC* addMathIC(BinaryArithProfile* profile) { return addJITSubIC(profile); }
+    JITSubIC* addMathIC(ArithProfile* profile) { return addJITSubIC(profile); }
 
     StructureStubInfo* addStubInfo(AccessType);
 
@@ -498,10 +497,8 @@ public:
     template<typename Functor> void forEachObjectAllocationProfile(const Functor&);
     template<typename Functor> void forEachLLIntCallLinkInfo(const Functor&);
 
-    BinaryArithProfile* binaryArithProfileForBytecodeIndex(BytecodeIndex);
-    UnaryArithProfile* unaryArithProfileForBytecodeIndex(BytecodeIndex);
-    BinaryArithProfile* binaryArithProfileForPC(const Instruction*);
-    UnaryArithProfile* unaryArithProfileForPC(const Instruction*);
+    ArithProfile* arithProfileForBytecodeIndex(BytecodeIndex);
+    ArithProfile* arithProfileForPC(const Instruction*);
 
     bool couldTakeSpecialArithFastCase(BytecodeIndex bytecodeOffset);
 
diff --git a/Source/JavaScriptCore/bytecode/Fits.h b/Source/JavaScriptCore/bytecode/Fits.h
index 57789bc..eda24be 100644
@@ -236,18 +236,6 @@ struct Fits<E, size, std::enable_if_t<sizeof(E) != size && std::is_enum<E>::valu
 };
 
 template<OpcodeSize size>
-struct Fits<ResultType, size, std::enable_if_t<sizeof(ResultType) != size, std::true_type>> : public Fits<uint8_t, size> {
-    static_assert(sizeof(ResultType) == sizeof(uint8_t));
-    using Base = Fits<uint8_t, size>;
-
-    static bool check(ResultType type) { return Base::check(type.bits()); }
-
-    static typename Base::TargetType convert(ResultType type) { return Base::convert(type.bits()); }
-
-    static ResultType convert(typename Base::TargetType type) { return ResultType(Base::convert(type)); }
-};
-
-template<OpcodeSize size>
 struct Fits<OperandTypes, size, std::enable_if_t<sizeof(OperandTypes) != size, std::true_type>> {
     static_assert(sizeof(OperandTypes) == sizeof(uint16_t));
     using TargetType = typename TypeBySize<size>::unsignedType;
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
index 2b69f36..7104d01 100644
@@ -66,16 +66,10 @@ void MethodOfGettingAValueProfile::emitReportValue(CCallHelpers& jit, JSValueReg
         return;
     }
         
-    case UnaryArithProfileReady: {
-        u.unaryArithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
+    case ArithProfileReady: {
+        u.arithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
         return;
-    }
-
-    case BinaryArithProfileReady: {
-        u.binaryArithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
-        return;
-    }
-    }
+    } }
     
     RELEASE_ASSERT_NOT_REACHED();
 }
@@ -100,16 +94,10 @@ void MethodOfGettingAValueProfile::reportValue(JSValue value)
         return;
     }
 
-    case UnaryArithProfileReady: {
-        u.unaryArithProfile->observeResult(value);
+    case ArithProfileReady: {
+        u.arithProfile->observeResult(value);
         return;
-    }
-
-    case BinaryArithProfileReady: {
-        u.binaryArithProfile->observeResult(value);
-        return;
-    }
-    }
+    } }
 
     RELEASE_ASSERT_NOT_REACHED();
 }
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
index 5d81c4c..cfb8347 100644
 
 namespace JSC {
 
-class UnaryArithProfile;
-class BinaryArithProfile;
 class CCallHelpers;
 class CodeBlock;
 class LazyOperandValueProfileKey;
+struct ArithProfile;
 struct ValueProfile;
 
 class MethodOfGettingAValueProfile {
@@ -58,21 +57,12 @@ public:
         } else
             m_kind = None;
     }
-
-    MethodOfGettingAValueProfile(UnaryArithProfile* profile)
-    {
-        if (profile) {
-            m_kind = UnaryArithProfileReady;
-            u.unaryArithProfile = profile;
-        } else
-            m_kind = None;
-    }
-
-    MethodOfGettingAValueProfile(BinaryArithProfile* profile)
+    
+    MethodOfGettingAValueProfile(ArithProfile* profile)
     {
         if (profile) {
-            m_kind = BinaryArithProfileReady;
-            u.binaryArithProfile = profile;
+            m_kind = ArithProfileReady;
+            u.arithProfile = profile;
         } else
             m_kind = None;
     }
@@ -89,8 +79,7 @@ private:
     enum Kind {
         None,
         Ready,
-        UnaryArithProfileReady,
-        BinaryArithProfileReady,
+        ArithProfileReady,
         LazyOperand
     };
     
@@ -101,8 +90,7 @@ private:
         { }
 
         ValueProfile* profile;
-        UnaryArithProfile* unaryArithProfile;
-        BinaryArithProfile* binaryArithProfile;
+        ArithProfile* arithProfile;
         struct {
             CodeBlock* codeBlock;
             BytecodeIndex bytecodeOffset;
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
index f2bce62..e3831d2 100644
@@ -1516,14 +1516,14 @@ RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
     return dst;
 }
 
-RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src, ResultType type)
+RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src, OperandTypes types)
 {
     switch (opcodeID) {
     case op_not:
         emitUnaryOp<OpNot>(dst, src);
         break;
     case op_negate:
-        OpNegate::emit(this, dst, src, type);
+        OpNegate::emit(this, dst, src, types);
         break;
     case op_bitnot:
         emitUnaryOp<OpBitnot>(dst, src);
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
index 2259b7b..862ce12 100644
@@ -682,7 +682,7 @@ namespace JSC {
             return dst;
         }
 
-        RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src, ResultType);
+        RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src, OperandTypes);
 
         template<typename BinaryOp>
         std::enable_if_t<
diff --git a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
index 01064ca..52468e1 100644
@@ -2146,7 +2146,7 @@ RegisterID* UnaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID*
 {
     RefPtr<RegisterID> src = generator.emitNode(m_expr);
     generator.emitExpressionInfo(position(), position(), position());
-    return generator.emitUnaryOp(opcodeID(), generator.finalDestination(dst), src.get(), m_expr->resultDescriptor());
+    return generator.emitUnaryOp(opcodeID(), generator.finalDestination(dst), src.get(), OperandTypes(m_expr->resultDescriptor()));
 }
 
 // ------------------------------ UnaryPlusNode -----------------------------------
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 98e68ec..3378ad0 100644
@@ -942,60 +942,56 @@ private:
         if (!isX86() && (node->op() == ArithMod || node->op() == ValueMod))
             return node;
 
-        switch (node->op()) {
-        case ArithAdd:
-        case ArithSub:
-        case ValueAdd: {
-            BinaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->binaryArithProfileForBytecodeIndex(m_currentIndex);
-            if (!arithProfile)
-                break;
-            if (arithProfile->didObserveDouble())
-                node->mergeFlags(NodeMayHaveDoubleResult);
-            if (arithProfile->didObserveNonNumeric())
-                node->mergeFlags(NodeMayHaveNonNumericResult);
-            if (arithProfile->didObserveBigInt())
-                node->mergeFlags(NodeMayHaveBigIntResult);
-            break;
-        }
-        case ValueMul:
-        case ArithMul: {
-            BinaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->binaryArithProfileForBytecodeIndex(m_currentIndex);
-            if (!arithProfile)
-                break;
-            if (arithProfile->didObserveInt52Overflow())
-                node->mergeFlags(NodeMayOverflowInt52);
-            if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
-                node->mergeFlags(NodeMayOverflowInt32InBaseline);
-            if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
-                node->mergeFlags(NodeMayNegZeroInBaseline);
-            if (arithProfile->didObserveDouble())
-                node->mergeFlags(NodeMayHaveDoubleResult);
-            if (arithProfile->didObserveNonNumeric())
-                node->mergeFlags(NodeMayHaveNonNumericResult);
-            if (arithProfile->didObserveBigInt())
-                node->mergeFlags(NodeMayHaveBigIntResult);
-            break;
-        }
-        case ValueNegate:
-        case ArithNegate: {
-            UnaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->unaryArithProfileForBytecodeIndex(m_currentIndex);
-            if (!arithProfile)
-                break;
-            if (arithProfile->argObservedType().sawNumber() || arithProfile->didObserveDouble())
-                node->mergeFlags(NodeMayHaveDoubleResult);
-            if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
-                node->mergeFlags(NodeMayNegZeroInBaseline);
-            if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
-                node->mergeFlags(NodeMayOverflowInt32InBaseline);
-            if (arithProfile->didObserveNonNumeric())
-                node->mergeFlags(NodeMayHaveNonNumericResult);
-            if (arithProfile->didObserveBigInt())
-                node->mergeFlags(NodeMayHaveBigIntResult);
-            break;
-        }
+        {
+            ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeIndex(m_currentIndex);
+            if (arithProfile) {
+                switch (node->op()) {
+                case ArithAdd:
+                case ArithSub:
+                case ValueAdd:
+                    if (arithProfile->didObserveDouble())
+                        node->mergeFlags(NodeMayHaveDoubleResult);
+                    if (arithProfile->didObserveNonNumeric())
+                        node->mergeFlags(NodeMayHaveNonNumericResult);
+                    if (arithProfile->didObserveBigInt())
+                        node->mergeFlags(NodeMayHaveBigIntResult);
+                    break;
 
-        default:
-            break;
+                case ValueMul:
+                case ArithMul: {
+                    if (arithProfile->didObserveInt52Overflow())
+                        node->mergeFlags(NodeMayOverflowInt52);
+                    if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+                        node->mergeFlags(NodeMayOverflowInt32InBaseline);
+                    if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+                        node->mergeFlags(NodeMayNegZeroInBaseline);
+                    if (arithProfile->didObserveDouble())
+                        node->mergeFlags(NodeMayHaveDoubleResult);
+                    if (arithProfile->didObserveNonNumeric())
+                        node->mergeFlags(NodeMayHaveNonNumericResult);
+                    if (arithProfile->didObserveBigInt())
+                        node->mergeFlags(NodeMayHaveBigIntResult);
+                    break;
+                }
+                case ValueNegate:
+                case ArithNegate: {
+                    if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
+                        node->mergeFlags(NodeMayHaveDoubleResult);
+                    if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+                        node->mergeFlags(NodeMayNegZeroInBaseline);
+                    if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+                        node->mergeFlags(NodeMayOverflowInt32InBaseline);
+                    if (arithProfile->didObserveNonNumeric())
+                        node->mergeFlags(NodeMayHaveNonNumericResult);
+                    if (arithProfile->didObserveBigInt())
+                        node->mergeFlags(NodeMayHaveBigIntResult);
+                    break;
+                }
+
+                default:
+                    break;
+                }
+            }
         }
         
         if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
@@ -1037,8 +1033,8 @@ private:
         
         // FIXME: It might be possible to make this more granular.
         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
-
-        BinaryArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->binaryArithProfileForBytecodeIndex(m_currentIndex);
+        
+        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeIndex(m_currentIndex);
         if (arithProfile->didObserveBigInt())
             node->mergeFlags(NodeMayHaveBigIntResult);
 
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp
index 1b1b198..ca9fc34 100644
@@ -1654,9 +1654,7 @@ MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* curren
                 return &profiledBlock->valueProfileForBytecodeIndex(node->origin.semantic.bytecodeIndex());
 
             if (profiledBlock->hasBaselineJITProfiling()) {
-                if (BinaryArithProfile* result = profiledBlock->binaryArithProfileForBytecodeIndex(node->origin.semantic.bytecodeIndex()))
-                    return result;
-                if (UnaryArithProfile* result = profiledBlock->unaryArithProfileForBytecodeIndex(node->origin.semantic.bytecodeIndex()))
+                if (ArithProfile* result = profiledBlock->arithProfileForBytecodeIndex(node->origin.semantic.bytecodeIndex()))
                     return result;
             }
         }
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index b849666..6cdb715 100644
@@ -3995,7 +3995,7 @@ void SpeculativeJIT::compileValueAdd(Node* node)
 
     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
     BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
-    BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
+    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
     auto repatchingFunction = operationValueAddOptimize;
     auto nonRepatchingFunction = operationValueAdd;
@@ -4019,7 +4019,7 @@ void SpeculativeJIT::compileValueSub(Node* node)
 
         CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
         BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
-        BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         JITSubIC* subIC = m_jit.codeBlock()->addJITSubIC(arithProfile);
         auto repatchingFunction = operationValueSubOptimize;
         auto nonRepatchingFunction = operationValueSub;
@@ -4613,7 +4613,7 @@ void SpeculativeJIT::compileValueNegate(Node* node)
 {
     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
     BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
-    UnaryArithProfile* arithProfile = baselineCodeBlock->unaryArithProfileForBytecodeIndex(bytecodeIndex);
+    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
     JITNegIC* negIC = m_jit.codeBlock()->addJITNegIC(arithProfile);
     auto repatchingFunction = operationArithNegateOptimize;
     auto nonRepatchingFunction = operationArithNegate;
@@ -4836,7 +4836,7 @@ void SpeculativeJIT::compileValueMul(Node* node)
 
     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
     BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
-    BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
+    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
     JITMulIC* mulIC = m_jit.codeBlock()->addJITMulIC(arithProfile);
     auto repatchingFunction = operationValueMulOptimize;
     auto nonRepatchingFunction = operationValueMul;
diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
index 37f0a96..5d48c0b 100644
@@ -2116,7 +2116,7 @@ private:
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
         BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-        BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         auto repatchingFunction = operationValueAddOptimize;
         auto nonRepatchingFunction = operationValueAdd;
         compileBinaryMathIC<JITAddGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
@@ -2137,7 +2137,7 @@ private:
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
         BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-        BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         auto repatchingFunction = operationValueSubOptimize;
         auto nonRepatchingFunction = operationValueSub;
         compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
@@ -2158,7 +2158,7 @@ private:
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
         BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-        BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         auto repatchingFunction = operationValueMulOptimize;
         auto nonRepatchingFunction = operationValueMul;
         compileBinaryMathIC<JITMulGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
@@ -2166,7 +2166,7 @@ private:
 
     template <typename Generator, typename Func1, typename Func2,
         typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
-    void compileUnaryMathIC(UnaryArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
+    void compileUnaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
     {
         Node* node = m_node;
 
@@ -2252,7 +2252,7 @@ private:
 
     template <typename Generator, typename Func1, typename Func2,
         typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
-    void compileBinaryMathIC(BinaryArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
+    void compileBinaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
     {
         Node* node = m_node;
         
@@ -2421,7 +2421,7 @@ private:
 
             CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
             BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-            BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
+            ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
             auto repatchingFunction = operationValueSubOptimize;
             auto nonRepatchingFunction = operationValueSub;
             compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
@@ -3099,7 +3099,7 @@ private:
         DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
         BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-        UnaryArithProfile* arithProfile = baselineCodeBlock->unaryArithProfileForBytecodeIndex(bytecodeIndex);
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         auto repatchingFunction = operationArithNegateOptimize;
         auto nonRepatchingFunction = operationArithNegate;
         compileUnaryMathIC<JITNegGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 2b4a5d6..b5efe26 100644
@@ -919,7 +919,7 @@ namespace JSC {
         // It will give the slow path the same value read by the fast path.
         GetPutInfo copiedGetPutInfo(OpPutToScope);
         template<typename BinaryOp>
-        BinaryArithProfile copiedArithProfile(BinaryOp);
+        ArithProfile copiedArithProfile(BinaryOp);
 
         Interpreter* m_interpreter;
 
@@ -939,7 +939,7 @@ namespace JSC {
         Vector<SwitchRecord> m_switches;
 
         HashMap<unsigned, unsigned> m_copiedGetPutInfos;
-        HashMap<uint64_t, BinaryArithProfile> m_copiedArithProfiles;
+        HashMap<uint64_t, ArithProfile> m_copiedArithProfiles;
 
         JumpList m_exceptionChecks;
         JumpList m_exceptionChecksWithCallFrameRollback;
diff --git a/Source/JavaScriptCore/jit/JITAddGenerator.cpp b/Source/JavaScriptCore/jit/JITAddGenerator.cpp
index 740b2ed..627af27 100644
@@ -34,7 +34,7 @@
 
 namespace JSC {
 
-JITMathICInlineResult JITAddGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const BinaryArithProfile* arithProfile)
+JITMathICInlineResult JITAddGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const ArithProfile* arithProfile)
 {
     // We default to speculating int32.
     ObservedType lhs = ObservedType().withInt32();
@@ -73,7 +73,7 @@ JITMathICInlineResult JITAddGenerator::generateInline(CCallHelpers& jit, MathICG
     return JITMathICInlineResult::GenerateFullSnippet;
 }
 
-bool JITAddGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const BinaryArithProfile* arithProfile, bool shouldEmitProfiling)
+bool JITAddGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
 {
     ASSERT(m_scratchGPR != InvalidGPRReg);
     ASSERT(m_scratchGPR != m_left.payloadGPR());
diff --git a/Source/JavaScriptCore/jit/JITAddGenerator.h b/Source/JavaScriptCore/jit/JITAddGenerator.h
index 675cf93..4dc34a2 100644
@@ -55,8 +55,8 @@ public:
         ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
     }
 
-    JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const BinaryArithProfile*);
-    bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const BinaryArithProfile*, bool shouldEmitProfiling);
+    JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const ArithProfile*);
+    bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile*, bool shouldEmitProfiling);
 
     static bool isLeftOperandValidConstant(SnippetOperand leftOperand) { return leftOperand.isPositiveConstInt32(); }
     static bool isRightOperandValidConstant(SnippetOperand rightOperand) { return rightOperand.isPositiveConstInt32(); }
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index bb635f8..ac372c9 100644
@@ -453,7 +453,7 @@ void JIT::emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&)
 
 void JIT::emit_op_negate(const Instruction* currentInstruction)
 {
-    UnaryArithProfile* arithProfile = &currentInstruction->as<OpNegate>().metadata(m_codeBlock).m_arithProfile;
+    ArithProfile* arithProfile = &currentInstruction->as<OpNegate>().metadata(m_codeBlock).m_arithProfile;
     JITNegIC* negateIC = m_codeBlock->addJITNegIC(arithProfile);
     m_instructionToMathIC.add(currentInstruction, negateIC);
     emitMathICFast<OpNegate>(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate);
@@ -634,9 +634,14 @@ void JIT::emit_op_urshift(const Instruction* currentInstruction)
     emitRightShiftFastPath(currentInstruction, op_urshift);
 }
 
+ALWAYS_INLINE static OperandTypes getOperandTypes(const ArithProfile& arithProfile)
+{
+    return OperandTypes(arithProfile.lhsResultType(), arithProfile.rhsResultType());
+}
+
 void JIT::emit_op_add(const Instruction* currentInstruction)
 {
-    BinaryArithProfile* arithProfile = &currentInstruction->as<OpAdd>().metadata(m_codeBlock).m_arithProfile;
+    ArithProfile* arithProfile = &currentInstruction->as<OpAdd>().metadata(m_codeBlock).m_arithProfile;
     JITAddIC* addIC = m_codeBlock->addJITAddIC(arithProfile);
     m_instructionToMathIC.add(currentInstruction, addIC);
     emitMathICFast<OpAdd>(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd);
@@ -681,7 +686,7 @@ void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, const Instruction* c
 
     bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
     if (!generatedInlineCode) {
-        UnaryArithProfile* arithProfile = mathIC->arithProfile();
+        ArithProfile* arithProfile = mathIC->arithProfile();
         if (arithProfile && shouldEmitProfiling())
             callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, arithProfile);
         else
@@ -704,6 +709,7 @@ template <typename Op, typename Generator, typename ProfiledFunction, typename N
 void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
 {
     auto bytecode = currentInstruction->as<Op>();
+    OperandTypes types = getOperandTypes(copiedArithProfile(bytecode));
     int result = bytecode.m_dst.offset();
     int op1 = bytecode.m_lhs.offset();
     int op2 = bytecode.m_rhs.offset();
@@ -722,8 +728,8 @@ void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, const Instruction*
     FPRReg scratchFPR = fpRegT2;
 #endif
 
-    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
-    SnippetOperand rightOperand(bytecode.m_operandTypes.second());
+    SnippetOperand leftOperand(types.first());
+    SnippetOperand rightOperand(types.second());
 
     if (isOperandConstantInt(op1))
         leftOperand.setConstInt32(getOperandConstantInt(op1));
@@ -753,7 +759,7 @@ void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, const Instruction*
             emitGetVirtualRegister(op1, leftRegs);
         else if (rightOperand.isConst())
             emitGetVirtualRegister(op2, rightRegs);
-        BinaryArithProfile* arithProfile = mathIC->arithProfile();
+        ArithProfile* arithProfile = mathIC->arithProfile();
         if (arithProfile && shouldEmitProfiling())
             callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, arithProfile);
         else
@@ -793,7 +799,7 @@ void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, const Instruction* c
     auto slowPathStart = label();
 #endif
 
-    UnaryArithProfile* arithProfile = mathIC->arithProfile();
+    ArithProfile* arithProfile = mathIC->arithProfile();
     if (arithProfile && shouldEmitProfiling()) {
         if (mathICGenerationState.shouldSlowPathRepatch)
             mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(profiledRepatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, TrustedImmPtr(mathIC));
@@ -825,6 +831,7 @@ void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const Instruction*
     mathICGenerationState.slowPathStart = label();
 
     auto bytecode = currentInstruction->as<Op>();
+    OperandTypes types = getOperandTypes(copiedArithProfile(bytecode));
     int result = bytecode.m_dst.offset();
     int op1 = bytecode.m_lhs.offset();
     int op2 = bytecode.m_rhs.offset();
@@ -839,8 +846,8 @@ void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const Instruction*
     JSValueRegs resultRegs = leftRegs;
 #endif
     
-    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
-    SnippetOperand rightOperand(bytecode.m_operandTypes.second());
+    SnippetOperand leftOperand(types.first());
+    SnippetOperand rightOperand(types.second());
 
     if (isOperandConstantInt(op1))
         leftOperand.setConstInt32(getOperandConstantInt(op1));
@@ -858,7 +865,7 @@ void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const Instruction*
     auto slowPathStart = label();
 #endif
 
-    BinaryArithProfile* arithProfile = mathIC->arithProfile();
+    ArithProfile* arithProfile = mathIC->arithProfile();
     if (arithProfile && shouldEmitProfiling()) {
         if (mathICGenerationState.shouldSlowPathRepatch)
             mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(profiledRepatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, TrustedImmPtr(mathIC));
@@ -886,16 +893,19 @@ void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const Instruction*
 void JIT::emit_op_div(const Instruction* currentInstruction)
 {
     auto bytecode = currentInstruction->as<OpDiv>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
     int result = bytecode.m_dst.offset();
     int op1 = bytecode.m_lhs.offset();
     int op2 = bytecode.m_rhs.offset();
 
 #if USE(JSVALUE64)
+    OperandTypes types = getOperandTypes(metadata.m_arithProfile);
     JSValueRegs leftRegs = JSValueRegs(regT0);
     JSValueRegs rightRegs = JSValueRegs(regT1);
     JSValueRegs resultRegs = leftRegs;
     GPRReg scratchGPR = regT2;
 #else
+    OperandTypes types = getOperandTypes(metadata.m_arithProfile);
     JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
     JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
     JSValueRegs resultRegs = leftRegs;
@@ -903,12 +913,12 @@ void JIT::emit_op_div(const Instruction* currentInstruction)
 #endif
     FPRReg scratchFPR = fpRegT2;
 
-    BinaryArithProfile* arithProfile = nullptr;
+    ArithProfile* arithProfile = nullptr;
     if (shouldEmitProfiling())
         arithProfile = &currentInstruction->as<OpDiv>().metadata(m_codeBlock).m_arithProfile;
 
-    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
-    SnippetOperand rightOperand(bytecode.m_operandTypes.second());
+    SnippetOperand leftOperand(types.first());
+    SnippetOperand rightOperand(types.second());
 
     if (isOperandConstantInt(op1))
         leftOperand.setConstInt32(getOperandConstantInt(op1));
@@ -950,7 +960,7 @@ void JIT::emit_op_div(const Instruction* currentInstruction)
 
 void JIT::emit_op_mul(const Instruction* currentInstruction)
 {
-    BinaryArithProfile* arithProfile = &currentInstruction->as<OpMul>().metadata(m_codeBlock).m_arithProfile;
+    ArithProfile* arithProfile = &currentInstruction->as<OpMul>().metadata(m_codeBlock).m_arithProfile;
     JITMulIC* mulIC = m_codeBlock->addJITMulIC(arithProfile);
     m_instructionToMathIC.add(currentInstruction, mulIC);
     emitMathICFast<OpMul>(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul);
@@ -966,7 +976,7 @@ void JIT::emitSlow_op_mul(const Instruction* currentInstruction, Vector<SlowCase
 
 void JIT::emit_op_sub(const Instruction* currentInstruction)
 {
-    BinaryArithProfile* arithProfile = &currentInstruction->as<OpSub>().metadata(m_codeBlock).m_arithProfile;
+    ArithProfile* arithProfile = &currentInstruction->as<OpSub>().metadata(m_codeBlock).m_arithProfile;
     JITSubIC* subIC = m_codeBlock->addJITSubIC(arithProfile);
     m_instructionToMathIC.add(currentInstruction, subIC);
     emitMathICFast<OpSub>(subIC, currentInstruction, operationValueSubProfiled, operationValueSub);
index b8c4798..66e1be1 100644
@@ -130,7 +130,7 @@ void JITDivGenerator::generateFastPath(CCallHelpers& jit)
     notDoubleZero.link(&jit);
 #endif
     if (m_arithProfile)
-        jit.or32(CCallHelpers::TrustedImm32(BinaryArithProfile::specialFastPathBit), CCallHelpers::AbsoluteAddress(m_arithProfile->addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::specialFastPathBit), CCallHelpers::AbsoluteAddress(m_arithProfile->addressOfBits()));
     jit.boxDouble(m_leftFPR, m_result);
 }
 
index 708db10..8396677 100644
@@ -37,7 +37,7 @@ public:
     JITDivGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
         JSValueRegs result, JSValueRegs left, JSValueRegs right,
         FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR,
-        BinaryArithProfile* arithProfile = nullptr)
+        ArithProfile* arithProfile = nullptr)
         : m_leftOperand(leftOperand)
         , m_rightOperand(rightOperand)
         , m_result(result)
@@ -71,7 +71,7 @@ private:
     GPRReg m_scratchGPR;
     FPRReg m_scratchFPR;
     bool m_didEmitFastPath { false };
-    BinaryArithProfile* m_arithProfile;
+    ArithProfile* m_arithProfile;
 
     CCallHelpers::JumpList m_endJumpList;
     CCallHelpers::JumpList m_slowPathJumpList;
index 25bcc2b..62eab9b 100644
@@ -716,13 +716,13 @@ ALWAYS_INLINE GetPutInfo JIT::copiedGetPutInfo(OpPutToScope bytecode)
 }
 
 template<typename BinaryOp>
-ALWAYS_INLINE BinaryArithProfile JIT::copiedArithProfile(BinaryOp bytecode)
+ALWAYS_INLINE ArithProfile JIT::copiedArithProfile(BinaryOp bytecode)
 {
     uint64_t key = (static_cast<uint64_t>(BinaryOp::opcodeID) + 1) << 32 | static_cast<uint64_t>(bytecode.m_metadataID);
     auto iterator = m_copiedArithProfiles.find(key);
     if (iterator != m_copiedArithProfiles.end())
         return iterator->value;
-    BinaryArithProfile arithProfile = bytecode.metadata(m_codeBlock).m_arithProfile;
+    ArithProfile arithProfile = bytecode.metadata(m_codeBlock).m_arithProfile;
     m_copiedArithProfiles.add(key, arithProfile);
     return arithProfile;
 }
index fdaec92..5645e42 100644
@@ -52,11 +52,11 @@ struct MathICGenerationState {
 
 #define ENABLE_MATH_IC_STATS 0
 
-template <typename GeneratorType, typename ArithProfileType>
+template <typename GeneratorType, bool(*isProfileEmpty)(ArithProfile&)>
 class JITMathIC {
     WTF_MAKE_FAST_ALLOCATED;
 public:
-    JITMathIC(ArithProfileType* arithProfile)
+    JITMathIC(ArithProfile* arithProfile)
         : m_arithProfile(arithProfile)
     {
     }
@@ -71,7 +71,7 @@ public:
         size_t startSize = jit.m_assembler.buffer().codeSize();
 
         if (m_arithProfile) {
-            if (m_arithProfile->isObservedTypeEmpty()) {
+            if (isProfileEmpty(*m_arithProfile)) {
                 // It looks like the MathIC has yet to execute. We don't want to emit code in this
                 // case for a couple reasons. First, the operation may never execute, so if we don't emit
                 // code, it's a win. Second, if the operation does execute, we can emit better code
@@ -223,7 +223,7 @@ public:
         m_slowPathStartLocation = linkBuffer.locationOf<JSInternalPtrTag>(state.slowPathStart);
     }
 
-    ArithProfileType* arithProfile() const { return m_arithProfile; }
+    ArithProfile* arithProfile() const { return m_arithProfile; }
 
 #if ENABLE(MATH_IC_STATS)
     size_t m_generatedCodeSize { 0 };
@@ -236,7 +236,7 @@ public:
     }
 #endif
 
-    ArithProfileType* m_arithProfile;
+    ArithProfile* m_arithProfile;
     MacroAssemblerCodeRef<JITStubRoutinePtrTag> m_code;
     CodeLocationLabel<JSInternalPtrTag> m_inlineStart;
     CodeLocationLabel<JSInternalPtrTag> m_inlineEnd;
@@ -246,11 +246,15 @@ public:
     GeneratorType m_generator;
 };
 
+inline bool isBinaryProfileEmpty(ArithProfile& arithProfile)
+{
+    return arithProfile.lhsObservedType().isEmpty() || arithProfile.rhsObservedType().isEmpty();
+}
 template <typename GeneratorType>
-class JITBinaryMathIC : public JITMathIC<GeneratorType, BinaryArithProfile> {
+class JITBinaryMathIC : public JITMathIC<GeneratorType, isBinaryProfileEmpty> {
 public:
-    JITBinaryMathIC(BinaryArithProfile* arithProfile)
-        : JITMathIC<GeneratorType, BinaryArithProfile>(arithProfile)
+    JITBinaryMathIC(ArithProfile* arithProfile)
+        : JITMathIC<GeneratorType, isBinaryProfileEmpty>(arithProfile)
     {
     }
 };
@@ -259,11 +263,16 @@ typedef JITBinaryMathIC<JITAddGenerator> JITAddIC;
 typedef JITBinaryMathIC<JITMulGenerator> JITMulIC;
 typedef JITBinaryMathIC<JITSubGenerator> JITSubIC;
 
+
+inline bool isUnaryProfileEmpty(ArithProfile& arithProfile)
+{
+    return arithProfile.lhsObservedType().isEmpty();
+}
 template <typename GeneratorType>
-class JITUnaryMathIC : public JITMathIC<GeneratorType, UnaryArithProfile> {
+class JITUnaryMathIC : public JITMathIC<GeneratorType, isUnaryProfileEmpty> {
 public:
-    JITUnaryMathIC(UnaryArithProfile* arithProfile)
-        : JITMathIC<GeneratorType, UnaryArithProfile>(arithProfile)
+    JITUnaryMathIC(ArithProfile* arithProfile)
+        : JITMathIC<GeneratorType, isUnaryProfileEmpty>(arithProfile)
     {
     }
 };
index cf6baa5..83d293b 100644
@@ -33,7 +33,7 @@
 
 namespace JSC {
 
-JITMathICInlineResult JITMulGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const BinaryArithProfile* arithProfile)
+JITMathICInlineResult JITMulGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const ArithProfile* arithProfile)
 {
     // We default to speculating int32.
     ObservedType lhs = ObservedType().withInt32();
@@ -89,7 +89,7 @@ JITMathICInlineResult JITMulGenerator::generateInline(CCallHelpers& jit, MathICG
     return JITMathICInlineResult::GenerateFullSnippet;
 }
 
-bool JITMulGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const BinaryArithProfile* arithProfile, bool shouldEmitProfiling)
+bool JITMulGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
 {
     ASSERT(m_scratchGPR != InvalidGPRReg);
     ASSERT(m_scratchGPR != m_left.payloadGPR());
@@ -205,18 +205,18 @@ bool JITMulGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList
 
         CCallHelpers::Jump notNegativeZero = jit.branch64(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm64(negativeZeroBits));
 
-        jit.or32(CCallHelpers::TrustedImm32(BinaryArithProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
         CCallHelpers::Jump done = jit.jump();
 
         notNegativeZero.link(&jit);
-        jit.or32(CCallHelpers::TrustedImm32(BinaryArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
 
         jit.move(m_result.payloadGPR(), m_scratchGPR);
         jit.urshiftPtr(CCallHelpers::Imm32(52), m_scratchGPR);
         jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR);
         CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431));
 
-        jit.or32(CCallHelpers::TrustedImm32(BinaryArithProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
         noInt52Overflow.link(&jit);
 
         done.link(&jit);
@@ -227,18 +227,18 @@ bool JITMulGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList
         notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm32(0)));
         notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.tagGPR(), CCallHelpers::TrustedImm32(negativeZeroBits >> 32)));
 
-        jit.or32(CCallHelpers::TrustedImm32(BinaryArithProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
         CCallHelpers::Jump done = jit.jump();
 
         notNegativeZero.link(&jit);
-        jit.or32(CCallHelpers::TrustedImm32(BinaryArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
 
         jit.move(m_result.tagGPR(), m_scratchGPR);
         jit.urshiftPtr(CCallHelpers::Imm32(52 - 32), m_scratchGPR);
         jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR);
         CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431));
         
-        jit.or32(CCallHelpers::TrustedImm32(BinaryArithProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
 
         endJumpList.append(noInt52Overflow);
         if (m_scratchGPR == m_result.tagGPR() || m_scratchGPR == m_result.payloadGPR())
index 6087796..36d26ea 100644
@@ -56,8 +56,8 @@ public:
         ASSERT(!m_leftOperand.isPositiveConstInt32() || !m_rightOperand.isPositiveConstInt32());
     }
 
-    JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const BinaryArithProfile*);
-    bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowJumpList, const BinaryArithProfile*, bool shouldEmitProfiling);
+    JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const ArithProfile*);
+    bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowJumpList, const ArithProfile*, bool shouldEmitProfiling);
 
     static bool isLeftOperandValidConstant(SnippetOperand leftOperand) { return leftOperand.isPositiveConstInt32(); }
     static bool isRightOperandValidConstant(SnippetOperand rightOperand) { return rightOperand.isPositiveConstInt32(); }
index ceebe39..92c29dd 100644
@@ -32,7 +32,7 @@
 
 namespace JSC {
 
-JITMathICInlineResult JITNegGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const UnaryArithProfile* arithProfile)
+JITMathICInlineResult JITNegGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const ArithProfile* arithProfile)
 {
     ASSERT(m_scratchGPR != InvalidGPRReg);
     ASSERT(m_scratchGPR != m_src.payloadGPR());
@@ -45,7 +45,7 @@ JITMathICInlineResult JITNegGenerator::generateInline(CCallHelpers& jit, MathICG
     // We default to speculating int32.
     ObservedType observedTypes = ObservedType().withInt32();
     if (arithProfile)
-        observedTypes = arithProfile->argObservedType();
+        observedTypes = arithProfile->lhsObservedType();
     ASSERT_WITH_MESSAGE(!observedTypes.isEmpty(), "We should not attempt to generate anything if we do not have a profile.");
 
     if (observedTypes.isOnlyNonNumber())
@@ -82,7 +82,7 @@ JITMathICInlineResult JITNegGenerator::generateInline(CCallHelpers& jit, MathICG
     return JITMathICInlineResult::GenerateFullSnippet;
 }
 
-bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const UnaryArithProfile* arithProfile, bool shouldEmitProfiling)
+bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
 {
     ASSERT(m_scratchGPR != m_src.payloadGPR());
     ASSERT(m_scratchGPR != m_result.payloadGPR());
@@ -117,7 +117,7 @@ bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList
 #endif
     // The flags of ArithNegate are basic in DFG.
     // We only need to know if we ever produced a number.
-    if (shouldEmitProfiling && arithProfile && !arithProfile->argObservedType().sawNumber() && !arithProfile->didObserveDouble())
+    if (shouldEmitProfiling && arithProfile && !arithProfile->lhsObservedType().sawNumber() && !arithProfile->didObserveDouble())
         arithProfile->emitSetDouble(jit);
     return true;
 }
index 787b707..8a0c2d5 100644
@@ -43,8 +43,8 @@ public:
         , m_scratchGPR(scratchGPR)
     { }
 
-    JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const UnaryArithProfile*);
-    bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const UnaryArithProfile*, bool shouldEmitProfiling);
+    JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const ArithProfile*);
+    bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile*, bool shouldEmitProfiling);
 
 private:
     JSValueRegs m_result;
index 3a9e85a..47deb8c 100644
@@ -2645,7 +2645,7 @@ void JIT_OPERATION operationExceptionFuzz(JSGlobalObject* globalObject)
 #endif // COMPILER(GCC_COMPATIBLE)
 }
 
-ALWAYS_INLINE static JSValue profiledAdd(JSGlobalObject* globalObject, JSValue op1, JSValue op2, BinaryArithProfile& arithProfile)
+ALWAYS_INLINE static JSValue profiledAdd(JSGlobalObject* globalObject, JSValue op1, JSValue op2, ArithProfile& arithProfile)
 {
     arithProfile.observeLHSAndRHS(op1, op2);
     JSValue result = jsAdd(globalObject, op1, op2);
@@ -2661,7 +2661,7 @@ EncodedJSValue JIT_OPERATION operationValueAdd(JSGlobalObject* globalObject, Enc
     return JSValue::encode(jsAdd(globalObject, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2)));
 }
 
-EncodedJSValue JIT_OPERATION operationValueAddProfiled(JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, BinaryArithProfile* arithProfile)
+EncodedJSValue JIT_OPERATION operationValueAddProfiled(JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile* arithProfile)
 {
     ASSERT(arithProfile);
     VM& vm = globalObject->vm();
@@ -2679,7 +2679,7 @@ EncodedJSValue JIT_OPERATION operationValueAddProfiledOptimize(JSGlobalObject* g
     JSValue op1 = JSValue::decode(encodedOp1);
     JSValue op2 = JSValue::decode(encodedOp2);
 
-    BinaryArithProfile* arithProfile = addIC->arithProfile();
+    ArithProfile* arithProfile = addIC->arithProfile();
     ASSERT(arithProfile);
     arithProfile->observeLHSAndRHS(op1, op2);
     auto nonOptimizeVariant = operationValueAddProfiledNoOptimize;
@@ -2701,7 +2701,7 @@ EncodedJSValue JIT_OPERATION operationValueAddProfiledNoOptimize(JSGlobalObject*
     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
 
-    BinaryArithProfile* arithProfile = addIC->arithProfile();
+    ArithProfile* arithProfile = addIC->arithProfile();
     ASSERT(arithProfile);
     return JSValue::encode(profiledAdd(globalObject, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2), *arithProfile));
 }
@@ -2716,7 +2716,7 @@ EncodedJSValue JIT_OPERATION operationValueAddOptimize(JSGlobalObject* globalObj
     JSValue op2 = JSValue::decode(encodedOp2);
 
     auto nonOptimizeVariant = operationValueAddNoOptimize;
-    if (BinaryArithProfile* arithProfile = addIC->arithProfile())
+    if (ArithProfile* arithProfile = addIC->arithProfile())
         arithProfile->observeLHSAndRHS(op1, op2);
     addIC->generateOutOfLine(callFrame->codeBlock(), nonOptimizeVariant);
 
@@ -2749,7 +2749,7 @@ ALWAYS_INLINE static EncodedJSValue unprofiledMul(JSGlobalObject* globalObject,
     return JSValue::encode(jsMul(globalObject, op1, op2));
 }
 
-ALWAYS_INLINE static EncodedJSValue profiledMul(JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, BinaryArithProfile& arithProfile, bool shouldObserveLHSAndRHSTypes = true)
+ALWAYS_INLINE static EncodedJSValue profiledMul(JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile& arithProfile, bool shouldObserveLHSAndRHSTypes = true)
 {
     VM& vm = globalObject->vm();
     auto scope = DECLARE_THROW_SCOPE(vm);
@@ -2790,7 +2790,7 @@ EncodedJSValue JIT_OPERATION operationValueMulOptimize(JSGlobalObject* globalObj
     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
 
     auto nonOptimizeVariant = operationValueMulNoOptimize;
-    if (BinaryArithProfile* arithProfile = mulIC->arithProfile())
+    if (ArithProfile* arithProfile = mulIC->arithProfile())
         arithProfile->observeLHSAndRHS(JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
     mulIC->generateOutOfLine(callFrame->codeBlock(), nonOptimizeVariant);
 
@@ -2801,7 +2801,7 @@ EncodedJSValue JIT_OPERATION operationValueMulOptimize(JSGlobalObject* globalObj
     return unprofiledMul(globalObject, encodedOp1, encodedOp2);
 }
 
-EncodedJSValue JIT_OPERATION operationValueMulProfiled(JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, BinaryArithProfile* arithProfile)
+EncodedJSValue JIT_OPERATION operationValueMulProfiled(JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile* arithProfile)
 {
     VM& vm = globalObject->vm();
     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
@@ -2817,7 +2817,7 @@ EncodedJSValue JIT_OPERATION operationValueMulProfiledOptimize(JSGlobalObject* g
     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
 
-    BinaryArithProfile* arithProfile = mulIC->arithProfile();
+    ArithProfile* arithProfile = mulIC->arithProfile();
     ASSERT(arithProfile);
     arithProfile->observeLHSAndRHS(JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
     auto nonOptimizeVariant = operationValueMulProfiledNoOptimize;
@@ -2836,7 +2836,7 @@ EncodedJSValue JIT_OPERATION operationValueMulProfiledNoOptimize(JSGlobalObject*
     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
 
-    BinaryArithProfile* arithProfile = mulIC->arithProfile();
+    ArithProfile* arithProfile = mulIC->arithProfile();
     ASSERT(arithProfile);
     return profiledMul(globalObject, encodedOp1, encodedOp2, *arithProfile);
 }
@@ -2862,7 +2862,7 @@ EncodedJSValue JIT_OPERATION operationArithNegate(JSGlobalObject* globalObject,
 
 }
 
-EncodedJSValue JIT_OPERATION operationArithNegateProfiled(JSGlobalObject* globalObject, EncodedJSValue encodedOperand, UnaryArithProfile* arithProfile)
+EncodedJSValue JIT_OPERATION operationArithNegateProfiled(JSGlobalObject* globalObject, EncodedJSValue encodedOperand, ArithProfile* arithProfile)
 {
     ASSERT(arithProfile);
     VM& vm = globalObject->vm();
@@ -2871,7 +2871,7 @@ EncodedJSValue JIT_OPERATION operationArithNegateProfiled(JSGlobalObject* global
     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
 
     JSValue operand = JSValue::decode(encodedOperand);
-    arithProfile->observeArg(operand);
+    arithProfile->observeLHS(operand);
 
     JSValue primValue = operand.toPrimitive(globalObject);
     RETURN_IF_EXCEPTION(scope, encodedJSValue());
@@ -2899,9 +2899,9 @@ EncodedJSValue JIT_OPERATION operationArithNegateProfiledOptimize(JSGlobalObject
     
     JSValue operand = JSValue::decode(encodedOperand);
 
-    UnaryArithProfile* arithProfile = negIC->arithProfile();
+    ArithProfile* arithProfile = negIC->arithProfile();
     ASSERT(arithProfile);
-    arithProfile->observeArg(operand);
+    arithProfile->observeLHS(operand);
     negIC->generateOutOfLine(callFrame->codeBlock(), operationArithNegateProfiled);
 
 #if ENABLE(MATH_IC_STATS)
@@ -2933,8 +2933,8 @@ EncodedJSValue JIT_OPERATION operationArithNegateOptimize(JSGlobalObject* global
 
     JSValue operand = JSValue::decode(encodedOperand);
 
-    if (UnaryArithProfile* arithProfile = negIC->arithProfile())
-        arithProfile->observeArg(operand);
+    if (ArithProfile* arithProfile = negIC->arithProfile())
+        arithProfile->observeLHS(operand);
     negIC->generateOutOfLine(callFrame->codeBlock(), operationArithNegate);
 
 #if ENABLE(MATH_IC_STATS)
@@ -2960,7 +2960,7 @@ ALWAYS_INLINE static EncodedJSValue unprofiledSub(JSGlobalObject* globalObject,
     return JSValue::encode(jsSub(globalObject, op1, op2));
 }
 
-ALWAYS_INLINE static EncodedJSValue profiledSub(VM& vm, JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, BinaryArithProfile& arithProfile, bool shouldObserveLHSAndRHSTypes = true)
+ALWAYS_INLINE static EncodedJSValue profiledSub(VM& vm, JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile& arithProfile, bool shouldObserveLHSAndRHSTypes = true)
 {
     auto scope = DECLARE_THROW_SCOPE(vm);
 
@@ -2984,7 +2984,7 @@ EncodedJSValue JIT_OPERATION operationValueSub(JSGlobalObject* globalObject, Enc
     return unprofiledSub(globalObject, encodedOp1, encodedOp2);
 }
 
-EncodedJSValue JIT_OPERATION operationValueSubProfiled(JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, BinaryArithProfile* arithProfile)
+EncodedJSValue JIT_OPERATION operationValueSubProfiled(JSGlobalObject* globalObject, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile* arithProfile)
 {
     ASSERT(arithProfile);
 
@@ -3002,7 +3002,7 @@ EncodedJSValue JIT_OPERATION operationValueSubOptimize(JSGlobalObject* globalObj
     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
 
     auto nonOptimizeVariant = operationValueSubNoOptimize;
-    if (BinaryArithProfile* arithProfile = subIC->arithProfile())
+    if (ArithProfile* arithProfile = subIC->arithProfile())
         arithProfile->observeLHSAndRHS(JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
     subIC->generateOutOfLine(callFrame->codeBlock(), nonOptimizeVariant);
 
@@ -3028,7 +3028,7 @@ EncodedJSValue JIT_OPERATION operationValueSubProfiledOptimize(JSGlobalObject* g
     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
 
-    BinaryArithProfile* arithProfile = subIC->arithProfile();
+    ArithProfile* arithProfile = subIC->arithProfile();
     ASSERT(arithProfile);
     arithProfile->observeLHSAndRHS(JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
     auto nonOptimizeVariant = operationValueSubProfiledNoOptimize;
@@ -3047,7 +3047,7 @@ EncodedJSValue JIT_OPERATION operationValueSubProfiledNoOptimize(JSGlobalObject*
     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
 
-    BinaryArithProfile* arithProfile = subIC->arithProfile();
+    ArithProfile* arithProfile = subIC->arithProfile();
     ASSERT(arithProfile);
     return profiledSub(vm, globalObject, encodedOp1, encodedOp2, *arithProfile);
 }
index 595aec4..37e8361 100644
@@ -38,8 +38,6 @@ typedef int64_t EncodedJSValue;
     
 class ArrayAllocationProfile;
 class ArrayProfile;
-class UnaryArithProfile;
-class BinaryArithProfile;
 class Butterfly;
 class CallFrame;
 class CallLinkInfo;
@@ -66,6 +64,7 @@ class WatchpointSet;
 struct ByValInfo;
 struct InlineCallFrame;
 struct Instruction;
+struct ArithProfile;
 
 extern "C" {
 
@@ -78,7 +77,7 @@ typedef char* UnusedPtr;
     A: JSArray*
     Aap: ArrayAllocationProfile*
     Ap: ArrayProfile*
-    Arp: BinaryArithProfile*
+    Arp: ArithProfile*
     B: Butterfly*
     By: ByValInfo*
     C: JSCell*
@@ -283,7 +282,7 @@ int32_t JIT_OPERATION operationCheckIfExceptionIsUncatchableAndNotifyProfiler(VM
 int32_t JIT_OPERATION operationInstanceOfCustom(JSGlobalObject*, EncodedJSValue encodedValue, JSObject* constructor, EncodedJSValue encodedHasInstance) WTF_INTERNAL;
 
 EncodedJSValue JIT_OPERATION operationValueAdd(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationValueAddProfiled(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, BinaryArithProfile*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueAddProfiled(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationValueAddProfiledOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationValueAddProfiledNoOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationValueAddOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC*) WTF_INTERNAL;
@@ -293,13 +292,13 @@ EncodedJSValue JIT_OPERATION operationValueMulOptimize(JSGlobalObject*, EncodedJ
 EncodedJSValue JIT_OPERATION operationValueMulNoOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationValueMulProfiledOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationValueMulProfiledNoOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationValueMulProfiled(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, BinaryArithProfile*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueMulProfiled(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationArithNegate(JSGlobalObject*, EncodedJSValue operand);
-EncodedJSValue JIT_OPERATION operationArithNegateProfiled(JSGlobalObject*, EncodedJSValue operand, UnaryArithProfile*);
+EncodedJSValue JIT_OPERATION operationArithNegateProfiled(JSGlobalObject*, EncodedJSValue operand, ArithProfile*);
 EncodedJSValue JIT_OPERATION operationArithNegateProfiledOptimize(JSGlobalObject*, EncodedJSValue encodedOperand, JITNegIC*);
 EncodedJSValue JIT_OPERATION operationArithNegateOptimize(JSGlobalObject*, EncodedJSValue encodedOperand, JITNegIC*);
 EncodedJSValue JIT_OPERATION operationValueSub(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationValueSubProfiled(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, BinaryArithProfile*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueSubProfiled(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationValueSubOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationValueSubNoOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationValueSubProfiledOptimize(JSGlobalObject*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC*) WTF_INTERNAL;
index 7b1a8b8..795c275 100644
@@ -33,7 +33,7 @@
 
 namespace JSC {
 
-JITMathICInlineResult JITSubGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const BinaryArithProfile* arithProfile)
+JITMathICInlineResult JITSubGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const ArithProfile* arithProfile)
 {
     // We default to speculating int32.
     ObservedType lhs = ObservedType().withInt32();
@@ -78,7 +78,7 @@ JITMathICInlineResult JITSubGenerator::generateInline(CCallHelpers& jit, MathICG
     return JITMathICInlineResult::GenerateFullSnippet;
 }
 
-bool JITSubGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const BinaryArithProfile* arithProfile, bool shouldEmitProfiling)
+bool JITSubGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
 {
     ASSERT(m_scratchGPR != InvalidGPRReg);
     ASSERT(m_scratchGPR != m_left.payloadGPR());
index 3491a83..561f633 100644
@@ -53,8 +53,8 @@ public:
         , m_scratchFPR(scratchFPR)
     { }
 
-    JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const BinaryArithProfile*);
-    bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const BinaryArithProfile*, bool shouldEmitProfiling);
+    JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const ArithProfile*);
+    bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile*, bool shouldEmitProfiling);
 
     static bool isLeftOperandValidConstant(SnippetOperand) { return false; }
     static bool isRightOperandValidConstant(SnippetOperand) { return false; }
index 2fde107..51ed653 100644
@@ -147,49 +147,36 @@ void Data::performAssertions(VM& vm)
 #endif
 
     {
-        UnaryArithProfile arithProfile;
-        arithProfile.argSawInt32();
-        ASSERT(arithProfile.bits() == UnaryArithProfile::observedIntBits());
-        ASSERT(arithProfile.argObservedType().isOnlyInt32());
-    }
-    {
-        UnaryArithProfile arithProfile;
-        arithProfile.argSawNumber();
-        ASSERT(arithProfile.bits() == UnaryArithProfile::observedNumberBits());
-        ASSERT(arithProfile.argObservedType().isOnlyNumber());
-    }
-
-    {
-        BinaryArithProfile arithProfile;
+        ArithProfile arithProfile;
         arithProfile.lhsSawInt32();
         arithProfile.rhsSawInt32();
-        ASSERT(arithProfile.bits() == BinaryArithProfile::observedIntIntBits());
-        ASSERT(arithProfile.lhsObservedType().isOnlyInt32());
-        ASSERT(arithProfile.rhsObservedType().isOnlyInt32());
+        ASSERT(arithProfile.bits() == ArithProfile::observedBinaryIntInt().bits());
+        STATIC_ASSERT(ArithProfile::observedBinaryIntInt().lhsObservedType().isOnlyInt32());
+        STATIC_ASSERT(ArithProfile::observedBinaryIntInt().rhsObservedType().isOnlyInt32());
     }
     {
-        BinaryArithProfile arithProfile;
+        ArithProfile arithProfile;
         arithProfile.lhsSawNumber();
         arithProfile.rhsSawInt32();
-        ASSERT(arithProfile.bits() == BinaryArithProfile::observedNumberIntBits());
-        ASSERT(arithProfile.lhsObservedType().isOnlyNumber());
-        ASSERT(arithProfile.rhsObservedType().isOnlyInt32());
+        ASSERT(arithProfile.bits() == ArithProfile::observedBinaryNumberInt().bits());
+        STATIC_ASSERT(ArithProfile::observedBinaryNumberInt().lhsObservedType().isOnlyNumber());
+        STATIC_ASSERT(ArithProfile::observedBinaryNumberInt().rhsObservedType().isOnlyInt32());
     }
     {
-        BinaryArithProfile arithProfile;
+        ArithProfile arithProfile;
         arithProfile.lhsSawNumber();
         arithProfile.rhsSawNumber();
-        ASSERT(arithProfile.bits() == BinaryArithProfile::observedNumberNumberBits());
-        ASSERT(arithProfile.lhsObservedType().isOnlyNumber());
-        ASSERT(arithProfile.rhsObservedType().isOnlyNumber());
+        ASSERT(arithProfile.bits() == ArithProfile::observedBinaryNumberNumber().bits());
+        STATIC_ASSERT(ArithProfile::observedBinaryNumberNumber().lhsObservedType().isOnlyNumber());
+        STATIC_ASSERT(ArithProfile::observedBinaryNumberNumber().rhsObservedType().isOnlyNumber());
     }
     {
-        BinaryArithProfile arithProfile;
+        ArithProfile arithProfile;
         arithProfile.lhsSawInt32();
         arithProfile.rhsSawNumber();
-        ASSERT(arithProfile.bits() == BinaryArithProfile::observedIntNumberBits());
-        ASSERT(arithProfile.lhsObservedType().isOnlyInt32());
-        ASSERT(arithProfile.rhsObservedType().isOnlyNumber());
+        ASSERT(arithProfile.bits() == ArithProfile::observedBinaryIntNumber().bits());
+        STATIC_ASSERT(ArithProfile::observedBinaryIntNumber().lhsObservedType().isOnlyInt32());
+        STATIC_ASSERT(ArithProfile::observedBinaryIntNumber().rhsObservedType().isOnlyNumber());
     }
 }
 IGNORE_WARNINGS_END
index 7125bd4..78add02 100644
@@ -91,7 +91,7 @@ public:
 
 const int64_t* LLIntOffsetsExtractor::dummy()
 {
-// This is a file generated by offlineasm/generate_offset_extractor.rb, and contains code
+// This is a file generated by offlineasm/generate_offsets_extractor.rb, and contains code
 // to create a table of offsets, sizes, and a header identifying what combination of
 // Platform.h macros we have set. We include it inside of a method on LLIntOffsetsExtractor
 // because the fields whose offsets we're extracting are mostly private. So we make their
index 9d11be2..9ce34c8 100644
@@ -241,15 +241,13 @@ const IsInvalidated = constexpr IsInvalidated
 # ShadowChicken data
 const ShadowChickenTailMarker = constexpr ShadowChicken::Packet::tailMarkerValue
 
-# UnaryArithProfile data
-const ArithProfileInt = constexpr (UnaryArithProfile::observedIntBits())
-const ArithProfileNumber = constexpr (UnaryArithProfile::observedNumberBits())
-
-# BinaryArithProfile data
-const ArithProfileIntInt = constexpr (BinaryArithProfile::observedIntIntBits())
-const ArithProfileNumberInt = constexpr (BinaryArithProfile::observedNumberIntBits())
-const ArithProfileIntNumber = constexpr (BinaryArithProfile::observedIntNumberBits())
-const ArithProfileNumberNumber = constexpr (BinaryArithProfile::observedNumberNumberBits())
+# ArithProfile data
+const ArithProfileInt = constexpr (ArithProfile::observedUnaryInt().bits())
+const ArithProfileNumber = constexpr (ArithProfile::observedUnaryNumber().bits())
+const ArithProfileIntInt = constexpr (ArithProfile::observedBinaryIntInt().bits())
+const ArithProfileNumberInt = constexpr (ArithProfile::observedBinaryNumberInt().bits())
+const ArithProfileIntNumber = constexpr (ArithProfile::observedBinaryIntNumber().bits())
+const ArithProfileNumberNumber = constexpr (ArithProfile::observedBinaryNumberNumber().bits())
 
 # Pointer Tags
 const BytecodePtrTag = constexpr BytecodePtrTag
index c880cf9..d15d967 100644
@@ -1000,8 +1000,8 @@ end)
 
 llintOpWithMetadata(op_negate, OpNegate, macro (size, get, dispatch, metadata, return)
 
-    macro updateArithProfile(type)
-        orh type, OpNegate::Metadata::m_arithProfile + UnaryArithProfile::m_bits[t5]
+    macro arithProfile(type)
+        ori type, OpNegate::Metadata::m_arithProfile + ArithProfile::m_bits[t5]
     end
 
     metadata(t5, t0)
@@ -1010,12 +1010,12 @@ llintOpWithMetadata(op_negate, OpNegate, macro (size, get, dispatch, metadata, r
     bineq t1, Int32Tag, .opNegateSrcNotInt
     btiz t2, 0x7fffffff, .opNegateSlow
     negi t2
-    updateArithProfile(ArithProfileInt)
+    arithProfile(ArithProfileInt)
     return (Int32Tag, t2)
 .opNegateSrcNotInt:
     bia t1, LowestTag, .opNegateSlow
     xori 0x80000000, t1
-    updateArithProfile(ArithProfileNumber)
+    arithProfile(ArithProfileNumber)
     return(t1, t2)
 
 .opNegateSlow:
@@ -1027,7 +1027,7 @@ end)
 macro binaryOpCustomStore(opcodeName, opcodeStruct, integerOperationAndStore, doubleOperation)
     llintOpWithMetadata(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, metadata, return)
         macro arithProfile(type)
-            orh type, %opcodeStruct%::Metadata::m_arithProfile + BinaryArithProfile::m_bits[t5]
+            ori type, %opcodeStruct%::Metadata::m_arithProfile + ArithProfile::m_bits[t5]
         end
 
         metadata(t5, t2)
index d949101..95c0f68 100644
@@ -981,24 +981,22 @@ end)
 
 
 llintOpWithMetadata(op_negate, OpNegate, macro (size, get, dispatch, metadata, return)
-
-    macro updateArithProfile(type)
-        orh type, OpNegate::Metadata::m_arithProfile + UnaryArithProfile::m_bits[t1]
-    end
-
     get(m_operand, t0)
     loadConstantOrVariable(size, t0, t3)
     metadata(t1, t2)
+    loadi OpNegate::Metadata::m_arithProfile + ArithProfile::m_bits[t1], t2
     bqb t3, numberTag, .opNegateNotInt
     btiz t3, 0x7fffffff, .opNegateSlow
     negi t3
     orq numberTag, t3
-    updateArithProfile(ArithProfileInt)
+    ori ArithProfileInt, t2
+    storei t2, OpNegate::Metadata::m_arithProfile + ArithProfile::m_bits[t1]
     return(t3)
 .opNegateNotInt:
     btqz t3, numberTag, .opNegateSlow
     xorq 0x8000000000000000, t3
-    updateArithProfile(ArithProfileNumber)
+    ori ArithProfileNumber, t2
+    storei t2, OpNegate::Metadata::m_arithProfile + ArithProfile::m_bits[t1]
     return(t3)
 
 .opNegateSlow:
@@ -1012,7 +1010,7 @@ macro binaryOpCustomStore(opcodeName, opcodeStruct, integerOperationAndStore, do
         metadata(t5, t0)
 
         macro profile(type)
-            orh type, %opcodeStruct%::Metadata::m_arithProfile + UnaryArithProfile::m_bits[t5]
+            ori type, %opcodeStruct%::Metadata::m_arithProfile + ArithProfile::m_bits[t5]
         end
 
         get(m_rhs, t0)
index b222567..b23f65b 100644
@@ -299,7 +299,7 @@ class Sequence
             end
         }
         result = riscLowerMalformedAddressesDouble(result)
-        result = riscLowerMisplacedImmediates(result, ["storeb", "storeh", "storei", "storep", "storeq"])
+        result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"])
         result = riscLowerMalformedImmediates(result, 0..0xff, 0..0x0ff)
         result = riscLowerMisplacedAddresses(result)
         result = riscLowerRegisterReuse(result)
@@ -418,7 +418,7 @@ class Instruction
             end
         when "andi", "andp"
             emitArmCompact("ands", "and", operands)
-        when "ori", "orp", "orh"
+        when "ori", "orp"
             emitArmCompact("orrs", "orr", operands)
         when "oris"
             emitArmCompact("orrs", "orrs", operands)
index fcd276f..0966ec3 100644
@@ -393,7 +393,7 @@ class Sequence
             case node.opcode
             when "loadb", "loadbsi", "loadbsq", "storeb", /^bb/, /^btb/, /^cb/, /^tb/
                 size = 1
-            when "loadh", "loadhsi", "loadhsq", "orh", "storeh"
+            when "loadh", "loadhsi", "loadhsq", "storeh"
                 size = 2
             when "loadi", "loadis", "storei", "addi", "andi", "lshifti", "muli", "negi",
                 "noti", "ori", "rshifti", "urshifti", "subi", "xori", /^bi/, /^bti/,
@@ -418,7 +418,7 @@ class Sequence
             end
         }
 
-        result = riscLowerMisplacedImmediates(result, ["storeb", "storeh", "storei", "storep", "storeq"])
+        result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"])
 
         # The rules for which immediates are valid for and/or/xor instructions are fairly involved, see https://dinfuehr.github.io/blog/encoding-of-immediate-values-on-aarch64/
         validLogicalImmediates = []
@@ -711,8 +711,6 @@ class Instruction
             emitARM64TAC("orr", operands, :ptr)
         when "orq"
             emitARM64TAC("orr", operands, :quad)
-        when "orh"
-            emitARM64TAC("orr", operands, :word) # not :half because 16-bit registers don't exist on ARM.
         when "xori"
             emitARM64TAC("eor", operands, :word)
         when "xorp"
index 372e6f1..f33f706 100644
@@ -160,7 +160,6 @@ class Immediate
 
         case type
         when :int8;    "int8_t(#{valueStr})"
-        when :int16;   "int16_t(#{valueStr})"
         when :int32;   "int32_t(#{valueStr})"
         when :int64;   "int64_t(#{valueStr})"
         when :intptr;  "intptr_t(#{valueStr})"
@@ -184,7 +183,6 @@ class Address
     def clValue(type=:intptr)
         case type
         when :int8;         int8MemRef
-        when :int16;        int16MemRef
         when :int32;        int32MemRef
         when :int64;        int64MemRef
         when :intptr;       intptrMemRef
@@ -388,7 +386,7 @@ end
 
 def cloopEmitOperation(operands, type, operator)
     raise unless type == :intptr || type == :uintptr || type == :int32 || type == :uint32 || \
-        type == :int64 || type == :uint64 || type == :double || type == :int16
+        type == :int64 || type == :uint64 || type == :double
     if operands.size == 3
         op1 = operands[0]
         op2 = operands[1]
@@ -403,9 +401,6 @@ def cloopEmitOperation(operands, type, operator)
     if dst.is_a? RegisterID and (type == :int32 or type == :uint32)
         truncationHeader = "(uint32_t)("
         truncationFooter = ")"
-    elsif dst.is_a? RegisterID and (type == :int16)
-        truncationHeader = "(uint16_t)("
-        truncationFooter = ")"
     else
         truncationHeader = ""
         truncationFooter = ""
@@ -590,8 +585,6 @@ class Instruction
             cloopEmitOperation(operands, :int64, "|")
         when "orp"
             cloopEmitOperation(operands, :intptr, "|")
-        when "orh"
-            cloopEmitOperation(operands, :int16, "|")
 
         when "xori"
             cloopEmitOperation(operands, :int32, "^")
index a41e06f..7a212b2 100644
@@ -46,7 +46,6 @@ MACRO_INSTRUCTIONS =
      "ori",
      "orf",
      "ord",
-     "orh",
      "rshifti",
      "urshifti",
      "rshiftp",
index 5a532d8..b5f07b1 100644
@@ -527,7 +527,7 @@ def mipsLowerMisplacedImmediates(list)
                 end
             when /^(addi|subi)/
                 newList << node.riscLowerMalformedImmediatesRecurse(newList, -0x7fff..0x7fff)
-            when "andi", "andp", "ori", "orp", "orh", "xori", "xorp"
+            when "andi", "andp", "ori", "orp", "xori", "xorp"
                 newList << node.riscLowerMalformedImmediatesRecurse(newList, 0..0xffff)
             else
                 newList << node
@@ -860,7 +860,7 @@ class Instruction
             end
         when "andi", "andp"
             emitMIPSCompact("and", "and", operands)
-        when "ori", "orp", "orh"
+        when "ori", "orp"
             emitMIPSCompact("or", "orr", operands)
         when "oris"
             emitMIPSCompact("or", "orrs", operands)
index 1e6704f..75d7a98 100644
@@ -462,7 +462,7 @@ def riscLowerMisplacedAddresses(list)
             postInstructions = []
             annotation = node.annotation
             case node.opcode
-            when "addi", "addis", "andi", "lshifti", "muli", "negi", "noti", "ori", "orh", "oris",
+            when "addi", "addis", "andi", "lshifti", "muli", "negi", "noti", "ori", "oris",
                 "rshifti", "urshifti", "subi", "subis", "xori", /^bi/, /^bti/, /^ci/, /^ti/
                 newList << Instruction.new(node.codeOrigin,
                                            node.opcode,
index 6dca047..112eed7 100644
@@ -1123,8 +1123,6 @@ class Instruction
             handleX86Op("orps", :float)
         when "ord"
             handleX86Op("orpd", :double)
-        when "orh"
-            handleX86Op("or#{x86Suffix(:half)}", :half)
         when "rshifti"
             handleX86Shift("sar#{x86Suffix(:int)}", :int)
         when "rshiftp"
index 5622f28..9b3d0d7 100644
@@ -48,10 +48,6 @@ namespace JSC {
         static constexpr int numBitsNeeded = 7;
         static_assert((TypeBits & ((1 << numBitsNeeded) - 1)) == TypeBits, "This is necessary for correctness.");
 
-        constexpr explicit ResultType()
-            : ResultType(unknownType())
-        {
-        }
         constexpr explicit ResultType(Type type)
             : m_bits(type)
         {
index da9fbca..15f1fd8 100644
@@ -477,8 +477,8 @@ SLOW_PATH_DECL(slow_path_to_string)
 #if ENABLE(JIT)
 static void updateArithProfileForUnaryArithOp(OpNegate::Metadata& metadata, JSValue result, JSValue operand)
 {
-    UnaryArithProfile& profile = metadata.m_arithProfile;
-    profile.observeArg(operand);
+    ArithProfile& profile = metadata.m_arithProfile;
+    profile.observeLHS(operand);
     ASSERT(result.isNumber() || result.isBigInt());
     if (result.isNumber()) {
         if (!result.isInt32()) {
@@ -535,7 +535,7 @@ SLOW_PATH_DECL(slow_path_negate)
 #if ENABLE(DFG_JIT)
 static void updateArithProfileForBinaryArithOp(JSGlobalObject*, CodeBlock* codeBlock, const Instruction* pc, JSValue result, JSValue left, JSValue right)
 {
-    BinaryArithProfile& profile = *codeBlock->binaryArithProfileForPC(pc);
+    ArithProfile& profile = *codeBlock->arithProfileForPC(pc);
 
     if (result.isNumber()) {
         if (!result.isInt32()) {
@@ -596,7 +596,7 @@ SLOW_PATH_DECL(slow_path_add)
     JSValue v1 = GET_C(bytecode.m_lhs).jsValue();
     JSValue v2 = GET_C(bytecode.m_rhs).jsValue();
 
-    BinaryArithProfile& arithProfile = *codeBlock->binaryArithProfileForPC(pc);
+    ArithProfile& arithProfile = *codeBlock->arithProfileForPC(pc);
     arithProfile.observeLHSAndRHS(v1, v2);
 
     JSValue result = jsAdd(globalObject, v1, v2);