From a4c898240fccdba16a6cbd04624831693be8a05d Mon Sep 17 00:00:00 2001
From: "fpizlo@apple.com"
Date: Mon, 2 Feb 2015 20:52:01 +0000
Subject: [PATCH] BinarySwitch should be faster on average
https://bugs.webkit.org/show_bug.cgi?id=141046
Reviewed by Anders Carlsson.
This optimizes our binary switch using math. It's strictly better than what we had before
assuming we bottom out in some case (rather than fall through), assuming all cases get
hit with equal probability. The difference is particularly large for large switch
statements. For example, a switch statement with 1000 cases would previously require on
average 13.207 branches to get to some case, while now it just requires 10.464.
This is also a progression for the fallthrough case, though we could shave off another
1/6 branch on average if we wanted to -- though it would regress taking a case (not falling
through) by 1/6 branch. I believe it's better to bias the BinarySwitch for not falling
through.
This also adds some randomness to the algorithm to minimize the likelihood of us
generating a switch statement that is always particularly bad for some input. Note that
the randomness has no effect on average-case performance assuming all cases are equally
likely.
This ought to have no actual performance change because we don't rely on binary switches
that much. The main reason why this change is interesting is that I'm finding myself
increasingly relying on BinarySwitch, and I'd like to know that it's optimal.
* jit/BinarySwitch.cpp:
(JSC::BinarySwitch::BinarySwitch):
(JSC::BinarySwitch::~BinarySwitch):
(JSC::BinarySwitch::build):
* jit/BinarySwitch.h:
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@179490 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Makefile.shared  2 +
Source/JavaScriptCore/ChangeLog  33 +++
Source/JavaScriptCore/jit/BinarySwitch.cpp  246 ++++++++++++++++
Source/JavaScriptCore/jit/BinarySwitch.h  8 +
.../WebKit2/WebProcess/com.apple.WebProcess.sb.in  2 +
5 files changed, 228 insertions(+), 63 deletions(-)
diff --git a/Makefile.shared b/Makefile.shared
index 61c066d..22faa94 100644
--- a/Makefile.shared
+++ b/Makefile.shared
@@ -12,6 +12,8 @@ ifneq (,$(ARCHS))
XCODE_OPTIONS += ONLY_ACTIVE_ARCH=NO
endif
+XCODE_OPTIONS += TOOLCHAINS=com.apple.dt.toolchain.OSX10_11
+
DEFAULT_VERBOSITY := $(shell defaults read org.webkit.BuildConfiguration BuildTranscriptVerbosity 2>/dev/null || echo "default")
VERBOSITY ?= $(DEFAULT_VERBOSITY)
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index a200b53..0946670 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,3 +1,36 @@
+2015-01-31  Filip Pizlo  <fpizlo@apple.com>
+
+ BinarySwitch should be faster on average
+ https://bugs.webkit.org/show_bug.cgi?id=141046
+
+ Reviewed by Anders Carlsson.
+
+ This optimizes our binary switch using math. It's strictly better than what we had before
+ assuming we bottom out in some case (rather than fall through), assuming all cases get
+ hit with equal probability. The difference is particularly large for large switch
+ statements. For example, a switch statement with 1000 cases would previously require on
+ average 13.207 branches to get to some case, while now it just requires 10.464.
+
+ This is also a progression for the fallthrough case, though we could shave off another
+ 1/6 branch on average if we wanted to -- though it would regress taking a case (not falling
+ through) by 1/6 branch. I believe it's better to bias the BinarySwitch for not falling
+ through.
+
+ This also adds some randomness to the algorithm to minimize the likelihood of us
+ generating a switch statement that is always particularly bad for some input. Note that
+ the randomness has no effect on average-case performance assuming all cases are equally
+ likely.
+
+ This ought to have no actual performance change because we don't rely on binary switches
+ that much. The main reason why this change is interesting is that I'm finding myself
+ increasingly relying on BinarySwitch, and I'd like to know that it's optimal.
+
+ * jit/BinarySwitch.cpp:
+ (JSC::BinarySwitch::BinarySwitch):
+ (JSC::BinarySwitch::~BinarySwitch):
+ (JSC::BinarySwitch::build):
+ * jit/BinarySwitch.h:
+
2015-02-02  Joseph Pecoraro
Web Inspector: Extend CSS.getSupportedCSSProperties to provide values for properties for CSS Augmented JSContext
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.cpp b/Source/JavaScriptCore/jit/BinarySwitch.cpp
index 82876d3..a24d72c 100644
--- a/Source/JavaScriptCore/jit/BinarySwitch.cpp
+++ b/Source/JavaScriptCore/jit/BinarySwitch.cpp
@@ -32,11 +32,13 @@
namespace JSC {
+static unsigned globalCounter; // We use a different seed every time we are invoked.
+
BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type)
: m_value(value)
+ , m_weakRandom(globalCounter++)
, m_index(0)
, m_caseIndex(UINT_MAX)
- , m_medianBias(0)
, m_type(type)
{
if (cases.isEmpty())
@@ -45,7 +47,11 @@ BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type
for (unsigned i = 0; i < cases.size(); ++i)
m_cases.append(Case(cases[i], i));
std::sort(m_cases.begin(), m_cases.end());
- build(0, m_cases.size());
+ build(0, false, m_cases.size());
+}
+
+BinarySwitch::~BinarySwitch()
+{
}
bool BinarySwitch::advance(MacroAssembler& jit)
@@ -115,81 +121,201 @@ bool BinarySwitch::advance(MacroAssembler& jit)
}
}
-void BinarySwitch::build(unsigned start, unsigned end)
+void BinarySwitch::build(unsigned start, bool hardStart, unsigned end)
{
unsigned size = end - start;
 switch (size) {
 case 0: {
 RELEASE_ASSERT_NOT_REACHED();
 break;
 }
+ RELEASE_ASSERT(size);
+
+ // This code uses some random numbers to keep things balanced. It's important to keep in mind
+ // that this does not improve averagecase throughput under the assumption that all cases fire
+ // with equal probability. It just ensures that there will not be some switch structure that
+ // when combined with some input will always produce pathologically good or pathologically bad
+ // performance.
+
+ const unsigned leafThreshold = 3;
+
+ if (size <= leafThreshold) {
+ // It turns out that for exactly three cases or less, it's better to just compare each
+ // case individually. This saves 1/6 of a branch on average, and up to 1/3 of a branch in
+ // extreme cases where the divide-and-conquer bottoms out in a lot of 3-case subswitches.
+ //
+ // This assumes that we care about the cost of hitting some case more than we care about
+ // bottoming out in a default case. I believe that in most places where we use switch
+ // statements, we are more likely to hit one of the cases than we are to fall through to
+ // default. Intuitively, if we wanted to improve the performance of default, we would
+ // reduce the value of leafThreshold to 2 or even to 1. See below for a deeper discussion.
 case 1: {
 if (start
 && m_cases[start  1].value == m_cases[start].value  1
 && start + 1 < m_cases.size()
 && m_cases[start + 1].value == m_cases[start].value + 1) {
 m_branches.append(BranchCode(ExecuteCase, start));
 break;
+ bool allConsecutive = false;
+
+ if ((hardStart || (start && m_cases[start - 1].value == m_cases[start].value - 1))
+ && start + size < m_cases.size()
+ && m_cases[start + size - 1].value == m_cases[start + size].value - 1) {
+ allConsecutive = true;
+ for (unsigned i = 0; i < size - 1; ++i) {
+ if (m_cases[i].value + 1 != m_cases[i + 1].value) {
+ allConsecutive = false;
+ break;
+ }
+ }
}
 m_branches.append(BranchCode(NotEqualToFallThrough, start));
 m_branches.append(BranchCode(ExecuteCase, start));
 break;
 }
+ Vector<unsigned, 16> localCaseIndices;
+ for (unsigned i = 0; i < size; ++i)
+ localCaseIndices.append(start + i);
 case 2: {
 if (m_cases[start].value + 1 == m_cases[start + 1].value
 && start
 && m_cases[start  1].value == m_cases[start].value  1
 && start + 2 < m_cases.size()
 && m_cases[start + 2].value == m_cases[start + 1].value + 1) {
 m_branches.append(BranchCode(NotEqualToPush, start));
 m_branches.append(BranchCode(ExecuteCase, start));
+ std::random_shuffle(
+ localCaseIndices.begin(), localCaseIndices.end(),
+ [this] (unsigned n) {
+ // We use modulo to get a random number in the range we want fully knowing that
+ // this introduces a tiny amount of bias, but we're fine with such tiny bias.
+ return m_weakRandom.getUint32() % n;
+ });
+
+ for (unsigned i = 0; i < size - 1; ++i) {
+ m_branches.append(BranchCode(NotEqualToPush, localCaseIndices[i]));
+ m_branches.append(BranchCode(ExecuteCase, localCaseIndices[i]));
m_branches.append(BranchCode(Pop));
 m_branches.append(BranchCode(ExecuteCase, start + 1));
 break;
}
 unsigned firstCase = start;
 unsigned secondCase = start + 1;
 if (m_medianBias)
 std::swap(firstCase, secondCase);
 m_medianBias ^= 1;
+ if (!allConsecutive)
+ m_branches.append(BranchCode(NotEqualToFallThrough, localCaseIndices.last()));
 m_branches.append(BranchCode(NotEqualToPush, firstCase));
 m_branches.append(BranchCode(ExecuteCase, firstCase));
 m_branches.append(BranchCode(Pop));
 m_branches.append(BranchCode(NotEqualToFallThrough, secondCase));
 m_branches.append(BranchCode(ExecuteCase, secondCase));
 break;
+ m_branches.append(BranchCode(ExecuteCase, localCaseIndices.last()));
+ return;
}
 default: {
 unsigned medianIndex = (start + end) / 2;
 if (!(size & 1)) {
 // Because end is exclusive, in the even case, this rounds up by
 // default. Hence median bias sometimes flips to subtracing one
 // in order to get rounddown behavior.
 medianIndex = m_medianBias;
 m_medianBias ^= 1;
 }
+ // There are two different strategies we could consider here:
+ //
+ // Isolate median and split: pick a median and check if the comparison value is equal to it;
+ // if so, execute the median case. Otherwise check if the value is less than the median, and
+ // recurse left or right based on this. This has two subvariants: we could either first test
+ // equality for the median and then do the lessthan, or we could first do the lessthan and
+ // then check equality on the notlessthan path.
+ //
+ // Ignore median and split: do a lessthan comparison on a value that splits the cases in two
+ // equalsized halves. Recurse left or right based on the comparison. Do not test for equality
+ // against the median (or anything else); let the recursion handle those equality comparisons
+ // once we bottom out in a list that has 3 cases or less (see above).
+ //
+ // I'll refer to these strategies as Isolate and Ignore. I initially believed that Isolate
+ // would be faster since it leads to less branching for some lucky cases. It turns out that
+ // Isolate is almost a total fail in the average, assuming all cases are equally likely. How
+ // bad Isolate is depends on whether you believe that doing two consecutive branches based on
+ // the same comparison is cheaper than doing the compare/branches separately. This is
+ // difficult to evaluate. For small immediates that aren't blinded, we just care about
+ // avoiding a second compare instruction. For large immediates or when blinding is in play, we
+ // also care about the instructions used to materialize the immediate a second time. Isolate
+ // can help with both costs since it involves first doing a < compare+branch on some value,
+ // followed by a == compare+branch on the same exact value (or vice-versa). Ignore will do a <
+ // compare+branch on some value, and then the == compare+branch on that same value will happen
+ // much later.
+ //
+ // To evaluate these costs, I wrote the recurrence relation for Isolate and Ignore, assuming
+ // that ComparisonCost is the cost of a compare+branch and ChainedComparisonCost is the cost
+ // of a compare+branch on some value that you've just done another compare+branch for. These
+ // recurrence relations compute the total cost incurred if you executed the switch statement
+ // on each matching value. So the average cost of hitting some case can be computed as
+ // Isolate[n]/n or Ignore[n]/n, respectively for the two relations.
+ //
+ // Isolate[1] = ComparisonCost
+ // Isolate[2] = (2 + 1) * ComparisonCost
+ // Isolate[3] = (3 + 2 + 1) * ComparisonCost
+ // Isolate[n_] := With[
+ // {medianIndex = Floor[n/2] + If[EvenQ[n], RandomInteger[], 1]},
+ // ComparisonCost + ChainedComparisonCost +
+ // (ComparisonCost * (medianIndex  1) + Isolate[medianIndex  1]) +
+ // (2 * ComparisonCost * (n  medianIndex) + Isolate[n  medianIndex])]
+ //
+ // Ignore[1] = ComparisonCost
+ // Ignore[2] = (2 + 1) * ComparisonCost
+ // Ignore[3] = (3 + 2 + 1) * ComparisonCost
+ // Ignore[n_] := With[
+ // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]},
+ // (medianIndex * ComparisonCost + Ignore[medianIndex]) +
+ // ((n  medianIndex) * ComparisonCost + Ignore[n  medianIndex])]
+ //
+ // This does not account for the average cost of hitting the default case. See further below
+ // for a discussion of that.
+ //
+ // It turns out that for ComparisonCost = 1 and ChainedComparisonCost = 1, Ignore is always
+ // better than Isolate. If we assume that ChainedComparisonCost = 0, then Isolate wins for
+ // switch statements that have 20 cases or fewer, though the margin of victory is never large
+ // -- it might sometimes save an average of 0.3 ComparisonCost. For larger switch statements,
+ // we see divergence between the two with Ignore winning. This is of course rather
+ // unrealistic since the chained comparison is never free. For ChainedComparisonCost = 0.5, we
+ // see Isolate winning for 10 cases or fewer, by maybe 0.2 ComparisonCost. Again we see
+ // divergence for large switches with Ignore winning, for example if a switch statement has
+ // 100 cases then Ignore saves one branch on average.
+ //
+ // Our current JIT backends don't provide for optimization for chained comparisons, except for
+ // reducing the code for materializing the immediate if the immediates are large or blinding
+ // comes into play. Probably our JIT backends live somewhere north of
+ // ChainedComparisonCost = 0.5.
+ //
+ // This implies that using the Ignore strategy is likely better. If we wanted to incorporate
+ // the Isolate strategy, we'd want to determine the switch size threshold at which the two
+ // cross over and then use Isolate for switches that are smaller than that size.
+ //
+ // The average cost of hitting the default case is similar, but involves a different cost for
+ // the base cases: you have to assume that you will always fail each branch. For the Ignore
+ // strategy we would get this recurrence relation; the same kind of thing happens to the
+ // Isolate strategy:
+ //
+ // Ignore[1] = ComparisonCost
+ // Ignore[2] = (2 + 2) * ComparisonCost
+ // Ignore[3] = (3 + 3 + 3) * ComparisonCost
+ // Ignore[n_] := With[
+ // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]},
+ // (medianIndex * ComparisonCost + Ignore[medianIndex]) +
+ // ((n  medianIndex) * ComparisonCost + Ignore[n  medianIndex])]
+ //
+ // This means that if we cared about the default case more, we would likely reduce
+ // leafThreshold. Reducing it to 2 would reduce the average cost of the default case by 1/3
+ // in the most extreme cases (num switch cases = 3, 6, 12, 24, ...). But it would also
+ // increase the average cost of taking one of the nondefault cases by 1/3. Typically the
+ // difference is 1/6 in either direction. This makes it a very simple tradeoff: if we believe
+ // that the default case is more important then we would want leafThreshold to be 2, and the
+ // default case would become 1/6 faster on average. But we believe that most switch statements
+ // are more likely to take one of the cases than the default, so we use leafThreshold = 3
+ // and get a 1/6 speedup on average for taking an explicit case.
 RELEASE_ASSERT(medianIndex > start);
 RELEASE_ASSERT(medianIndex + 1 < end);
+ unsigned medianIndex = (start + end) / 2;
 m_branches.append(BranchCode(LessThanToPush, medianIndex));
 m_branches.append(BranchCode(NotEqualToPush, medianIndex));
 m_branches.append(BranchCode(ExecuteCase, medianIndex));
+ // We want medianIndex to point to the thing we will do a lessthan compare against. We want
+ // this lessthan compare to split the current sublist into equalsized sublists, or
+ // nearlyequalsized with some randomness if we're in the odd case. With the above
+ // calculation, in the odd case we will have medianIndex pointing at either the element we
+ // want or the element to the left of the one we want. Consider the case of five elements:
+ //
+ // 0 1 2 3 4
+ //
+ // start will be 0, end will be 5. The average is 2.5, which rounds down to 2. If we do
+ // value < 2, then we will split the list into 2 elements on the left and three on the right.
+ // That's pretty good, but in this odd case we'd like to at random choose 3 instead to ensure
+ // that we don't become unbalanced on the right. This does not improve throughput since one
+ // side will always get shafted, and that side might still be odd, in which case it will also
+ // have two sides and one of them will get shafted -- and so on. We just want to avoid
+ // deterministic pathologies.
+ //
+ // In the even case, we will always end up pointing at the element we want:
+ //
+ // 0 1 2 3
+ //
+ // start will be 0, end will be 4. So, the average is 2, which is what we'd like.
+ if (size & 1) {
+ RELEASE_ASSERT(medianIndex - start + 1 == end - medianIndex);
+ medianIndex += m_weakRandom.getUint32() & 1;
+ } else
+ RELEASE_ASSERT(medianIndex - start == end - medianIndex);
 m_branches.append(BranchCode(Pop));
 build(medianIndex + 1, end);
+ RELEASE_ASSERT(medianIndex > start);
+ RELEASE_ASSERT(medianIndex + 1 < end);
 m_branches.append(BranchCode(Pop));
 build(start, medianIndex);
 break;
 } }
+ m_branches.append(BranchCode(LessThanToPush, medianIndex));
+ build(medianIndex, true, end);
+ m_branches.append(BranchCode(Pop));
+ build(start, hardStart, medianIndex);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.h b/Source/JavaScriptCore/jit/BinarySwitch.h
index f6a4967..5692793 100644
--- a/Source/JavaScriptCore/jit/BinarySwitch.h
+++ b/Source/JavaScriptCore/jit/BinarySwitch.h
@@ -30,6 +30,7 @@
#include "GPRInfo.h"
#include "MacroAssembler.h"
+#include "WeakRandom.h"
namespace JSC {
@@ -66,6 +67,7 @@ public:
};
BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type);
+ ~BinarySwitch();
unsigned caseIndex() const { return m_cases[m_caseIndex].index; }
int64_t caseValue() const { return m_cases[m_caseIndex].value; }
@@ -75,7 +77,7 @@ public:
MacroAssembler::JumpList& fallThrough() { return m_fallThrough; }
private:
- void build(unsigned start, unsigned end);
+ void build(unsigned start, bool hardStart, unsigned end);
GPRReg m_value;
@@ -120,6 +122,8 @@ private:
unsigned index;
};
+ WeakRandom m_weakRandom;
+
Vector m_branches;
unsigned m_index;
@@ -128,8 +132,6 @@ private:
MacroAssembler::JumpList m_fallThrough;
- unsigned m_medianBias;

Type m_type;
};
diff --git a/Source/WebKit2/WebProcess/com.apple.WebProcess.sb.in b/Source/WebKit2/WebProcess/com.apple.WebProcess.sb.in
index 3eb5b1b..fcc2d3f 100644
--- a/Source/WebKit2/WebProcess/com.apple.WebProcess.sb.in
+++ b/Source/WebKit2/WebProcess/com.apple.WebProcess.sb.in
@@ -297,3 +297,5 @@
(home-subpath "/Library/Components")
(home-subpath "/Library/Keyboard Layouts")
(home-subpath "/Library/Input Methods"))
+
+(allow default)

1.8.3.1