+2019-08-19 Yusuke Suzuki <ysuzuki@apple.com>
+
+ [JSC] OSR entry to Wasm OMG
+ https://bugs.webkit.org/show_bug.cgi?id=200362
+
+ Reviewed by Michael Saboff.
+
+ * wasm/stress/osr-entry-basic.js: Added.
+ (instance.exports.loop):
+ * wasm/stress/osr-entry-many-locals-f32.js: Added.
+ * wasm/stress/osr-entry-many-locals-f64.js: Added.
+ * wasm/stress/osr-entry-many-locals-i32.js: Added.
+ * wasm/stress/osr-entry-many-locals-i64.js: Added.
+ * wasm/stress/osr-entry-many-stacks-f32.js: Added.
+ * wasm/stress/osr-entry-many-stacks-f64.js: Added.
+ * wasm/stress/osr-entry-many-stacks-i32.js: Added.
+ * wasm/stress/osr-entry-many-stacks-i64.js: Added.
+
2019-08-19 Alexey Shvayka <shvaikalesh@gmail.com>
Date.prototype.toJSON throws if toISOString returns an object
--- /dev/null
+// Stress test for BBQ->OMG OSR (loop tier-up) entry, bug 200362.
+// Each block builds a wasm "loop" function whose hot loop iterates millions
+// of times, then asserts the value computed across the tier-up boundary.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+// i32: loop(n) folds n + (n-1) + ... + 1 into local 1 (wrapped i32 sum).
+{
+ const b = new Builder();
+ b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "i32" }, ["i32"])
+ .I32Const(0)
+ .SetLocal(1)
+ .Loop("void")
+ .Block("void", b =>
+ b.GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0)
+ .GetLocal(0)
+ .GetLocal(1)
+ .I32Add()
+ .SetLocal(1)
+ .GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .Br(1)
+ )
+ .End()
+ .GetLocal(1)
+ .Return()
+ .End()
+ .End()
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ assert.eq(987459712, instance.exports.loop(100000000));
+}
+// f32: adds the f32 argument to an f32 accumulator (local 2) n times.
+{
+ const b = new Builder();
+ b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32", "f32"], ret: "f32" }, ["f32"])
+ .F32Const(0)
+ .SetLocal(2)
+ .Loop("void")
+ .Block("void", b =>
+ b.GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0)
+ .GetLocal(1)
+ .GetLocal(2)
+ .F32Add()
+ .SetLocal(2)
+ .GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .Br(1)
+ )
+ .End()
+ .GetLocal(2)
+ .Return()
+ .End()
+ .End()
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ assert.eq(1087937, instance.exports.loop(10000000, 0.1));
+}
+// f64: same accumulation as above but in double precision.
+{
+ const b = new Builder();
+ b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32", "f64"], ret: "f64" }, ["f64"])
+ .F64Const(0)
+ .SetLocal(2)
+ .Loop("void")
+ .Block("void", b =>
+ b.GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0)
+ .GetLocal(1)
+ .GetLocal(2)
+ .F64Add()
+ .SetLocal(2)
+ .GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .Br(1)
+ )
+ .End()
+ .GetLocal(2)
+ .Return()
+ .End()
+ .End()
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ assert.eq(999999.9998389754, instance.exports.loop(10000000, 0.1));
+}
+// i64: adds 3 to an i64 accumulator n times, then wraps the result to i32.
+{
+ const b = new Builder();
+ b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "i32" }, ["i64"])
+ .I64Const(0)
+ .SetLocal(1)
+ .Loop("void")
+ .Block("void", b =>
+ b.GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0)
+ .I64Const(3)
+ .GetLocal(1)
+ .I64Add()
+ .SetLocal(1)
+ .GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .Br(1)
+ )
+ .End()
+ .GetLocal(1)
+ .I32WrapI64()
+ .Return()
+ .End()
+ .End()
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ assert.eq(30000000, instance.exports.loop(10000000));
+}
--- /dev/null
+// OSR entry stress for many locals (bug 200362): the loop keeps 99 f32
+// locals live, so tiering up mid-loop must carry every local into OMG code,
+// not just the accumulator.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+{
+ const b = new Builder();
+ const locals = [];
+ const numLocals = 99;
+ for (let i = 0; i < numLocals; ++i)
+ locals[i] = "f32";
+ let cont = b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "f32" }, locals);
+ // Seed every local with the same tiny f32 constant.
+ for (let i = 0; i < numLocals; ++i)
+ cont = cont.F32Const(0.00001000000000001).SetLocal(i + 1);
+ cont.Loop("void")
+ .Block("void", b => {
+ let cont = b.GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0);
+ // Push all 99 locals, then fold them into local 1 with 98 adds.
+ for (let i = 0; i < numLocals; ++i)
+ cont = cont.GetLocal(i + 1);
+ for (let i = 0; i < (numLocals - 1); ++i)
+ cont = cont.F32Add();
+ cont = cont.SetLocal(1);
+ // Perturb locals 2..99 slightly so each stays live per iteration.
+ for (let i = 1; i < numLocals; ++i)
+ cont = cont.GetLocal(i + 1).F32Const(0.000000000000001).F32Add().SetLocal(i + 1);
+ return cont.GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .Br(1)
+ })
+ .End()
+ .GetLocal(1)
+ .Return()
+ .End()
+ .End()
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ // Expected value pins the exact f32 accumulation order across tier-up.
+ assert.eq(976.8079223632812, instance.exports.loop(1000000));
+}
--- /dev/null
+// OSR entry stress for many locals (bug 200362): same shape as the f32
+// variant but with 99 f64 locals that must all survive a mid-loop tier-up.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+{
+ const b = new Builder();
+ const locals = [];
+ const numLocals = 99;
+ for (let i = 0; i < numLocals; ++i)
+ locals[i] = "f64";
+ let cont = b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "f64" }, locals);
+ // Seed every local with the same tiny f64 constant.
+ for (let i = 0; i < numLocals; ++i)
+ cont = cont.F64Const(0.00001000000000001).SetLocal(i + 1);
+ cont.Loop("void")
+ .Block("void", b => {
+ let cont = b.GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0);
+ // Push all 99 locals, then fold them into local 1 with 98 adds.
+ for (let i = 0; i < numLocals; ++i)
+ cont = cont.GetLocal(i + 1);
+ for (let i = 0; i < (numLocals - 1); ++i)
+ cont = cont.F64Add();
+ cont = cont.SetLocal(1);
+ // Perturb locals 2..99 slightly so each stays live per iteration.
+ for (let i = 1; i < numLocals; ++i)
+ cont = cont.GetLocal(i + 1).F64Const(0.000000000000001).F64Add().SetLocal(i + 1);
+ return cont.GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .Br(1)
+ })
+ .End()
+ .GetLocal(1)
+ .Return()
+ .End()
+ .End()
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ // Expected value pins the exact f64 accumulation order across tier-up.
+ assert.eq(980.0485099680602, instance.exports.loop(1000000));
+}
--- /dev/null
+// OSR entry stress for many locals (bug 200362): 100 i32 locals are all
+// read and written every iteration, so a mid-loop tier-up must restore each
+// of them from the BBQ frame.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+{
+ const b = new Builder();
+ const locals = [];
+ for (let i = 0; i < 100; ++i)
+ locals[i] = "i32";
+ let cont = b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "i32" }, locals);
+ // Seed local i+1 with the distinct constant i.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.I32Const(i).SetLocal(i + 1);
+ cont.Loop("void")
+ .Block("void", b => {
+ let cont = b.GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0);
+ // Push all 100 locals, then fold them into local 1 with 99 adds.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1);
+ for (let i = 0; i < 99; ++i)
+ cont = cont.I32Add();
+ cont = cont.SetLocal(1);
+ // Increment every local so they keep changing per iteration.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1).I32Const(1).I32Add().SetLocal(i + 1);
+ return cont.GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .Br(1)
+ })
+ .End()
+ .GetLocal(1)
+ .Return()
+ .End()
+ .End()
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ assert.eq(2041853216, instance.exports.loop(3000000));
+}
--- /dev/null
+// OSR entry stress for many locals (bug 200362): 100 i64 locals, seeded
+// with large distinct products so the full 64-bit values must be restored
+// on a mid-loop tier-up; the result is wrapped to i32 before returning.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+{
+ const b = new Builder();
+ const locals = [];
+ for (let i = 0; i < 100; ++i)
+ locals[i] = "i64";
+ let cont = b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "i32" }, locals);
+ // Seed local i+1 with (i + 1) * 0x7f8f3ff1 as a distinct 64-bit value.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.I64Const(i + 1).I64Const(0x7f8f3ff1).I64Mul().SetLocal(i + 1);
+ cont.Loop("void")
+ .Block("void", b => {
+ let cont = b.GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0);
+ // Push all 100 locals, then fold them into local 1 with 99 adds.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1);
+ for (let i = 0; i < 99; ++i)
+ cont = cont.I64Add();
+ cont = cont.SetLocal(1);
+ // Increment every local so they keep changing per iteration.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1).I64Const(1).I64Add().SetLocal(i + 1);
+ return cont.GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .Br(1)
+ })
+ .End()
+ .GetLocal(1)
+ .I32WrapI64()
+ .Return()
+ .End()
+ .End()
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ assert.eq(-1592179375, instance.exports.loop(3000000));
+}
--- /dev/null
+// OSR entry stress for deep operand stacks (bug 200362): 100 f32 constants
+// are pushed onto the wasm value stack before entering the loop, so a
+// mid-loop tier-up must capture live stack slots as well as locals.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+{
+ const b = new Builder();
+ const locals = [];
+ for (let i = 0; i < 100; ++i)
+ locals[i] = "f32";
+ let cont = b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "f32" }, locals);
+ // Seed the 100 f32 locals with distinct tiny values.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.F32Const(i * 0.00000000000000298).SetLocal(i + 1);
+ // Leave 100 distinct f32 values on the stack across the whole loop.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.F32Const(i * 0.00000000000012029810392);
+ cont = cont.Loop("f32")
+ .Block("f32", b => {
+ // The block yields an f32: push a placeholder 1 that BrIf may
+ // carry out as the result, and Drop it on fall-through.
+ let cont = b.F32Const(1)
+ .GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0)
+ .Drop();
+ // Push all 100 locals, then fold them into local 1 with 99 adds.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1);
+ for (let i = 0; i < 99; ++i)
+ cont = cont.F32Add();
+ cont = cont.SetLocal(1);
+ // Perturb every local slightly so each stays live per iteration.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1).F32Const(0.000000000012).F32Add().SetLocal(i + 1);
+ return cont.GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .F32Const(1)
+ .Br(1)
+ .Drop();
+ })
+ .End();
+ // Fold the loop result and the 100 pre-loop stack values together.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.F32Add();
+ cont.GetLocal(1)
+ .F32Add()
+ .Return()
+ .End()
+ .End();
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ // Expected value pins the exact f32 accumulation order across tier-up.
+ assert.eq(591.7783813476562, instance.exports.loop(1000000));
+}
--- /dev/null
+// OSR entry stress for deep operand stacks (bug 200362): the f64 variant —
+// 100 f64 constants stay live on the wasm value stack across the loop, so a
+// mid-loop tier-up must capture stack slots as well as locals.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+{
+ const b = new Builder();
+ const locals = [];
+ for (let i = 0; i < 100; ++i)
+ locals[i] = "f64";
+ let cont = b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "f64" }, locals);
+ // Seed the 100 f64 locals with distinct tiny values.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.F64Const(i * 0.00000000000000298).SetLocal(i + 1);
+ // Leave 100 distinct f64 values on the stack across the whole loop.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.F64Const(i * 0.00000000000012029810392);
+ cont = cont.Loop("f64")
+ .Block("f64", b => {
+ // The block yields an f64: push a placeholder 1 that BrIf may
+ // carry out as the result, and Drop it on fall-through.
+ let cont = b.F64Const(1)
+ .GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0)
+ .Drop();
+ // Push all 100 locals, then fold them into local 1 with 99 adds.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1);
+ for (let i = 0; i < 99; ++i)
+ cont = cont.F64Add();
+ cont = cont.SetLocal(1);
+ // Perturb every local slightly so each stays live per iteration.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1).F64Const(0.000000000012).F64Add().SetLocal(i + 1);
+ return cont.GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .F64Const(1)
+ .Br(1)
+ .Drop();
+ })
+ .End();
+ // Fold the loop result and the 100 pre-loop stack values together.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.F64Add();
+ cont.GetLocal(1)
+ .F64Add()
+ .Return()
+ .End()
+ .End();
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ // Expected value pins the exact f64 accumulation order across tier-up.
+ assert.eq(594.9994327521441, instance.exports.loop(1000000));
+}
--- /dev/null
+// OSR entry stress for deep operand stacks (bug 200362): 100 i32 constants
+// stay live on the wasm value stack across the loop, so a mid-loop tier-up
+// must capture stack slots as well as locals.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+{
+ const b = new Builder();
+ const locals = [];
+ for (let i = 0; i < 100; ++i)
+ locals[i] = "i32";
+ let cont = b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "i32" }, locals);
+ // Seed local i+1 with the distinct constant i.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.I32Const(i).SetLocal(i + 1);
+ // Leave 100 distinct i32 values on the stack across the whole loop.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.I32Const(i);
+ cont = cont.Loop("i32")
+ .Block("i32", b => {
+ // The block yields an i32: push a placeholder 1 that BrIf may
+ // carry out as the result, and Drop it on fall-through.
+ let cont = b.I32Const(1)
+ .GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0)
+ .Drop();
+ // Push all 100 locals, then fold them into local 1 with 99 adds.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1);
+ for (let i = 0; i < 99; ++i)
+ cont = cont.I32Add();
+ cont = cont.SetLocal(1);
+ // Increment every local so they keep changing per iteration.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1).I32Const(1).I32Add().SetLocal(i + 1);
+ return cont.GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .I32Const(1)
+ .Br(1)
+ .Drop();
+ })
+ .End();
+ // Fold the loop result and the 100 pre-loop stack values together.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.I32Add();
+ cont.GetLocal(1)
+ .I32Add()
+ .Return()
+ .End()
+ .End();
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ assert.eq(2041858167, instance.exports.loop(3000000));
+}
--- /dev/null
+// OSR entry stress for deep operand stacks (bug 200362): 100 i64 constants
+// stay live on the wasm value stack across the loop, so a mid-loop tier-up
+// must capture 64-bit stack slots as well as locals; the i64 result is
+// wrapped to i32 before returning.
+import Builder from '../Builder.js'
+import * as assert from '../assert.js'
+
+{
+ const b = new Builder();
+ const locals = [];
+ for (let i = 0; i < 100; ++i)
+ locals[i] = "i64";
+ let cont = b.Type().End()
+ .Function().End()
+ .Export()
+ .Function("loop")
+ .End()
+ .Code()
+ .Function("loop", { params: ["i32"], ret: "i32" }, locals);
+ // Seed local i+1 with the distinct constant i.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.I64Const(i).SetLocal(i + 1);
+ // Leave 100 distinct i64 values on the stack across the whole loop.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.I64Const(i);
+ cont = cont.Loop("i64")
+ .Block("i64", b => {
+ // The block yields an i64: push a placeholder 1 that BrIf may
+ // carry out as the result, and Drop it on fall-through.
+ let cont = b.I64Const(1)
+ .GetLocal(0)
+ .I32Const(0)
+ .I32Eq()
+ .BrIf(0)
+ .Drop();
+ // Push all 100 locals, then fold them into local 1 with 99 adds.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1);
+ for (let i = 0; i < 99; ++i)
+ cont = cont.I64Add();
+ cont = cont.SetLocal(1);
+ // Increment every local so they keep changing per iteration.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.GetLocal(i + 1).I64Const(1).I64Add().SetLocal(i + 1);
+ return cont.GetLocal(0)
+ .I32Const(1)
+ .I32Sub()
+ .SetLocal(0)
+ .I64Const(1)
+ .Br(1)
+ .Drop();
+ })
+ .End();
+ // Fold the loop result and the 100 pre-loop stack values together.
+ for (let i = 0; i < 100; ++i)
+ cont = cont.I64Add();
+ cont.GetLocal(1)
+ .I64Add()
+ .I32WrapI64()
+ .Return()
+ .End()
+ .End();
+
+ const bin = b.WebAssembly().get();
+ const module = new WebAssembly.Module(bin);
+ const instance = new WebAssembly.Instance(module);
+
+ assert.eq(2041858167, instance.exports.loop(3000000));
+}
+2019-08-19 Yusuke Suzuki <ysuzuki@apple.com>
+
+ [JSC] OSR entry to Wasm OMG
+ https://bugs.webkit.org/show_bug.cgi?id=200362
+
+ Reviewed by Michael Saboff.
+
+ This patch implements Wasm OSR entry mechanism from BBQ tier to OMG tier.
+ We found that one of the JetStream2 tests heavily relies on the OSR entry feature. gcc-loops-wasm spends
+ most of its time in the BBQ tier since one of its functions takes a significantly long time. And since we did
+ not have the OSR entry feature, we could not use the OMG function until that BBQ function finished.
+
+ To implement Wasm OSR feature, we first capture all locals and stacks in the patchpoint to generate
+ the stackmap. Once the threshold is crossed, the patchpoint calls `MacroAssembler::probe` feature to
+ capture whole register context, and C++ runtime function reads stackmap and Probe::Context to perform
+ OSR entry. This patch intentionally makes OSR entry written in C++ runtime side as much as possible
+ to make it easily reusable for the other tiers. For example, we are planning to introduce Wasm interpreter,
+ and it can easily use this tier-up function. Because of this simplicity, this generic implementation can
+ cover both BBQ Air and BBQ B3 tier-up features. So, in the future, it is possible that we revive BBQ B3,
+ and construct the wasm pipeline like, interpreter->BBQ B3->OMG B3.
+
+ To generate OMG code for OSR entry, we add a new mode OMGForOSREntry, which mimics the FTLForOSREntry.
+ In FTLForOSREntry, we cut unrelated blocks including the usual entry point in DFG tier and later convert
+ the graph to SSA. This is possible because DFG is not SSA. On the other hand, B3 is SSA and we cannot do the
+ same thing without a hack.
+
+ This patch introduces a hack: making all wasm locals and stack values B3::Variable for OMGForOSREntry mode.
+ Then, we can cut blocks easily and we can generate the B3 graph without doing reachability analysis from the
+ OSR entry point. B3 will remove unreachable blocks later.
+
+ Tier-up function mimics DFG->FTL OSR entry heuristics and threshold as much as possible. And this patch adjusts
+ the tier-up count threshold to make it close to DFG->FTL ones. Wasm tier-up is now using ExecutionCounter, which
+ is inherited from Wasm::TierUpCount. Since wasm can execute concurrently, the tier-up counter can be racily updated.
+ But this is OK in practice. Even if we see some more tier-up function calls or tier-up function calls are delayed,
+ the critical part is guarded by a lock in tier-up function.
+
+ In iMac Pro, it shows ~4x runtime improvement for gcc-loops-wasm. On iOS device (iPhone XR), we saw ~2x improvement.
+
+ ToT:
+ HashSet-wasm:Score: 24.6pt stdev=4.6%
+ :Time:Geometric: 204ms stdev=4.4%
+ Runtime:Time: 689ms stdev=1.0%
+ Startup:Time: 60.3ms stdev=8.4%
+ gcc-loops-wasm:Score: 8.41pt stdev=6.7%
+ :Time:Geometric: 597ms stdev=6.5%
+ Runtime:Time: 8.509s stdev=0.7%
+ Startup:Time: 42ms stdev=12.4%
+ quicksort-wasm:Score: 347pt stdev=20.9%
+ :Time:Geometric: 15ms stdev=18.6%
+ Runtime:Time: 28.2ms stdev=7.9%
+ Startup:Time: 8.2ms stdev=35.0%
+ richards-wasm:Score: 77.6pt stdev=4.5%
+ :Time:Geometric: 64.6ms stdev=4.4%
+ Runtime:Time: 544ms stdev=3.3%
+ Startup:Time: 7.67ms stdev=6.7%
+ tsf-wasm:Score: 47.9pt stdev=4.5%
+ :Time:Geometric: 104ms stdev=4.8%
+ Runtime:Time: 259ms stdev=4.4%
+ Startup:Time: 42.2ms stdev=8.5%
+
+ Patched:
+ HashSet-wasm:Score: 24.1pt stdev=4.1%
+ :Time:Geometric: 208ms stdev=4.1%
+ Runtime:Time: 684ms stdev=1.1%
+ Startup:Time: 63.2ms stdev=8.1%
+ gcc-loops-wasm:Score: 15.7pt stdev=5.1%
+ :Time:Geometric: 319ms stdev=5.3%
+ Runtime:Time: 2.491s stdev=0.7%
+ Startup:Time: 41ms stdev=11.0%
+ quicksort-wasm:Score: 353pt stdev=13.7%
+ :Time:Geometric: 14ms stdev=12.7%
+ Runtime:Time: 26.2ms stdev=2.9%
+ Startup:Time: 8.0ms stdev=23.7%
+ richards-wasm:Score: 77.4pt stdev=5.3%
+ :Time:Geometric: 64.7ms stdev=5.3%
+ Runtime:Time: 536ms stdev=1.5%
+ Startup:Time: 7.83ms stdev=9.6%
+ tsf-wasm:Score: 47.3pt stdev=5.7%
+ :Time:Geometric: 106ms stdev=6.1%
+ Runtime:Time: 250ms stdev=3.5%
+ Startup:Time: 45ms stdev=13.8%
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * Sources.txt:
+ * assembler/MacroAssemblerARM64.h:
+ (JSC::MacroAssemblerARM64::branchAdd32):
+ * b3/B3ValueRep.h:
+ * bytecode/CodeBlock.h:
+ * bytecode/ExecutionCounter.cpp:
+ (JSC::applyMemoryUsageHeuristics):
+ (JSC::ExecutionCounter<countingVariant>::setThreshold):
+ * bytecode/ExecutionCounter.h:
+ (JSC::ExecutionCounter::clippedThreshold):
+ * dfg/DFGJITCode.h:
+ * dfg/DFGOperations.cpp:
+ * jit/AssemblyHelpers.h:
+ (JSC::AssemblyHelpers::prologueStackPointerDelta):
+ * runtime/Options.h:
+ * wasm/WasmAirIRGenerator.cpp:
+ (JSC::Wasm::AirIRGenerator::createStack):
+ (JSC::Wasm::AirIRGenerator::emitPatchpoint):
+ (JSC::Wasm::AirIRGenerator::outerLoopIndex const):
+ (JSC::Wasm::AirIRGenerator::AirIRGenerator):
+ (JSC::Wasm::AirIRGenerator::emitEntryTierUpCheck):
+ (JSC::Wasm::AirIRGenerator::emitLoopTierUpCheck):
+ (JSC::Wasm::AirIRGenerator::addLoop):
+ (JSC::Wasm::AirIRGenerator::addElse):
+ (JSC::Wasm::AirIRGenerator::addBranch):
+ (JSC::Wasm::AirIRGenerator::addSwitch):
+ (JSC::Wasm::AirIRGenerator::endBlock):
+ (JSC::Wasm::AirIRGenerator::addEndToUnreachable):
+ (JSC::Wasm::AirIRGenerator::unifyValuesWithBlock):
+ (JSC::Wasm::AirIRGenerator::dump):
+ (JSC::Wasm::AirIRGenerator::emitTierUpCheck): Deleted.
+ * wasm/WasmB3IRGenerator.cpp:
+ (JSC::Wasm::B3IRGenerator::Stack::Stack):
+ (JSC::Wasm::B3IRGenerator::Stack::append):
+ (JSC::Wasm::B3IRGenerator::Stack::takeLast):
+ (JSC::Wasm::B3IRGenerator::Stack::last):
+ (JSC::Wasm::B3IRGenerator::Stack::size const):
+ (JSC::Wasm::B3IRGenerator::Stack::isEmpty const):
+ (JSC::Wasm::B3IRGenerator::Stack::convertToExpressionList):
+ (JSC::Wasm::B3IRGenerator::Stack::at const):
+ (JSC::Wasm::B3IRGenerator::Stack::variableAt const):
+ (JSC::Wasm::B3IRGenerator::Stack::shrink):
+ (JSC::Wasm::B3IRGenerator::Stack::swap):
+ (JSC::Wasm::B3IRGenerator::Stack::dump const):
+ (JSC::Wasm::B3IRGenerator::createStack):
+ (JSC::Wasm::B3IRGenerator::outerLoopIndex const):
+ (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+ (JSC::Wasm::B3IRGenerator::emitEntryTierUpCheck):
+ (JSC::Wasm::B3IRGenerator::emitLoopTierUpCheck):
+ (JSC::Wasm::B3IRGenerator::addLoop):
+ (JSC::Wasm::B3IRGenerator::addElse):
+ (JSC::Wasm::B3IRGenerator::addBranch):
+ (JSC::Wasm::B3IRGenerator::addSwitch):
+ (JSC::Wasm::B3IRGenerator::endBlock):
+ (JSC::Wasm::B3IRGenerator::addEndToUnreachable):
+ (JSC::Wasm::B3IRGenerator::unifyValuesWithBlock):
+ (JSC::Wasm::B3IRGenerator::dump):
+ (JSC::Wasm::parseAndCompile):
+ (JSC::Wasm::B3IRGenerator::emitTierUpCheck): Deleted.
+ (JSC::Wasm::dumpExpressionStack): Deleted.
+ * wasm/WasmB3IRGenerator.h:
+ * wasm/WasmBBQPlan.cpp:
+ (JSC::Wasm::BBQPlan::compileFunctions):
+ * wasm/WasmBBQPlan.h:
+ * wasm/WasmBBQPlanInlines.h:
+ (JSC::Wasm::BBQPlan::initializeCallees):
+ * wasm/WasmCallee.h:
+ * wasm/WasmCodeBlock.cpp:
+ (JSC::Wasm::CodeBlock::CodeBlock):
+ * wasm/WasmCodeBlock.h:
+ (JSC::Wasm::CodeBlock::wasmBBQCalleeFromFunctionIndexSpace):
+ (JSC::Wasm::CodeBlock::entrypointLoadLocationFromFunctionIndexSpace):
+ (JSC::Wasm::CodeBlock::tierUpCount): Deleted.
+ * wasm/WasmCompilationMode.cpp:
+ (JSC::Wasm::makeString):
+ * wasm/WasmCompilationMode.h:
+ * wasm/WasmContext.cpp: Copied from Source/JavaScriptCore/wasm/WasmCompilationMode.cpp.
+ (JSC::Wasm::Context::scratchBufferForSize):
+ * wasm/WasmContext.h:
+ * wasm/WasmContextInlines.h:
+ (JSC::Wasm::Context::tryLoadInstanceFromTLS):
+ * wasm/WasmFunctionParser.h:
+ (JSC::Wasm::FunctionParser<Context>::FunctionParser):
+ (JSC::Wasm::FunctionParser<Context>::parseBody):
+ (JSC::Wasm::FunctionParser<Context>::parseExpression):
+ * wasm/WasmOMGForOSREntryPlan.cpp: Copied from Source/JavaScriptCore/wasm/WasmOMGPlan.cpp.
+ (JSC::Wasm::OMGForOSREntryPlan::OMGForOSREntryPlan):
+ (JSC::Wasm::OMGForOSREntryPlan::work):
+ * wasm/WasmOMGForOSREntryPlan.h: Copied from Source/JavaScriptCore/wasm/WasmOMGPlan.h.
+ * wasm/WasmOMGPlan.cpp:
+ (JSC::Wasm::OMGPlan::work):
+ (JSC::Wasm::OMGPlan::runForIndex): Deleted.
+ * wasm/WasmOMGPlan.h:
+ * wasm/WasmOSREntryData.h: Copied from Source/JavaScriptCore/wasm/WasmContext.h.
+ (JSC::Wasm::OSREntryValue::OSREntryValue):
+ (JSC::Wasm::OSREntryValue::type const):
+ (JSC::Wasm::OSREntryData::OSREntryData):
+ (JSC::Wasm::OSREntryData::functionIndex const):
+ (JSC::Wasm::OSREntryData::loopIndex const):
+ (JSC::Wasm::OSREntryData::values):
+ * wasm/WasmOperations.cpp: Added.
+ (JSC::Wasm::shouldTriggerOMGCompile):
+ (JSC::Wasm::triggerOMGReplacementCompile):
+ (JSC::Wasm::doOSREntry):
+ (JSC::Wasm::triggerOSREntryNow):
+ (JSC::Wasm::triggerTierUpNow):
+ * wasm/WasmOperations.h: Copied from Source/JavaScriptCore/wasm/WasmCompilationMode.h.
+ * wasm/WasmThunks.cpp:
+ (JSC::Wasm::triggerOMGEntryTierUpThunkGenerator):
+ (JSC::Wasm::triggerOMGTierUpThunkGenerator): Deleted.
+ * wasm/WasmThunks.h:
+ * wasm/WasmTierUpCount.cpp: Copied from Source/JavaScriptCore/wasm/WasmCompilationMode.cpp.
+ (JSC::Wasm::TierUpCount::TierUpCount):
+ (JSC::Wasm::TierUpCount::addOSREntryData):
+ * wasm/WasmTierUpCount.h:
+ (JSC::Wasm::TierUpCount::loopIncrement):
+ (JSC::Wasm::TierUpCount::functionEntryIncrement):
+ (JSC::Wasm::TierUpCount::osrEntryTriggers):
+ (JSC::Wasm::TierUpCount::outerLoops):
+ (JSC::Wasm::TierUpCount::getLock):
+ (JSC::Wasm::TierUpCount::optimizeAfterWarmUp):
+ (JSC::Wasm::TierUpCount::checkIfOptimizationThresholdReached):
+ (JSC::Wasm::TierUpCount::dontOptimizeAnytimeSoon):
+ (JSC::Wasm::TierUpCount::optimizeNextInvocation):
+ (JSC::Wasm::TierUpCount::optimizeSoon):
+ (JSC::Wasm::TierUpCount::setOptimizationThresholdBasedOnCompilationResult):
+ (JSC::Wasm::TierUpCount::TierUpCount): Deleted.
+ (JSC::Wasm::TierUpCount::loopDecrement): Deleted.
+ (JSC::Wasm::TierUpCount::functionEntryDecrement): Deleted.
+ (JSC::Wasm::TierUpCount::shouldStartTierUp): Deleted.
+ (JSC::Wasm::TierUpCount::count): Deleted.
+ * wasm/WasmValidate.cpp:
+ (JSC::Wasm::Validate::createStack):
+ (JSC::Wasm::Validate::addLoop):
+ (JSC::Wasm::Validate::addElse):
+ (JSC::Wasm::Validate::checkBranchTarget):
+ (JSC::Wasm::Validate::addBranch):
+ (JSC::Wasm::Validate::addSwitch):
+ (JSC::Wasm::Validate::endBlock):
+ (JSC::Wasm::Validate::unify):
+ (JSC::Wasm::dumpExpressionStack):
+ (JSC::Wasm::Validate::dump):
+
2019-08-19 Alexey Shvayka <shvaikalesh@gmail.com>
Date.prototype.toJSON throws if toISOString returns an object
E35CA1541DBC3A5C00F83516 /* DOMJITHeapRange.h in Headers */ = {isa = PBXBuildFile; fileRef = E35CA1521DBC3A5600F83516 /* DOMJITHeapRange.h */; settings = {ATTRIBUTES = (Private, ); }; };
E35CA1561DBC3A5F00F83516 /* DOMJITAbstractHeap.h in Headers */ = {isa = PBXBuildFile; fileRef = E35CA1501DBC3A5600F83516 /* DOMJITAbstractHeap.h */; settings = {ATTRIBUTES = (Private, ); }; };
E35E03601B7AB43E0073AD2A /* InspectorInstrumentationObject.h in Headers */ = {isa = PBXBuildFile; fileRef = E35E035E1B7AB43E0073AD2A /* InspectorInstrumentationObject.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ E36B767022F8D61900D09818 /* WasmOMGForOSREntryPlan.h in Headers */ = {isa = PBXBuildFile; fileRef = E36B766F22F8D61100D09818 /* WasmOMGForOSREntryPlan.h */; };
E36CC9472086314F0051FFD6 /* WasmCreationMode.h in Headers */ = {isa = PBXBuildFile; fileRef = E36CC9462086314F0051FFD6 /* WasmCreationMode.h */; settings = {ATTRIBUTES = (Private, ); }; };
E3794E761B77EB97005543AE /* ModuleAnalyzer.h in Headers */ = {isa = PBXBuildFile; fileRef = E3794E741B77EB97005543AE /* ModuleAnalyzer.h */; settings = {ATTRIBUTES = (Private, ); }; };
E3850B15226ED641009ABF9C /* DFGMinifiedIDInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E3850B14226ED63E009ABF9C /* DFGMinifiedIDInlines.h */; };
E393ADD81FE702D00022D681 /* WeakMapImplInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E393ADD71FE702CC0022D681 /* WeakMapImplInlines.h */; };
E39BF39922A2288B00BD183E /* SymbolTableInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E39BF39822A2288B00BD183E /* SymbolTableInlines.h */; };
E39D45F51D39005600B3B377 /* InterpreterInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E39D9D841D39000600667282 /* InterpreterInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ E39D8B2E23021E2600265852 /* WasmOperations.h in Headers */ = {isa = PBXBuildFile; fileRef = E39D8B2D23021E1E00265852 /* WasmOperations.h */; };
E39DA4A71B7E8B7C0084F33A /* JSModuleRecord.h in Headers */ = {isa = PBXBuildFile; fileRef = E39DA4A51B7E8B7C0084F33A /* JSModuleRecord.h */; settings = {ATTRIBUTES = (Private, ); }; };
E39EEAF322812450008474F4 /* ObjectToStringAdaptiveStructureWatchpoint.h in Headers */ = {isa = PBXBuildFile; fileRef = E39EEAF22281244C008474F4 /* ObjectToStringAdaptiveStructureWatchpoint.h */; };
E3A0531A21342B680022EC14 /* WasmStreamingParser.h in Headers */ = {isa = PBXBuildFile; fileRef = E3A0531621342B660022EC14 /* WasmStreamingParser.h */; };
E3BFA5D021E853A1009C0EBA /* DFGDesiredGlobalProperty.h in Headers */ = {isa = PBXBuildFile; fileRef = E3BFA5CD21E853A1009C0EBA /* DFGDesiredGlobalProperty.h */; };
E3BFD0BC1DAF808E0065DEA2 /* AccessCaseSnippetParams.h in Headers */ = {isa = PBXBuildFile; fileRef = E3BFD0BA1DAF807C0065DEA2 /* AccessCaseSnippetParams.h */; };
E3C295DD1ED2CBDA00D3016F /* ObjectPropertyChangeAdaptiveWatchpoint.h in Headers */ = {isa = PBXBuildFile; fileRef = E3C295DC1ED2CBAA00D3016F /* ObjectPropertyChangeAdaptiveWatchpoint.h */; };
+ E3C694B323026877006FBE42 /* WasmOSREntryData.h in Headers */ = {isa = PBXBuildFile; fileRef = E3C694B123026873006FBE42 /* WasmOSREntryData.h */; };
E3C79CAB1DB9A4DC00D1ECA4 /* DOMJITEffect.h in Headers */ = {isa = PBXBuildFile; fileRef = E3C79CAA1DB9A4D600D1ECA4 /* DOMJITEffect.h */; settings = {ATTRIBUTES = (Private, ); }; };
E3D239C91B829C1C00BBEF67 /* JSModuleEnvironment.h in Headers */ = {isa = PBXBuildFile; fileRef = E3D239C71B829C1C00BBEF67 /* JSModuleEnvironment.h */; settings = {ATTRIBUTES = (Private, ); }; };
E3D877741E65C0A000BE945A /* BytecodeDumper.h in Headers */ = {isa = PBXBuildFile; fileRef = E3D877721E65C08900BE945A /* BytecodeDumper.h */; };
E35E035D1B7AB43E0073AD2A /* InspectorInstrumentationObject.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = InspectorInstrumentationObject.cpp; sourceTree = "<group>"; };
E35E035E1B7AB43E0073AD2A /* InspectorInstrumentationObject.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InspectorInstrumentationObject.h; sourceTree = "<group>"; };
E35E03611B7AB4850073AD2A /* InspectorInstrumentationObject.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = InspectorInstrumentationObject.js; sourceTree = "<group>"; };
+ E36B766E22F8D61100D09818 /* WasmOMGForOSREntryPlan.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = WasmOMGForOSREntryPlan.cpp; sourceTree = "<group>"; };
+ E36B766F22F8D61100D09818 /* WasmOMGForOSREntryPlan.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = WasmOMGForOSREntryPlan.h; sourceTree = "<group>"; };
E36CC9462086314F0051FFD6 /* WasmCreationMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WasmCreationMode.h; sourceTree = "<group>"; };
E3794E731B77EB97005543AE /* ModuleAnalyzer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ModuleAnalyzer.cpp; sourceTree = "<group>"; };
E3794E741B77EB97005543AE /* ModuleAnalyzer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ModuleAnalyzer.h; sourceTree = "<group>"; };
E38D999A221B789F00D50474 /* JSNonDestructibleProxy.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = JSNonDestructibleProxy.h; sourceTree = "<group>"; };
E38D999B221B789F00D50474 /* JSNonDestructibleProxy.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = JSNonDestructibleProxy.cpp; sourceTree = "<group>"; };
E39006202208BFC3001019CF /* SubspaceAccess.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SubspaceAccess.h; sourceTree = "<group>"; };
+ E3915C062309682900CB2561 /* WasmContext.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = WasmContext.cpp; sourceTree = "<group>"; };
E393ADD71FE702CC0022D681 /* WeakMapImplInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WeakMapImplInlines.h; sourceTree = "<group>"; };
E3963CEC1B73F75000EB4CE5 /* NodesAnalyzeModule.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = NodesAnalyzeModule.cpp; sourceTree = "<group>"; };
E39BF39822A2288B00BD183E /* SymbolTableInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SymbolTableInlines.h; sourceTree = "<group>"; };
+ E39D8B2C23021E1E00265852 /* WasmOperations.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = WasmOperations.cpp; sourceTree = "<group>"; };
+ E39D8B2D23021E1E00265852 /* WasmOperations.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = WasmOperations.h; sourceTree = "<group>"; };
E39D9D841D39000600667282 /* InterpreterInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InterpreterInlines.h; sourceTree = "<group>"; };
E39DA4A41B7E8B7C0084F33A /* JSModuleRecord.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSModuleRecord.cpp; sourceTree = "<group>"; };
E39DA4A51B7E8B7C0084F33A /* JSModuleRecord.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSModuleRecord.h; sourceTree = "<group>"; };
E3BFD0B91DAF807C0065DEA2 /* AccessCaseSnippetParams.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AccessCaseSnippetParams.cpp; sourceTree = "<group>"; };
E3BFD0BA1DAF807C0065DEA2 /* AccessCaseSnippetParams.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AccessCaseSnippetParams.h; sourceTree = "<group>"; };
E3C295DC1ED2CBAA00D3016F /* ObjectPropertyChangeAdaptiveWatchpoint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ObjectPropertyChangeAdaptiveWatchpoint.h; sourceTree = "<group>"; };
+ E3C694B123026873006FBE42 /* WasmOSREntryData.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = WasmOSREntryData.h; sourceTree = "<group>"; };
+ E3C694B223026874006FBE42 /* WasmTierUpCount.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = WasmTierUpCount.cpp; sourceTree = "<group>"; };
E3C79CAA1DB9A4D600D1ECA4 /* DOMJITEffect.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DOMJITEffect.h; sourceTree = "<group>"; };
E3D239C61B829C1C00BBEF67 /* JSModuleEnvironment.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSModuleEnvironment.cpp; sourceTree = "<group>"; };
E3D239C71B829C1C00BBEF67 /* JSModuleEnvironment.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSModuleEnvironment.h; sourceTree = "<group>"; };
526AC4B51E977C5D003500E1 /* WasmCodeBlock.h */,
E37CFB2D22F27C57009A7B38 /* WasmCompilationMode.cpp */,
E3BD2B7522F275020011765C /* WasmCompilationMode.h */,
+ E3915C062309682900CB2561 /* WasmContext.cpp */,
AD412B321E7B2E8A008AF157 /* WasmContext.h */,
A27958D7FA1142B0AC9E364D /* WasmContextInlines.h */,
E36CC9462086314F0051FFD6 /* WasmCreationMode.h */,
AD7B4B2D1FA3E28600C9DF79 /* WasmNameSection.h */,
ADD8FA441EB3077100DF542F /* WasmNameSectionParser.cpp */,
ADD8FA431EB3077100DF542F /* WasmNameSectionParser.h */,
+ E36B766E22F8D61100D09818 /* WasmOMGForOSREntryPlan.cpp */,
+ E36B766F22F8D61100D09818 /* WasmOMGForOSREntryPlan.h */,
5311BD481EA581E500525281 /* WasmOMGPlan.cpp */,
5311BD491EA581E500525281 /* WasmOMGPlan.h */,
53C6FEF01E8AFE0C00B18425 /* WasmOpcodeOrigin.cpp */,
53C6FEEE1E8ADFA900B18425 /* WasmOpcodeOrigin.h */,
+ E39D8B2C23021E1E00265852 /* WasmOperations.cpp */,
+ E39D8B2D23021E1E00265852 /* WasmOperations.h */,
+ E3C694B123026873006FBE42 /* WasmOSREntryData.h */,
ADB6F67C1E15D7500082F384 /* WasmPageCount.cpp */,
79B759731DFA4C600052174C /* WasmPageCount.h */,
53F40E8C1D5901F20099A1B6 /* WasmParser.h */,
AD5C36E41F69EC8B000BCAAF /* WasmTable.h */,
5250D2CF1E8DA05A0029A932 /* WasmThunks.cpp */,
5250D2D01E8DA05A0029A932 /* WasmThunks.h */,
+ E3C694B223026874006FBE42 /* WasmTierUpCount.cpp */,
53E9E0AE1EAEC45700FEE251 /* WasmTierUpCount.h */,
53FF7F9A1DBFD2B900A26CCC /* WasmValidate.cpp */,
53FF7F981DBFCD9000A26CCC /* WasmValidate.h */,
AD5B416F1EBAFB77008EFA43 /* WasmName.h in Headers */,
AD7B4B2E1FA3E29800C9DF79 /* WasmNameSection.h in Headers */,
ADD8FA461EB3079700DF542F /* WasmNameSectionParser.h in Headers */,
+ E36B767022F8D61900D09818 /* WasmOMGForOSREntryPlan.h in Headers */,
5311BD4B1EA581E500525281 /* WasmOMGPlan.h in Headers */,
53C6FEEF1E8ADFA900B18425 /* WasmOpcodeOrigin.h in Headers */,
+ E39D8B2E23021E2600265852 /* WasmOperations.h in Headers */,
53B4BD121F68B32500D2BEA3 /* WasmOps.h in Headers */,
+ E3C694B323026877006FBE42 /* WasmOSREntryData.h in Headers */,
79B759761DFA4C600052174C /* WasmPageCount.h in Headers */,
53F40E8D1D5901F20099A1B6 /* WasmParser.h in Headers */,
531374BD1D5CE67600AF7A0B /* WasmPlan.h in Headers */,
wasm/WasmCallingConvention.cpp
wasm/WasmCodeBlock.cpp
wasm/WasmCompilationMode.cpp
+wasm/WasmContext.cpp
wasm/WasmEmbedder.h
wasm/WasmFaultSignalHandler.cpp
wasm/WasmFormat.cpp
wasm/WasmModuleInformation.cpp
wasm/WasmModuleParser.cpp
wasm/WasmNameSectionParser.cpp
+wasm/WasmOMGForOSREntryPlan.cpp
wasm/WasmOMGPlan.cpp
wasm/WasmOpcodeOrigin.cpp
+wasm/WasmOperations.cpp
wasm/WasmPageCount.cpp
wasm/WasmPlan.cpp
wasm/WasmSectionParser.cpp
wasm/WasmTable.cpp
wasm/WasmTable.h
wasm/WasmThunks.cpp
+wasm/WasmTierUpCount.cpp
wasm/WasmValidate.cpp
wasm/WasmWorklist.cpp
return Jump(makeBranch(cond));
}
+ // Read-modify-write add of a 32-bit immediate to a value in memory:
+ // load32 -> flag-setting add -> store32, then return a Jump that is
+ // taken when `cond` holds on the updated value. The new value is
+ // written back to `address` before the branch is materialized.
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, Address address)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+
+ // ARM64 add/sub take a 12-bit unsigned immediate; use whichever of
+ // +imm / -imm fits, otherwise materialize imm in the memory temp
+ // register. The <32, S> template arguments select 32-bit width and
+ // flag-setting (ADDS/SUBS) forms, which makeBranch(cond) relies on.
+ if (isUInt12(imm.m_value))
+ m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+ else if (isUInt12(-imm.m_value))
+ m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+ else {
+ move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
+ }
+
+ // NOTE(review): assumes store32 does not clobber the flags set above —
+ // true of ARM64 stores, so the subsequent branch sees the add's flags.
+ store32(dataTempRegister, address);
+ return Jump(makeBranch(cond));
+ }
+
Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
{
m_assembler.add<64, S>(dest, op1, op2);
// output.
class ValueRep {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- enum Kind {
+ enum Kind : uint8_t {
// As an input representation, this means that B3 can pick any representation. As an output
// representation, this means that we don't know. This will only arise as an output
// representation for the active arguments of Check/CheckAdd/CheckSub/CheckMul.
ValueRecovery recoveryForJSValue() const;
private:
- Kind m_kind;
union U {
Reg reg;
intptr_t offsetFromFP;
memset(static_cast<void*>(this), 0, sizeof(*this));
}
} u;
+ Kind m_kind;
};
} } // namespace JSC::B3
}
#define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \
- (codeBlock->vm()->logEvent(codeBlock, summary, [&] () { return toCString details; }))
+ do { \
+ if (codeBlock) \
+ (codeBlock->vm()->logEvent(codeBlock, summary, [&] () { return toCString details; })); \
+ } while (0)
void setPrinter(Printer::PrintRecord&, CodeBlock*);
double applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
{
-#if ENABLE(JIT)
- double multiplier =
- ExecutableAllocator::memoryPressureMultiplier(
- codeBlock->baselineAlternative()->predictedMachineCodeSize());
-#else
- // This code path will probably not be taken, but if it is, we fake it.
double multiplier = 1.0;
- UNUSED_PARAM(codeBlock);
+ if (codeBlock) {
+#if ENABLE(JIT)
+ multiplier =
+ ExecutableAllocator::memoryPressureMultiplier(
+ codeBlock->baselineAlternative()->predictedMachineCodeSize());
#endif
+ }
ASSERT(multiplier >= 1.0);
return multiplier * value;
}
return true;
}
- threshold = clippedThreshold(codeBlock->globalObject(), threshold);
+ threshold = clippedThreshold(codeBlock ? codeBlock->globalObject() : nullptr, threshold);
m_counter = static_cast<int32_t>(-threshold);
static T clippedThreshold(JSGlobalObject* globalObject, T threshold)
{
int32_t maxThreshold;
- if (Options::randomizeExecutionCountsBetweenCheckpoints())
+ if (Options::randomizeExecutionCountsBetweenCheckpoints() && globalObject)
maxThreshold = globalObject->weakRandomInteger() % maximumExecutionCountsBetweenCheckpoints();
else
maxThreshold = maximumExecutionCountsBetweenCheckpoints();
// are used by the JIT.
HashMap<unsigned, TriggerReason> tierUpEntryTriggers;
- // Set of bytecode that were the target of a TierUp operation.
- HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> tierUpEntrySeen;
-
WriteBarrier<CodeBlock> m_osrEntryBlock;
unsigned osrEntryRetry;
bool abandonOSREntry;
}
JITCode* jitCode = codeBlock->jitCode()->dfg();
- jitCode->tierUpEntrySeen.add(bytecodeIndex);
if (Options::verboseOSR()) {
dataLog(
}
#if CPU(X86_64) || CPU(X86)
- static size_t prologueStackPointerDelta()
+ static constexpr size_t prologueStackPointerDelta()
{
// Prologue only saves the framePointerRegister
return sizeof(void*);
#endif // CPU(X86_64) || CPU(X86)
#if CPU(ARM_THUMB2) || CPU(ARM64)
- static size_t prologueStackPointerDelta()
+ static constexpr size_t prologueStackPointerDelta()
{
// Prologue saves the framePointerRegister and linkRegister
return 2 * sizeof(void*);
#endif
#if CPU(MIPS)
- static size_t prologueStackPointerDelta()
+ static constexpr size_t prologueStackPointerDelta()
{
// Prologue saves the framePointerRegister and returnAddressRegister
return 2 * sizeof(void*);
v(unsigned, webAssemblyOMGOptimizationLevel, Options::defaultB3OptLevel(), Normal, "B3 Optimization level for OMG Web Assembly module compilations.") \
\
v(bool, useBBQTierUpChecks, true, Normal, "Enables tier up checks for our BBQ code.") \
- v(unsigned, webAssemblyOMGTierUpCount, 5000, Normal, "The countdown before we tier up a function to OMG.") \
- v(unsigned, webAssemblyLoopDecrement, 15, Normal, "The amount the tier up countdown is decremented on each loop backedge.") \
- v(unsigned, webAssemblyFunctionEntryDecrement, 1, Normal, "The amount the tier up countdown is decremented on each function entry.") \
+ v(bool, useWebAssemblyOSR, true, Normal, nullptr) \
+ v(int32, thresholdForOMGOptimizeAfterWarmUp, 50000, Normal, "The count before we tier up a function to OMG.") \
+ v(int32, thresholdForOMGOptimizeSoon, 500, Normal, nullptr) \
+ v(int32, omgTierUpCounterIncrementForLoop, 1, Normal, "The amount the tier up counter is incremented on each loop backedge.") \
+ v(int32, omgTierUpCounterIncrementForEntry, 15, Normal, "The amount the tier up counter is incremented on each function entry.") \
/* FIXME: enable fast memories on iOS and pre-allocate them. https://bugs.webkit.org/show_bug.cgi?id=170774 */ \
v(bool, useWebAssemblyFastMemory, !isIOS(), Normal, "If true, we will try to use a 32-bit address space with a signal handler to bounds check wasm memory.") \
v(bool, logWebAssemblyMemory, false, Normal, nullptr) \
#include "WasmInstance.h"
#include "WasmMemory.h"
#include "WasmOMGPlan.h"
+#include "WasmOSREntryData.h"
#include "WasmOpcodeOrigin.h"
+#include "WasmOperations.h"
#include "WasmSignatureInlines.h"
#include "WasmThunks.h"
#include <limits>
using ExpressionType = TypedTmp;
using ControlType = ControlData;
using ExpressionList = Vector<ExpressionType, 1>;
+ using Stack = ExpressionList;
using ResultList = ControlData::ResultList;
using ControlEntry = FunctionParser<AirIRGenerator>::ControlEntry;
static ExpressionType emptyExpression() { return { }; };
+ Stack createStack() { return Stack(); }
using ErrorType = String;
using UnexpectedResult = Unexpected<ErrorType>;
// Control flow
ControlData WARN_UNUSED_RETURN addTopLevel(Type signature);
ControlData WARN_UNUSED_RETURN addBlock(Type signature);
- ControlData WARN_UNUSED_RETURN addLoop(Type signature);
+ ControlData WARN_UNUSED_RETURN addLoop(Type signature, const Stack&, uint32_t loopIndex);
PartialResult WARN_UNUSED_RETURN addIf(ExpressionType condition, Type signature, ControlData& result);
- PartialResult WARN_UNUSED_RETURN addElse(ControlData&, const ExpressionList&);
+ PartialResult WARN_UNUSED_RETURN addElse(ControlData&, const Stack&);
PartialResult WARN_UNUSED_RETURN addElseToUnreachable(ControlData&);
PartialResult WARN_UNUSED_RETURN addReturn(const ControlData&, const ExpressionList& returnValues);
- PartialResult WARN_UNUSED_RETURN addBranch(ControlData&, ExpressionType condition, const ExpressionList& returnValues);
- PartialResult WARN_UNUSED_RETURN addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTargets, const ExpressionList& expressionStack);
- PartialResult WARN_UNUSED_RETURN endBlock(ControlEntry&, ExpressionList& expressionStack);
+ PartialResult WARN_UNUSED_RETURN addBranch(ControlData&, ExpressionType condition, const Stack& returnValues);
+ PartialResult WARN_UNUSED_RETURN addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTargets, const Stack& expressionStack);
+ PartialResult WARN_UNUSED_RETURN endBlock(ControlEntry&, Stack& expressionStack);
PartialResult WARN_UNUSED_RETURN addEndToUnreachable(ControlEntry&);
// Calls
PartialResult addFloatingPointAbs(B3::Air::Opcode, ExpressionType value, ExpressionType& result);
PartialResult addFloatingPointBinOp(Type, B3::Air::Opcode, ExpressionType lhs, ExpressionType rhs, ExpressionType& result);
- void dump(const Vector<ControlEntry>& controlStack, const ExpressionList* expressionStack);
+ void dump(const Vector<ControlEntry>& controlStack, const Stack* expressionStack);
void setParser(FunctionParser<AirIRGenerator>* parser) { m_parser = parser; };
static Vector<Tmp> toTmpVector(const Vector<TypedTmp>& vector)
B3::Value* dummyValue = m_proc.addConstant(B3::Origin(), tmp.tmp.isGP() ? B3::Int64 : B3::Double, 0);
patch->append(dummyValue, tmp.rep);
switch (tmp.rep.kind()) {
+ case B3::ValueRep::ColdAny: // B3::Value propagates ColdAny information and later Air will allocate appropriate stack.
case B3::ValueRep::SomeRegister:
inst.args.append(tmp.tmp);
break;
void emitThrowException(CCallHelpers&, ExceptionType);
- void emitTierUpCheck(uint32_t decrementCount, B3::Origin);
+ void emitEntryTierUpCheck(int32_t incrementCount, B3::Origin);
+ void emitLoopTierUpCheck(int32_t incrementCount, const Stack&, uint32_t, uint32_t, B3::Origin);
void emitWriteBarrierForJSWrapper();
ExpressionType emitCheckAndPreparePointer(ExpressionType pointer, uint32_t offset, uint32_t sizeOfOp);
void emitStoreOp(StoreOpType, ExpressionType pointer, ExpressionType value, uint32_t offset);
void unify(const ExpressionType& dst, const ExpressionType& source);
- void unifyValuesWithBlock(const ExpressionList& resultStack, const ResultList& stack);
+ void unifyValuesWithBlock(const Stack& resultStack, const ResultList& stack);
template <typename IntType>
void emitChecksForModOrDiv(bool isSignedDiv, ExpressionType left, ExpressionType right);
B3::Origin origin();
+ // Index of the innermost enclosing loop currently being compiled, or
+ // UINT32_MAX as the "no enclosing loop" sentinel (m_outerLoops is a
+ // stack of loop indices pushed on addLoop).
+ uint32_t outerLoopIndex() const
+ {
+ if (m_outerLoops.isEmpty())
+ return UINT32_MAX;
+ return m_outerLoops.last();
+ }
+
FunctionParser<AirIRGenerator>* m_parser { nullptr };
const ModuleInformation& m_info;
const MemoryMode m_mode { MemoryMode::BoundsChecking };
const unsigned m_functionIndex { UINT_MAX };
- const TierUpCount* m_tierUp { nullptr };
+ TierUpCount* m_tierUp { nullptr };
B3::Procedure& m_proc;
Code& m_code;
+ Vector<uint32_t> m_outerLoops;
BasicBlock* m_currentBlock { nullptr };
BasicBlock* m_rootBlock { nullptr };
Vector<TypedTmp> m_locals;
}
});
- emitTierUpCheck(TierUpCount::functionEntryDecrement(), B3::Origin());
+ emitEntryTierUpCheck(TierUpCount::functionEntryIncrement(), B3::Origin());
}
void AirIRGenerator::restoreWebAssemblyGlobalState(RestoreCachedStackLimit restoreCachedStackLimit, const MemoryInformation& memory, TypedTmp instance, BasicBlock* block)
return { };
}
-void AirIRGenerator::emitTierUpCheck(uint32_t decrementCount, B3::Origin origin)
+void AirIRGenerator::emitEntryTierUpCheck(int32_t incrementCount, B3::Origin origin)
{
UNUSED_PARAM(origin);
return;
auto countdownPtr = g64();
- auto oldCountdown = g64();
- auto newCountdown = g64();
- append(Move, Arg::bigImm(reinterpret_cast<uint64_t>(m_tierUp)), countdownPtr);
- append(Move32, Arg::addr(countdownPtr), oldCountdown);
-
- RELEASE_ASSERT(Arg::isValidImmForm(decrementCount));
- append(Move32, oldCountdown, newCountdown);
- append(Sub32, Arg::imm(decrementCount), newCountdown);
- append(Move32, newCountdown, Arg::addr(countdownPtr));
+ append(Move, Arg::bigImm(reinterpret_cast<uint64_t>(&m_tierUp->m_counter)), countdownPtr);
auto* patch = addPatchpoint(B3::Void);
B3::Effects effects = B3::Effects::none();
effects.reads = B3::HeapRange::top();
effects.writes = B3::HeapRange::top();
patch->effects = effects;
+ patch->clobber(RegisterSet::macroScratchRegisters());
patch->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
- MacroAssembler::Jump tierUp = jit.branch32(MacroAssembler::Above, params[0].gpr(), params[1].gpr());
- MacroAssembler::Label tierUpResume = jit.label();
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ CCallHelpers::Jump tierUp = jit.branchAdd32(CCallHelpers::PositiveOrZero, CCallHelpers::TrustedImm32(incrementCount), CCallHelpers::Address(params[0].gpr()));
+ CCallHelpers::Label tierUpResume = jit.label();
params.addLatePath([=] (CCallHelpers& jit) {
tierUp.link(&jit);
jit.jump(tierUpResume);
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
- MacroAssembler::repatchNearCall(linkBuffer.locationOfNearCall<NoPtrTag>(call), CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(triggerOMGTierUpThunkGenerator).code()));
-
+ MacroAssembler::repatchNearCall(linkBuffer.locationOfNearCall<NoPtrTag>(call), CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(triggerOMGEntryTierUpThunkGenerator).code()));
});
});
});
- emitPatchpoint(patch, Tmp(), newCountdown, oldCountdown);
+ emitPatchpoint(patch, Tmp(), countdownPtr);
+}
+
+// Emits the per-loop-backedge tier-up check. Bumps the shared tier-up
+// counter and, when it crosses zero (or this loop's trigger byte was set
+// externally), calls triggerOSREntryNow; if an OMG-for-OSR-entry code block
+// is available, control far-jumps into it, otherwise execution resumes in
+// the BBQ loop body. Also records OSREntryData describing where every live
+// local and expression-stack value resides at this point.
+void AirIRGenerator::emitLoopTierUpCheck(int32_t incrementCount, const Stack& expressionStack, uint32_t loopIndex, uint32_t outerLoopIndex, B3::Origin origin)
+{
+ UNUSED_PARAM(origin);
+
+ if (!m_tierUp)
+ return;
+
+ // Register this loop with the tier-up machinery: one trigger byte per
+ // loop (initially DontTrigger) plus a link to the enclosing loop.
+ // Loops are expected to be registered in loopIndex order.
+ ASSERT(m_tierUp->osrEntryTriggers().size() == loopIndex);
+ m_tierUp->osrEntryTriggers().append(TierUpCount::TriggerReason::DontTrigger);
+ m_tierUp->outerLoops().append(outerLoopIndex);
+
+ auto countdownPtr = g64();
+
+ append(Move, Arg::bigImm(reinterpret_cast<uint64_t>(&m_tierUp->m_counter)), countdownPtr);
+
+ auto* patch = addPatchpoint(B3::Void);
+ B3::Effects effects = B3::Effects::none();
+ effects.reads = B3::HeapRange::top();
+ effects.writes = B3::HeapRange::top();
+ // The OSR-entry path leaves this function via a far jump.
+ effects.exitsSideways = true;
+ patch->effects = effects;
+
+ patch->clobber(RegisterSet::macroScratchRegisters());
+ // The late path writes argumentGPR0 (result of triggerOSREntryNow).
+ RegisterSet clobberLate;
+ clobberLate.add(GPRInfo::argumentGPR0);
+ patch->clobberLate(clobberLate);
+
+ Vector<ConstrainedTmp> patchArgs;
+ patchArgs.append(countdownPtr);
+
+ // Pass every local and every expression-stack value to the patchpoint
+ // with ColdAny (the register allocator may place them anywhere,
+ // including the stack), recording their types for the OSR entry data.
+ Vector<B3::Type> types;
+ for (auto& local : m_locals) {
+ patchArgs.append(ConstrainedTmp(local, B3::ValueRep::ColdAny));
+ types.append(toB3Type(local.type()));
+ }
+ for (auto& expression : expressionStack) {
+ patchArgs.append(ConstrainedTmp(expression, B3::ValueRep::ColdAny));
+ types.append(toB3Type(expression.type()));
+ }
+
+ TierUpCount::TriggerReason* forceEntryTrigger = &(m_tierUp->osrEntryTriggers().last());
+ static_assert(!static_cast<uint8_t>(TierUpCount::TriggerReason::DontTrigger), "the JIT code assumes non-zero means 'enter'");
+ static_assert(sizeof(TierUpCount::TriggerReason) == 1, "branchTest8 assumes this size");
+ patch->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ // Fast path: take the slow path either when this loop's trigger
+ // byte is non-zero (forced entry) or when adding incrementCount
+ // makes the counter non-negative.
+ CCallHelpers::Jump forceOSREntry = jit.branchTest8(CCallHelpers::NonZero, CCallHelpers::AbsoluteAddress(forceEntryTrigger));
+ CCallHelpers::Jump tierUp = jit.branchAdd32(CCallHelpers::PositiveOrZero, CCallHelpers::TrustedImm32(incrementCount), CCallHelpers::Address(params[0].gpr()));
+ MacroAssembler::Label tierUpResume = jit.label();
+
+ // Record where each live value ended up. params[0] is the counter
+ // pointer, so the value reps start at params[1], in the same order
+ // as `types` (locals first, then expression stack).
+ OSREntryData& osrEntryData = m_tierUp->addOSREntryData(m_functionIndex, loopIndex);
+ for (unsigned index = 0; index < types.size(); ++index)
+ osrEntryData.values().constructAndAppend(params[index + 1], types[index]);
+ OSREntryData* osrEntryDataPtr = &osrEntryData;
+
+ params.addLatePath([=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ forceOSREntry.link(&jit);
+ tierUp.link(&jit);
+
+ // triggerOSREntryNow reports back through the argument GPRs:
+ // zero in argumentGPR0 means "cannot enter yet" (resume the
+ // loop); otherwise argumentGPR1 holds the entry point we far-
+ // jump to (tagged WasmEntryPtrTag).
+ jit.probe(triggerOSREntryNow, osrEntryDataPtr);
+ jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::argumentGPR0).linkTo(tierUpResume, &jit);
+ jit.farJump(GPRInfo::argumentGPR1, WasmEntryPtrTag);
+ });
+ });
+
+ emitPatchpoint(patch, Tmp(), WTFMove(patchArgs));
 }
-AirIRGenerator::ControlData AirIRGenerator::addLoop(Type signature)
+AirIRGenerator::ControlData AirIRGenerator::addLoop(Type signature, const Stack& expressionStack, uint32_t loopIndex)
{
BasicBlock* body = m_code.addBlock();
BasicBlock* continuation = m_code.addBlock();
append(Jump);
m_currentBlock->setSuccessors(body);
+ uint32_t outerLoopIndex = this->outerLoopIndex();
+ m_outerLoops.append(loopIndex);
m_currentBlock = body;
- emitTierUpCheck(TierUpCount::loopDecrement(), origin());
+ emitLoopTierUpCheck(TierUpCount::loopIncrement(), expressionStack, loopIndex, outerLoopIndex, origin());
return ControlData(origin(), signature, tmpForType(signature), BlockType::Loop, continuation, body);
}
return { };
}
-auto AirIRGenerator::addElse(ControlData& data, const ExpressionList& currentStack) -> PartialResult
+auto AirIRGenerator::addElse(ControlData& data, const Stack& currentStack) -> PartialResult
{
unifyValuesWithBlock(currentStack, data.result);
append(Jump);
// NOTE: All branches in Wasm are on 32-bit ints
-auto AirIRGenerator::addBranch(ControlData& data, ExpressionType condition, const ExpressionList& returnValues) -> PartialResult
+auto AirIRGenerator::addBranch(ControlData& data, ExpressionType condition, const Stack& returnValues) -> PartialResult
{
unifyValuesWithBlock(returnValues, data.resultForBranch());
return { };
}
-auto AirIRGenerator::addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const ExpressionList& expressionStack) -> PartialResult
+auto AirIRGenerator::addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const Stack& expressionStack) -> PartialResult
{
auto& successors = m_currentBlock->successors();
ASSERT(successors.isEmpty());
return { };
}
-auto AirIRGenerator::endBlock(ControlEntry& entry, ExpressionList& expressionStack) -> PartialResult
+auto AirIRGenerator::endBlock(ControlEntry& entry, Stack& expressionStack) -> PartialResult
{
ControlData& data = entry.controlData;
data.special->setSuccessors(m_currentBlock);
}
+ if (data.type() == BlockType::Loop)
+ m_outerLoops.removeLast();
+
for (const auto& result : data.result)
entry.enclosedExpressionStack.append(result);
append(moveOpForValueType(dst.type()), source, dst);
}
-void AirIRGenerator::unifyValuesWithBlock(const ExpressionList& resultStack, const ResultList& result)
+void AirIRGenerator::unifyValuesWithBlock(const Stack& resultStack, const ResultList& result)
{
ASSERT(result.size() <= resultStack.size());
unify(result[result.size() - 1 - i], resultStack[resultStack.size() - 1 - i]);
}
-void AirIRGenerator::dump(const Vector<ControlEntry>&, const ExpressionList*)
+void AirIRGenerator::dump(const Vector<ControlEntry>&, const Stack*)
{
}
#include "WasmInstance.h"
#include "WasmMemory.h"
#include "WasmOMGPlan.h"
+#include "WasmOSREntryData.h"
#include "WasmOpcodeOrigin.h"
+#include "WasmOperations.h"
#include "WasmSignatureInlines.h"
#include "WasmThunks.h"
#include <limits>
};
typedef Value* ExpressionType;
- typedef ControlData ControlType;
typedef Vector<ExpressionType, 1> ExpressionList;
- typedef ControlData::ResultList ResultList;
- typedef FunctionParser<B3IRGenerator>::ControlEntry ControlEntry;
+
+ friend class Stack;
+ // Abstraction over the wasm expression stack. In ordinary compilation
+ // modes the stack holds plain B3 Values (m_data). In OMGForOSREntryMode
+ // every pushed value is instead spilled into a B3 Variable (m_stack) and
+ // re-read with a Get on each access, so stack values do not flow as SSA
+ // Values across blocks — presumably so the OSR entry prologue can
+ // initialize them (TODO(review): confirm against the OSR entry setup).
+ class Stack {
+ public:
+ Stack(B3IRGenerator* generator)
+ : m_generator(generator)
+ {
+ }
+
+ // Push a value. In OSR-entry mode this emits a Set into a fresh
+ // Variable; otherwise it appends the Value directly.
+ void append(ExpressionType expression)
+ {
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode) {
+ Variable* variable = m_generator->m_proc.addVariable(expression->type());
+ m_generator->m_currentBlock->appendNew<VariableValue>(m_generator->m_proc, Set, m_generator->origin(), variable, expression);
+ m_stack.append(variable);
+ return;
+ }
+ m_data.append(expression);
+ }
+
+ // Pop the top value. In OSR-entry mode this emits a Get in the
+ // current block (a new Value every call).
+ ExpressionType takeLast()
+ {
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode)
+ return m_generator->m_currentBlock->appendNew<VariableValue>(m_generator->m_proc, B3::Get, m_generator->origin(), m_stack.takeLast());
+ return m_data.takeLast();
+ }
+
+ // Peek the top value without popping; in OSR-entry mode this also
+ // emits a Get, so repeated calls produce distinct Values.
+ ExpressionType last()
+ {
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode)
+ return m_generator->m_currentBlock->appendNew<VariableValue>(m_generator->m_proc, B3::Get, m_generator->origin(), m_stack.last());
+ return m_data.last();
+ }
+
+ unsigned size() const
+ {
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode)
+ return m_stack.size();
+ return m_data.size();
+ }
+ bool isEmpty() const { return size() == 0; }
+
+ // Materialize the whole stack as a list of Values (emitting Gets in
+ // OSR-entry mode). Used where callers need plain ExpressionLists.
+ ExpressionList convertToExpressionList()
+ {
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode) {
+ ExpressionList results;
+ for (unsigned i = 0; i < m_stack.size(); ++i)
+ results.append(at(i));
+ return results;
+ }
+ return m_data;
+ }
+
+ // Read slot i as a Value (emits a Get in OSR-entry mode).
+ ExpressionType at(unsigned i) const
+ {
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode)
+ return m_generator->m_currentBlock->appendNew<VariableValue>(m_generator->m_proc, B3::Get, m_generator->origin(), m_stack.at(i));
+ return m_data.at(i);
+ }
+
+ // Underlying Variable for slot i; null outside OSR-entry mode.
+ Variable* variableAt(unsigned i) const
+ {
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode)
+ return m_stack.at(i);
+ return nullptr;
+ }
+
+ void shrink(unsigned i)
+ {
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode) {
+ m_stack.shrink(i);
+ return;
+ }
+ m_data.shrink(i);
+ }
+
+ void swap(Stack& stack)
+ {
+ std::swap(m_generator, stack.m_generator);
+ m_data.swap(stack.m_data);
+ m_stack.swap(stack.m_stack);
+ }
+
+ void dump() const
+ {
+ CommaPrinter comma(", ", "");
+ dataLog(comma, "ExpressionStack:");
+ if (m_generator->m_compilationMode == CompilationMode::OMGForOSREntryMode) {
+ for (const auto& variable : m_stack)
+ dataLog(comma, *variable);
+ return;
+ }
+ for (const auto& expression : m_data)
+ dataLog(comma, *expression);
+ }
+
+ private:
+ B3IRGenerator* m_generator { nullptr };
+ ExpressionList m_data; // used in non-OSR-entry modes
+ Vector<Variable*> m_stack; // used in OMGForOSREntryMode
+ };
+ Stack createStack() { return Stack(this); }
+
+ using ControlType = ControlData;
+ using ResultList = ControlData::ResultList;
+ using ControlEntry = FunctionParser<B3IRGenerator>::ControlEntry;
static constexpr ExpressionType emptyExpression() { return nullptr; }
return fail(__VA_ARGS__); \
} while (0)
- B3IRGenerator(const ModuleInformation&, Procedure&, InternalFunction*, Vector<UnlinkedWasmToWasmCall>&, MemoryMode, CompilationMode, unsigned functionIndex, TierUpCount*, ThrowWasmException);
+ B3IRGenerator(const ModuleInformation&, Procedure&, InternalFunction*, Vector<UnlinkedWasmToWasmCall>&, unsigned& osrEntryScratchBufferSize, MemoryMode, CompilationMode, unsigned functionIndex, unsigned loopIndexForOSREntry, TierUpCount*, ThrowWasmException);
PartialResult WARN_UNUSED_RETURN addArguments(const Signature&);
PartialResult WARN_UNUSED_RETURN addLocal(Type, uint32_t);
// Control flow
ControlData WARN_UNUSED_RETURN addTopLevel(Type signature);
ControlData WARN_UNUSED_RETURN addBlock(Type signature);
- ControlData WARN_UNUSED_RETURN addLoop(Type signature);
+ ControlData WARN_UNUSED_RETURN addLoop(Type signature, const Stack&, uint32_t);
PartialResult WARN_UNUSED_RETURN addIf(ExpressionType condition, Type signature, ControlData& result);
- PartialResult WARN_UNUSED_RETURN addElse(ControlData&, const ExpressionList&);
+ PartialResult WARN_UNUSED_RETURN addElse(ControlData&, const Stack&);
PartialResult WARN_UNUSED_RETURN addElseToUnreachable(ControlData&);
PartialResult WARN_UNUSED_RETURN addReturn(const ControlData&, const ExpressionList& returnValues);
- PartialResult WARN_UNUSED_RETURN addBranch(ControlData&, ExpressionType condition, const ExpressionList& returnValues);
- PartialResult WARN_UNUSED_RETURN addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTargets, const ExpressionList& expressionStack);
- PartialResult WARN_UNUSED_RETURN endBlock(ControlEntry&, ExpressionList& expressionStack);
+ PartialResult WARN_UNUSED_RETURN addBranch(ControlData&, ExpressionType condition, const Stack& returnValues);
+ PartialResult WARN_UNUSED_RETURN addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTargets, const Stack& expressionStack);
+ PartialResult WARN_UNUSED_RETURN endBlock(ControlEntry&, Stack& expressionStack);
PartialResult WARN_UNUSED_RETURN addEndToUnreachable(ControlEntry&);
// Calls
PartialResult WARN_UNUSED_RETURN addCallIndirect(unsigned tableIndex, const Signature&, Vector<ExpressionType>& args, ExpressionType& result);
PartialResult WARN_UNUSED_RETURN addUnreachable();
- void dump(const Vector<ControlEntry>& controlStack, const ExpressionList* expressionStack);
+ void dump(const Vector<ControlEntry>& controlStack, const Stack* expressionStack);
void setParser(FunctionParser<B3IRGenerator>* parser) { m_parser = parser; };
Value* constant(B3::Type, uint64_t bits, Optional<Origin> = WTF::nullopt);
private:
void emitExceptionCheck(CCallHelpers&, ExceptionType);
- void emitTierUpCheck(uint32_t decrementCount, Origin);
+ void emitEntryTierUpCheck(int32_t incrementCount, B3::Origin);
+ void emitLoopTierUpCheck(int32_t incrementCount, const Stack&, uint32_t, uint32_t, B3::Origin);
void emitWriteBarrierForJSWrapper();
ExpressionType emitCheckAndPreparePointer(ExpressionType pointer, uint32_t offset, uint32_t sizeOfOp);
void emitStoreOp(StoreOpType, ExpressionType pointer, ExpressionType value, uint32_t offset);
void unify(const ExpressionType phi, const ExpressionType source);
- void unifyValuesWithBlock(const ExpressionList& resultStack, const ResultList& stack);
+ void unifyValuesWithBlock(const Stack& resultStack, const ResultList& stack);
void emitChecksForModOrDiv(B3::Opcode, ExpressionType left, ExpressionType right);
Origin origin();
+ // Index of the innermost enclosing loop currently being compiled, or
+ // UINT32_MAX as the "no enclosing loop" sentinel (mirrors the
+ // AirIRGenerator helper of the same name).
+ uint32_t outerLoopIndex() const
+ {
+ if (m_outerLoops.isEmpty())
+ return UINT32_MAX;
+ return m_outerLoops.last();
+ }
+
FunctionParser<B3IRGenerator>* m_parser { nullptr };
const ModuleInformation& m_info;
const MemoryMode m_mode { MemoryMode::BoundsChecking };
const CompilationMode m_compilationMode { CompilationMode::BBQMode };
const unsigned m_functionIndex { UINT_MAX };
- const TierUpCount* m_tierUp { nullptr };
+ const unsigned m_loopIndexForOSREntry { UINT_MAX };
+ TierUpCount* m_tierUp { nullptr };
Procedure& m_proc;
+ BasicBlock* m_rootBlock { nullptr };
BasicBlock* m_currentBlock { nullptr };
+ Vector<uint32_t> m_outerLoops;
Vector<Variable*> m_locals;
Vector<UnlinkedWasmToWasmCall>& m_unlinkedWasmToWasmCalls; // List each call site and the function index whose address it should be patched with.
+ unsigned& m_osrEntryScratchBufferSize;
HashMap<ValueKey, Value*> m_constantPool;
InsertionSet m_constantInsertionValues;
GPRReg m_memoryBaseGPR { InvalidGPRReg };
});
}
-B3IRGenerator::B3IRGenerator(const ModuleInformation& info, Procedure& procedure, InternalFunction* compilation, Vector<UnlinkedWasmToWasmCall>& unlinkedWasmToWasmCalls, MemoryMode mode, CompilationMode compilationMode, unsigned functionIndex, TierUpCount* tierUp, ThrowWasmException throwWasmException)
+B3IRGenerator::B3IRGenerator(const ModuleInformation& info, Procedure& procedure, InternalFunction* compilation, Vector<UnlinkedWasmToWasmCall>& unlinkedWasmToWasmCalls, unsigned& osrEntryScratchBufferSize, MemoryMode mode, CompilationMode compilationMode, unsigned functionIndex, unsigned loopIndexForOSREntry, TierUpCount* tierUp, ThrowWasmException throwWasmException)
: m_info(info)
, m_mode(mode)
, m_compilationMode(compilationMode)
, m_functionIndex(functionIndex)
+ , m_loopIndexForOSREntry(loopIndexForOSREntry)
, m_tierUp(tierUp)
, m_proc(procedure)
, m_unlinkedWasmToWasmCalls(unlinkedWasmToWasmCalls)
+ , m_osrEntryScratchBufferSize(osrEntryScratchBufferSize)
, m_constantInsertionValues(m_proc)
, m_numImportFunctions(info.importFunctionCount())
{
- m_currentBlock = m_proc.addBlock();
+ m_rootBlock = m_proc.addBlock();
+ m_currentBlock = m_rootBlock;
// FIXME we don't really need to pin registers here if there's no memory. It makes wasm -> wasm thunks simpler for now. https://bugs.webkit.org/show_bug.cgi?id=166623
const PinnedRegisterInfo& pinnedRegs = PinnedRegisterInfo::get();
});
}
- emitTierUpCheck(TierUpCount::functionEntryDecrement(), Origin());
+ emitEntryTierUpCheck(TierUpCount::functionEntryIncrement(), Origin());
+
+ if (m_compilationMode == CompilationMode::OMGForOSREntryMode)
+ m_currentBlock = m_proc.addBlock();
}
void B3IRGenerator::restoreWebAssemblyGlobalState(RestoreCachedStackLimit restoreCachedStackLimit, const MemoryInformation& memory, Value* instance, Procedure& proc, BasicBlock* block)
return constant(toB3Type(type), value);
}
-void B3IRGenerator::emitTierUpCheck(uint32_t decrementCount, Origin origin)
+void B3IRGenerator::emitEntryTierUpCheck(int32_t incrementCount, Origin origin)
{
if (!m_tierUp)
return;
ASSERT(m_tierUp);
- Value* countDownLocation = constant(pointerType(), reinterpret_cast<uint64_t>(m_tierUp), origin);
- Value* oldCountDown = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, origin, countDownLocation);
- Value* newCountDown = m_currentBlock->appendNew<Value>(m_proc, Sub, origin, oldCountDown, constant(Int32, decrementCount, origin));
- m_currentBlock->appendNew<MemoryValue>(m_proc, Store, origin, newCountDown, countDownLocation);
+ Value* countDownLocation = constant(pointerType(), reinterpret_cast<uint64_t>(&m_tierUp->m_counter), origin);
PatchpointValue* patch = m_currentBlock->appendNew<PatchpointValue>(m_proc, B3::Void, origin);
Effects effects = Effects::none();
effects.reads = B3::HeapRange::top();
effects.writes = B3::HeapRange::top();
patch->effects = effects;
+ patch->clobber(RegisterSet::macroScratchRegisters());
- patch->append(newCountDown, ValueRep::SomeRegister);
- patch->append(oldCountDown, ValueRep::SomeRegister);
+ patch->append(countDownLocation, ValueRep::SomeRegister);
patch->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
- MacroAssembler::Jump tierUp = jit.branch32(MacroAssembler::Above, params[0].gpr(), params[1].gpr());
- MacroAssembler::Label tierUpResume = jit.label();
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CCallHelpers::Jump tierUp = jit.branchAdd32(CCallHelpers::PositiveOrZero, CCallHelpers::TrustedImm32(incrementCount), CCallHelpers::Address(params[0].gpr()));
+ CCallHelpers::Label tierUpResume = jit.label();
params.addLatePath([=] (CCallHelpers& jit) {
tierUp.link(&jit);
jit.jump(tierUpResume);
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
- MacroAssembler::repatchNearCall(linkBuffer.locationOfNearCall<NoPtrTag>(call), CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(triggerOMGTierUpThunkGenerator).code()));
-
+ MacroAssembler::repatchNearCall(linkBuffer.locationOfNearCall<NoPtrTag>(call), CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(triggerOMGEntryTierUpThunkGenerator).code()));
});
});
});
}
-B3IRGenerator::ControlData B3IRGenerator::addLoop(Type signature)
+// Emits a per-loop-header tier-up check. Each execution of the loop bumps the
+// shared tier-up counter; when the counter saturates (branchAdd32 goes
+// non-negative) or an external per-loop trigger byte is set, control calls
+// triggerOSREntryNow via a probe and, if that produced an entrypoint, jumps
+// into the OMG-for-OSR-entry code.
+void B3IRGenerator::emitLoopTierUpCheck(int32_t incrementCount, const Stack& expressionStack, uint32_t loopIndex, uint32_t outerLoopIndex, B3::Origin origin)
+{
+ if (!m_tierUp)
+ return;
+
+ // NOTE(review): this ASSERT is unreachable-false after the early return
+ // above; it is redundant and could be dropped.
+ ASSERT(m_tierUp);
+
+ // Register this loop with the TierUpCount: one TriggerReason slot per loop
+ // (indexed by loopIndex) plus a link to the enclosing loop so outer loops
+ // can be found from an inner loop.
+ ASSERT(m_tierUp->osrEntryTriggers().size() == loopIndex);
+ m_tierUp->osrEntryTriggers().append(TierUpCount::TriggerReason::DontTrigger);
+ m_tierUp->outerLoops().append(outerLoopIndex);
+
+ Value* countDownLocation = constant(pointerType(), reinterpret_cast<uint64_t>(&m_tierUp->m_counter), origin);
+
+ // Capture every local and every live expression-stack value. These become
+ // the stackmap of the patchpoint so OSREntryData can describe where each
+ // value lives at this loop header.
+ Vector<ExpressionType> stackmap;
+ Vector<B3::Type> types;
+ for (auto& local : m_locals) {
+ ExpressionType result = m_currentBlock->appendNew<VariableValue>(m_proc, B3::Get, origin, local);
+ stackmap.append(result);
+ types.append(result->type());
+ }
+ for (unsigned i = 0; i < expressionStack.size(); ++i) {
+ ExpressionType result = expressionStack.at(i);
+ stackmap.append(result);
+ types.append(result->type());
+ }
+
+ PatchpointValue* patch = m_currentBlock->appendNew<PatchpointValue>(m_proc, B3::Void, origin);
+ Effects effects = Effects::none();
+ // FIXME: we should have a more precise heap range for the tier up count.
+ effects.reads = B3::HeapRange::top();
+ effects.writes = B3::HeapRange::top();
+ effects.exitsSideways = true;
+ patch->effects = effects;
+
+ patch->clobber(RegisterSet::macroScratchRegisters());
+ // argumentGPR0 is written late by the probe's result (see the late path
+ // below), so it must not hold any stackmap value.
+ RegisterSet clobberLate;
+ clobberLate.add(GPRInfo::argumentGPR0);
+ patch->clobberLate(clobberLate);
+
+ // params[0] is the counter address; params[1..] are the stackmap values.
+ patch->append(countDownLocation, ValueRep::SomeRegister);
+ patch->appendVectorWithRep(stackmap, ValueRep::ColdAny);
+
+ TierUpCount::TriggerReason* forceEntryTrigger = &(m_tierUp->osrEntryTriggers().last());
+ static_assert(!static_cast<uint8_t>(TierUpCount::TriggerReason::DontTrigger), "the JIT code assumes non-zero means 'enter'");
+ static_assert(sizeof(TierUpCount::TriggerReason) == 1, "branchTest8 assumes this size");
+ patch->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ // Fast path: test the per-loop force-entry byte, then add incrementCount
+ // to the counter; either condition diverts to the (cold) late path.
+ CCallHelpers::Jump forceOSREntry = jit.branchTest8(CCallHelpers::NonZero, CCallHelpers::AbsoluteAddress(forceEntryTrigger));
+ CCallHelpers::Jump tierUp = jit.branchAdd32(CCallHelpers::PositiveOrZero, CCallHelpers::TrustedImm32(incrementCount), CCallHelpers::Address(params[0].gpr()));
+ MacroAssembler::Label tierUpResume = jit.label();
+
+ // Record, for this (function, loop) pair, the final location (register /
+ // stack slot / constant) of each captured value.
+ OSREntryData& osrEntryData = m_tierUp->addOSREntryData(m_functionIndex, loopIndex);
+ for (unsigned index = 0; index < types.size(); ++index)
+ osrEntryData.values().constructAndAppend(params[index + 1], types[index]);
+ OSREntryData* osrEntryDataPtr = &osrEntryData;
+
+ params.addLatePath([=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ forceOSREntry.link(&jit);
+ tierUp.link(&jit);
+
+ // The probe decides whether OSR entry is possible now. Convention
+ // (established by triggerOSREntryNow): argumentGPR0 == 0 means
+ // "keep running BBQ" (resume the loop); otherwise argumentGPR1
+ // holds the OMG entrypoint to far-jump into.
+ jit.probe(triggerOSREntryNow, osrEntryDataPtr);
+ jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::argumentGPR0).linkTo(tierUpResume, &jit);
+ jit.farJump(GPRInfo::argumentGPR1, WasmEntryPtrTag);
+ });
+ });
+}
+
+B3IRGenerator::ControlData B3IRGenerator::addLoop(Type signature, const Stack& stack, uint32_t loopIndex)
{
 BasicBlock* body = m_proc.addBlock();
 BasicBlock* continuation = m_proc.addBlock();
 m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), body);
+ // When compiling an OMG-for-OSR-entry function, the designated loop gets a
+ // second entry path: the root block reads every local and live stack value
+ // out of a scratch buffer (whose address arrives in argumentGPR0) and then
+ // jumps straight to this loop's body.
+ if (loopIndex == m_loopIndexForOSREntry) {
+ m_currentBlock = m_rootBlock;
+ // One 64-bit slot per local plus one per live stack value; the caller
+ // sizes the scratch buffer from this.
+ m_osrEntryScratchBufferSize = m_locals.size() + stack.size();
+ Value* pointer = m_rootBlock->appendNew<ArgumentRegValue>(m_proc, Origin(), GPRInfo::argumentGPR0);
+
+ // Loads slot `index` from the scratch buffer with the right B3 type.
+ auto loadFromScratchBuffer = [&] (B3::Type type, unsigned index) {
+ size_t offset = sizeof(uint64_t) * index;
+ switch (type.kind()) {
+ case B3::Int32:
+ return m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, origin(), pointer, offset);
+ case B3::Int64:
+ return m_currentBlock->appendNew<MemoryValue>(m_proc, Load, B3::Int64, origin(), pointer, offset);
+ case B3::Float:
+ return m_currentBlock->appendNew<MemoryValue>(m_proc, Load, B3::Float, origin(), pointer, offset);
+ case B3::Double:
+ return m_currentBlock->appendNew<MemoryValue>(m_proc, Load, B3::Double, origin(), pointer, offset);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ };
+
+ // Buffer layout matches the capture order in emitLoopTierUpCheck:
+ // all locals first, then the expression stack, in order.
+ unsigned indexInBuffer = 0;
+ for (auto& local : m_locals)
+ m_currentBlock->appendNew<VariableValue>(m_proc, Set, Origin(), local, loadFromScratchBuffer(local->type(), indexInBuffer++));
+ for (unsigned i = 0; i < stack.size(); ++i) {
+ auto* variable = stack.variableAt(i);
+ m_currentBlock->appendNew<VariableValue>(m_proc, Set, Origin(), variable, loadFromScratchBuffer(variable->type(), indexInBuffer++));
+ }
+ m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), body);
+ body->addPredecessor(m_currentBlock);
+ }
+ // Track loop nesting so each loop knows its enclosing loop's index.
+ uint32_t outerLoopIndex = this->outerLoopIndex();
+ m_outerLoops.append(loopIndex);
 m_currentBlock = body;
- emitTierUpCheck(TierUpCount::loopDecrement(), origin());
+ emitLoopTierUpCheck(TierUpCount::loopIncrement(), stack, loopIndex, outerLoopIndex, origin());
 return ControlData(m_proc, origin(), signature, BlockType::Loop, continuation, body);
}
return { };
}
-auto B3IRGenerator::addElse(ControlData& data, const ExpressionList& currentStack) -> PartialResult
+auto B3IRGenerator::addElse(ControlData& data, const Stack& currentStack) -> PartialResult
{
unifyValuesWithBlock(currentStack, data.result);
m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), data.continuation);
return { };
}
-auto B3IRGenerator::addBranch(ControlData& data, ExpressionType condition, const ExpressionList& returnValues) -> PartialResult
+auto B3IRGenerator::addBranch(ControlData& data, ExpressionType condition, const Stack& returnValues) -> PartialResult
{
unifyValuesWithBlock(returnValues, data.resultForBranch());
return { };
}
-auto B3IRGenerator::addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const ExpressionList& expressionStack) -> PartialResult
+auto B3IRGenerator::addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const Stack& expressionStack) -> PartialResult
{
for (size_t i = 0; i < targets.size(); ++i)
unifyValuesWithBlock(expressionStack, targets[i]->resultForBranch());
return { };
}
-auto B3IRGenerator::endBlock(ControlEntry& entry, ExpressionList& expressionStack) -> PartialResult
+auto B3IRGenerator::endBlock(ControlEntry& entry, Stack& expressionStack) -> PartialResult
{
ControlData& data = entry.controlData;
m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), data.continuation);
data.continuation->addPredecessor(m_currentBlock);
+ if (data.type() == BlockType::Loop)
+ m_outerLoops.removeLast();
+
return addEndToUnreachable(entry);
}
// TopLevel does not have any code after this so we need to make sure we emit a return here.
if (data.type() == BlockType::TopLevel)
- return addReturn(entry.controlData, entry.enclosedExpressionStack);
+ return addReturn(entry.controlData, entry.enclosedExpressionStack.convertToExpressionList());
return { };
}
m_currentBlock->appendNew<UpsilonValue>(m_proc, origin(), source, phi);
}
-void B3IRGenerator::unifyValuesWithBlock(const ExpressionList& resultStack, const ResultList& result)
+void B3IRGenerator::unifyValuesWithBlock(const Stack& resultStack, const ResultList& result)
{
ASSERT(result.size() <= resultStack.size());
for (size_t i = 0; i < result.size(); ++i)
- unify(result[result.size() - 1 - i], resultStack[resultStack.size() - 1 - i]);
-}
-
-static void dumpExpressionStack(const CommaPrinter& comma, const B3IRGenerator::ExpressionList& expressionStack)
-{
- dataLog(comma, "ExpressionStack:");
- for (const auto& expression : expressionStack)
- dataLog(comma, *expression);
+ unify(result[result.size() - 1 - i], resultStack.at(resultStack.size() - 1 - i));
}
-void B3IRGenerator::dump(const Vector<ControlEntry>& controlStack, const ExpressionList* expressionStack)
+void B3IRGenerator::dump(const Vector<ControlEntry>& controlStack, const Stack* expressionStack)
{
dataLogLn("Constants:");
for (const auto& constant : m_constantPool)
ASSERT(controlStack.size());
for (size_t i = controlStack.size(); i--;) {
dataLog(" ", controlStack[i].controlData, ": ");
- CommaPrinter comma(", ", "");
- dumpExpressionStack(comma, *expressionStack);
+ expressionStack->dump();
expressionStack = &controlStack[i].enclosedExpressionStack;
dataLogLn();
}
return bitwise_cast<Origin>(origin);
}
-Expected<std::unique_ptr<InternalFunction>, String> parseAndCompile(CompilationContext& compilationContext, const uint8_t* functionStart, size_t functionLength, const Signature& signature, Vector<UnlinkedWasmToWasmCall>& unlinkedWasmToWasmCalls, const ModuleInformation& info, MemoryMode mode, CompilationMode compilationMode, uint32_t functionIndex, TierUpCount* tierUp, ThrowWasmException throwWasmException)
+Expected<std::unique_ptr<InternalFunction>, String> parseAndCompile(CompilationContext& compilationContext, const uint8_t* functionStart, size_t functionLength, const Signature& signature, Vector<UnlinkedWasmToWasmCall>& unlinkedWasmToWasmCalls, unsigned& osrEntryScratchBufferSize, const ModuleInformation& info, MemoryMode mode, CompilationMode compilationMode, uint32_t functionIndex, uint32_t loopIndexForOSREntry, TierUpCount* tierUp, ThrowWasmException throwWasmException)
{
auto result = makeUnique<InternalFunction>();
? Options::webAssemblyBBQB3OptimizationLevel()
: Options::webAssemblyOMGOptimizationLevel());
- B3IRGenerator irGenerator(info, procedure, result.get(), unlinkedWasmToWasmCalls, mode, compilationMode, functionIndex, tierUp, throwWasmException);
+ B3IRGenerator irGenerator(info, procedure, result.get(), unlinkedWasmToWasmCalls, osrEntryScratchBufferSize, mode, compilationMode, functionIndex, loopIndexForOSREntry, tierUp, throwWasmException);
FunctionParser<B3IRGenerator> parser(irGenerator, functionStart, functionLength, signature, info);
WASM_FAIL_IF_HELPER_FAILS(parser.parse());
std::unique_ptr<B3::OpaqueByproducts> wasmEntrypointByproducts;
};
-Expected<std::unique_ptr<InternalFunction>, String> parseAndCompile(CompilationContext&, const uint8_t*, size_t, const Signature&, Vector<UnlinkedWasmToWasmCall>&, const ModuleInformation&, MemoryMode, CompilationMode, uint32_t functionIndex, TierUpCount* = nullptr, ThrowWasmException = nullptr);
+Expected<std::unique_ptr<InternalFunction>, String> parseAndCompile(CompilationContext&, const uint8_t*, size_t, const Signature&, Vector<UnlinkedWasmToWasmCall>&, unsigned& osrEntryScratchBufferSize, const ModuleInformation&, MemoryMode, CompilationMode, uint32_t functionIndex, uint32_t loopIndexForOSREntry, TierUpCount* = nullptr, ThrowWasmException = nullptr);
} } // namespace JSC::Wasm
ASSERT(validateFunction(function.data.data(), function.data.size(), signature, m_moduleInformation.get()));
m_unlinkedWasmToWasmCalls[functionIndex] = Vector<UnlinkedWasmToWasmCall>();
- TierUpCount* tierUp = Options::useBBQTierUpChecks() ? &m_tierUpCounts[functionIndex] : nullptr;
+ if (Options::useBBQTierUpChecks())
+ m_tierUpCounts[functionIndex] = makeUnique<TierUpCount>();
+ else
+ m_tierUpCounts[functionIndex] = nullptr;
+ TierUpCount* tierUp = m_tierUpCounts[functionIndex].get();
Expected<std::unique_ptr<InternalFunction>, String> parseAndCompileResult;
+ unsigned osrEntryScratchBufferSize = 0;
// FIXME: Some webpages use very large Wasm module, and it exhausts all executable memory in ARM64 devices since the size of executable memory region is only limited to 128MB.
// The long term solution should be to introduce a Wasm interpreter. But as a short term solution, we introduce heuristics to switch back to BBQ B3 at the sacrifice of start-up time,
if (!forceUsingB3 && Options::wasmBBQUsesAir())
parseAndCompileResult = parseAndCompileAir(m_compilationContexts[functionIndex], function.data.data(), function.data.size(), signature, m_unlinkedWasmToWasmCalls[functionIndex], m_moduleInformation.get(), m_mode, functionIndex, tierUp, m_throwWasmException);
else
- parseAndCompileResult = parseAndCompile(m_compilationContexts[functionIndex], function.data.data(), function.data.size(), signature, m_unlinkedWasmToWasmCalls[functionIndex], m_moduleInformation.get(), m_mode, CompilationMode::BBQMode, functionIndex, tierUp, m_throwWasmException);
+ parseAndCompileResult = parseAndCompile(m_compilationContexts[functionIndex], function.data.data(), function.data.size(), signature, m_unlinkedWasmToWasmCalls[functionIndex], osrEntryScratchBufferSize, m_moduleInformation.get(), m_mode, CompilationMode::BBQMode, functionIndex, UINT32_MAX, tierUp, m_throwWasmException);
if (UNLIKELY(!parseAndCompileResult)) {
auto locker = holdLock(m_lock);
return WTFMove(m_unlinkedWasmToWasmCalls);
}
- Vector<TierUpCount> takeTierUpCounts()
- {
- RELEASE_ASSERT(!failed() && !hasWork());
- return WTFMove(m_tierUpCounts);
- }
-
enum class State : uint8_t {
Initial,
Validated,
HashSet<uint32_t, typename DefaultHash<uint32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<uint32_t>> m_exportedFunctionIndices;
HashMap<uint32_t, std::unique_ptr<InternalFunction>, typename DefaultHash<uint32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<uint32_t>> m_embedderToWasmInternalFunctions;
Vector<CompilationContext> m_compilationContexts;
- Vector<TierUpCount> m_tierUpCounts;
+ Vector<std::unique_ptr<TierUpCount>> m_tierUpCounts;
Vector<Vector<UnlinkedWasmToWasmCall>> m_unlinkedWasmToWasmCalls;
State m_state;
RefPtr<Wasm::Callee> embedderEntrypointCallee;
if (auto embedderToWasmFunction = m_embedderToWasmInternalFunctions.get(internalFunctionIndex)) {
- embedderEntrypointCallee = Wasm::Callee::create(CompilationMode::BBQMode, WTFMove(embedderToWasmFunction->entrypoint));
+ embedderEntrypointCallee = Wasm::EmbedderEntrypointCallee::create(WTFMove(embedderToWasmFunction->entrypoint));
MacroAssembler::repatchPointer(embedderToWasmFunction->calleeMoveLocation, CalleeBits::boxWasm(embedderEntrypointCallee.get()));
}
InternalFunction* function = m_wasmInternalFunctions[internalFunctionIndex].get();
size_t functionIndexSpace = internalFunctionIndex + m_moduleInformation->importFunctionCount();
- Ref<Wasm::Callee> wasmEntrypointCallee = Wasm::Callee::create(CompilationMode::BBQMode, WTFMove(function->entrypoint), functionIndexSpace, m_moduleInformation->nameSection->get(functionIndexSpace));
+ Ref<Wasm::Callee> wasmEntrypointCallee = Wasm::BBQCallee::create(WTFMove(function->entrypoint), functionIndexSpace, m_moduleInformation->nameSection->get(functionIndexSpace), WTFMove(m_tierUpCounts[internalFunctionIndex]));
MacroAssembler::repatchPointer(function->calleeMoveLocation, CalleeBits::boxWasm(wasmEntrypointCallee.ptr()));
callback(internalFunctionIndex, WTFMove(embedderEntrypointCallee), WTFMove(wasmEntrypointCallee));
#include "WasmCompilationMode.h"
#include "WasmFormat.h"
#include "WasmIndexOrName.h"
+#include "WasmTierUpCount.h"
#include <wtf/ThreadSafeRefCounted.h>
namespace JSC { namespace Wasm {
-class Callee final : public ThreadSafeRefCounted<Callee> {
+class Callee : public ThreadSafeRefCounted<Callee> {
WTF_MAKE_FAST_ALLOCATED;
public:
- static Ref<Callee> create(Wasm::CompilationMode compilationMode, Wasm::Entrypoint&& entrypoint)
- {
- Callee* callee = new Callee(compilationMode, WTFMove(entrypoint));
- return adoptRef(*callee);
- }
-
- static Ref<Callee> create(Wasm::CompilationMode compilationMode, Wasm::Entrypoint&& entrypoint, size_t index, std::pair<const Name*, RefPtr<NameSection>>&& name)
- {
- Callee* callee = new Callee(compilationMode, WTFMove(entrypoint), index, WTFMove(name));
- return adoptRef(*callee);
- }
-
MacroAssemblerCodePtr<WasmEntryPtrTag> entrypoint() const { return m_entrypoint.compilation->code().retagged<WasmEntryPtrTag>(); }
RegisterAtOffsetList* calleeSaveRegisters() { return &m_entrypoint.calleeSaveRegisters; }
return { start, end };
}
- JS_EXPORT_PRIVATE ~Callee();
+ JS_EXPORT_PRIVATE virtual ~Callee();
-private:
+protected:
JS_EXPORT_PRIVATE Callee(Wasm::CompilationMode, Wasm::Entrypoint&&);
JS_EXPORT_PRIVATE Callee(Wasm::CompilationMode, Wasm::Entrypoint&&, size_t, std::pair<const Name*, RefPtr<NameSection>>&&);
+private:
CompilationMode m_compilationMode;
Wasm::Entrypoint m_entrypoint;
IndexOrName m_indexOrName;
};
+// Callee for a fully optimized (OMG-tier) function. Owns its unlinked
+// wasm-to-wasm call sites so they can be repatched when other functions
+// tier up.
+class OMGCallee final : public Callee {
+public:
+ static Ref<OMGCallee> create(Wasm::Entrypoint&& entrypoint, size_t index, std::pair<const Name*, RefPtr<NameSection>>&& name, Vector<UnlinkedWasmToWasmCall>&& unlinkedCalls)
+ {
+ return adoptRef(*new OMGCallee(WTFMove(entrypoint), index, WTFMove(name), WTFMove(unlinkedCalls)));
+ }
+
+ Vector<UnlinkedWasmToWasmCall>& wasmToWasmCallsites() { return m_wasmToWasmCallsites; }
+
+private:
+ OMGCallee(Wasm::Entrypoint&& entrypoint, size_t index, std::pair<const Name*, RefPtr<NameSection>>&& name, Vector<UnlinkedWasmToWasmCall>&& unlinkedCalls)
+ : Callee(Wasm::CompilationMode::OMGMode, WTFMove(entrypoint), index, WTFMove(name))
+ , m_wasmToWasmCallsites(WTFMove(unlinkedCalls))
+ {
+ }
+
+ Vector<UnlinkedWasmToWasmCall> m_wasmToWasmCallsites;
+};
+
+// Callee for OMG code compiled specifically for OSR entry at one loop.
+// Records which loop it enters at (loopIndex) and how many 64-bit scratch
+// slots the entry thunk must provide to pass locals/stack values in.
+class OMGForOSREntryCallee final : public Callee {
+public:
+ static Ref<OMGForOSREntryCallee> create(Wasm::Entrypoint&& entrypoint, size_t index, std::pair<const Name*, RefPtr<NameSection>>&& name, unsigned osrEntryScratchBufferSize, uint32_t loopIndex, Vector<UnlinkedWasmToWasmCall>&& unlinkedCalls)
+ {
+ return adoptRef(*new OMGForOSREntryCallee(WTFMove(entrypoint), index, WTFMove(name), osrEntryScratchBufferSize, loopIndex, WTFMove(unlinkedCalls)));
+ }
+
+ unsigned osrEntryScratchBufferSize() const { return m_osrEntryScratchBufferSize; }
+ uint32_t loopIndex() const { return m_loopIndex; }
+ Vector<UnlinkedWasmToWasmCall>& wasmToWasmCallsites() { return m_wasmToWasmCallsites; }
+
+private:
+ OMGForOSREntryCallee(Wasm::Entrypoint&& entrypoint, size_t index, std::pair<const Name*, RefPtr<NameSection>>&& name, unsigned osrEntryScratchBufferSize, uint32_t loopIndex, Vector<UnlinkedWasmToWasmCall>&& unlinkedCalls)
+ : Callee(Wasm::CompilationMode::OMGForOSREntryMode, WTFMove(entrypoint), index, WTFMove(name))
+ , m_wasmToWasmCallsites(WTFMove(unlinkedCalls))
+ , m_osrEntryScratchBufferSize(osrEntryScratchBufferSize)
+ , m_loopIndex(loopIndex)
+ {
+ }
+
+ Vector<UnlinkedWasmToWasmCall> m_wasmToWasmCallsites;
+ unsigned m_osrEntryScratchBufferSize;
+ uint32_t m_loopIndex;
+};
+
+// Callee for the embedder (e.g. JS-to-wasm) entry stub. Unnamed/unindexed:
+// it uses the Callee constructor without an index or name pair.
+class EmbedderEntrypointCallee final : public Callee {
+public:
+ static Ref<EmbedderEntrypointCallee> create(Wasm::Entrypoint&& entrypoint)
+ {
+ return adoptRef(*new EmbedderEntrypointCallee(WTFMove(entrypoint)));
+ }
+
+private:
+ EmbedderEntrypointCallee(Wasm::Entrypoint&& entrypoint)
+ : Callee(Wasm::CompilationMode::EmbedderEntrypointMode, WTFMove(entrypoint))
+ {
+ }
+};
+
+// Callee for baseline (BBQ-tier) code. Owns the function's TierUpCount and
+// holds strong references to the optimized replacements produced by tier-up:
+// a whole-function OMGCallee and/or a loop-entry OMGForOSREntryCallee.
+class BBQCallee final : public Callee {
+public:
+ static Ref<BBQCallee> create(Wasm::Entrypoint&& entrypoint, size_t index, std::pair<const Name*, RefPtr<NameSection>>&& name, std::unique_ptr<TierUpCount>&& tierUpCount)
+ {
+ return adoptRef(*new BBQCallee(WTFMove(entrypoint), index, WTFMove(name), WTFMove(tierUpCount)));
+ }
+
+ OMGForOSREntryCallee* osrEntryCallee() { return m_osrEntryCallee.get(); }
+ void setOSREntryCallee(Ref<OMGForOSREntryCallee>&& osrEntryCallee)
+ {
+ m_osrEntryCallee = WTFMove(osrEntryCallee);
+ }
+
+ // Guards against kicking off a second OSR-entry compilation for the same
+ // function.
+ bool didStartCompilingOSREntryCallee() const { return m_didStartCompilingOSREntryCallee; }
+ void setDidStartCompilingOSREntryCallee(bool value) { m_didStartCompilingOSREntryCallee = value; }
+
+ OMGCallee* replacement() { return m_replacement.get(); }
+ void setReplacement(Ref<OMGCallee>&& replacement)
+ {
+ m_replacement = WTFMove(replacement);
+ }
+
+ // May be null when BBQ tier-up checks are disabled.
+ TierUpCount* tierUpCount() { return m_tierUpCount.get(); }
+
+private:
+ BBQCallee(Wasm::Entrypoint&& entrypoint, size_t index, std::pair<const Name*, RefPtr<NameSection>>&& name, std::unique_ptr<TierUpCount>&& tierUpCount)
+ : Callee(Wasm::CompilationMode::BBQMode, WTFMove(entrypoint), index, WTFMove(name))
+ , m_tierUpCount(WTFMove(tierUpCount))
+ {
+ }
+
+ RefPtr<OMGForOSREntryCallee> m_osrEntryCallee;
+ RefPtr<OMGCallee> m_replacement;
+ std::unique_ptr<TierUpCount> m_tierUpCount;
+ bool m_didStartCompilingOSREntryCallee { false };
+};
+
} } // namespace JSC::Wasm
#endif // ENABLE(WEBASSEMBLY)
m_wasmToWasmExitStubs = m_plan->takeWasmToWasmExitStubs();
m_wasmToWasmCallsites = m_plan->takeWasmToWasmCallsites();
- m_tierUpCounts = m_plan->takeTierUpCounts();
setCompilationFinished();
}), WTFMove(createEmbedderWrapper), throwWasmException));
return *m_callees[calleeIndex].get();
}
- MacroAssemblerCodePtr<WasmEntryPtrTag>* entrypointLoadLocationFromFunctionIndexSpace(unsigned functionIndexSpace)
+ Callee& wasmBBQCalleeFromFunctionIndexSpace(unsigned functionIndexSpace)
{
+ ASSERT(runnable());
RELEASE_ASSERT(functionIndexSpace >= functionImportCount());
unsigned calleeIndex = functionIndexSpace - functionImportCount();
- return &m_wasmIndirectCallEntryPoints[calleeIndex];
+ return *m_callees[calleeIndex].get();
}
- TierUpCount& tierUpCount(uint32_t functionIndex)
+ MacroAssemblerCodePtr<WasmEntryPtrTag>* entrypointLoadLocationFromFunctionIndexSpace(unsigned functionIndexSpace)
{
- return m_tierUpCounts[functionIndex];
+ RELEASE_ASSERT(functionIndexSpace >= functionImportCount());
+ unsigned calleeIndex = functionIndexSpace - functionImportCount();
+ return &m_wasmIndirectCallEntryPoints[calleeIndex];
}
bool isSafeToRun(MemoryMode);
~CodeBlock();
private:
friend class OMGPlan;
+ friend class OMGForOSREntryPlan;
CodeBlock(Context*, MemoryMode, ModuleInformation&, CreateEmbedderWrapper&&, ThrowWasmException);
void setCompilationFinished();
Vector<RefPtr<Callee>> m_optimizedCallees;
HashMap<uint32_t, RefPtr<Callee>, typename DefaultHash<uint32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<uint32_t>> m_embedderCallees;
Vector<MacroAssemblerCodePtr<WasmEntryPtrTag>> m_wasmIndirectCallEntryPoints;
- Vector<TierUpCount> m_tierUpCounts;
Vector<Vector<UnlinkedWasmToWasmCall>> m_wasmToWasmCallsites;
Vector<MacroAssemblerCodeRef<WasmEntryPtrTag>> m_wasmToWasmExitStubs;
RefPtr<BBQPlan> m_plan;
return "BBQ";
case CompilationMode::OMGMode:
return "OMG";
+ case CompilationMode::OMGForOSREntryMode:
+ return "OMGForOSREntry";
+ case CompilationMode::EmbedderEntrypointMode:
+ return "EmbedderEntrypoint";
}
RELEASE_ASSERT_NOT_REACHED();
return "";
enum class CompilationMode : uint8_t {
BBQMode,
OMGMode,
+ OMGForOSREntryMode,
+ EmbedderEntrypointMode,
};
const char* makeString(CompilationMode);
--- /dev/null
+/*
+ * Copyright (C) 2019 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WasmContext.h"
+
+#if ENABLE(WEBASSEMBLY)
+
+namespace JSC { namespace Wasm {
+
+// Returns a buffer of at least `size` 64-bit slots for passing OSR-entry
+// values. Grows geometrically (size * 2) on demand; old buffers are kept in
+// m_scratchBuffers rather than freed — presumably so any still in use by an
+// in-flight OSR entry stay valid (TODO confirm). Thread-safe via
+// m_scratchBufferLock.
+uint64_t* Context::scratchBufferForSize(size_t size)
+{
+ if (!size)
+ return nullptr;
+
+ auto locker = holdLock(m_scratchBufferLock);
+ if (size > m_sizeOfLastScratchBuffer) {
+ m_sizeOfLastScratchBuffer = size * 2;
+
+ auto newBuffer = makeUniqueArray<uint64_t>(m_sizeOfLastScratchBuffer);
+ RELEASE_ASSERT(newBuffer);
+ m_scratchBuffers.append(WTFMove(newBuffer));
+ }
+ // Scanning scratch buffers for GC is not necessary since while performing OSR entry, we do not perform GC.
+ return m_scratchBuffers.last().get();
+}
+
+} } // namespace JSC::Wasm
+
+#endif // ENABLE(WEBASSEMBLY)
#if ENABLE(WEBASSEMBLY)
+#include <wtf/Lock.h>
+#include <wtf/UniqueArray.h>
+#include <wtf/Vector.h>
+
namespace JSC { namespace Wasm {
class Instance;
return &instance;
}
+ static Instance* tryLoadInstanceFromTLS();
+
+ uint64_t* scratchBufferForSize(size_t numberOfSlots);
+
private:
Instance* instance { nullptr };
+ Vector<UniqueArray<uint64_t>> m_scratchBuffers;
+ size_t m_sizeOfLastScratchBuffer { 0 };
+ Lock m_scratchBufferLock;
};
} } // namespace JSC::Wasm
instance = inst;
}
+// Returns the current wasm Instance from fast TLS when that mechanism is
+// compiled in and enabled; otherwise nullptr (caller must fall back to the
+// pinned-register / Context-field path).
+inline Instance* Context::tryLoadInstanceFromTLS()
+{
+#if ENABLE(FAST_TLS_JIT)
+ if (useFastTLS())
+ return bitwise_cast<Instance*>(_pthread_getspecific_direct(WTF_WASM_CONTEXT_KEY));
+#endif
+ return nullptr;
+}
+
} } // namespace JSC::Wasm
#endif // ENABLE(WEBASSEMBLY)
template<typename Context>
class FunctionParser : public Parser<void> {
public:
- typedef typename Context::ExpressionType ExpressionType;
- typedef typename Context::ControlType ControlType;
- typedef typename Context::ExpressionList ExpressionList;
+ using ExpressionType = typename Context::ExpressionType;
+ using ControlType = typename Context::ControlType;
+ using ExpressionList = typename Context::ExpressionList;
+ using Stack = typename Context::Stack;
FunctionParser(Context&, const uint8_t* functionStart, size_t functionLength, const Signature&, const ModuleInformation&);
Result WARN_UNUSED_RETURN parse();
struct ControlEntry {
- ExpressionList enclosedExpressionStack;
+ Stack enclosedExpressionStack;
ControlType controlData;
};
// FIXME add a macro as above for WASM_TRY_APPEND_TO_CONTROL_STACK https://bugs.webkit.org/show_bug.cgi?id=165862
Context& m_context;
- ExpressionList m_expressionStack;
+ Stack m_expressionStack;
Vector<ControlEntry> m_controlStack;
const Signature& m_signature;
const ModuleInformation& m_info;
Vector<ExpressionType, 8> m_toKillAfterExpression;
unsigned m_unreachableBlocks { 0 };
+ unsigned m_loopIndex { 0 };
};
template<typename Context>
FunctionParser<Context>::FunctionParser(Context& context, const uint8_t* functionStart, size_t functionLength, const Signature& signature, const ModuleInformation& info)
: Parser(functionStart, functionLength)
, m_context(context)
+ , m_expressionStack(context.createStack())
, m_signature(signature)
, m_info(info)
{
template<typename Context>
auto FunctionParser<Context>::parseBody() -> PartialResult
{
- m_controlStack.append({ ExpressionList(), m_context.addTopLevel(m_signature.returnType()) });
+ m_controlStack.append({ m_context.createStack(), m_context.addTopLevel(m_signature.returnType()) });
uint8_t op;
while (m_controlStack.size()) {
ASSERT(m_toKillAfterExpression.isEmpty());
Vector<ExpressionType> args;
WASM_PARSER_FAIL_IF(!args.tryReserveCapacity(calleeSignature.argumentCount()), "can't allocate enough memory for call's ", calleeSignature.argumentCount(), " arguments");
for (size_t i = firstArgumentIndex; i < m_expressionStack.size(); ++i)
- args.uncheckedAppend(m_expressionStack[i]);
+ args.uncheckedAppend(m_expressionStack.at(i));
m_expressionStack.shrink(firstArgumentIndex);
ExpressionType result = Context::emptyExpression();
WASM_PARSER_FAIL_IF(!args.tryReserveCapacity(argumentCount), "can't allocate enough memory for ", argumentCount, " call_indirect arguments");
size_t firstArgumentIndex = m_expressionStack.size() - argumentCount;
for (size_t i = firstArgumentIndex; i < m_expressionStack.size(); ++i)
- args.uncheckedAppend(m_expressionStack[i]);
+ args.uncheckedAppend(m_expressionStack.at(i));
m_expressionStack.shrink(firstArgumentIndex);
ExpressionType result = Context::emptyExpression();
Type inlineSignature;
WASM_PARSER_FAIL_IF(!parseResultType(inlineSignature), "can't get block's inline signature");
m_controlStack.append({ WTFMove(m_expressionStack), m_context.addBlock(inlineSignature) });
- m_expressionStack = ExpressionList();
+ m_expressionStack = m_context.createStack();
return { };
}
case Loop: {
Type inlineSignature;
WASM_PARSER_FAIL_IF(!parseResultType(inlineSignature), "can't get loop's inline signature");
- m_controlStack.append({ WTFMove(m_expressionStack), m_context.addLoop(inlineSignature) });
- m_expressionStack = ExpressionList();
+ auto expressionStack = WTFMove(m_expressionStack);
+ auto loop = m_context.addLoop(inlineSignature, expressionStack, m_loopIndex++);
+ m_controlStack.append({ expressionStack, loop });
+ m_expressionStack = m_context.createStack();
return { };
}
WASM_TRY_POP_EXPRESSION_STACK_INTO(condition, "if condition");
WASM_TRY_ADD_TO_CONTEXT(addIf(condition, inlineSignature, control));
m_controlStack.append({ WTFMove(m_expressionStack), control });
- m_expressionStack = ExpressionList();
+ m_expressionStack = m_context.createStack();
return { };
}
--- /dev/null
+/*
+ * Copyright (C) 2019 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WasmOMGForOSREntryPlan.h"
+
+#if ENABLE(WEBASSEMBLY)
+
+#include "B3Compilation.h"
+#include "B3OpaqueByproducts.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "WasmB3IRGenerator.h"
+#include "WasmCallee.h"
+#include "WasmContext.h"
+#include "WasmInstance.h"
+#include "WasmMachineThreads.h"
+#include "WasmMemory.h"
+#include "WasmNameSection.h"
+#include "WasmSignatureInlines.h"
+#include "WasmValidate.h"
+#include "WasmWorklist.h"
+#include <wtf/DataLog.h>
+#include <wtf/Locker.h>
+#include <wtf/MonotonicTime.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/ThreadMessage.h>
+
+namespace JSC { namespace Wasm {
+
+namespace WasmOMGForOSREntryPlanInternal {
+static const bool verbose = false;
+}
+
+OMGForOSREntryPlan::OMGForOSREntryPlan(Context* context, Ref<Module>&& module, Ref<BBQCallee>&& callee, uint32_t functionIndex, uint32_t loopIndex, MemoryMode mode, CompletionTask&& task)
+ : Base(context, makeRef(const_cast<ModuleInformation&>(module->moduleInformation())), WTFMove(task))
+ , m_module(WTFMove(module))
+ , m_codeBlock(*m_module->codeBlockFor(mode))
+ , m_callee(WTFMove(callee))
+ , m_functionIndex(functionIndex)
+ , m_loopIndex(loopIndex)
+{
+ setMode(mode);
+ ASSERT(m_codeBlock->runnable());
+ ASSERT(m_codeBlock.ptr() == m_module->codeBlockFor(m_mode));
+ dataLogLnIf(WasmOMGForOSREntryPlanInternal::verbose, "Starting OMGForOSREntry plan for ", functionIndex, " of module: ", RawPointer(&m_module.get()));
+}
+
+void OMGForOSREntryPlan::work(CompilationEffort)
+{
+ ASSERT(m_codeBlock->runnable());
+ ASSERT(m_codeBlock.ptr() == m_module->codeBlockFor(mode()));
+ const FunctionData& function = m_moduleInformation->functions[m_functionIndex];
+
+ const uint32_t functionIndexSpace = m_functionIndex + m_module->moduleInformation().importFunctionCount();
+ ASSERT(functionIndexSpace < m_module->moduleInformation().functionIndexSpaceSize());
+
+ SignatureIndex signatureIndex = m_moduleInformation->internalFunctionSignatureIndices[m_functionIndex];
+ const Signature& signature = SignatureInformation::get(signatureIndex);
+ ASSERT(validateFunction(function.data.data(), function.data.size(), signature, m_moduleInformation.get()));
+
+ Vector<UnlinkedWasmToWasmCall> unlinkedCalls;
+ CompilationContext context;
+ unsigned osrEntryScratchBufferSize = 0;
+ auto parseAndCompileResult = parseAndCompile(context, function.data.data(), function.data.size(), signature, unlinkedCalls, osrEntryScratchBufferSize, m_moduleInformation.get(), m_mode, CompilationMode::OMGForOSREntryMode, m_functionIndex, m_loopIndex);
+
+ if (UNLIKELY(!parseAndCompileResult)) {
+ fail(holdLock(m_lock), makeString(parseAndCompileResult.error(), "when trying to tier up ", String::number(m_functionIndex)));
+ return;
+ }
+
+ Entrypoint omgEntrypoint;
+ LinkBuffer linkBuffer(*context.wasmEntrypointJIT, nullptr, JITCompilationCanFail);
+ if (UNLIKELY(linkBuffer.didFailToAllocate())) {
+ Base::fail(holdLock(m_lock), makeString("Out of executable memory while tiering up function at index ", String::number(m_functionIndex)));
+ return;
+ }
+
+ omgEntrypoint.compilation = makeUnique<B3::Compilation>(
+ FINALIZE_CODE(linkBuffer, B3CompilationPtrTag, "WebAssembly OMGForOSREntry function[%i] %s name %s", m_functionIndex, signature.toString().ascii().data(), makeString(IndexOrName(functionIndexSpace, m_moduleInformation->nameSection->get(functionIndexSpace))).ascii().data()),
+ WTFMove(context.wasmEntrypointByproducts));
+
+ omgEntrypoint.calleeSaveRegisters = WTFMove(parseAndCompileResult.value()->entrypoint.calleeSaveRegisters);
+
+ MacroAssemblerCodePtr<WasmEntryPtrTag> entrypoint;
+ ASSERT(m_codeBlock.ptr() == m_module->codeBlockFor(mode()));
+ Ref<OMGForOSREntryCallee> callee = OMGForOSREntryCallee::create(WTFMove(omgEntrypoint), functionIndexSpace, m_moduleInformation->nameSection->get(functionIndexSpace), osrEntryScratchBufferSize, m_loopIndex, WTFMove(unlinkedCalls));
+ {
+ MacroAssembler::repatchPointer(parseAndCompileResult.value()->calleeMoveLocation, CalleeBits::boxWasm(callee.ptr()));
+ entrypoint = callee->entrypoint();
+
+ auto locker = holdLock(m_codeBlock->m_lock);
+ for (auto& call : callee->wasmToWasmCallsites()) {
+ MacroAssemblerCodePtr<WasmEntryPtrTag> entrypoint;
+ if (call.functionIndexSpace < m_module->moduleInformation().importFunctionCount())
+ entrypoint = m_codeBlock->m_wasmToWasmExitStubs[call.functionIndexSpace].code();
+ else
+ entrypoint = m_codeBlock->wasmEntrypointCalleeFromFunctionIndexSpace(call.functionIndexSpace).entrypoint().retagged<WasmEntryPtrTag>();
+
+ MacroAssembler::repatchNearCall(call.callLocation, CodeLocationLabel<WasmEntryPtrTag>(entrypoint));
+ }
+ }
+ resetInstructionCacheOnAllThreads();
+ WTF::storeStoreFence();
+ {
+ auto locker = holdLock(m_codeBlock->m_lock);
+ {
+ auto locker = holdLock(m_callee->tierUpCount()->getLock());
+ m_callee->setOSREntryCallee(callee.copyRef());
+ m_callee->tierUpCount()->osrEntryTriggers()[m_loopIndex] = TierUpCount::TriggerReason::CompilationDone;
+ m_callee->tierUpCount()->m_compilationStatusForOMGForOSREntry = TierUpCount::CompilationStatus::Compiled;
+ }
+ WTF::storeStoreFence();
+ // It is possible that a new OMG callee is added while we release m_codeBlock->lock.
+ // Until we add OMGForOSREntry callee to BBQCallee's m_osrEntryCallee, this new OMG function linking does not happen for this OMGForOSREntry callee.
+ // We re-link this OMGForOSREntry callee again so that we do not miss that chance.
+ for (auto& call : callee->wasmToWasmCallsites()) {
+ MacroAssemblerCodePtr<WasmEntryPtrTag> entrypoint;
+ if (call.functionIndexSpace < m_module->moduleInformation().importFunctionCount())
+ entrypoint = m_codeBlock->m_wasmToWasmExitStubs[call.functionIndexSpace].code();
+ else
+ entrypoint = m_codeBlock->wasmEntrypointCalleeFromFunctionIndexSpace(call.functionIndexSpace).entrypoint().retagged<WasmEntryPtrTag>();
+
+ MacroAssembler::repatchNearCall(call.callLocation, CodeLocationLabel<WasmEntryPtrTag>(entrypoint));
+ }
+ }
+ dataLogLnIf(WasmOMGForOSREntryPlanInternal::verbose, "Finished OMGForOSREntry ", m_functionIndex, " with tier up count at: ", m_callee->tierUpCount()->count());
+ complete(holdLock(m_lock));
+}
+
+} } // namespace JSC::Wasm
+
+#endif // ENABLE(WEBASSEMBLY)
--- /dev/null
+/*
+ * Copyright (C) 2019 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(WEBASSEMBLY)
+
+#include "WasmContext.h"
+#include "WasmModule.h"
+#include "WasmOperations.h"
+#include "WasmPlan.h"
+
+namespace JSC {
+
+class BBQCallee;
+class CallLinkInfo;
+
+namespace Wasm {
+
+class OMGForOSREntryPlan final : public Plan {
+public:
+ using Base = Plan;
+
+ bool hasWork() const override { return !m_completed; }
+ void work(CompilationEffort) override;
+ bool multiThreaded() const override { return false; }
+
+ // Note: CompletionTask should not hold a reference to the Plan otherwise there will be a reference cycle.
+ OMGForOSREntryPlan(Context*, Ref<Module>&&, Ref<BBQCallee>&&, uint32_t functionIndex, uint32_t loopIndex, MemoryMode, CompletionTask&&);
+
+private:
+ // For some reason friendship doesn't extend to parent classes...
+ using Base::m_lock;
+
+ bool isComplete() const override { return m_completed; }
+ void complete(const AbstractLocker& locker) override
+ {
+ m_completed = true;
+ runCompletionTasks(locker);
+ }
+
+ Ref<Module> m_module;
+ Ref<CodeBlock> m_codeBlock;
+ Ref<BBQCallee> m_callee;
+ bool m_completed { false };
+ uint32_t m_functionIndex;
+ uint32_t m_loopIndex;
+};
+
+} } // namespace JSC::Wasm
+
+#endif // ENABLE(WEBASSEMBLY)
ASSERT(validateFunction(function.data.data(), function.data.size(), signature, m_moduleInformation.get()));
Vector<UnlinkedWasmToWasmCall> unlinkedCalls;
+ unsigned osrEntryScratchBufferSize;
CompilationContext context;
- auto parseAndCompileResult = parseAndCompile(context, function.data.data(), function.data.size(), signature, unlinkedCalls, m_moduleInformation.get(), m_mode, CompilationMode::OMGMode, m_functionIndex);
+ auto parseAndCompileResult = parseAndCompile(context, function.data.data(), function.data.size(), signature, unlinkedCalls, osrEntryScratchBufferSize, m_moduleInformation.get(), m_mode, CompilationMode::OMGMode, m_functionIndex, UINT32_MAX);
if (UNLIKELY(!parseAndCompileResult)) {
fail(holdLock(m_lock), makeString(parseAndCompileResult.error(), "when trying to tier up ", String::number(m_functionIndex)));
MacroAssemblerCodePtr<WasmEntryPtrTag> entrypoint;
{
ASSERT(m_codeBlock.ptr() == m_module->codeBlockFor(mode()));
- Ref<Callee> callee = Callee::create(CompilationMode::OMGMode, WTFMove(omgEntrypoint), functionIndexSpace, m_moduleInformation->nameSection->get(functionIndexSpace));
+ Ref<OMGCallee> callee = OMGCallee::create(WTFMove(omgEntrypoint), functionIndexSpace, m_moduleInformation->nameSection->get(functionIndexSpace), WTFMove(unlinkedCalls));
MacroAssembler::repatchPointer(parseAndCompileResult.value()->calleeMoveLocation, CalleeBits::boxWasm(callee.ptr()));
ASSERT(!m_codeBlock->m_optimizedCallees[m_functionIndex]);
entrypoint = callee->entrypoint();
// will update. It's also ok if they publish their code before we reset the instruction caches because after we release
// the lock our code is ready to be published too.
LockHolder holder(m_codeBlock->m_lock);
- m_codeBlock->m_optimizedCallees[m_functionIndex] = WTFMove(callee);
-
- for (auto& call : unlinkedCalls) {
+ m_codeBlock->m_optimizedCallees[m_functionIndex] = callee.copyRef();
+ {
+ BBQCallee& bbqCallee = *static_cast<BBQCallee*>(m_codeBlock->m_callees[m_functionIndex].get());
+ auto locker = holdLock(bbqCallee.tierUpCount()->getLock());
+ bbqCallee.setReplacement(callee.copyRef());
+ bbqCallee.tierUpCount()->m_compilationStatusForOMG = TierUpCount::CompilationStatus::Compiled;
+ }
+ for (auto& call : callee->wasmToWasmCallsites()) {
MacroAssemblerCodePtr<WasmEntryPtrTag> entrypoint;
if (call.functionIndexSpace < m_module->moduleInformation().importFunctionCount())
entrypoint = m_codeBlock->m_wasmToWasmExitStubs[call.functionIndexSpace].code();
MacroAssembler::repatchNearCall(call.callLocation, CodeLocationLabel<WasmEntryPtrTag>(entrypoint));
}
- unlinkedCalls = std::exchange(m_codeBlock->m_wasmToWasmCallsites[m_functionIndex], unlinkedCalls);
}
// It's important to make sure we do this before we make any of the code we just compiled visible. If we didn't, we could end up
MacroAssembler::repatchNearCall(call.callLocation, CodeLocationLabel<WasmEntryPtrTag>(entrypoint));
}
}
-
};
for (unsigned i = 0; i < m_codeBlock->m_wasmToWasmCallsites.size(); ++i) {
- if (i != functionIndexSpace)
- repatchCalls(m_codeBlock->m_wasmToWasmCallsites[i]);
+ repatchCalls(m_codeBlock->m_wasmToWasmCallsites[i]);
+ if (OMGCallee* replacementCallee = static_cast<BBQCallee*>(m_codeBlock->m_callees[i].get())->replacement())
+ repatchCalls(replacementCallee->wasmToWasmCallsites());
+ if (OMGForOSREntryCallee* osrEntryCallee = static_cast<BBQCallee*>(m_codeBlock->m_callees[i].get())->osrEntryCallee())
+ repatchCalls(osrEntryCallee->wasmToWasmCallsites());
}
-
- // Make sure we repatch any recursive calls.
- repatchCalls(unlinkedCalls);
}
- dataLogLnIf(WasmOMGPlanInternal::verbose, "Finished with tier up count at: ", m_codeBlock->tierUpCount(m_functionIndex).count());
+ dataLogLnIf(WasmOMGPlanInternal::verbose, "Finished OMG ", m_functionIndex, " with tier up count at: ", static_cast<BBQCallee*>(m_codeBlock->m_callees[m_functionIndex].get())->tierUpCount()->count());
complete(holdLock(m_lock));
}
-void OMGPlan::runForIndex(Instance* instance, uint32_t functionIndex)
-{
- Wasm::CodeBlock& codeBlock = *instance->codeBlock();
- ASSERT(instance->memory()->mode() == codeBlock.mode());
-
- if (codeBlock.tierUpCount(functionIndex).shouldStartTierUp()) {
- Ref<Plan> plan = adoptRef(*new OMGPlan(instance->context(), Ref<Wasm::Module>(instance->module()), functionIndex, codeBlock.mode(), Plan::dontFinalize()));
- ensureWorklist().enqueue(plan.copyRef());
- if (UNLIKELY(!Options::useConcurrentJIT()))
- plan->waitForCompletion();
- }
-}
-
} } // namespace JSC::Wasm
#endif // ENABLE(WEBASSEMBLY)
void work(CompilationEffort) override;
bool multiThreaded() const override { return false; }
- static void runForIndex(Instance*, uint32_t functionIndex);
+ // Note: CompletionTask should not hold a reference to the Plan otherwise there will be a reference cycle.
+ OMGPlan(Context*, Ref<Module>&&, uint32_t functionIndex, MemoryMode, CompletionTask&&);
private:
// For some reason friendship doesn't extend to parent classes...
using Base::m_lock;
- // Note: CompletionTask should not hold a reference to the Plan otherwise there will be a reference cycle.
- OMGPlan(Context*, Ref<Module>&&, uint32_t functionIndex, MemoryMode, CompletionTask&&);
-
bool isComplete() const override { return m_completed; }
void complete(const AbstractLocker& locker) override
{
--- /dev/null
+/*
+ * Copyright (C) 2019 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(WEBASSEMBLY)
+
+#include "B3ValueRep.h"
+#include "WasmFormat.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace Wasm {
+
+class OSREntryValue : public B3::ValueRep {
+public:
+ OSREntryValue(const B3::ValueRep& valueRep, B3::Type type)
+ : B3::ValueRep(valueRep)
+ , m_type(type)
+ {
+ }
+
+ B3::Type type() const { return m_type; }
+
+private:
+ B3::Type m_type;
+};
+
+class OSREntryData {
+ WTF_MAKE_NONCOPYABLE(OSREntryData);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ OSREntryData(uint32_t functionIndex, uint32_t loopIndex)
+ : m_functionIndex(functionIndex)
+ , m_loopIndex(loopIndex)
+ {
+ }
+
+ uint32_t functionIndex() const { return m_functionIndex; }
+ uint32_t loopIndex() const { return m_loopIndex; }
+ Vector<OSREntryValue>& values() { return m_values; }
+
+private:
+ uint32_t m_functionIndex;
+ uint32_t m_loopIndex;
+ Vector<OSREntryValue> m_values;
+};
+
+} } // namespace JSC::Wasm
+
+#endif // ENABLE(WEBASSEMBLY)
--- /dev/null
+/*
+ * Copyright (C) 2019 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WasmOperations.h"
+
+#if ENABLE(WEBASSEMBLY)
+
+#include "ProbeContext.h"
+#include "WasmCallee.h"
+#include "WasmContextInlines.h"
+#include "WasmInstance.h"
+#include "WasmMemory.h"
+#include "WasmNameSection.h"
+#include "WasmOMGForOSREntryPlan.h"
+#include "WasmOMGPlan.h"
+#include "WasmOSREntryData.h"
+#include "WasmSignatureInlines.h"
+#include "WasmWorklist.h"
+#include <wtf/DataLog.h>
+#include <wtf/Locker.h>
+#include <wtf/MonotonicTime.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC { namespace Wasm {
+
+static bool shouldTriggerOMGCompile(TierUpCount& tierUp, OMGCallee* replacement, uint32_t functionIndex)
+{
+ if (!replacement && !tierUp.checkIfOptimizationThresholdReached()) {
+ dataLogLnIf(Options::verboseOSR(), "delayOMGCompile counter = ", tierUp, " for ", functionIndex);
+ dataLogLnIf(Options::verboseOSR(), "Choosing not to OMG-optimize ", functionIndex, " yet.");
+ return false;
+ }
+ return true;
+}
+
+static void triggerOMGReplacementCompile(TierUpCount& tierUp, OMGCallee* replacement, Instance* instance, Wasm::CodeBlock& codeBlock, uint32_t functionIndex)
+{
+ if (replacement) {
+ tierUp.optimizeSoon(functionIndex);
+ return;
+ }
+
+ bool compile = false;
+ {
+ auto locker = holdLock(tierUp.getLock());
+ switch (tierUp.m_compilationStatusForOMG) {
+ case TierUpCount::CompilationStatus::StartCompilation:
+ tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
+ return;
+ case TierUpCount::CompilationStatus::NotCompiled:
+ compile = true;
+ tierUp.m_compilationStatusForOMG = TierUpCount::CompilationStatus::StartCompilation;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (compile) {
+ dataLogLnIf(Options::verboseOSR(), "triggerOMGReplacement for ", functionIndex);
+ // We need to compile the code.
+ Ref<Plan> plan = adoptRef(*new OMGPlan(instance->context(), Ref<Wasm::Module>(instance->module()), functionIndex, codeBlock.mode(), Plan::dontFinalize()));
+ ensureWorklist().enqueue(plan.copyRef());
+ if (UNLIKELY(!Options::useConcurrentJIT()))
+ plan->waitForCompletion();
+ else
+ tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
+ }
+}
+
+SUPPRESS_ASAN
+static void doOSREntry(Instance* instance, Probe::Context& context, BBQCallee& callee, OMGForOSREntryCallee& osrEntryCallee, OSREntryData& osrEntryData)
+{
+ auto returnWithoutOSREntry = [&] {
+ context.gpr(GPRInfo::argumentGPR0) = 0;
+ };
+
+ uint64_t* buffer = instance->context()->scratchBufferForSize(osrEntryCallee.osrEntryScratchBufferSize());
+ if (!buffer)
+ return returnWithoutOSREntry();
+
+ dataLogLnIf(Options::verboseOSR(), osrEntryData.functionIndex(), ":OMG OSR entry: got entry callee ", RawPointer(&osrEntryCallee));
+
+ // 1. Place required values in scratch buffer.
+ for (unsigned index = 0; index < osrEntryData.values().size(); ++index) {
+ const OSREntryValue& value = osrEntryData.values()[index];
+ dataLogLnIf(Options::verboseOSR(), "OMG OSR entry values[", index, "] ", value.type(), " ", value);
+ if (value.isGPR()) {
+ switch (value.type().kind()) {
+ case B3::Float:
+ case B3::Double:
+ RELEASE_ASSERT_NOT_REACHED();
+ default:
+ *bitwise_cast<uint64_t*>(buffer + index) = context.gpr(value.gpr());
+ }
+ } else if (value.isFPR()) {
+ switch (value.type().kind()) {
+ case B3::Float:
+ case B3::Double:
+ *bitwise_cast<double*>(buffer + index) = context.fpr(value.fpr());
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ } else if (value.isConstant()) {
+ switch (value.type().kind()) {
+ case B3::Float:
+ *bitwise_cast<float*>(buffer + index) = value.floatValue();
+ break;
+ case B3::Double:
+ *bitwise_cast<double*>(buffer + index) = value.doubleValue();
+ break;
+ default:
+ *bitwise_cast<uint64_t*>(buffer + index) = value.value();
+ }
+ } else if (value.isStack()) {
+ switch (value.type().kind()) {
+ case B3::Float:
+ *bitwise_cast<float*>(buffer + index) = *bitwise_cast<float*>(bitwise_cast<uint8_t*>(context.fp()) + value.offsetFromFP());
+ break;
+ case B3::Double:
+ *bitwise_cast<double*>(buffer + index) = *bitwise_cast<double*>(bitwise_cast<uint8_t*>(context.fp()) + value.offsetFromFP());
+ break;
+ default:
+ *bitwise_cast<uint64_t*>(buffer + index) = *bitwise_cast<uint64_t*>(bitwise_cast<uint8_t*>(context.fp()) + value.offsetFromFP());
+ break;
+ }
+ } else if (value.isStackArgument()) {
+ switch (value.type().kind()) {
+ case B3::Float:
+ *bitwise_cast<float*>(buffer + index) = *bitwise_cast<float*>(bitwise_cast<uint8_t*>(context.sp()) + value.offsetFromSP());
+ break;
+ case B3::Double:
+ *bitwise_cast<double*>(buffer + index) = *bitwise_cast<double*>(bitwise_cast<uint8_t*>(context.sp()) + value.offsetFromSP());
+ break;
+ default:
+ *bitwise_cast<uint64_t*>(buffer + index) = *bitwise_cast<uint64_t*>(bitwise_cast<uint8_t*>(context.sp()) + value.offsetFromSP());
+ break;
+ }
+ } else
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ // 2. Restore callee saves.
+ RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
+ for (const RegisterAtOffset& entry : *callee.calleeSaveRegisters()) {
+ if (dontRestoreRegisters.get(entry.reg()))
+ continue;
+ if (entry.reg().isGPR())
+ context.gpr(entry.reg().gpr()) = *bitwise_cast<UCPURegister*>(bitwise_cast<uint8_t*>(context.fp()) + entry.offset());
+ else
+ context.fpr(entry.reg().fpr()) = *bitwise_cast<double*>(bitwise_cast<uint8_t*>(context.fp()) + entry.offset());
+ }
+
+ // 3. Function epilogue, like a tail-call.
+ UCPURegister* framePointer = bitwise_cast<UCPURegister*>(context.fp());
+#if CPU(X86_64)
+ // move(framePointerRegister, stackPointerRegister);
+ // pop(framePointerRegister);
+ context.fp() = bitwise_cast<UCPURegister*>(*framePointer);
+ context.sp() = framePointer + 1;
+ static_assert(AssemblyHelpers::prologueStackPointerDelta() == sizeof(void*) * 1);
+#elif CPU(ARM64E) || CPU(ARM64)
+ // move(framePointerRegister, stackPointerRegister);
+ // popPair(framePointerRegister, linkRegister);
+ context.fp() = bitwise_cast<UCPURegister*>(*framePointer);
+ context.gpr(ARM64Registers::lr) = bitwise_cast<UCPURegister>(*(framePointer + 1));
+ context.sp() = framePointer + 2;
+ static_assert(AssemblyHelpers::prologueStackPointerDelta() == sizeof(void*) * 2);
+#if CPU(ARM64E)
+ // LR needs to be untagged since OSR entry function prologue will tag it with SP. This is similar to tail-call.
+ context.gpr(ARM64Registers::lr) = bitwise_cast<UCPURegister>(untagCodePtr(context.gpr<void*>(ARM64Registers::lr), bitwise_cast<PtrTag>(context.sp())));
+#endif
+#else
+#error Unsupported architecture.
+#endif
+ // 4. Configure argument registers to jump to OSR entry from the caller of this runtime function.
+ context.gpr(GPRInfo::argumentGPR0) = bitwise_cast<UCPURegister>(buffer);
+ context.gpr(GPRInfo::argumentGPR1) = bitwise_cast<UCPURegister>(osrEntryCallee.entrypoint().executableAddress<>());
+}
+
+void JIT_OPERATION triggerOSREntryNow(Probe::Context& context)
+{
+ OSREntryData& osrEntryData = *context.arg<OSREntryData*>();
+ uint32_t functionIndex = osrEntryData.functionIndex();
+ uint32_t loopIndex = osrEntryData.loopIndex();
+ Instance* instance = Wasm::Context::tryLoadInstanceFromTLS();
+ if (!instance)
+ instance = context.gpr<Instance*>(Wasm::PinnedRegisterInfo::get().wasmContextInstancePointer);
+
+ auto returnWithoutOSREntry = [&] {
+ context.gpr(GPRInfo::argumentGPR0) = 0;
+ };
+
+ Wasm::CodeBlock& codeBlock = *instance->codeBlock();
+ ASSERT(instance->memory()->mode() == codeBlock.mode());
+
+ uint32_t functionIndexInSpace = functionIndex + codeBlock.functionImportCount();
+ ASSERT(codeBlock.wasmBBQCalleeFromFunctionIndexSpace(functionIndexInSpace).compilationMode() == Wasm::CompilationMode::BBQMode);
+ BBQCallee& callee = static_cast<BBQCallee&>(codeBlock.wasmBBQCalleeFromFunctionIndexSpace(functionIndexInSpace));
+ TierUpCount& tierUp = *callee.tierUpCount();
+ dataLogLnIf(Options::verboseOSR(), "Consider OMGForOSREntryPlan for [", functionIndex, "] loopIndex#", loopIndex, " with executeCounter = ", tierUp, " ", RawPointer(callee.replacement()));
+
+ if (!Options::useWebAssemblyOSR()) {
+ if (shouldTriggerOMGCompile(tierUp, callee.replacement(), functionIndex))
+ triggerOMGReplacementCompile(tierUp, callee.replacement(), instance, codeBlock, functionIndex);
+
+ // We already have an OMG replacement.
+ if (callee.replacement()) {
+ // No OSR entry points. Just defer indefinitely.
+ if (tierUp.osrEntryTriggers().isEmpty()) {
+ tierUp.dontOptimizeAnytimeSoon(functionIndex);
+ return;
+ }
+
+ // Found one OSR entry point. Since we do not have a way to jettison a Wasm::Callee right now, the tierUp function is now meaningless.
+ // Avoid calling it as much as possible.
+ if (callee.osrEntryCallee()) {
+ tierUp.dontOptimizeAnytimeSoon(functionIndex);
+ return;
+ }
+ }
+ return returnWithoutOSREntry();
+ }
+
+ TierUpCount::CompilationStatus compilationStatus = TierUpCount::CompilationStatus::NotCompiled;
+ {
+ auto locker = holdLock(tierUp.getLock());
+ compilationStatus = tierUp.m_compilationStatusForOMGForOSREntry;
+ }
+
+ bool triggeredSlowPathToStartCompilation = false;
+ switch (tierUp.osrEntryTriggers()[loopIndex]) {
+ case TierUpCount::TriggerReason::DontTrigger:
+ // The trigger isn't set; we entered because the counter reached its
+ // threshold.
+ break;
+ case TierUpCount::TriggerReason::CompilationDone:
+ // The trigger was set because compilation completed. Don't unset it
+ // so that further BBQ executions OSR enter as well.
+ break;
+ case TierUpCount::TriggerReason::StartCompilation: {
+ // We were asked to enter as soon as possible and start compiling an
+ // entry for the current loopIndex. Unset this trigger so we
+ // don't continually enter.
+ auto locker = holdLock(tierUp.getLock());
+ TierUpCount::TriggerReason reason = tierUp.osrEntryTriggers()[loopIndex];
+ if (reason == TierUpCount::TriggerReason::StartCompilation) {
+ tierUp.osrEntryTriggers()[loopIndex] = TierUpCount::TriggerReason::DontTrigger;
+ triggeredSlowPathToStartCompilation = true;
+ }
+ break;
+ }
+ }
+
+ if (compilationStatus == TierUpCount::CompilationStatus::StartCompilation) {
+ dataLogLnIf(Options::verboseOSR(), "delayOMGCompile still compiling for ", functionIndex);
+ tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
+ return returnWithoutOSREntry();
+ }
+
+ if (OMGForOSREntryCallee* osrEntryCallee = callee.osrEntryCallee()) {
+ if (osrEntryCallee->loopIndex() == loopIndex)
+ return doOSREntry(instance, context, callee, *osrEntryCallee, osrEntryData);
+ }
+
+ if (!shouldTriggerOMGCompile(tierUp, callee.replacement(), functionIndex) && !triggeredSlowPathToStartCompilation)
+ return returnWithoutOSREntry();
+
+ if (!triggeredSlowPathToStartCompilation) {
+ triggerOMGReplacementCompile(tierUp, callee.replacement(), instance, codeBlock, functionIndex);
+
+ if (!callee.replacement())
+ return returnWithoutOSREntry();
+ }
+
+ if (OMGForOSREntryCallee* osrEntryCallee = callee.osrEntryCallee()) {
+ if (osrEntryCallee->loopIndex() == loopIndex)
+ return doOSREntry(instance, context, callee, *osrEntryCallee, osrEntryData);
+ tierUp.dontOptimizeAnytimeSoon(functionIndex);
+ return returnWithoutOSREntry();
+ }
+
+ // Instead of triggering OSR entry compilation in the inner loop, make the outer loop's trigger immediately effective (setting TriggerReason::StartCompilation) and
+ // let outer loop attempt to compile.
+ if (!triggeredSlowPathToStartCompilation) {
+ // An inner loop didn't specifically ask for us to kick off a compilation. This means the counter
+ // crossed its threshold. We either fall through and kick off a compile for originBytecodeIndex,
+ // or we flag an outer loop to immediately try to compile itself. If there are outer loops,
+ // we first try to make them compile themselves. But we will eventually fall back to compiling
+ // a progressively inner loop if it takes too long for control to reach an outer loop.
+
+ auto tryTriggerOuterLoopToCompile = [&] {
+ // We start with the outermost loop and make our way inwards (hence why we iterate the vector in reverse).
+ // Our policy is that we will trigger an outer loop to compile immediately when program control reaches it.
+ // If program control is taking too long to reach that outer loop, we progressively move inwards, meaning,
+ // we'll eventually trigger some loop that is executing to compile. We start with trying to compile outer
+ // loops since we believe outer loop compilations reveal the best opportunities for optimizing code.
+ uint32_t currentLoopIndex = tierUp.outerLoops()[loopIndex];
+ auto locker = holdLock(tierUp.getLock());
+
+ // We already started OMGForOSREntryPlan.
+ if (callee.didStartCompilingOSREntryCallee())
+ return false;
+
+ while (currentLoopIndex != UINT32_MAX) {
+ if (tierUp.osrEntryTriggers()[currentLoopIndex] == TierUpCount::TriggerReason::StartCompilation) {
+ // This means that we already asked this loop to compile. If we've reached here, it
+ // means program control has not yet reached that loop. So it's taking too long to compile.
+ // So we move on to asking the inner loop of this loop to compile itself.
+ currentLoopIndex = tierUp.outerLoops()[currentLoopIndex];
+ continue;
+ }
+
+ // This is where we ask the outer loop to immediately compile itself if program
+ // control reaches it.
+ dataLogLnIf(Options::verboseOSR(), "Inner-loop loopIndex#", loopIndex, " in ", functionIndex, " setting parent loop loopIndex#", currentLoopIndex, "'s trigger and backing off.");
+ tierUp.osrEntryTriggers()[currentLoopIndex] = TierUpCount::TriggerReason::StartCompilation;
+ return true;
+ }
+ return false;
+ };
+
+ if (tryTriggerOuterLoopToCompile()) {
+ tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
+ return returnWithoutOSREntry();
+ }
+ }
+
+ bool startOSREntryCompilation = false;
+ {
+ auto locker = holdLock(tierUp.getLock());
+ if (tierUp.m_compilationStatusForOMGForOSREntry == TierUpCount::CompilationStatus::NotCompiled) {
+ tierUp.m_compilationStatusForOMGForOSREntry = TierUpCount::CompilationStatus::StartCompilation;
+ startOSREntryCompilation = true;
+ // Currently, we do not have a way to jettison wasm code. This means that once we decide to compile OSR entry code for a particular loopIndex,
+ // we cannot throw away the compiled code so long as the Wasm module is live. We immediately disable all the triggers.
+ for (auto& trigger : tierUp.osrEntryTriggers())
+ trigger = TierUpCount::TriggerReason::DontTrigger;
+ }
+ }
+
+ if (startOSREntryCompilation) {
+ dataLogLnIf(Options::verboseOSR(), "triggerOMGOSR for ", functionIndex);
+ Ref<Plan> plan = adoptRef(*new OMGForOSREntryPlan(instance->context(), Ref<Wasm::Module>(instance->module()), Ref<Wasm::BBQCallee>(callee), functionIndex, loopIndex, codeBlock.mode(), Plan::dontFinalize()));
+ ensureWorklist().enqueue(plan.copyRef());
+ if (UNLIKELY(!Options::useConcurrentJIT()))
+ plan->waitForCompletion();
+ else
+ tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
+ }
+
+ OMGForOSREntryCallee* osrEntryCallee = callee.osrEntryCallee();
+ if (!osrEntryCallee) {
+ tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
+ return returnWithoutOSREntry();
+ }
+
+ if (osrEntryCallee->loopIndex() == loopIndex)
+ return doOSREntry(instance, context, callee, *osrEntryCallee, osrEntryData);
+
+ tierUp.dontOptimizeAnytimeSoon(functionIndex);
+ return returnWithoutOSREntry();
+}
+
+void JIT_OPERATION triggerTierUpNow(Instance* instance, uint32_t functionIndex)
+{
+ Wasm::CodeBlock& codeBlock = *instance->codeBlock();
+ ASSERT(instance->memory()->mode() == codeBlock.mode());
+
+ uint32_t functionIndexInSpace = functionIndex + codeBlock.functionImportCount();
+ ASSERT(codeBlock.wasmBBQCalleeFromFunctionIndexSpace(functionIndexInSpace).compilationMode() == Wasm::CompilationMode::BBQMode);
+ BBQCallee& callee = static_cast<BBQCallee&>(codeBlock.wasmBBQCalleeFromFunctionIndexSpace(functionIndexInSpace));
+ TierUpCount& tierUp = *callee.tierUpCount();
+ dataLogLnIf(Options::verboseOSR(), "Consider OMGPlan for [", functionIndex, "] with executeCounter = ", tierUp, " ", RawPointer(callee.replacement()));
+
+ if (shouldTriggerOMGCompile(tierUp, callee.replacement(), functionIndex))
+ triggerOMGReplacementCompile(tierUp, callee.replacement(), instance, codeBlock, functionIndex);
+
+ // We already have an OMG replacement.
+ if (callee.replacement()) {
+ // No OSR entry points. Just defer indefinitely.
+ if (tierUp.osrEntryTriggers().isEmpty()) {
+ dataLogLnIf(Options::verboseOSR(), "delayOMGCompile replacement in place, delaying indefinitely for ", functionIndex);
+ tierUp.dontOptimizeAnytimeSoon(functionIndex);
+ return;
+ }
+
+ // Found one OSR entry point. Since we do not have a way to jettison a Wasm::Callee right now, this means that the tierUp function is now meaningless.
+ // Avoid calling it as much as possible.
+ if (callee.osrEntryCallee()) {
+ dataLogLnIf(Options::verboseOSR(), "delayOMGCompile trigger in place, delaying indefinitely for ", functionIndex);
+ tierUp.dontOptimizeAnytimeSoon(functionIndex);
+ return;
+ }
+ }
+}
+
+} } // namespace JSC::Wasm
+
+#endif // ENABLE(WEBASSEMBLY)
--- /dev/null
+/*
+ * Copyright (C) 2019 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(WEBASSEMBLY)
+
+#include "SlowPathReturnType.h"
+
+namespace JSC {
+namespace Probe {
+class Context;
+} // namespace JSC::Probe
+namespace Wasm {
+
+class Instance;
+
+void JIT_OPERATION triggerOSREntryNow(Probe::Context&) WTF_INTERNAL;
+void JIT_OPERATION triggerTierUpNow(Instance*, uint32_t functionIndex) WTF_INTERNAL;
+
+} } // namespace JSC::Wasm
+
+#endif // ENABLE(WEBASSEMBLY)
#include "WasmContext.h"
#include "WasmExceptionType.h"
#include "WasmInstance.h"
-#include "WasmOMGPlan.h"
+#include "WasmOperations.h"
namespace JSC { namespace Wasm {
return FINALIZE_CODE(linkBuffer, JITThunkPtrTag, "Throw stack overflow from Wasm");
}
-MacroAssemblerCodeRef<JITThunkPtrTag> triggerOMGTierUpThunkGenerator(const AbstractLocker&)
+MacroAssemblerCodeRef<JITThunkPtrTag> triggerOMGEntryTierUpThunkGenerator(const AbstractLocker&)
{
// We expect that the user has already put the function index into GPRInfo::argumentGPR1
CCallHelpers jit;
unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(jit, registersToSpill, extraPaddingBytes);
jit.loadWasmContextInstance(GPRInfo::argumentGPR0);
- typedef void (*Run)(Instance*, uint32_t);
- Run run = OMGPlan::runForIndex;
- jit.move(MacroAssembler::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(run)), GPRInfo::argumentGPR2);
+ jit.move(MacroAssembler::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(triggerTierUpNow)), GPRInfo::argumentGPR2);
jit.call(GPRInfo::argumentGPR2, OperationPtrTag);
ScratchRegisterAllocator::restoreRegistersFromStackForCall(jit, registersToSpill, RegisterSet(), numberOfStackBytesUsedForRegisterPreservation, extraPaddingBytes);
jit.emitFunctionEpilogue();
jit.ret();
LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(linkBuffer, JITThunkPtrTag, "Trigger OMG tier up");
+ return FINALIZE_CODE(linkBuffer, JITThunkPtrTag, "Trigger OMG entry tier up");
}
static Thunks* thunks;
MacroAssemblerCodeRef<JITThunkPtrTag> throwExceptionFromWasmThunkGenerator(const AbstractLocker&);
MacroAssemblerCodeRef<JITThunkPtrTag> throwStackOverflowFromWasmThunkGenerator(const AbstractLocker&);
-MacroAssemblerCodeRef<JITThunkPtrTag> triggerOMGTierUpThunkGenerator(const AbstractLocker&);
+MacroAssemblerCodeRef<JITThunkPtrTag> triggerOMGEntryTierUpThunkGenerator(const AbstractLocker&);
typedef MacroAssemblerCodeRef<JITThunkPtrTag> (*ThunkGenerator)(const AbstractLocker&);
--- /dev/null
+/*
+ * Copyright (C) 2019 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WasmTierUpCount.h"
+
+#if ENABLE(WEBASSEMBLY)
+
+#include "WasmOSREntryData.h"
+
+namespace JSC { namespace Wasm {
+
+TierUpCount::TierUpCount()
+{
+ setNewThreshold(Options::thresholdForOMGOptimizeAfterWarmUp(), nullptr);
+}
+
+TierUpCount::~TierUpCount() = default;
+
+OSREntryData& TierUpCount::addOSREntryData(uint32_t functionIndex, uint32_t loopIndex)
+{
+ m_osrEntryData.append(makeUnique<OSREntryData>(functionIndex, loopIndex));
+ return *m_osrEntryData.last().get();
+}
+
+} } // namespace JSC::Wasm
+
+#endif // ENABLE(WEBASSEMBLY)
#if ENABLE(WEBASSEMBLY)
+#include "CompilationResult.h"
+#include "ExecutionCounter.h"
#include "Options.h"
#include <wtf/Atomics.h>
+#include <wtf/SegmentedVector.h>
#include <wtf/StdLibExtras.h>
namespace JSC { namespace Wasm {
+class OSREntryData;
+
// This class manages the tier up counts for Wasm binaries. The main interesting thing about
// wasm tiering up counts is that the least significant bit indicates if the tier up has already
// started. Also, wasm code does not atomically update this count. This is because we
// don't care too much if the countdown is slightly off. The tier up trigger is atomic, however,
// so tier up will be triggered exactly once.
-class TierUpCount {
+class TierUpCount : public UpperTierExecutionCounter {
WTF_MAKE_NONCOPYABLE(TierUpCount);
public:
- TierUpCount()
- : m_count(Options::webAssemblyOMGTierUpCount())
- , m_tierUpStarted(false)
+ enum class TriggerReason : uint8_t {
+ DontTrigger,
+ CompilationDone,
+ StartCompilation,
+ };
+
+ enum class CompilationStatus : uint8_t {
+ NotCompiled,
+ StartCompilation,
+ Compiled,
+ Failed,
+ };
+
+ TierUpCount();
+ ~TierUpCount();
+
+ static int32_t loopIncrement() { return Options::omgTierUpCounterIncrementForLoop(); }
+ static int32_t functionEntryIncrement() { return Options::omgTierUpCounterIncrementForEntry(); }
+
+ SegmentedVector<TriggerReason, 16>& osrEntryTriggers() { return m_osrEntryTriggers; }
+ Vector<uint32_t>& outerLoops() { return m_outerLoops; }
+ Lock& getLock() { return m_lock; }
+
+ OSREntryData& addOSREntryData(uint32_t functionIndex, uint32_t loopIndex);
+
+ void optimizeAfterWarmUp(uint32_t functionIndex)
{
+ dataLogLnIf(Options::verboseOSR(), functionIndex, ": OMG-optimizing after warm-up.");
+ setNewThreshold(Options::thresholdForOMGOptimizeAfterWarmUp(), nullptr);
}
- TierUpCount(TierUpCount&& other)
+ bool checkIfOptimizationThresholdReached()
{
- ASSERT(other.m_count == Options::webAssemblyOMGTierUpCount());
- m_count = other.m_count;
+ return checkIfThresholdCrossedAndSet(nullptr);
}
- static uint32_t loopDecrement() { return Options::webAssemblyLoopDecrement(); }
- static uint32_t functionEntryDecrement() { return Options::webAssemblyFunctionEntryDecrement(); }
+ void dontOptimizeAnytimeSoon(uint32_t functionIndex)
+ {
+ dataLogLnIf(Options::verboseOSR(), functionIndex, ": Not OMG-optimizing anytime soon.");
+ deferIndefinitely();
+ }
- bool shouldStartTierUp()
+ void optimizeNextInvocation(uint32_t functionIndex)
{
- return !m_tierUpStarted.exchange(true);
+ dataLogLnIf(Options::verboseOSR(), functionIndex, ": OMG-optimizing next invocation.");
+ setNewThreshold(0, nullptr);
}
- int32_t count() { return bitwise_cast<int32_t>(m_count); }
+ void optimizeSoon(uint32_t functionIndex)
+ {
+ dataLogLnIf(Options::verboseOSR(), functionIndex, ": OMG-optimizing soon.");
+ // FIXME: Need adjustment once we get more information about wasm functions.
+ setNewThreshold(Options::thresholdForOMGOptimizeSoon(), nullptr);
+ }
+
+ void setOptimizationThresholdBasedOnCompilationResult(uint32_t functionIndex, CompilationResult result)
+ {
+ switch (result) {
+ case CompilationSuccessful:
+ optimizeNextInvocation(functionIndex);
+ return;
+ case CompilationFailed:
+ dontOptimizeAnytimeSoon(functionIndex);
+ return;
+ case CompilationDeferred:
+ optimizeAfterWarmUp(functionIndex);
+ return;
+ case CompilationInvalidated:
+ // This is weird - it will only happen in cases when the DFG code block (i.e.
+ // the code block that this JITCode belongs to) is also invalidated. So it
+ // doesn't really matter what we do. But, we do the right thing anyway. Note
+ // that us counting the reoptimization actually means that we might count it
+ // twice. But that's generally OK. It's better to overcount reoptimizations
+ // than it is to undercount them.
+ optimizeAfterWarmUp(functionIndex);
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
-private:
- uint32_t m_count;
- Atomic<bool> m_tierUpStarted;
+ Atomic<bool> m_tierUpStarted { false };
+ Lock m_lock;
+ CompilationStatus m_compilationStatusForOMG { CompilationStatus::NotCompiled };
+ CompilationStatus m_compilationStatusForOMGForOSREntry { CompilationStatus::NotCompiled };
+ SegmentedVector<TriggerReason, 16> m_osrEntryTriggers;
+ Vector<uint32_t> m_outerLoops;
+ Vector<std::unique_ptr<OSREntryData>> m_osrEntryData;
};
} } // namespace JSC::Wasm
typedef String ErrorType;
typedef Unexpected<ErrorType> UnexpectedResult;
typedef Expected<void, ErrorType> Result;
- typedef Type ExpressionType;
+ using ExpressionType = Type;
+ using ExpressionList = Vector<ExpressionType, 1>;
+ using Stack = ExpressionList;
typedef ControlData ControlType;
- typedef Vector<ExpressionType, 1> ExpressionList;
typedef FunctionParser<Validate>::ControlEntry ControlEntry;
static constexpr ExpressionType emptyExpression() { return Void; }
+ Stack createStack() { return Stack(); }
template <typename ...Args>
NEVER_INLINE UnexpectedResult WARN_UNUSED_RETURN fail(Args... args) const
// Control flow
ControlData WARN_UNUSED_RETURN addTopLevel(Type signature);
ControlData WARN_UNUSED_RETURN addBlock(Type signature);
- ControlData WARN_UNUSED_RETURN addLoop(Type signature);
+ ControlData WARN_UNUSED_RETURN addLoop(Type signature, const Stack&, uint32_t);
Result WARN_UNUSED_RETURN addIf(ExpressionType condition, Type signature, ControlData& result);
- Result WARN_UNUSED_RETURN addElse(ControlData&, const ExpressionList&);
+ Result WARN_UNUSED_RETURN addElse(ControlData&, const Stack&);
Result WARN_UNUSED_RETURN addElseToUnreachable(ControlData&);
- Result WARN_UNUSED_RETURN addReturn(ControlData& topLevel, const ExpressionList& returnValues);
- Result WARN_UNUSED_RETURN addBranch(ControlData&, ExpressionType condition, const ExpressionList& expressionStack);
- Result WARN_UNUSED_RETURN addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const ExpressionList& expressionStack);
- Result WARN_UNUSED_RETURN endBlock(ControlEntry&, ExpressionList& expressionStack);
+ Result WARN_UNUSED_RETURN addReturn(ControlData& topLevel, const Stack& returnValues);
+ Result WARN_UNUSED_RETURN addBranch(ControlData&, ExpressionType condition, const Stack& expressionStack);
+ Result WARN_UNUSED_RETURN addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const Stack& expressionStack);
+ Result WARN_UNUSED_RETURN endBlock(ControlEntry&, Stack& expressionStack);
Result WARN_UNUSED_RETURN addEndToUnreachable(ControlEntry&);
Result WARN_UNUSED_RETURN addGrowMemory(ExpressionType delta, ExpressionType& result);
Result WARN_UNUSED_RETURN addCurrentMemory(ExpressionType& result);
{
}
- void dump(const Vector<ControlEntry>&, const ExpressionList*);
+ void dump(const Vector<ControlEntry>&, const Stack*);
void setParser(FunctionParser<Validate>*) { }
private:
- Result WARN_UNUSED_RETURN unify(const ExpressionList&, const ControlData&);
+ Result WARN_UNUSED_RETURN unify(const Stack&, const ControlData&);
- Result WARN_UNUSED_RETURN checkBranchTarget(ControlData& target, const ExpressionList& expressionStack);
+ Result WARN_UNUSED_RETURN checkBranchTarget(ControlData& target, const Stack& expressionStack);
Vector<Type> m_locals;
const ModuleInformation& m_module;
return ControlData(BlockType::Block, signature);
}
-Validate::ControlType Validate::addLoop(Type signature)
+Validate::ControlType Validate::addLoop(Type signature, const Stack&, uint32_t)
{
return ControlData(BlockType::Loop, signature);
}
return { };
}
-auto Validate::addElse(ControlType& current, const ExpressionList& values) -> Result
+auto Validate::addElse(ControlType& current, const Stack& values) -> Result
{
WASM_FAIL_IF_HELPER_FAILS(unify(values, current));
return addElseToUnreachable(current);
return { };
}
-auto Validate::checkBranchTarget(ControlType& target, const ExpressionList& expressionStack) -> Result
+auto Validate::checkBranchTarget(ControlType& target, const Stack& expressionStack) -> Result
{
if (target.branchTargetSignature() == Void)
return { };
return { };
}
-auto Validate::addBranch(ControlType& target, ExpressionType condition, const ExpressionList& stack) -> Result
+auto Validate::addBranch(ControlType& target, ExpressionType condition, const Stack& stack) -> Result
{
// Void means this is an unconditional branch.
WASM_VALIDATOR_FAIL_IF(condition != Void && condition != I32, "conditional branch with non-i32 condition ", condition);
return checkBranchTarget(target, stack);
}
-auto Validate::addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const ExpressionList& expressionStack) -> Result
+auto Validate::addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const Stack& expressionStack) -> Result
{
WASM_VALIDATOR_FAIL_IF(condition != I32, "br_table with non-i32 condition ", condition);
return { };
}
-auto Validate::endBlock(ControlEntry& entry, ExpressionList& stack) -> Result
+auto Validate::endBlock(ControlEntry& entry, Stack& stack) -> Result
{
WASM_FAIL_IF_HELPER_FAILS(unify(stack, entry.controlData));
return addEndToUnreachable(entry);
return { };
}
-auto Validate::unify(const ExpressionList& values, const ControlType& block) -> Result
+auto Validate::unify(const Stack& values, const ControlType& block) -> Result
{
if (block.signature() == Void) {
WASM_VALIDATOR_FAIL_IF(!values.isEmpty(), "void block should end with an empty stack");
return { };
}
-static void dumpExpressionStack(const CommaPrinter& comma, const Validate::ExpressionList& expressionStack)
+static void dumpExpressionStack(const CommaPrinter& comma, const Validate::Stack& expressionStack)
{
dataLog(comma, " ExpressionStack:");
for (const auto& expression : expressionStack)
dataLog(comma, makeString(expression));
}
-void Validate::dump(const Vector<ControlEntry>& controlStack, const ExpressionList* expressionStack)
+void Validate::dump(const Vector<ControlEntry>& controlStack, const Stack* expressionStack)
{
for (size_t i = controlStack.size(); i--;) {
dataLog(" ", controlStack[i].controlData);
+2019-08-19 Yusuke Suzuki <ysuzuki@apple.com>
+
+ [JSC] OSR entry to Wasm OMG
+ https://bugs.webkit.org/show_bug.cgi?id=200362
+
+ Reviewed by Michael Saboff.
+
+ * Scripts/run-jsc-stress-tests:
+
2019-08-19 Zhifei Fang <zhifei_fang@apple.com>
[results.webkit.org Timeline] Disable back gesture
# We force all tests to use a smaller (1.5M) stack so that stack overflow tests can run faster.
BASE_OPTIONS = ["--useFTLJIT=false", "--useFunctionDotArguments=true", "--validateExceptionChecks=true", "--useDollarVM=true", "--maxPerThreadStackUsage=1572864"]
-EAGER_OPTIONS = ["--thresholdForJITAfterWarmUp=10", "--thresholdForJITSoon=10", "--thresholdForOptimizeAfterWarmUp=20", "--thresholdForOptimizeAfterLongWarmUp=20", "--thresholdForOptimizeSoon=20", "--thresholdForFTLOptimizeAfterWarmUp=20", "--thresholdForFTLOptimizeSoon=20", "--maximumEvalCacheableSourceLength=150000", "--useEagerCodeBlockJettisonTiming=true"]
+EAGER_OPTIONS = ["--thresholdForJITAfterWarmUp=10", "--thresholdForJITSoon=10", "--thresholdForOptimizeAfterWarmUp=20", "--thresholdForOptimizeAfterLongWarmUp=20", "--thresholdForOptimizeSoon=20", "--thresholdForFTLOptimizeAfterWarmUp=20", "--thresholdForFTLOptimizeSoon=20", "--thresholdForOMGOptimizeAfterWarmUp=20", "--thresholdForOMGOptimizeSoon=20", "--maximumEvalCacheableSourceLength=150000", "--useEagerCodeBlockJettisonTiming=true"]
# NOTE: Tests rely on this using scribbleFreeCells.
NO_CJIT_OPTIONS = ["--useConcurrentJIT=false", "--thresholdForJITAfterWarmUp=100", "--scribbleFreeCells=true"]
B3O1_OPTIONS = ["--defaultB3OptLevel=1"]
run("default-wasm", "-m", *FTL_OPTIONS)
if $mode != "quick"
run("wasm-no-cjit-yes-tls-context", "-m", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS))
+ run("wasm-eager", "-m", *(FTL_OPTIONS + EAGER_OPTIONS))
run("wasm-eager-jettison", "-m", "--forceCodeBlockToJettisonDueToOldAge=true", *FTL_OPTIONS)
run("wasm-no-call-ic", "-m", "--useCallICsForWebAssemblyToJSCalls=false", *FTL_OPTIONS)
run("wasm-no-tls-context", "-m", "--useFastTLSForWasmContext=false", *FTL_OPTIONS)
run("default-wasm", "-m", *(FTL_OPTIONS + optionalTestSpecificOptions))
if $mode != "quick"
run("wasm-no-cjit-yes-tls-context", "-m", "--useFastTLSForWasmContext=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
+ run("wasm-eager", "-m", *(FTL_OPTIONS + EAGER_OPTIONS + optionalTestSpecificOptions))
run("wasm-eager-jettison", "-m", "--forceCodeBlockToJettisonDueToOldAge=true", *(FTL_OPTIONS + optionalTestSpecificOptions))
run("wasm-no-call-ic", "-m", "--useCallICsForWebAssemblyToJSCalls=false", *(FTL_OPTIONS + optionalTestSpecificOptions))
run("wasm-no-tls-context", "-m", "--useFastTLSForWasmContext=false", *(FTL_OPTIONS + optionalTestSpecificOptions))