1 /*
2  * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "B3LowerToAir.h"
28
29 #if ENABLE(B3_JIT)
30
31 #include "AirCCallSpecial.h"
32 #include "AirCode.h"
33 #include "AirInsertionSet.h"
34 #include "AirInstInlines.h"
35 #include "AirStackSlot.h"
36 #include "B3ArgumentRegValue.h"
37 #include "B3BasicBlockInlines.h"
38 #include "B3BlockWorklist.h"
39 #include "B3CCallValue.h"
40 #include "B3CheckSpecial.h"
41 #include "B3Commutativity.h"
42 #include "B3Dominators.h"
43 #include "B3IndexMap.h"
44 #include "B3IndexSet.h"
45 #include "B3MemoryValue.h"
46 #include "B3PatchpointSpecial.h"
47 #include "B3PatchpointValue.h"
48 #include "B3PhaseScope.h"
49 #include "B3PhiChildren.h"
50 #include "B3Procedure.h"
51 #include "B3SlotBaseValue.h"
52 #include "B3StackSlot.h"
53 #include "B3UpsilonValue.h"
54 #include "B3UseCounts.h"
55 #include "B3ValueInlines.h"
56 #include "B3Variable.h"
57 #include "B3VariableValue.h"
58 #include <wtf/ListDump.h>
59
60 #if COMPILER(GCC) && ASSERT_DISABLED
61 #pragma GCC diagnostic push
62 #pragma GCC diagnostic ignored "-Wreturn-type"
63 #endif // COMPILER(GCC) && ASSERT_DISABLED
64
65 namespace JSC { namespace B3 {
66
67 using namespace Air;
68
69 namespace {
70
71 const bool verbose = false;
72
73 class LowerToAir {
74 public:
75     LowerToAir(Procedure& procedure)
76         : m_valueToTmp(procedure.values().size())
77         , m_phiToTmp(procedure.values().size())
78         , m_blockToBlock(procedure.size())
79         , m_useCounts(procedure)
80         , m_phiChildren(procedure)
81         , m_dominators(procedure.dominators())
82         , m_procedure(procedure)
83         , m_code(procedure.code())
84     {
85     }
86
87     void run()
88     {
89         for (B3::BasicBlock* block : m_procedure)
90             m_blockToBlock[block] = m_code.addBlock(block->frequency());
91         
92         for (Value* value : m_procedure.values()) {
93             switch (value->opcode()) {
94             case Phi: {
95                 m_phiToTmp[value] = m_code.newTmp(Arg::typeForB3Type(value->type()));
96                 if (verbose)
97                     dataLog("Phi tmp for ", *value, ": ", m_phiToTmp[value], "\n");
98                 break;
99             }
100             default:
101                 break;
102             }
103         }
104
105         for (B3::StackSlot* stack : m_procedure.stackSlots())
106             m_stackToStack.add(stack, m_code.addStackSlot(stack));
107         for (Variable* variable : m_procedure.variables())
108             m_variableToTmp.add(variable, m_code.newTmp(Arg::typeForB3Type(variable->type())));
109
110         // Figure out which blocks are not rare.
111         m_fastWorklist.push(m_procedure[0]);
112         while (B3::BasicBlock* block = m_fastWorklist.pop()) {
113             for (B3::FrequentedBlock& successor : block->successors()) {
114                 if (!successor.isRare())
115                     m_fastWorklist.push(successor.block());
116             }
117         }
118
119         m_procedure.resetValueOwners(); // Used by crossesInterference().
120
121         // Lower defs before uses on a global level. This is a good heuristic to lock down a
122         // hoisted address expression before we duplicate it back into the loop.
123         for (B3::BasicBlock* block : m_procedure.blocksInPreOrder()) {
124             m_block = block;
125             // Reset some state.
126             m_insts.resize(0);
127
128             m_isRare = !m_fastWorklist.saw(block);
129
130             if (verbose)
131                 dataLog("Lowering Block ", *block, ":\n");
132             
133             // Process blocks in reverse order so we see uses before defs. That's what allows us
134             // to match patterns effectively.
135             for (unsigned i = block->size(); i--;) {
136                 m_index = i;
137                 m_value = block->at(i);
138                 if (m_locked.contains(m_value))
139                     continue;
140                 m_insts.append(Vector<Inst>());
141                 if (verbose)
142                     dataLog("Lowering ", deepDump(m_procedure, m_value), ":\n");
143                 lower();
144                 if (verbose) {
145                     for (Inst& inst : m_insts.last())
146                         dataLog("    ", inst, "\n");
147                 }
148             }
149
150             // Now append the instructions. m_insts contains them in reverse order, so we process
151             // it in reverse.
152             for (unsigned i = m_insts.size(); i--;) {
153                 for (Inst& inst : m_insts[i])
154                     m_blockToBlock[block]->appendInst(WTFMove(inst));
155             }
156
157             // Make sure that the successors are set up correctly.
158             ASSERT(block->successors().size() <= 2);
159             for (B3::FrequentedBlock successor : block->successors()) {
160                 m_blockToBlock[block]->successors().append(
161                     Air::FrequentedBlock(m_blockToBlock[successor.block()], successor.frequency()));
162             }
163         }
164
165         Air::InsertionSet insertionSet(m_code);
166         for (Inst& inst : m_prologue)
167             insertionSet.insertInst(0, WTFMove(inst));
168         insertionSet.execute(m_code[0]);
169     }
170
171 private:
172     bool shouldCopyPropagate(Value* value)
173     {
174         switch (value->opcode()) {
175         case Trunc:
176         case Identity:
177             return true;
178         default:
179             return false;
180         }
181     }
182
183     class ArgPromise {
184     public:
185         ArgPromise() { }
186
187         ArgPromise(const Arg& arg, Value* valueToLock = nullptr)
188             : m_arg(arg)
189             , m_value(valueToLock)
190         {
191         }
192
193         static ArgPromise tmp(Value* value)
194         {
195             ArgPromise result;
196             result.m_value = value;
197             return result;
198         }
199
200         explicit operator bool() const { return m_arg || m_value; }
201
202         Arg::Kind kind() const
203         {
204             if (!m_arg && m_value)
205                 return Arg::Tmp;
206             return m_arg.kind();
207         }
208
209         const Arg& peek() const
210         {
211             return m_arg;
212         }
213
214         Arg consume(LowerToAir& lower) const
215         {
216             if (!m_arg && m_value)
217                 return lower.tmp(m_value);
218             if (m_value)
219                 lower.commitInternal(m_value);
220             return m_arg;
221         }
222         
223     private:
224         // Four forms:
225         // Everything null: invalid.
226         // Arg non-null, value null: just use the arg, nothing special.
227         // Arg null, value non-null: it's a tmp, pin it when necessary.
228         // Arg non-null, value non-null: use the arg, lock the value.
229         Arg m_arg;
230         Value* m_value;
231     };
232
233     // Consider using tmpPromise() in cases where you aren't sure that you want to pin the value yet.
234     // Here are three canonical ways of using tmp() and tmpPromise():
235     //
236     // Idiom #1: You know that you want a tmp() and you know that it will be valid for the
237     // instruction you're emitting.
238     //
239     //     append(Foo, tmp(bar));
240     //
241     // Idiom #2: You don't know if you want to use a tmp() because you haven't determined if the
242     // instruction will accept it, so you query first. Note that the call to tmp() happens only after
243     // you are sure that you will use it.
244     //
245     //     if (isValidForm(Foo, Arg::Tmp))
246     //         append(Foo, tmp(bar))
247     //
248     // Idiom #3: Same as Idiom #2, but using tmpPromise. Notice that this calls consume() only after
249     // it's sure it will use the tmp. That's deliberate.
250     //
251     //     ArgPromise promise = tmpPromise(bar);
252     //     if (isValidForm(Foo, promise.kind()))
253     //         append(Foo, promise.consume(*this))
254     //
255     // In both idiom #2 and idiom #3, we don't pin the value to a temporary except when we actually
256     // emit the instruction. Both tmp() and tmpPromise().consume(*this) will pin it. Pinning means
257     // that we will henceforth require that the value of 'bar' is generated as a separate
258     // instruction. We don't want to pin the value to a temporary if we might change our minds, and
259     // pass an address operand representing 'bar' to Foo instead.
260     //
261     // Because tmp() pins, the following is not an idiom you should use:
262     //
263     //     Tmp tmp = this->tmp(bar);
264     //     if (isValidForm(Foo, tmp.kind()))
265     //         append(Foo, tmp);
266     //
267     // That's because if isValidForm() returns false, you will have already pinned the 'bar' to a
268     // temporary. You might later want to try to do something like loadPromise(), and that will fail.
269     // This arises in operations that have both an Addr,Tmp form and a Tmp,Addr form. The following code
270     // seems right, but will actually fail to ever match the Tmp,Addr form because by then, the right
271     // value is already pinned.
272     //
273     //     auto tryThings = [this] (const Arg& left, const Arg& right) {
274     //         if (isValidForm(Foo, left.kind(), right.kind()))
275     //             return Inst(Foo, m_value, left, right);
276     //         return Inst();
277     //     };
278     //     if (Inst result = tryThings(loadAddr(left), tmp(right)))
279     //         return result;
280     //     if (Inst result = tryThings(tmp(left), loadAddr(right))) // this never succeeds.
281     //         return result;
282     //     return Inst(Foo, m_value, tmp(left), tmp(right));
283     //
284     // If you imagine that loadAddr(value) is just loadPromise(value).consume(*this), then this code
285     // will run correctly - it will generate OK code - but the second form is never matched.
286     // loadAddr(right) will never succeed because it will observe that 'right' is already pinned.
287     // Of course, it's exactly because of the risky nature of such code that we don't have a
288     // loadAddr() helper and require you to balance ArgPromise's in code like this. Such code will
289     // work fine if written as:
290     //
291     //     auto tryThings = [this] (const ArgPromise& left, const ArgPromise& right) {
292     //         if (isValidForm(Foo, left.kind(), right.kind()))
293     //             return Inst(Foo, m_value, left.consume(*this), right.consume(*this));
294     //         return Inst();
295     //     };
296     //     if (Inst result = tryThings(loadPromise(left), tmpPromise(right)))
297     //         return result;
298     //     if (Inst result = tryThings(tmpPromise(left), loadPromise(right)))
299     //         return result;
300     //     return Inst(Foo, m_value, tmp(left), tmp(right));
301     //
302     // Notice that we did use tmp in the fall-back case at the end, because by then, we know for sure
303     // that we want a tmp. But using tmpPromise in the tryThings() calls ensures that doing so
304     // doesn't prevent us from trying loadPromise on the same value.
305     Tmp tmp(Value* value)
306     {
307         Tmp& tmp = m_valueToTmp[value];
308         if (!tmp) {
309             while (shouldCopyPropagate(value))
310                 value = value->child(0);
311
312             if (value->opcode() == FramePointer)
313                 return Tmp(GPRInfo::callFrameRegister);
314
315             Tmp& realTmp = m_valueToTmp[value];
316             if (!realTmp) {
317                 realTmp = m_code.newTmp(Arg::typeForB3Type(value->type()));
318                 if (m_procedure.isFastConstant(value->key()))
319                     m_code.addFastTmp(realTmp);
320                 if (verbose)
321                     dataLog("Tmp for ", *value, ": ", realTmp, "\n");
322             }
323             tmp = realTmp;
324         }
325         return tmp;
326     }
327
328     ArgPromise tmpPromise(Value* value)
329     {
330         return ArgPromise::tmp(value);
331     }
332
333     bool canBeInternal(Value* value)
334     {
335         // If one of the internal things has already been computed, then we don't want to cause
336         // it to be recomputed again.
337         if (m_valueToTmp[value])
338             return false;
339         
340         // We require internals to have only one use - us. It's not clear if this should be numUses() or
341         // numUsingInstructions(). Ideally, it would be numUsingInstructions(), except that it's not clear
342         // if we'd actually do the right thing when matching over such a DAG pattern. For now, it simply
343         // doesn't matter because we don't implement patterns that would trigger this.
344         if (m_useCounts.numUses(value) != 1)
345             return false;
346
347         return true;
348     }
349
350     // If you ask canBeInternal() and then construct something from that, and you commit to emitting
351     // that code, then you must commitInternal() on that value. This is tricky, and you only need to
352     // do it if you're pattern matching by hand rather than using the patterns language. Long story
353     // short, you should avoid this by using the pattern matcher to match patterns.
354     void commitInternal(Value* value)
355     {
356         m_locked.add(value);
357     }
358
359     bool crossesInterference(Value* value)
360     {
361         // If it's in a foreign block, then be conservative. We could handle this if we were
362         // willing to do heavier analysis. For example, if we had liveness, then we could label
363         // values as "crossing interference" if they interfere with anything that they are live
364         // across. But, it's not clear how useful this would be.
365         if (value->owner != m_value->owner)
366             return true;
367
368         Effects effects = value->effects();
369
370         for (unsigned i = m_index; i--;) {
371             Value* otherValue = m_block->at(i);
372             if (otherValue == value)
373                 return false;
374             if (effects.interferes(otherValue->effects()))
375                 return true;
376         }
377
378         ASSERT_NOT_REACHED();
379         return true;
380     }
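    // A sketch of the kind of pattern this guards against (hypothetical values): in a single block,
    //
    //     @1 = Load(@p)
    //     @2 = Store(@x, @q)   // may write the memory that @1 reads
    //     @3 = Add(@y, @1)
    //
    // attempting to fuse the load @1 while lowering @3 is rejected, because the Store's effects
    // interfere with the Load's effects between the def and the use.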
381
382     // This turns the given operand into an address.
383     Arg effectiveAddr(Value* address, int32_t offset, Arg::Width width)
384     {
385         ASSERT(Arg::isValidAddrForm(offset, width));
386
387         auto fallback = [&] () -> Arg {
388             return Arg::addr(tmp(address), offset);
389         };
390         
391         static const unsigned lotsOfUses = 10; // This is arbitrary and we should tune it eventually.
392
393         // Only match if the address value isn't used in some large number of places.
394         if (m_useCounts.numUses(address) > lotsOfUses)
395             return fallback();
396         
397         switch (address->opcode()) {
398         case Add: {
399             Value* left = address->child(0);
400             Value* right = address->child(1);
401
402             auto tryIndex = [&] (Value* index, Value* base) -> Arg {
403                 if (index->opcode() != Shl)
404                     return Arg();
405                 if (m_locked.contains(index->child(0)) || m_locked.contains(base))
406                     return Arg();
407                 if (!index->child(1)->hasInt32())
408                     return Arg();
409                 
410                 unsigned scale = 1 << (index->child(1)->asInt32() & 31);
411                 if (!Arg::isValidIndexForm(scale, offset, width))
412                     return Arg();
413
414                 return Arg::index(tmp(base), tmp(index->child(0)), scale, offset);
415             };
416
417             if (Arg result = tryIndex(left, right))
418                 return result;
419             if (Arg result = tryIndex(right, left))
420                 return result;
421
422             if (m_locked.contains(left) || m_locked.contains(right)
423                 || !Arg::isValidIndexForm(1, offset, width))
424                 return fallback();
425             
426             return Arg::index(tmp(left), tmp(right), 1, offset);
427         }
428
429         case Shl: {
430             Value* left = address->child(0);
431
432             // We'll never see child(1)->isInt32(0), since that would have been reduced. If the shift
433             // amount is greater than 1, then there isn't really anything smart that we could do here.
434             // We avoid using baseless indexes because their encoding isn't particularly efficient.
435             if (m_locked.contains(left) || !address->child(1)->isInt32(1)
436                 || !Arg::isValidIndexForm(1, offset, width))
437                 return fallback();
438
439             return Arg::index(tmp(left), tmp(left), 1, offset);
440         }
441
442         case FramePointer:
443             return Arg::addr(Tmp(GPRInfo::callFrameRegister), offset);
444
445         case SlotBase:
446             return Arg::stack(m_stackToStack.get(address->as<SlotBaseValue>()->slot()), offset);
447
448         default:
449             return fallback();
450         }
451     }
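    // A sketch of what effectiveAddr() can match (hypothetical values): for an address expression like
    //
    //     @two   = Const32(2)
    //     @index = Shl(@i, @two)
    //     @addr  = Add(@base, @index)
    //
    // it can produce Arg::index(tmp(@base), tmp(@i), 4, offset) when that index form is valid for
    // the target, and it falls back to Arg::addr(tmp(@addr), offset) otherwise.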
452
453     // This gives you the address of the given Load or Store. If it's not a Load or Store, then
454     // it returns Arg().
455     Arg addr(Value* memoryValue)
456     {
457         MemoryValue* value = memoryValue->as<MemoryValue>();
458         if (!value)
459             return Arg();
460
461         int32_t offset = value->offset();
462         Arg::Width width = Arg::widthForBytes(value->accessByteSize());
463
464         Arg result = effectiveAddr(value->lastChild(), offset, width);
465         ASSERT(result.isValidForm(width));
466
467         return result;
468     }
469
470     ArgPromise loadPromiseAnyOpcode(Value* loadValue)
471     {
472         if (!canBeInternal(loadValue))
473             return Arg();
474         if (crossesInterference(loadValue))
475             return Arg();
476         return ArgPromise(addr(loadValue), loadValue);
477     }
478
479     ArgPromise loadPromise(Value* loadValue, B3::Opcode loadOpcode)
480     {
481         if (loadValue->opcode() != loadOpcode)
482             return Arg();
483         return loadPromiseAnyOpcode(loadValue);
484     }
485
486     ArgPromise loadPromise(Value* loadValue)
487     {
488         return loadPromise(loadValue, Load);
489     }
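    // For example, loadPromise(value, Load8S) only matches a sign-extending byte load. The returned
    // promise carries the load's address but does not pin the load until consume() is called, per
    // the ArgPromise idioms documented above.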
490
491     Arg imm(Value* value)
492     {
493         if (value->hasInt()) {
494             int64_t intValue = value->asInt();
495             if (Arg::isValidImmForm(intValue))
496                 return Arg::imm(intValue);
497         }
498         return Arg();
499     }
500
501     Arg bitImm(Value* value)
502     {
503         if (value->hasInt()) {
504             int64_t intValue = value->asInt();
505             if (Arg::isValidBitImmForm(intValue))
506                 return Arg::bitImm(intValue);
507         }
508         return Arg();
509     }
510
511     Arg bitImm64(Value* value)
512     {
513         if (value->hasInt()) {
514             int64_t intValue = value->asInt();
515             if (Arg::isValidBitImm64Form(intValue))
516                 return Arg::bitImm64(intValue);
517         }
518         return Arg();
519     }
520
521     Arg immOrTmp(Value* value)
522     {
523         if (Arg result = imm(value))
524             return result;
525         return tmp(value);
526     }
527
528     // By convention, we use Oops to mean "I don't know".
529     Air::Opcode tryOpcodeForType(
530         Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type)
531     {
532         Air::Opcode opcode;
533         switch (type) {
534         case Int32:
535             opcode = opcode32;
536             break;
537         case Int64:
538             opcode = opcode64;
539             break;
540         case Float:
541             opcode = opcodeFloat;
542             break;
543         case Double:
544             opcode = opcodeDouble;
545             break;
546         default:
547             opcode = Air::Oops;
548             break;
549         }
550
551         return opcode;
552     }
553
554     Air::Opcode tryOpcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
555     {
556         return tryOpcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
557     }
558
559     Air::Opcode opcodeForType(
560         Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type)
561     {
562         Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, type);
563         RELEASE_ASSERT(opcode != Air::Oops);
564         return opcode;
565     }
566
567     Air::Opcode opcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
568     {
569         return opcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
570     }
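    // For instance, a call such as tryOpcodeForType(Add32, Add64, AddDouble, AddFloat, Int64) yields
    // Add64, while a type whose slot was given Air::Oops yields Air::Oops; the opcodeForType()
    // variants assert instead of returning Air::Oops.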
571
572     template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble = Air::Oops, Air::Opcode opcodeFloat = Air::Oops>
573     void appendUnOp(Value* value)
574     {
575         Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, value->type());
576         
577         Tmp result = tmp(m_value);
578
579         // Two operand forms like:
580         //     Op a, b
581         // mean something like:
582         //     b = Op a
583
584         ArgPromise addr = loadPromise(value);
585         if (isValidForm(opcode, addr.kind(), Arg::Tmp)) {
586             append(opcode, addr.consume(*this), result);
587             return;
588         }
589
590         if (isValidForm(opcode, Arg::Tmp, Arg::Tmp)) {
591             append(opcode, tmp(value), result);
592             return;
593         }
594
595         ASSERT(value->type() == m_value->type());
596         append(relaxedMoveForType(m_value->type()), tmp(value), result);
597         append(opcode, result);
598     }
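    // The preference order above: a fused-load (Addr, Tmp) form when the operand is a fusible load
    // and the target has such a form, then a two-operand (Tmp, Tmp) form, and finally a move of the
    // operand into the result followed by the single-operand, in-place form of the opcode.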
599
600     // Call this method when doing two-operand lowering of a commutative operation. You have a choice of
601     // which incoming Value is moved into the result. This will select which one is likely to be most
602     // profitable to use as the result. Doing the right thing can have big performance consequences in tight
603     // kernels.
604     bool preferRightForResult(Value* left, Value* right)
605     {
606         // The default is to move left into result, because that's required for non-commutative instructions.
607         // The value that we want to move into result position is the one that dies here. So, if we're
608         // compiling a commutative operation and we know that actually right is the one that dies right here,
609         // then we can flip things around to help coalescing, which then kills the move instruction.
610         //
611         // But it's more complicated:
612         // - Used-once is a bad estimate of whether the variable dies here.
613         // - A child might be a candidate for coalescing with this value.
614         //
615         // Currently, we have machinery in place to recognize super obvious forms of the latter issue.
616         
617         // We recognize when a child is a Phi that has this value as one of its children. We're very
618         // conservative about this; for example we don't even consider transitive Phi children.
619         bool leftIsPhiWithThis = m_phiChildren[left].transitivelyUses(m_value);
620         bool rightIsPhiWithThis = m_phiChildren[right].transitivelyUses(m_value);
621
622         if (leftIsPhiWithThis != rightIsPhiWithThis)
623             return rightIsPhiWithThis;
624
625         if (m_useCounts.numUsingInstructions(right) != 1)
626             return false;
627         
628         if (m_useCounts.numUsingInstructions(left) != 1)
629             return true;
630
631         // The use count might be 1 if the variable is live around a loop. We can guarantee that we
632         // pick the variable that is least likely to suffer this problem if we pick the one that
633         // is closest to us in an idom walk. By convention, we slightly bias this in favor of
634         // returning true.
635
636         // We cannot prefer right if right is further away in an idom walk.
637         if (m_dominators.strictlyDominates(right->owner, left->owner))
638             return false;
639
640         return true;
641     }
642
643     template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Commutativity commutativity = NotCommutative>
644     void appendBinOp(Value* left, Value* right)
645     {
646         Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, left->type());
647         
648         Tmp result = tmp(m_value);
649         
650         // Three-operand forms like:
651         //     Op a, b, c
652         // mean something like:
653         //     c = a Op b
654
655         if (isValidForm(opcode, Arg::Imm, Arg::Tmp, Arg::Tmp)) {
656             if (commutativity == Commutative) {
657                 if (imm(right)) {
658                     append(opcode, imm(right), tmp(left), result);
659                     return;
660                 }
661             } else {
662                 // A non-commutative operation could have an immediate in left.
663                 if (imm(left)) {
664                     append(opcode, imm(left), tmp(right), result);
665                     return;
666                 }
667             }
668         }
669
670         if (isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) {
671             if (commutativity == Commutative) {
672                 if (Arg rightArg = bitImm(right)) {
673                     append(opcode, rightArg, tmp(left), result);
674                     return;
675                 }
676             } else {
677                 // A non-commutative operation could have an immediate in left.
678                 if (Arg leftArg = bitImm(left)) {
679                     append(opcode, leftArg, tmp(right), result);
680                     return;
681                 }
682             }
683         }
684
685         if (isValidForm(opcode, Arg::BitImm64, Arg::Tmp, Arg::Tmp)) {
686             if (commutativity == Commutative) {
687                 if (Arg rightArg = bitImm64(right)) {
688                     append(opcode, rightArg, tmp(left), result);
689                     return;
690                 }
691             } else {
692                 // A non-commutative operation could have an immediate in left.
693                 if (Arg leftArg = bitImm64(left)) {
694                     append(opcode, leftArg, tmp(right), result);
695                     return;
696                 }
697             }
698         }
699
700         if (imm(right) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
701             append(opcode, tmp(left), imm(right), result);
702             return;
703         }
704
705         // Note that no extant architecture has a three-operand form of binary operations that also
706         // load from memory. If such an abomination did exist, we would handle it somewhere around
707         // here.
708
709         // Two-operand forms like:
710         //     Op a, b
711         // mean something like:
712         //     b = b Op a
713
714         // At this point, we prefer versions of the operation that have a fused load or an immediate
715         // over three operand forms.
716
717         if (left != right) {
718             if (commutativity == Commutative) {
719                 ArgPromise leftAddr = loadPromise(left);
720                 if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp)) {
721                     append(relaxedMoveForType(m_value->type()), tmp(right), result);
722                     append(opcode, leftAddr.consume(*this), result);
723                     return;
724                 }
725             }
726
727             ArgPromise rightAddr = loadPromise(right);
728             if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp)) {
729                 append(relaxedMoveForType(m_value->type()), tmp(left), result);
730                 append(opcode, rightAddr.consume(*this), result);
731                 return;
732             }
733         }
734
735         if (imm(right) && isValidForm(opcode, Arg::Imm, Arg::Tmp)) {
736             append(relaxedMoveForType(m_value->type()), tmp(left), result);
737             append(opcode, imm(right), result);
738             return;
739         }
740
741         if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
742             append(opcode, tmp(left), tmp(right), result);
743             return;
744         }
745
746         if (commutativity == Commutative && preferRightForResult(left, right)) {
747             append(relaxedMoveForType(m_value->type()), tmp(right), result);
748             append(opcode, tmp(left), result);
749             return;
750         }
751         
752         append(relaxedMoveForType(m_value->type()), tmp(left), result);
753         append(opcode, tmp(right), result);
754     }
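    // A sketch of how the cases above play out (hypothetical values): for a 32-bit @3 = Add(@x, @load)
    // where @load = Load(@p) can be fused, a target with a two-operand fused-load form gets
    //
    //     Move tmp(@x), tmp(@3)
    //     Add32 Addr(tmp(@p)), tmp(@3)
    //
    // whereas a target with a three-operand (Tmp, Tmp, Tmp) form and no fusible load gets a single
    // Add32 tmp(@x), tmp(@load), tmp(@3).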
755
756     template<Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative>
757     void appendBinOp(Value* left, Value* right)
758     {
759         appendBinOp<opcode32, opcode64, Air::Oops, Air::Oops, commutativity>(left, right);
760     }
761
762     template<Air::Opcode opcode32, Air::Opcode opcode64>
763     void appendShift(Value* value, Value* amount)
764     {
765         Air::Opcode opcode = opcodeForType(opcode32, opcode64, value->type());
766         
767         if (imm(amount)) {
768             if (isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
769                 append(opcode, tmp(value), imm(amount), tmp(m_value));
770                 return;
771             }
772             if (isValidForm(opcode, Arg::Imm, Arg::Tmp)) {
773                 append(Move, tmp(value), tmp(m_value));
774                 append(opcode, imm(amount), tmp(m_value));
775                 return;
776             }
777         }
778
779         if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
780             append(opcode, tmp(value), tmp(amount), tmp(m_value));
781             return;
782         }
783
784 #if CPU(X86) || CPU(X86_64)
785         append(Move, tmp(value), tmp(m_value));
786         append(Move, tmp(amount), Tmp(X86Registers::ecx));
787         append(opcode, Tmp(X86Registers::ecx), tmp(m_value));
788 #endif
789     }
790
791     template<Air::Opcode opcode32, Air::Opcode opcode64>
792     bool tryAppendStoreUnOp(Value* value)
793     {
794         Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, value->type());
795         if (opcode == Air::Oops)
796             return false;
797         
798         Arg storeAddr = addr(m_value);
799         ASSERT(storeAddr);
800
801         ArgPromise loadPromise = this->loadPromise(value);
802         if (loadPromise.peek() != storeAddr)
803             return false;
804
805         if (!isValidForm(opcode, storeAddr.kind()))
806             return false;
807         
808         loadPromise.consume(*this);
809         append(opcode, storeAddr);
810         return true;
811     }
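    // A sketch of the pattern this matches (hypothetical values): a store of a unary operation
    // applied to a load from the same address, e.g. negating Load(@p) and storing the result back
    // to @p, can become a single memory-operand instruction such as Neg32 Addr(tmp(@p)) when the
    // target has that form.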
812
813     template<
814         Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative>
815     bool tryAppendStoreBinOp(Value* left, Value* right)
816     {
817         Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, left->type());
818         if (opcode == Air::Oops)
819             return false;
820         
821         Arg storeAddr = addr(m_value);
822         ASSERT(storeAddr);
823
824         auto getLoadPromise = [&] (Value* load) -> ArgPromise {
825             switch (m_value->opcode()) {
826             case B3::Store:
827                 if (load->opcode() != B3::Load)
828                     return ArgPromise();
829                 break;
830             case B3::Store8:
831                 if (load->opcode() != B3::Load8Z && load->opcode() != B3::Load8S)
832                     return ArgPromise();
833                 break;
834             case B3::Store16:
835                 if (load->opcode() != B3::Load16Z && load->opcode() != B3::Load16S)
836                     return ArgPromise();
837                 break;
838             default:
839                 return ArgPromise();
840             }
841             return loadPromiseAnyOpcode(load);
842         };
843         
844         ArgPromise loadPromise;
845         Value* otherValue = nullptr;
846
847         loadPromise = getLoadPromise(left);
848         if (loadPromise.peek() == storeAddr)
849             otherValue = right;
850         else if (commutativity == Commutative) {
851             loadPromise = getLoadPromise(right);
852             if (loadPromise.peek() == storeAddr)
853                 otherValue = left;
854         }
855
856         if (!otherValue)
857             return false;
858
859         if (isValidForm(opcode, Arg::Imm, storeAddr.kind()) && imm(otherValue)) {
860             loadPromise.consume(*this);
861             append(opcode, imm(otherValue), storeAddr);
862             return true;
863         }
864
865         if (!isValidForm(opcode, Arg::Tmp, storeAddr.kind()))
866             return false;
867
868         loadPromise.consume(*this);
869         append(opcode, tmp(otherValue), storeAddr);
870         return true;
871     }
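    // Likewise for binary operations (hypothetical values): Store(Add(Load(@p), @x), @p) can become
    // Add32 tmp(@x), Addr(tmp(@p)), or the immediate form Add32 imm(@x), Addr(tmp(@p)) when @x is a
    // suitable constant and the target supports those forms.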
872
873     Inst createStore(Air::Opcode move, Value* value, const Arg& dest)
874     {
875         if (imm(value) && isValidForm(move, Arg::Imm, dest.kind()))
876             return Inst(move, m_value, imm(value), dest);
877
878         return Inst(move, m_value, tmp(value), dest);
879     }
880
881     Inst createStore(Value* value, const Arg& dest)
882     {
883         Air::Opcode moveOpcode = moveForType(value->type());
884         return createStore(moveOpcode, value, dest);
885     }
886
887     void appendStore(Value* value, const Arg& dest)
888     {
889         m_insts.last().append(createStore(value, dest));
890     }
891
892     Air::Opcode moveForType(Type type)
893     {
894         switch (type) {
895         case Int32:
896             return Move32;
897         case Int64:
898             RELEASE_ASSERT(is64Bit());
899             return Move;
900         case Float:
901             return MoveFloat;
902         case Double:
903             return MoveDouble;
904         case Void:
905             break;
906         }
907         RELEASE_ASSERT_NOT_REACHED();
908         return Air::Oops;
909     }
910
911     Air::Opcode relaxedMoveForType(Type type)
912     {
913         switch (type) {
914         case Int32:
915         case Int64:
916             // For Int32, we could return Move or Move32. It's a trade-off.
917             //
918             // Move32: Using Move32 guarantees that we use the narrower move, but in cases where the
919             //     register allocator can't prove that the variables involved are 32-bit, this will
920             //     disable coalescing.
921             //
922             // Move: Using Move guarantees that the register allocator can coalesce normally, but in
923             //     cases where it can't prove that the variables are 32-bit and it doesn't coalesce,
924             //     this will force us to use a full 64-bit Move instead of the slightly cheaper
925             //     32-bit Move32.
926             //
927             // Coalescing is a lot more profitable than turning Move into Move32. So, it's better to
928             // use Move here because in cases where the register allocator cannot prove that
929             // everything is 32-bit, we still get coalescing.
930             return Move;
931         case Float:
932             // MoveFloat is always coalescable and we never convert MoveDouble to MoveFloat, so we
933             // should use MoveFloat when we know that the temporaries involved are 32-bit.
934             return MoveFloat;
935         case Double:
936             return MoveDouble;
937         case Void:
938             break;
939         }
940         RELEASE_ASSERT_NOT_REACHED();
941         return Air::Oops;
942     }
943
944     template<typename... Arguments>
945     void append(Air::Opcode opcode, Arguments&&... arguments)
946     {
947         m_insts.last().append(Inst(opcode, m_value, std::forward<Arguments>(arguments)...));
948     }
949
950     template<typename T, typename... Arguments>
951     T* ensureSpecial(T*& field, Arguments&&... arguments)
952     {
953         if (!field) {
954             field = static_cast<T*>(
955                 m_code.addSpecial(std::make_unique<T>(std::forward<Arguments>(arguments)...)));
956         }
957         return field;
958     }
959
960     template<typename... Arguments>
961     CheckSpecial* ensureCheckSpecial(Arguments&&... arguments)
962     {
963         CheckSpecial::Key key(std::forward<Arguments>(arguments)...);
964         auto result = m_checkSpecials.add(key, nullptr);
965         return ensureSpecial(result.iterator->value, key);
966     }
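    // ensureCheckSpecial() memoizes CheckSpecial instances by key, so that structurally identical
    // checks in this procedure share a single Special rather than each allocating their own.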
967
968     void fillStackmap(Inst& inst, StackmapValue* stackmap, unsigned numSkipped)
969     {
970         for (unsigned i = numSkipped; i < stackmap->numChildren(); ++i) {
971             ConstrainedValue value = stackmap->constrainedChild(i);
972
973             Arg arg;
974             switch (value.rep().kind()) {
975             case ValueRep::WarmAny:
976             case ValueRep::ColdAny:
977             case ValueRep::LateColdAny:
978                 if (imm(value.value()))
979                     arg = imm(value.value());
980                 else if (value.value()->hasInt64())
981                     arg = Arg::bigImm(value.value()->asInt64());
982                 else if (value.value()->hasDouble() && canBeInternal(value.value())) {
983                     commitInternal(value.value());
984                     arg = Arg::bigImm(bitwise_cast<int64_t>(value.value()->asDouble()));
985                 } else
986                     arg = tmp(value.value());
987                 break;
988             case ValueRep::SomeRegister:
989                 arg = tmp(value.value());
990                 break;
991             case ValueRep::Register:
992                 stackmap->earlyClobbered().clear(value.rep().reg());
993                 arg = Tmp(value.rep().reg());
994                 append(Move, immOrTmp(value.value()), arg);
995                 break;
996             case ValueRep::StackArgument:
997                 arg = Arg::callArg(value.rep().offsetFromSP());
998                 appendStore(value.value(), arg);
999                 break;
1000             default:
1001                 RELEASE_ASSERT_NOT_REACHED();
1002                 break;
1003             }
1004             inst.args.append(arg);
1005         }
1006     }
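    // For each constrained child past numSkipped, this appends one Arg to the Inst: the Any-style
    // reps become immediates, big immediates, or tmps; a Register rep forces a Move into the named
    // register; a StackArgument rep stores the value at the given offset from SP in the argument
    // area.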
1007     
1008     // Create an Inst to do the comparison specified by the given value.
1009     template<typename CompareFunctor, typename TestFunctor, typename CompareDoubleFunctor, typename CompareFloatFunctor>
1010     Inst createGenericCompare(
1011         Value* value,
1012         const CompareFunctor& compare, // Signature: (Arg::Width, Arg relCond, Arg, Arg) -> Inst
1013         const TestFunctor& test, // Signature: (Arg::Width, Arg resCond, Arg, Arg) -> Inst
1014         const CompareDoubleFunctor& compareDouble, // Signature: (Arg doubleCond, Arg, Arg) -> Inst
1015         const CompareFloatFunctor& compareFloat, // Signature: (Arg doubleCond, Arg, Arg) -> Inst
1016         bool inverted = false)
1017     {
1018         // NOTE: This is totally happy to match comparisons that have already been computed elsewhere
1019         // since on most architectures, the cost of branching on a previously computed comparison
1020         // result is almost always higher than just doing another fused compare/branch. The only time
1021         // it could be worse is if we have a binary comparison and both operands are variables (not
1022         // constants), and we encounter register pressure. Even in this case, duplicating the compare
1023         // so that we can fuse it to the branch will be more efficient most of the time, since
1024         // register pressure is not *that* common. For this reason, this algorithm will always
1025         // duplicate the comparison.
1026         //
1027         // However, we cannot duplicate loads. The canBeInternal() on a load will assume that we
1028         // already validated canBeInternal() on all of the values that got us to the load. So, even
1029         // if we are sharing a value, we still need to call canBeInternal() for the purpose of
1030         // tracking whether we are still in good shape to fuse loads.
1031         //
1032         // We could even have a chain of compare values that we fuse, and any member of the chain
1033         // could be shared. Once any of them are shared, then the shared one's transitive children
1034         // cannot be locked (i.e. commitInternal()). But if none of them are shared, then we want to
1035         // lock all of them because that's a prerequisite to fusing the loads so that the loads don't
1036         // get duplicated. For example, we might have: 
1037         //
1038         //     @tmp1 = LessThan(@a, @b)
1039         //     @tmp2 = Equal(@tmp1, 0)
1040         //     Branch(@tmp2)
1041         //
1042         // If either @a or @b are loads, then we want to have locked @tmp1 and @tmp2 so that they
1043         // don't emit the loads a second time. But if we had another use of @tmp2, then we cannot
1044         // lock @tmp1 (or @a or @b) because then we'll get into trouble when the other values that
1045         // try to share @tmp1 with us try to do their lowering.
1046         //
1047         // There's one more wrinkle. If we don't lock an internal value, then this internal value may
1048         // have already separately locked its children. So, if we're not locking a value then we need
1049         // to make sure that its children aren't locked. We encapsulate this in two ways:
1050         //
1051         // canCommitInternal: This variable tells us if the values that we've fused so far are
1052         // locked. This means that we're not sharing any of them with anyone. This permits us to fuse
1053         // loads. If it's false, then we cannot fuse loads and we also need to ensure that the
1054         // children of any values we try to fuse-by-sharing are not already locked. You don't have to
1055         // worry about the children locking thing if you use prepareToFuse() before trying to fuse a
1056         // sharable value. But, you do need to guard any load fusion by checking if canCommitInternal
1057         // is true.
1058         //
1059         // FusionResult prepareToFuse(value): Call this when you think that you would like to fuse
1060         // some value and that value is not a load. It will automatically handle the shared-or-locked
1061         // issues and it will clear canCommitInternal if necessary. This will return CannotFuse
1062         // (which acts like false) if the value cannot be locked and its children are locked. That's
1063         // rare, but you just need to make sure that you do smart things when this happens (i.e. just
1064         // use the value rather than trying to fuse it). After you call prepareToFuse(), you can
1065         // still change your mind about whether you will actually fuse the value. If you do fuse it,
1066         // you need to call commitFusion(value, fusionResult).
1067         //
1068         // commitFusion(value, fusionResult): Handles calling commitInternal(value) if fusionResult
1069         // is FuseAndCommit.
1070         
1071         bool canCommitInternal = true;
1072
1073         enum FusionResult {
1074             CannotFuse,
1075             FuseAndCommit,
1076             Fuse
1077         };
1078         auto prepareToFuse = [&] (Value* value) -> FusionResult {
1079             if (value == m_value) {
1080                 // It's not actually internal. It's the root value. We're good to go.
1081                 return Fuse;
1082             }
1083
1084             if (canCommitInternal && canBeInternal(value)) {
1085                 // We are the only users of this value. This also means that the value's children
1086                 // could not have been locked, since we have now proved that m_value dominates value
1087             // in the data flow graph. The only other way to reach value is from a user of m_value. If
1088                 // value's children are shared with others, then they could not have been locked
1089                 // because their use count is greater than 1. If they are only used from value, then
1090                 // in order for value's children to be locked, value would also have to be locked,
1091                 // and we just proved that it wasn't.
1092                 return FuseAndCommit;
1093             }
1094
1095             // We're going to try to share value with others. It's possible that some other basic
1096             // block had already emitted code for value and then matched over its children and then
1097             // locked them, in which case we just want to use value instead of duplicating it. So, we
1098             // validate the children. Note that this only arises in linear chains like:
1099             //
1100             //     BB#1:
1101             //         @1 = Foo(...)
1102             //         @2 = Bar(@1)
1103             //         Jump(#2)
1104             //     BB#2:
1105             //         @3 = Baz(@2)
1106             //
1107             // Notice how we could start by generating code for BB#1 and then decide to lock @1 when
1108             // generating code for @2, if we have some way of fusing Bar and Foo into a single
1109             // instruction. This is legal, since indeed @1 only has one user. Because @2 now
1110             // has a tmp (i.e. @2 is pinned), canBeInternal(@2) will return false, which brings us
1111             // here. In that case, we cannot match over @2 because then we'd hit a hazard if we end
1112             // up deciding not to fuse Foo into the fused Baz/Bar.
1113             //
1114             // Happily, this kind of child validation only happens in rules that admit
1115             // sharing, like this one and effectiveAddr().
1116             //
1117             // N.B. We could probably avoid the need to do value locking if we committed to a well
1118             // chosen code generation order. For example, if we guaranteed that all of the users of
1119             // a value get generated before that value, then there's no way for the lowering of @3 to
1120             // see @1 locked. But we don't want to do that, since this is a greedy instruction
1121             // selector and so we want to be able to play with order.
1122             for (Value* child : value->children()) {
1123                 if (m_locked.contains(child))
1124                     return CannotFuse;
1125             }
1126
1127             // It's safe to share value, but since we're sharing, it means that we aren't locking it.
1128             // If we don't lock it, then fusing loads is off limits and all of value's children will
1129             // have to go through the sharing path as well.
1130             canCommitInternal = false;
1131             
1132             return Fuse;
1133         };
1134
1135         auto commitFusion = [&] (Value* value, FusionResult result) {
1136             if (result == FuseAndCommit)
1137                 commitInternal(value);
1138         };
1139         
1140         // Chew through any inversions. This loop isn't necessary for comparisons and branches, but
1141         // we do need at least one iteration of it for Check.
1142         for (;;) {
1143             bool shouldInvert =
1144                 (value->opcode() == BitXor && value->child(1)->hasInt() && (value->child(1)->asInt() & 1) && value->child(0)->returnsBool())
1145                 || (value->opcode() == Equal && value->child(1)->isInt(0));
1146             if (!shouldInvert)
1147                 break;
1148
1149             FusionResult fusionResult = prepareToFuse(value);
1150             if (fusionResult == CannotFuse)
1151                 break;
1152             commitFusion(value, fusionResult);
1153             
1154             value = value->child(0);
1155             inverted = !inverted;
1156         }
1157
1158         auto createRelCond = [&] (
1159             MacroAssembler::RelationalCondition relationalCondition,
1160             MacroAssembler::DoubleCondition doubleCondition) {
1161             Arg relCond = Arg::relCond(relationalCondition).inverted(inverted);
1162             Arg doubleCond = Arg::doubleCond(doubleCondition).inverted(inverted);
1163             Value* left = value->child(0);
1164             Value* right = value->child(1);
1165
1166             if (isInt(value->child(0)->type())) {
1167                 // FIXME: We wouldn't have to worry about leftImm if we canonicalized integer
1168                 // comparisons.
1169                 // https://bugs.webkit.org/show_bug.cgi?id=150958
1170                 
1171                 Arg leftImm = imm(left);
1172                 Arg rightImm = imm(right);
1173
1174                 auto tryCompare = [&] (
1175                     Arg::Width width, const ArgPromise& left, const ArgPromise& right) -> Inst {
1176                     if (Inst result = compare(width, relCond, left, right))
1177                         return result;
1178                     if (Inst result = compare(width, relCond.flipped(), right, left))
1179                         return result;
1180                     return Inst();
1181                 };
1182
1183                 auto tryCompareLoadImm = [&] (
1184                     Arg::Width width, B3::Opcode loadOpcode, Arg::Signedness signedness) -> Inst {
1185                     if (rightImm && rightImm.isRepresentableAs(width, signedness)) {
1186                         if (Inst result = tryCompare(width, loadPromise(left, loadOpcode), rightImm)) {
1187                             commitInternal(left);
1188                             return result;
1189                         }
1190                     }
1191                     if (leftImm && leftImm.isRepresentableAs(width, signedness)) {
1192                         if (Inst result = tryCompare(width, leftImm, loadPromise(right, loadOpcode))) {
1193                             commitInternal(right);
1194                             return result;
1195                         }
1196                     }
1197                     return Inst();
1198                 };
1199
1200                 Arg::Width width = Arg::widthForB3Type(value->child(0)->type());
1201                 
1202                 if (canCommitInternal) {
1203                     // First handle compares that involve fewer bits than B3's type system supports.
1204                     // This is pretty important. For example, we want this to be a single
1205                     // instruction:
1206                     //
1207                     //     @1 = Load8S(...)
1208                     //     @2 = Const32(...)
1209                     //     @3 = LessThan(@1, @2)
1210                     //     Branch(@3)
1211                 
1212                     if (relCond.isSignedCond()) {
1213                         if (Inst result = tryCompareLoadImm(Arg::Width8, Load8S, Arg::Signed))
1214                             return result;
1215                     }
1216                 
1217                     if (relCond.isUnsignedCond()) {
1218                         if (Inst result = tryCompareLoadImm(Arg::Width8, Load8Z, Arg::Unsigned))
1219                             return result;
1220                     }
1221
1222                     if (relCond.isSignedCond()) {
1223                         if (Inst result = tryCompareLoadImm(Arg::Width16, Load16S, Arg::Signed))
1224                             return result;
1225                     }
1226                 
1227                     if (relCond.isUnsignedCond()) {
1228                         if (Inst result = tryCompareLoadImm(Arg::Width16, Load16Z, Arg::Unsigned))
1229                             return result;
1230                     }
1231
1232                     // Now handle compares that involve a load and an immediate.
1233
1234                     if (Inst result = tryCompareLoadImm(width, Load, Arg::Signed))
1235                         return result;
1236
1237                     // Now handle compares that involve a load. It's not obvious that it's better to
1238                     // handle this before the immediate cases or not. Probably doesn't matter.
1239
1240                     if (Inst result = tryCompare(width, loadPromise(left), tmpPromise(right))) {
1241                         commitInternal(left);
1242                         return result;
1243                     }
1244                 
1245                     if (Inst result = tryCompare(width, tmpPromise(left), loadPromise(right))) {
1246                         commitInternal(right);
1247                         return result;
1248                     }
1249                 }
1250
1251                 // Now handle compares that involve an immediate and a tmp.
1252                 
1253                 if (leftImm && leftImm.isRepresentableAs<int32_t>()) {
1254                     if (Inst result = tryCompare(width, leftImm, tmpPromise(right)))
1255                         return result;
1256                 }
1257                 
1258                 if (rightImm && rightImm.isRepresentableAs<int32_t>()) {
1259                     if (Inst result = tryCompare(width, tmpPromise(left), rightImm))
1260                         return result;
1261                 }
1262
1263                 // Finally, handle comparison between tmps.
1264                 return compare(width, relCond, tmpPromise(left), tmpPromise(right));
1265             }
1266
1267             // Floating point comparisons can't really do anything smart.
1268             if (value->child(0)->type() == Float)
1269                 return compareFloat(doubleCond, tmpPromise(left), tmpPromise(right));
1270             return compareDouble(doubleCond, tmpPromise(left), tmpPromise(right));
1271         };
1272
1273         Arg::Width width = Arg::widthForB3Type(value->type());
1274         Arg resCond = Arg::resCond(MacroAssembler::NonZero).inverted(inverted);
1275         
1276         auto tryTest = [&] (
1277             Arg::Width width, const ArgPromise& left, const ArgPromise& right) -> Inst {
1278             if (Inst result = test(width, resCond, left, right))
1279                 return result;
1280             if (Inst result = test(width, resCond, right, left))
1281                 return result;
1282             return Inst();
1283         };
1284
1285         auto attemptFused = [&] () -> Inst {
1286             switch (value->opcode()) {
1287             case NotEqual:
1288                 return createRelCond(MacroAssembler::NotEqual, MacroAssembler::DoubleNotEqualOrUnordered);
1289             case Equal:
1290                 return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqual);
1291             case LessThan:
1292                 return createRelCond(MacroAssembler::LessThan, MacroAssembler::DoubleLessThan);
1293             case GreaterThan:
1294                 return createRelCond(MacroAssembler::GreaterThan, MacroAssembler::DoubleGreaterThan);
1295             case LessEqual:
1296                 return createRelCond(MacroAssembler::LessThanOrEqual, MacroAssembler::DoubleLessThanOrEqual);
1297             case GreaterEqual:
1298                 return createRelCond(MacroAssembler::GreaterThanOrEqual, MacroAssembler::DoubleGreaterThanOrEqual);
1299             case EqualOrUnordered:
1300                 // The integer condition is never used in this case.
1301                 return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqualOrUnordered);
1302             case Above:
1303                 // We use a bogus double condition because these integer comparisons won't go down that
1304                 // path anyway.
1305                 return createRelCond(MacroAssembler::Above, MacroAssembler::DoubleEqual);
1306             case Below:
1307                 return createRelCond(MacroAssembler::Below, MacroAssembler::DoubleEqual);
1308             case AboveEqual:
1309                 return createRelCond(MacroAssembler::AboveOrEqual, MacroAssembler::DoubleEqual);
1310             case BelowEqual:
1311                 return createRelCond(MacroAssembler::BelowOrEqual, MacroAssembler::DoubleEqual);
1312             case BitAnd: {
1313                 Value* left = value->child(0);
1314                 Value* right = value->child(1);
1315
1316                 // FIXME: We don't actually have to worry about leftImm.
1317                 // https://bugs.webkit.org/show_bug.cgi?id=150954
1318
1319                 Arg leftImm = imm(left);
1320                 Arg rightImm = imm(right);
1321                 
1322                 auto tryTestLoadImm = [&] (Arg::Width width, B3::Opcode loadOpcode) -> Inst {
1323                     if (rightImm && rightImm.isRepresentableAs(width, Arg::Unsigned)) {
1324                         if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm)) {
1325                             commitInternal(left);
1326                             return result;
1327                         }
1328                     }
1329                     if (leftImm && leftImm.isRepresentableAs(width, Arg::Unsigned)) {
1330                         if (Inst result = tryTest(width, leftImm, loadPromise(right, loadOpcode))) {
1331                             commitInternal(right);
1332                             return result;
1333                         }
1334                     }
1335                     return Inst();
1336                 };
1337
1338                 if (canCommitInternal) {
1339                     // First handle tests that involve fewer bits than B3's type system supports.
1340
1341                     if (Inst result = tryTestLoadImm(Arg::Width8, Load8Z))
1342                         return result;
1343                     
1344                     if (Inst result = tryTestLoadImm(Arg::Width8, Load8S))
1345                         return result;
1346                     
1347                     if (Inst result = tryTestLoadImm(Arg::Width16, Load16Z))
1348                         return result;
1349                     
1350                     if (Inst result = tryTestLoadImm(Arg::Width16, Load16S))
1351                         return result;
1352
1353                     // Now handle tests that involve a load and an immediate. Note that immediates
1354                     // are 32-bit, and we want zero-extension. Hence, the immediate form is compiled
1355                     // as a 32-bit test. Note that this spits on the grave of inferior endians, such
1356                     // as the big one.
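                         // (On a little-endian target, a 32-bit load from the address of a 64-bit value
                         // reads its low 32 bits, which are exactly the bits a zero-extended 32-bit
                         // immediate can test.)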
1357                     
1358                     if (Inst result = tryTestLoadImm(Arg::Width32, Load))
1359                         return result;
1360                     
1361                     // Now handle tests that involve a load.
1362                     
1363                     Arg::Width width = Arg::widthForB3Type(value->child(0)->type());
1364                     if (Inst result = tryTest(width, loadPromise(left), tmpPromise(right))) {
1365                         commitInternal(left);
1366                         return result;
1367                     }
1368                     
1369                     if (Inst result = tryTest(width, tmpPromise(left), loadPromise(right))) {
1370                         commitInternal(right);
1371                         return result;
1372                     }
1373                 }
1374
1375                 // Now handle tests that involve an immediate and a tmp.
1376
1377                 if (leftImm && leftImm.isRepresentableAs<uint32_t>()) {
1378                     if (Inst result = tryTest(Arg::Width32, leftImm, tmpPromise(right)))
1379                         return result;
1380                 }
1381
1382                 if (rightImm && rightImm.isRepresentableAs<uint32_t>()) {
1383                     if (Inst result = tryTest(Arg::Width32, tmpPromise(left), rightImm))
1384                         return result;
1385                 }
1386
1387                 // Finally, just use tmps.
1388                 return tryTest(width, tmpPromise(left), tmpPromise(right));
1389             }
1390             default:
1391                 return Inst();
1392             }
1393         };
1394
1395         if (FusionResult fusionResult = prepareToFuse(value)) {
1396             if (Inst result = attemptFused()) {
1397                 commitFusion(value, fusionResult);
1398                 return result;
1399             }
1400         }
1401
1402         if (Arg::isValidImmForm(-1)) {
1403             if (canCommitInternal && value->as<MemoryValue>()) {
1404                 // Handle things like Branch(Load8Z(value))
1405
1406                 if (Inst result = tryTest(Arg::Width8, loadPromise(value, Load8Z), Arg::imm(-1))) {
1407                     commitInternal(value);
1408                     return result;
1409                 }
1410
1411                 if (Inst result = tryTest(Arg::Width8, loadPromise(value, Load8S), Arg::imm(-1))) {
1412                     commitInternal(value);
1413                     return result;
1414                 }
1415
1416                 if (Inst result = tryTest(Arg::Width16, loadPromise(value, Load16Z), Arg::imm(-1))) {
1417                     commitInternal(value);
1418                     return result;
1419                 }
1420
1421                 if (Inst result = tryTest(Arg::Width16, loadPromise(value, Load16S), Arg::imm(-1))) {
1422                     commitInternal(value);
1423                     return result;
1424                 }
1425
1426                 if (Inst result = tryTest(width, loadPromise(value), Arg::imm(-1))) {
1427                     commitInternal(value);
1428                     return result;
1429                 }
1430             }
1431
1432             if (Inst result = test(width, resCond, tmpPromise(value), Arg::imm(-1)))
1433                 return result;
1434         }
1435         
1436         // Sometimes this is the only form of test available. We prefer not to use this because
1437         // it's less canonical.
1438         return test(width, resCond, tmpPromise(value), tmpPromise(value));
1439     }
1440
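         // createGenericCompare() is parameterized over four strategies: how to emit an integer
         // relational compare, an integer test, a double compare, and a float compare. createBranch()
         // instantiates those with the fused branch forms (Branch32/64, BranchTest32/64, BranchDouble,
         // BranchFloat); createCompare() and createSelect() below use the value-producing and
         // conditional-move forms instead.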
1441     Inst createBranch(Value* value, bool inverted = false)
1442     {
1443         return createGenericCompare(
1444             value,
1445             [this] (
1446                 Arg::Width width, const Arg& relCond,
1447                 const ArgPromise& left, const ArgPromise& right) -> Inst {
1448                 switch (width) {
1449                 case Arg::Width8:
1450                     if (isValidForm(Branch8, Arg::RelCond, left.kind(), right.kind())) {
1451                         return Inst(
1452                             Branch8, m_value, relCond,
1453                             left.consume(*this), right.consume(*this));
1454                     }
1455                     return Inst();
1456                 case Arg::Width16:
1457                     return Inst();
1458                 case Arg::Width32:
1459                     if (isValidForm(Branch32, Arg::RelCond, left.kind(), right.kind())) {
1460                         return Inst(
1461                             Branch32, m_value, relCond,
1462                             left.consume(*this), right.consume(*this));
1463                     }
1464                     return Inst();
1465                 case Arg::Width64:
1466                     if (isValidForm(Branch64, Arg::RelCond, left.kind(), right.kind())) {
1467                         return Inst(
1468                             Branch64, m_value, relCond,
1469                             left.consume(*this), right.consume(*this));
1470                     }
1471                     return Inst();
1472                 }
1473                 ASSERT_NOT_REACHED();
1474             },
1475             [this] (
1476                 Arg::Width width, const Arg& resCond,
1477                 const ArgPromise& left, const ArgPromise& right) -> Inst {
1478                 switch (width) {
1479                 case Arg::Width8:
1480                     if (isValidForm(BranchTest8, Arg::ResCond, left.kind(), right.kind())) {
1481                         return Inst(
1482                             BranchTest8, m_value, resCond,
1483                             left.consume(*this), right.consume(*this));
1484                     }
1485                     return Inst();
1486                 case Arg::Width16:
1487                     return Inst();
1488                 case Arg::Width32:
1489                     if (isValidForm(BranchTest32, Arg::ResCond, left.kind(), right.kind())) {
1490                         return Inst(
1491                             BranchTest32, m_value, resCond,
1492                             left.consume(*this), right.consume(*this));
1493                     }
1494                     return Inst();
1495                 case Arg::Width64:
1496                     if (isValidForm(BranchTest64, Arg::ResCond, left.kind(), right.kind())) {
1497                         return Inst(
1498                             BranchTest64, m_value, resCond,
1499                             left.consume(*this), right.consume(*this));
1500                     }
1501                     return Inst();
1502                 }
1503                 ASSERT_NOT_REACHED();
1504             },
1505             [this] (Arg doubleCond, const ArgPromise& left, const ArgPromise& right) -> Inst {
1506                 if (isValidForm(BranchDouble, Arg::DoubleCond, left.kind(), right.kind())) {
1507                     return Inst(
1508                         BranchDouble, m_value, doubleCond,
1509                         left.consume(*this), right.consume(*this));
1510                 }
1511                 return Inst();
1512             },
1513             [this] (Arg doubleCond, const ArgPromise& left, const ArgPromise& right) -> Inst {
1514                 if (isValidForm(BranchFloat, Arg::DoubleCond, left.kind(), right.kind())) {
1515                     return Inst(
1516                         BranchFloat, m_value, doubleCond,
1517                         left.consume(*this), right.consume(*this));
1518                 }
1519                 return Inst();
1520             },
1521             inverted);
1522     }
1523
1524     Inst createCompare(Value* value, bool inverted = false)
1525     {
1526         return createGenericCompare(
1527             value,
1528             [this] (
1529                 Arg::Width width, const Arg& relCond,
1530                 const ArgPromise& left, const ArgPromise& right) -> Inst {
1531                 switch (width) {
1532                 case Arg::Width8:
1533                 case Arg::Width16:
1534                     return Inst();
1535                 case Arg::Width32:
1536                     if (isValidForm(Compare32, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) {
1537                         return Inst(
1538                             Compare32, m_value, relCond,
1539                             left.consume(*this), right.consume(*this), tmp(m_value));
1540                     }
1541                     return Inst();
1542                 case Arg::Width64:
1543                     if (isValidForm(Compare64, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) {
1544                         return Inst(
1545                             Compare64, m_value, relCond,
1546                             left.consume(*this), right.consume(*this), tmp(m_value));
1547                     }
1548                     return Inst();
1549                 }
1550                 ASSERT_NOT_REACHED();
1551             },
1552             [this] (
1553                 Arg::Width width, const Arg& resCond,
1554                 const ArgPromise& left, const ArgPromise& right) -> Inst {
1555                 switch (width) {
1556                 case Arg::Width8:
1557                 case Arg::Width16:
1558                     return Inst();
1559                 case Arg::Width32:
1560                     if (isValidForm(Test32, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) {
1561                         return Inst(
1562                             Test32, m_value, resCond,
1563                             left.consume(*this), right.consume(*this), tmp(m_value));
1564                     }
1565                     return Inst();
1566                 case Arg::Width64:
1567                     if (isValidForm(Test64, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) {
1568                         return Inst(
1569                             Test64, m_value, resCond,
1570                             left.consume(*this), right.consume(*this), tmp(m_value));
1571                     }
1572                     return Inst();
1573                 }
1574                 ASSERT_NOT_REACHED();
1575             },
1576             [this] (const Arg& doubleCond, const ArgPromise& left, const ArgPromise& right) -> Inst {
1577                 if (isValidForm(CompareDouble, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) {
1578                     return Inst(
1579                         CompareDouble, m_value, doubleCond,
1580                         left.consume(*this), right.consume(*this), tmp(m_value));
1581                 }
1582                 return Inst();
1583             },
1584             [this] (const Arg& doubleCond, const ArgPromise& left, const ArgPromise& right) -> Inst {
1585                 if (isValidForm(CompareFloat, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) {
1586                     return Inst(
1587                         CompareFloat, m_value, doubleCond,
1588                         left.consume(*this), right.consume(*this), tmp(m_value));
1589                 }
1590                 return Inst();
1591             },
1592             inverted);
1593     }
1594
1595     struct MoveConditionallyConfig {
1596         Air::Opcode moveConditionally32;
1597         Air::Opcode moveConditionally64;
1598         Air::Opcode moveConditionallyTest32;
1599         Air::Opcode moveConditionallyTest64;
1600         Air::Opcode moveConditionallyDouble;
1601         Air::Opcode moveConditionallyFloat;
1602     };
1603     Inst createSelect(const MoveConditionallyConfig& config)
1604     {
1605         auto createSelectInstruction = [&] (Air::Opcode opcode, const Arg& condition, const ArgPromise& left, const ArgPromise& right) -> Inst {
1606             if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
1607                 Tmp result = tmp(m_value);
1608                 Tmp thenCase = tmp(m_value->child(1));
1609                 Tmp elseCase = tmp(m_value->child(2));
1610                 append(relaxedMoveForType(m_value->type()), tmp(m_value->child(2)), result);
1611                 return Inst(
1612                     opcode, m_value, condition,
1613                     left.consume(*this), right.consume(*this), thenCase, elseCase, result);
1614             }
1615             if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp)) {
1616                 Tmp result = tmp(m_value);
1617                 Tmp source = tmp(m_value->child(1));
1618                 append(relaxedMoveForType(m_value->type()), tmp(m_value->child(2)), result);
1619                 return Inst(
1620                     opcode, m_value, condition,
1621                     left.consume(*this), right.consume(*this), source, result);
1622             }
1623             return Inst();
1624         };
1625
1626         return createGenericCompare(
1627             m_value->child(0),
1628             [&] (
1629                 Arg::Width width, const Arg& relCond,
1630                 const ArgPromise& left, const ArgPromise& right) -> Inst {
1631                 switch (width) {
1632                 case Arg::Width8:
1633                     // FIXME: Support these things.
1634                     // https://bugs.webkit.org/show_bug.cgi?id=151504
1635                     return Inst();
1636                 case Arg::Width16:
1637                     return Inst();
1638                 case Arg::Width32:
1639                     return createSelectInstruction(config.moveConditionally32, relCond, left, right);
1640                 case Arg::Width64:
1641                     return createSelectInstruction(config.moveConditionally64, relCond, left, right);
1642                 }
1643                 ASSERT_NOT_REACHED();
1644             },
1645             [&] (
1646                 Arg::Width width, const Arg& resCond,
1647                 const ArgPromise& left, const ArgPromise& right) -> Inst {
1648                 switch (width) {
1649                 case Arg::Width8:
1650                     // FIXME: Support more things.
1651                     // https://bugs.webkit.org/show_bug.cgi?id=151504
1652                     return Inst();
1653                 case Arg::Width16:
1654                     return Inst();
1655                 case Arg::Width32:
1656                     return createSelectInstruction(config.moveConditionallyTest32, resCond, left, right);
1657                 case Arg::Width64:
1658                     return createSelectInstruction(config.moveConditionallyTest64, resCond, left, right);
1659                 }
1660                 ASSERT_NOT_REACHED();
1661             },
1662             [&] (Arg doubleCond, const ArgPromise& left, const ArgPromise& right) -> Inst {
1663                 return createSelectInstruction(config.moveConditionallyDouble, doubleCond, left, right);
1664             },
1665             [&] (Arg doubleCond, const ArgPromise& left, const ArgPromise& right) -> Inst {
1666                 return createSelectInstruction(config.moveConditionallyFloat, doubleCond, left, right);
1667             },
1668             false);
1669     }
1670
1671     void lower()
1672     {
1673         switch (m_value->opcode()) {
1674         case B3::Nop: {
1675             // Yes, we will totally see Nops because some phases will replaceWithNop() instead of
1676             // properly removing things.
1677             return;
1678         }
1679             
1680         case Load: {
1681             append(
1682                 moveForType(m_value->type()),
1683                 addr(m_value), tmp(m_value));
1684             return;
1685         }
1686             
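             // B3 has no 8-bit or 16-bit types, so the narrow loads below extend their results to 32 bits.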
1687         case Load8S: {
1688             append(Load8SignedExtendTo32, addr(m_value), tmp(m_value));
1689             return;
1690         }
1691
1692         case Load8Z: {
1693             append(Load8, addr(m_value), tmp(m_value));
1694             return;
1695         }
1696
1697         case Load16S: {
1698             append(Load16SignedExtendTo32, addr(m_value), tmp(m_value));
1699             return;
1700         }
1701
1702         case Load16Z: {
1703             append(Load16, addr(m_value), tmp(m_value));
1704             return;
1705         }
1706
1707         case Add: {
1708             appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(
1709                 m_value->child(0), m_value->child(1));
1710             return;
1711         }
1712
1713         case Sub: {
1714             appendBinOp<Sub32, Sub64, SubDouble, SubFloat>(m_value->child(0), m_value->child(1));
1715             return;
1716         }
1717
1718         case Neg: {
1719             appendUnOp<Neg32, Neg64, NegateDouble, Air::Oops>(m_value->child(0));
1720             return;
1721         }
1722
1723         case Mul: {
1724             appendBinOp<Mul32, Mul64, MulDouble, MulFloat, Commutative>(
1725                 m_value->child(0), m_value->child(1));
1726             return;
1727         }
1728
1729         case ChillDiv:
1730             RELEASE_ASSERT(isARM64());
1731             FALLTHROUGH;
1732         case Div: {
1733 #if CPU(X86) || CPU(X86_64)
1734             if (isInt(m_value->type())) {
1735                 lowerX86Div();
1736                 append(Move, Tmp(X86Registers::eax), tmp(m_value));
1737                 return;
1738             }
1739 #endif
1740             ASSERT(!isX86() || isFloat(m_value->type()));
1741
1742             appendBinOp<Div32, Div64, DivDouble, DivFloat>(m_value->child(0), m_value->child(1));
1743             return;
1744         }
1745
1746         case Mod: {
1747             RELEASE_ASSERT(isX86());
1748 #if CPU(X86) || CPU(X86_64)
1749             lowerX86Div();
1750             append(Move, Tmp(X86Registers::edx), tmp(m_value));
1751 #endif
1752             return;
1753         }
1754
1755         case BitAnd: {
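                 // Masking with 0xff, 0xffff, or 0xffffffff is just a zero-extension (or a 32-bit move),
                 // which has cheaper forms than a general And against those masks.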
1756             if (m_value->child(1)->isInt(0xff)) {
1757                 appendUnOp<ZeroExtend8To32, ZeroExtend8To32>(m_value->child(0));
1758                 return;
1759             }
1760             
1761             if (m_value->child(1)->isInt(0xffff)) {
1762                 appendUnOp<ZeroExtend16To32, ZeroExtend16To32>(m_value->child(0));
1763                 return;
1764             }
1765
1766             if (m_value->child(1)->isInt(0xffffffff)) {
1767                 appendUnOp<Move32, Move32>(m_value->child(0));
1768                 return;
1769             }
1770             
1771             appendBinOp<And32, And64, AndDouble, AndFloat, Commutative>(
1772                 m_value->child(0), m_value->child(1));
1773             return;
1774         }
1775
1776         case BitOr: {
1777             appendBinOp<Or32, Or64, Commutative>(
1778                 m_value->child(0), m_value->child(1));
1779             return;
1780         }
1781
1782         case BitXor: {
1783             // FIXME: If canBeInternal(child), we should generate this using the comparison path.
1784             // https://bugs.webkit.org/show_bug.cgi?id=152367
1785             
1786             if (m_value->child(1)->isInt(-1)) {
1787                 appendUnOp<Not32, Not64>(m_value->child(0));
1788                 return;
1789             }
1790             appendBinOp<Xor32, Xor64, XorDouble, XorFloat, Commutative>(
1791                 m_value->child(0), m_value->child(1));
1792             return;
1793         }
1794
1795         case Shl: {
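                 // Shifting left by 1 is the same as adding the value to itself, and Add tends to have
                 // friendlier operand forms.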
1796             if (m_value->child(1)->isInt32(1)) {
1797                 appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(m_value->child(0), m_value->child(0));
1798                 return;
1799             }
1800             
1801             appendShift<Lshift32, Lshift64>(m_value->child(0), m_value->child(1));
1802             return;
1803         }
1804
1805         case SShr: {
1806             appendShift<Rshift32, Rshift64>(m_value->child(0), m_value->child(1));
1807             return;
1808         }
1809
1810         case ZShr: {
1811             appendShift<Urshift32, Urshift64>(m_value->child(0), m_value->child(1));
1812             return;
1813         }
1814
1815         case Clz: {
1816             appendUnOp<CountLeadingZeros32, CountLeadingZeros64>(m_value->child(0));
1817             return;
1818         }
1819
1820         case Abs: {
1821             RELEASE_ASSERT_WITH_MESSAGE(!isX86(), "Abs is not supported natively on x86. It must be replaced before generation.");
1822             appendUnOp<Air::Oops, Air::Oops, AbsDouble, AbsFloat>(m_value->child(0));
1823             return;
1824         }
1825
1826         case Ceil: {
1827             appendUnOp<Air::Oops, Air::Oops, CeilDouble, CeilFloat>(m_value->child(0));
1828             return;
1829         }
1830
1831         case Sqrt: {
1832             appendUnOp<Air::Oops, Air::Oops, SqrtDouble, SqrtFloat>(m_value->child(0));
1833             return;
1834         }
1835
1836         case BitwiseCast: {
1837             appendUnOp<Move32ToFloat, Move64ToDouble, MoveDoubleTo64, MoveFloatTo32>(m_value->child(0));
1838             return;
1839         }
1840
1841         case Store: {
1842             Value* valueToStore = m_value->child(0);
1843             if (canBeInternal(valueToStore)) {
1844                 bool matched = false;
1845                 switch (valueToStore->opcode()) {
1846                 case Add:
1847                     matched = tryAppendStoreBinOp<Add32, Add64, Commutative>(
1848                         valueToStore->child(0), valueToStore->child(1));
1849                     break;
1850                 case Sub:
1851                     if (valueToStore->child(0)->isInt(0)) {
1852                         matched = tryAppendStoreUnOp<Neg32, Neg64>(valueToStore->child(1));
1853                         break;
1854                     }
1855                     matched = tryAppendStoreBinOp<Sub32, Sub64>(
1856                         valueToStore->child(0), valueToStore->child(1));
1857                     break;
1858                 case BitAnd:
1859                     matched = tryAppendStoreBinOp<And32, And64, Commutative>(
1860                         valueToStore->child(0), valueToStore->child(1));
1861                     break;
1862                 case BitXor:
1863                     if (valueToStore->child(1)->isInt(-1)) {
1864                         matched = tryAppendStoreUnOp<Not32, Not64>(valueToStore->child(0));
1865                         break;
1866                     }
1867                     matched = tryAppendStoreBinOp<Xor32, Xor64, Commutative>(
1868                         valueToStore->child(0), valueToStore->child(1));
1869                     break;
1870                 default:
1871                     break;
1872                 }
1873                 if (matched) {
1874                     commitInternal(valueToStore);
1875                     return;
1876                 }
1877             }
1878
1879             appendStore(valueToStore, addr(m_value));
1880             return;
1881         }
1882
1883         case B3::Store8: {
1884             Value* valueToStore = m_value->child(0);
1885             if (canBeInternal(valueToStore)) {
1886                 bool matched = false;
1887                 switch (valueToStore->opcode()) {
1888                 case Add:
1889                     matched = tryAppendStoreBinOp<Add8, Air::Oops, Commutative>(
1890                         valueToStore->child(0), valueToStore->child(1));
1891                     break;
1892                 default:
1893                     break;
1894                 }
1895                 if (matched) {
1896                     commitInternal(valueToStore);
1897                     return;
1898                 }
1899             }
1900             m_insts.last().append(createStore(Air::Store8, valueToStore, addr(m_value)));
1901             return;
1902         }
1903
1904         case B3::Store16: {
1905             Value* valueToStore = m_value->child(0);
1906             if (canBeInternal(valueToStore)) {
1907                 bool matched = false;
1908                 switch (valueToStore->opcode()) {
1909                 case Add:
1910                     matched = tryAppendStoreBinOp<Add16, Air::Oops, Commutative>(
1911                         valueToStore->child(0), valueToStore->child(1));
1912                     break;
1913                 default:
1914                     break;
1915                 }
1916                 if (matched) {
1917                     commitInternal(valueToStore);
1918                     return;
1919                 }
1920             }
1921             m_insts.last().append(createStore(Air::Store16, valueToStore, addr(m_value)));
1922             return;
1923         }
1924
1925         case Trunc: {
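                 // Trunc emits no code: the lowering arranges for a Trunc to share its child's Tmp,
                 // which is all the assertion below checks.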
1926             ASSERT(tmp(m_value->child(0)) == tmp(m_value));
1927             return;
1928         }
1929
1930         case SExt8: {
1931             appendUnOp<SignExtend8To32, Air::Oops>(m_value->child(0));
1932             return;
1933         }
1934
1935         case SExt16: {
1936             appendUnOp<SignExtend16To32, Air::Oops>(m_value->child(0));
1937             return;
1938         }
1939
1940         case ZExt32: {
1941             appendUnOp<Move32, Air::Oops>(m_value->child(0));
1942             return;
1943         }
1944
1945         case SExt32: {
1946             // FIXME: We should have support for movsbq/movswq
1947             // https://bugs.webkit.org/show_bug.cgi?id=152232
1948             
1949             appendUnOp<SignExtend32ToPtr, Air::Oops>(m_value->child(0));
1950             return;
1951         }
1952
1953         case FloatToDouble: {
1954             appendUnOp<Air::Oops, Air::Oops, Air::Oops, ConvertFloatToDouble>(m_value->child(0));
1955             return;
1956         }
1957
1958         case DoubleToFloat: {
1959             appendUnOp<Air::Oops, Air::Oops, ConvertDoubleToFloat>(m_value->child(0));
1960             return;
1961         }
1962
1963         case ArgumentReg: {
1964             m_prologue.append(Inst(
1965                 moveForType(m_value->type()), m_value,
1966                 Tmp(m_value->as<ArgumentRegValue>()->argumentReg()),
1967                 tmp(m_value)));
1968             return;
1969         }
1970
1971         case Const32:
1972         case Const64: {
1973             if (imm(m_value))
1974                 append(Move, imm(m_value), tmp(m_value));
1975             else
1976                 append(Move, Arg::bigImm(m_value->asInt()), tmp(m_value));
1977             return;
1978         }
1979
1980         case ConstDouble:
1981         case ConstFloat: {
1982             // We expect that the moveConstants() phase has run, and any doubles referenced from
1983             // stackmaps get fused.
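                 // The assertions below enforce that only a bit-exact +0.0 survives to this point
                 // (isIdentical() compares bit patterns, so even -0.0 would fail), and MoveZeroToDouble
                 // can materialize that without a load.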
1984             RELEASE_ASSERT(m_value->opcode() == ConstFloat || isIdentical(m_value->asDouble(), 0.0));
1985             RELEASE_ASSERT(m_value->opcode() == ConstDouble || isIdentical(m_value->asFloat(), 0.0));
1986             append(MoveZeroToDouble, tmp(m_value));
1987             return;
1988         }
1989
1990         case FramePointer: {
1991             ASSERT(tmp(m_value) == Tmp(GPRInfo::callFrameRegister));
1992             return;
1993         }
1994
1995         case SlotBase: {
1996             append(
1997                 Lea,
1998                 Arg::stack(m_stackToStack.get(m_value->as<SlotBaseValue>()->slot())),
1999                 tmp(m_value));
2000             return;
2001         }
2002
2003         case Equal:
2004         case NotEqual:
2005         case LessThan:
2006         case GreaterThan:
2007         case LessEqual:
2008         case GreaterEqual:
2009         case Above:
2010         case Below:
2011         case AboveEqual:
2012         case BelowEqual:
2013         case EqualOrUnordered: {
2014             m_insts.last().append(createCompare(m_value));
2015             return;
2016         }
2017
2018         case Select: {
2019             MoveConditionallyConfig config;
2020             if (isInt(m_value->type())) {
2021                 config.moveConditionally32 = MoveConditionally32;
2022                 config.moveConditionally64 = MoveConditionally64;
2023                 config.moveConditionallyTest32 = MoveConditionallyTest32;
2024                 config.moveConditionallyTest64 = MoveConditionallyTest64;
2025                 config.moveConditionallyDouble = MoveConditionallyDouble;
2026                 config.moveConditionallyFloat = MoveConditionallyFloat;
2027             } else {
2028                 // FIXME: it's not obvious that these are particularly efficient.
2029                 config.moveConditionally32 = MoveDoubleConditionally32;
2030                 config.moveConditionally64 = MoveDoubleConditionally64;
2031                 config.moveConditionallyTest32 = MoveDoubleConditionallyTest32;
2032                 config.moveConditionallyTest64 = MoveDoubleConditionallyTest64;
2033                 config.moveConditionallyDouble = MoveDoubleConditionallyDouble;
2034                 config.moveConditionallyFloat = MoveDoubleConditionallyFloat;
2035             }
2036             
2037             m_insts.last().append(createSelect(config));
2038             return;
2039         }
2040
2041         case IToD: {
2042             appendUnOp<ConvertInt32ToDouble, ConvertInt64ToDouble>(m_value->child(0));
2043             return;
2044         }
2045
2046         case B3::CCall: {
2047             CCallValue* cCall = m_value->as<CCallValue>();
2048
2049             Inst inst(m_isRare ? Air::ColdCCall : Air::CCall, cCall);
2050
2051             // We have a ton of flexibility regarding the callee argument, but we don't use it yet.
2052             // It gets weird for a few reasons:
2053             // 1) We probably will never take advantage of this. We don't have C calls to locations
2054             //    loaded from addresses. We have JS calls like that, but those use Patchpoints.
2055             // 2) On X86_64 we still don't support call with BaseIndex.
2056             // 3) On non-X86, we don't natively support any kind of loading from address.
2057             // 4) We don't have an isValidForm() for the CCallSpecial so we have no smart way to
2058             //    decide.
2059             // FIXME: https://bugs.webkit.org/show_bug.cgi?id=151052
2060             inst.args.append(tmp(cCall->child(0)));
2061
2062             if (cCall->type() != Void)
2063                 inst.args.append(tmp(cCall));
2064
2065             for (unsigned i = 1; i < cCall->numChildren(); ++i)
2066                 inst.args.append(immOrTmp(cCall->child(i)));
2067
2068             m_insts.last().append(WTFMove(inst));
2069             return;
2070         }
2071
2072         case Patchpoint: {
2073             PatchpointValue* patchpointValue = m_value->as<PatchpointValue>();
2074             ensureSpecial(m_patchpointSpecial);
2075             
2076             Inst inst(Patch, patchpointValue, Arg::special(m_patchpointSpecial));
2077
2078             Vector<Inst> after;
2079             if (patchpointValue->type() != Void) {
2080                 switch (patchpointValue->resultConstraint.kind()) {
2081                 case ValueRep::WarmAny:
2082                 case ValueRep::ColdAny:
2083                 case ValueRep::LateColdAny:
2084                 case ValueRep::SomeRegister:
2085                     inst.args.append(tmp(patchpointValue));
2086                     break;
2087                 case ValueRep::Register: {
2088                     Tmp reg = Tmp(patchpointValue->resultConstraint.reg());
2089                     inst.args.append(reg);
2090                     after.append(Inst(
2091                         relaxedMoveForType(patchpointValue->type()), m_value, reg, tmp(patchpointValue)));
2092                     break;
2093                 }
2094                 case ValueRep::StackArgument: {
2095                     Arg arg = Arg::callArg(patchpointValue->resultConstraint.offsetFromSP());
2096                     inst.args.append(arg);
2097                     after.append(Inst(
2098                         moveForType(patchpointValue->type()), m_value, arg, tmp(patchpointValue)));
2099                     break;
2100                 }
2101                 default:
2102                     RELEASE_ASSERT_NOT_REACHED();
2103                     break;
2104                 }
2105             }
2106             
2107             fillStackmap(inst, patchpointValue, 0);
2108             
2109             if (patchpointValue->resultConstraint.isReg())
2110                 patchpointValue->lateClobbered().clear(patchpointValue->resultConstraint.reg());
2111
2112             for (unsigned i = patchpointValue->numGPScratchRegisters; i--;)
2113                 inst.args.append(m_code.newTmp(Arg::GP));
2114             for (unsigned i = patchpointValue->numFPScratchRegisters; i--;)
2115                 inst.args.append(m_code.newTmp(Arg::FP));
2116             
2117             m_insts.last().append(WTFMove(inst));
2118             m_insts.last().appendVector(after);
2119             return;
2120         }
2121
2122         case CheckAdd:
2123         case CheckSub:
2124         case CheckMul: {
2125             CheckValue* checkValue = m_value->as<CheckValue>();
2126
2127             Value* left = checkValue->child(0);
2128             Value* right = checkValue->child(1);
2129
2130             Tmp result = tmp(m_value);
2131
2132             // Handle checked negation.
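                 // CheckSub(0, @x) is how B3 spells a checked negation, so it lowers to a one-operand
                 // BranchNeg rather than a two-operand BranchSub.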
2133             if (checkValue->opcode() == CheckSub && left->isInt(0)) {
2134                 append(Move, tmp(right), result);
2135
2136                 Air::Opcode opcode =
2137                     opcodeForType(BranchNeg32, BranchNeg64, checkValue->type());
2138                 CheckSpecial* special = ensureCheckSpecial(opcode, 2);
2139
2140                 Inst inst(Patch, checkValue, Arg::special(special));
2141                 inst.args.append(Arg::resCond(MacroAssembler::Overflow));
2142                 inst.args.append(result);
2143
2144                 fillStackmap(inst, checkValue, 2);
2145
2146                 m_insts.last().append(WTFMove(inst));
2147                 return;
2148             }
2149
2150             Air::Opcode opcode = Air::Oops;
2151             Commutativity commutativity = NotCommutative;
2152             StackmapSpecial::RoleMode stackmapRole = StackmapSpecial::SameAsRep;
2153             switch (m_value->opcode()) {
2154             case CheckAdd:
2155                 opcode = opcodeForType(BranchAdd32, BranchAdd64, m_value->type());
2156                 stackmapRole = StackmapSpecial::ForceLateUseUnlessRecoverable;
2157                 commutativity = Commutative;
2158                 break;
2159             case CheckSub:
2160                 opcode = opcodeForType(BranchSub32, BranchSub64, m_value->type());
2161                 break;
2162             case CheckMul:
2163                 opcode = opcodeForType(BranchMul32, BranchMul64, checkValue->type());
2164                 stackmapRole = StackmapSpecial::ForceLateUse;
2165                 break;
2166             default:
2167                 RELEASE_ASSERT_NOT_REACHED();
2168                 break;
2169             }
2170
2171             // FIXME: It would be great to fuse Loads into these. We currently don't do it because the
2172             // rule for stackmaps is that all addresses are just stack addresses. Maybe we could relax
2173             // this rule here.
2174             // https://bugs.webkit.org/show_bug.cgi?id=151228
2175
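                 // Pick the most compact operand form the target supports: Tmp+Imm with a separate
                 // result, Imm with the result doubling as the left operand, Tmp+Tmp with or without a
                 // separate result, or a form with two extra scratch Tmps (which some targets need,
                 // e.g. to detect multiply overflow).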
2176             Vector<Arg, 2> sources;
2177             if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
2178                 sources.append(tmp(left));
2179                 sources.append(imm(right));
2180             } else if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Imm, Arg::Tmp)) {
2181                 sources.append(imm(right));
2182                 append(Move, tmp(left), result);
2183             } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
2184                 sources.append(tmp(left));
2185                 sources.append(tmp(right));
2186             } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp)) {
2187                 if (commutativity == Commutative && preferRightForResult(left, right)) {
2188                     sources.append(tmp(left));
2189                     append(Move, tmp(right), result);
2190                 } else {
2191                     sources.append(tmp(right));
2192                     append(Move, tmp(left), result);
2193                 }
2194             } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
2195                 sources.append(tmp(left));
2196                 sources.append(tmp(right));
2197                 sources.append(m_code.newTmp(Arg::typeForB3Type(m_value->type())));
2198                 sources.append(m_code.newTmp(Arg::typeForB3Type(m_value->type())));
2199             }
2200
2201             // There is a really hilarious case that arises when we do BranchAdd32(%x, %x). We won't emit
2202             // such code, but the coalescing in our register allocator also does copy propagation, so
2203             // although we emit:
2204             //
2205             //     Move %tmp1, %tmp2
2206             //     BranchAdd32 %tmp1, %tmp2
2207             //
2208             // The register allocator may turn this into:
2209             //
2210             //     BranchAdd32 %rax, %rax
2211             //
2212             // Currently we handle this by ensuring that even this kind of addition can be undone. We can
2213             // undo it by using the carry flag. It's tempting to get rid of that code and just "fix" this
2214             // here by forcing LateUse on the stackmap. If we did that unconditionally, we'd lose a lot of
2215             // performance. So it's tempting to do it only if left == right. But that creates an awkward
2216             // constraint on Air: it means that Air would not be allowed to do any copy propagation.
2217             // Notice that the %rax,%rax situation happened after Air copy-propagated the Move we are
2218             // emitting. We know that copy-propagating over that Move causes add-to-self. But what if we
2219             // emit something like a Move - or even do other kinds of copy-propagation on tmps -
2220             // somewhere else in this code? The add-to-self situation may only emerge after some other Air
2221             // optimizations remove other Moves or identity-like operations. That's why we don't use
2222             // LateUse here to take care of add-to-self.
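                 // (Undoing an add-to-self is possible because for x + x the bit that overflows out of
                 // the top lands in the carry flag, so shifting the result right by one with the carry
                 // shifted back in recovers x.)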
2223             
2224             CheckSpecial* special = ensureCheckSpecial(opcode, 2 + sources.size(), stackmapRole);
2225             
2226             Inst inst(Patch, checkValue, Arg::special(special));
2227
2228             inst.args.append(Arg::resCond(MacroAssembler::Overflow));
2229
2230             inst.args.appendVector(sources);
2231             inst.args.append(result);
2232
2233             fillStackmap(inst, checkValue, 2);
2234
2235             m_insts.last().append(WTFMove(inst));
2236             return;
2237         }
2238
2239         case Check: {
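                 // A Check lowers as if it were Branch(child(0)), except that the branch's args get
                 // wrapped in a Patch governed by a CheckSpecial, so taking the branch runs the check's
                 // slow path instead of jumping to a successor.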
2240             Inst branch = createBranch(m_value->child(0));
2241
2242             CheckSpecial* special = ensureCheckSpecial(branch);
2243             
2244             CheckValue* checkValue = m_value->as<CheckValue>();
2245             
2246             Inst inst(Patch, checkValue, Arg::special(special));
2247             inst.args.appendVector(branch.args);
2248             
2249             fillStackmap(inst, checkValue, 1);
2250             
2251             m_insts.last().append(WTFMove(inst));
2252             return;
2253         }
2254
2255         case Upsilon: {
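                 // An Upsilon writes its value into the shadow Tmp of its Phi; the Phi case below then
                 // copies that shadow into the Phi's own Tmp (see the comment there for why the extra
                 // copy matters).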
2256             Value* value = m_value->child(0);
2257             append(
2258                 relaxedMoveForType(value->type()), immOrTmp(value),
2259                 m_phiToTmp[m_value->as<UpsilonValue>()->phi()]);
2260             return;
2261         }
2262
2263         case Phi: {
2264             // Snapshot the value of the Phi. It may change under us because you could do:
2265             // a = Phi()
2266             // Upsilon(@x, ^a)
2267             // @a => this should get the value of the Phi before the Upsilon, i.e. not @x.
2268
2269             append(relaxedMoveForType(m_value->type()), m_phiToTmp[m_value], tmp(m_value));
2270             return;
2271         }
2272
2273         case Set: {
2274             Value* value = m_value->child(0);
2275             append(
2276                 relaxedMoveForType(value->type()), immOrTmp(value),
2277                 m_variableToTmp.get(m_value->as<VariableValue>()->variable()));
2278             return;
2279         }
2280
2281         case Get: {
2282             append(
2283                 relaxedMoveForType(m_value->type()),
2284                 m_variableToTmp.get(m_value->as<VariableValue>()->variable()), tmp(m_value));
2285             return;
2286         }
2287
2288         case Branch: {
2289             m_insts.last().append(createBranch(m_value->child(0)));
2290             return;
2291         }
2292
2293         case B3::Jump: {
2294             append(Air::Jump);
2295             return;
2296         }
2297             
2298         case Identity: {
2299             ASSERT(tmp(m_value->child(0)) == tmp(m_value));
2300             return;
2301         }
2302
2303         case Return: {
2304             Value* value = m_value->child(0);
2305             Tmp returnValueGPR = Tmp(GPRInfo::returnValueGPR);
2306             Tmp returnValueFPR = Tmp(FPRInfo::returnValueFPR);
2307             switch (value->type()) {
2308             case Void:
2309                 // It's impossible for a void value to be used as a child. If we did want to have a
2310                 // void return, we'd introduce a different opcode, like ReturnVoid.
2311                 RELEASE_ASSERT_NOT_REACHED();
2312                 break;
2313             case Int32:
2314                 append(Move, immOrTmp(value), returnValueGPR);
2315                 append(Ret32, returnValueGPR);
2316                 break;
2317             case Int64:
2318                 append(Move, immOrTmp(value), returnValueGPR);
2319                 append(Ret64, returnValueGPR);
2320                 break;
2321             case Float:
2322                 append(MoveFloat, tmp(value), returnValueFPR);
2323                 append(RetFloat, returnValueFPR);
2324                 break;
2325             case Double:
2326                 append(MoveDouble, tmp(value), returnValueFPR);
2327                 append(RetDouble, returnValueFPR);
2328                 break;
2329             }
2330             return;
2331         }
2332
2333         case B3::Oops: {
2334             append(Air::Oops);
2335             return;
2336         }
2337
2338         default:
2339             break;
2340         }
2341
2342         dataLog("FATAL: could not lower ", deepDump(m_procedure, m_value), "\n");
2343         RELEASE_ASSERT_NOT_REACHED();
2344     }
2345
2346 #if CPU(X86) || CPU(X86_64)
2347     void lowerX86Div()
2348     {
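             // x86 divides the value in edx:eax (rdx:rax for 64-bit); the X86ConvertTo* op below
             // sign-extends eax/rax into edx/rdx, and the divide leaves the quotient in eax and the
             // remainder in edx. That is why the Div case above reads eax and the Mod case reads edx.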
2349         Tmp eax = Tmp(X86Registers::eax);
2350         Tmp edx = Tmp(X86Registers::edx);
2351
2352         Air::Opcode convertToDoubleWord;
2353         Air::Opcode div;
2354         switch (m_value->type()) {
2355         case Int32:
2356             convertToDoubleWord = X86ConvertToDoubleWord32;
2357             div = X86Div32;
2358             break;
2359         case Int64:
2360             convertToDoubleWord = X86ConvertToQuadWord64;
2361             div = X86Div64;
2362             break;
2363         default:
2364             RELEASE_ASSERT_NOT_REACHED();
2365             return;
2366         }
2367         
2368         append(Move, tmp(m_value->child(0)), eax);
2369         append(convertToDoubleWord, eax, edx);
2370         append(div, eax, edx, tmp(m_value->child(1)));
2371     }
2372 #endif
2373
2374     IndexSet<Value> m_locked; // These are values that will have no Tmp in Air.
2375     IndexMap<Value, Tmp> m_valueToTmp; // These are values that must have a Tmp in Air. We say that a Value* with a non-null Tmp is "pinned".
2376     IndexMap<Value, Tmp> m_phiToTmp; // Each Phi gets its own Tmp.
2377     IndexMap<B3::BasicBlock, Air::BasicBlock*> m_blockToBlock;
2378     HashMap<B3::StackSlot*, Air::StackSlot*> m_stackToStack;
2379     HashMap<Variable*, Tmp> m_variableToTmp;
2380
2381     UseCounts m_useCounts;
2382     PhiChildren m_phiChildren;
2383     BlockWorklist m_fastWorklist;
2384     Dominators& m_dominators;
2385
2386     Vector<Vector<Inst, 4>> m_insts;
2387     Vector<Inst> m_prologue;
2388
2389     B3::BasicBlock* m_block;
2390     bool m_isRare;
2391     unsigned m_index;
2392     Value* m_value;
2393
2394     PatchpointSpecial* m_patchpointSpecial { nullptr };
2395     HashMap<CheckSpecial::Key, CheckSpecial*> m_checkSpecials;
2396
2397     Procedure& m_procedure;
2398     Code& m_code;
2399 };
2400
2401 } // anonymous namespace
2402
2403 void lowerToAir(Procedure& procedure)
2404 {
2405     PhaseScope phaseScope(procedure, "lowerToAir");
2406     LowerToAir lowerToAir(procedure);
2407     lowerToAir.run();
2408 }
2409
2410 } } // namespace JSC::B3
2411
2412 #if COMPILER(GCC) && ASSERT_DISABLED
2413 #pragma GCC diagnostic pop
2414 #endif // COMPILER(GCC) && ASSERT_DISABLED
2415
2416 #endif // ENABLE(B3_JIT)