1 # Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
6 # 1. Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
30 # x<number> => GPR. This is both the generic name of the register, and the name used
31 # to indicate that the register is used in 64-bit mode.
32 # w<number> => GPR in 32-bit mode. This is the low 32-bits of the GPR. If it is
33 # mutated then the high 32-bit part of the register is zero filled.
34 # q<number> => FPR. This is the generic name of the register.
35 # d<number> => FPR used as an IEEE 64-bit binary floating point number (i.e. double).
37 # GPR conventions, to match the baseline JIT:
39 # x0 => return value, cached result, first argument, t0, a0, r0
45 # x9 => (nonArgGPR1 in baseline)
46 # x13 => scratch (unused in baseline)
51 # x27 => csr1 (tagTypeNumber)
52 # x28 => csr2 (tagMask)
57 # FPR conventions, to match the baseline JIT:
63 # q4 => ft4 (unused in baseline)
64 # q5 => ft5 (unused in baseline)
67 def arm64GPRName(name, kind)
68 raise "bad GPR name #{name}" unless name =~ /^x/
76 raise "Wrong kind: #{kind}"
80 def arm64FPRName(name, kind)
81 raise "bad FPR kind #{kind}" unless kind == :double
82 raise "bad FPR name #{name}" unless name =~ /^q/
87 def arm64Operand(kind)
90 arm64GPRName(@name, kind)
92 arm64FPRName(@name, kind)
94 raise "Bad name: #{@name}"
99 # Scratch registers handed to assignRegistersToTemporaries during lowering.
99 # x13 is documented in the conventions above as "scratch (unused in baseline)";
99 # x16/x17 are the AArch64 intra-procedure-call scratch registers (IP0/IP1) and
99 # are presumably also free at LLInt call sites — TODO confirm against the baseline JIT.
99 ARM64_EXTRA_GPRS = [SpecialRegister.new("x16"), SpecialRegister.new("x17"), SpecialRegister.new("x13")]
100 # The single FPR temporary; q31 does not appear in the FPR conventions listed above.
100 ARM64_EXTRA_FPRS = [SpecialRegister.new("q31")]
103 def arm64Operand(kind)
105 when 't0', 'a0', 'r0'
106 arm64GPRName('x0', kind)
107 when 't1', 'a1', 'r1'
108 arm64GPRName('x1', kind)
110 arm64GPRName('x2', kind)
112 arm64GPRName('x3', kind)
114 arm64GPRName('x23', kind)
116 arm64GPRName('x5', kind)
118 arm64GPRName('x24', kind)
120 arm64GPRName('x6', kind)
122 arm64GPRName('x29', kind)
124 arm64GPRName('x27', kind)
126 arm64GPRName('x28', kind)
132 raise "Bad register name #{@name} at #{codeOriginString}"
138 def arm64Operand(kind)
141 arm64FPRName('q0', kind)
143 arm64FPRName('q1', kind)
145 arm64FPRName('q2', kind)
147 arm64FPRName('q3', kind)
149 arm64FPRName('q4', kind)
151 arm64FPRName('q5', kind)
152 else "Bad register name #{@name} at #{codeOriginString}"
158 def arm64Operand(kind)
159 raise "Invalid immediate #{value} at #{codeOriginString}" if value < 0 or value > 4095
165 def arm64Operand(kind)
166 raise "Invalid offset #{offset.value} at #{codeOriginString}" if offset.value < -255 or offset.value > 4095
167 "[#{base.arm64Operand(:ptr)}, \##{offset.value}]"
170 def arm64EmitLea(destination, kind)
171 $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, \##{offset.value}"
176 def arm64Operand(kind)
177 raise "Invalid offset #{offset.value} at #{codeOriginString}" if offset.value != 0
178 "[#{base.arm64Operand(:ptr)}, #{index.arm64Operand(:ptr)}, lsl \##{scaleShift}]"
181 def arm64EmitLea(destination, kind)
182 $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, #{index.arm64Operand(kind)}, lsl \##{scaleShift}"
186 class AbsoluteAddress
187 def arm64Operand(kind)
188 raise "Unconverted absolute address #{address.value} at #{codeOriginString}"
192 # FIXME: We could support AbsoluteAddress for lea, but we don't.
195 # Actual lowering code follows.
199 def getModifiedListARM64
201 result = riscLowerNot(result)
202 result = riscLowerSimpleBranchOps(result)
203 result = riscLowerHardBranchOps64(result)
204 result = riscLowerShiftOps(result)
205 result = riscLowerMalformedAddresses(result) {
208 when "loadb", "loadbs", "storeb", /^bb/, /^btb/, /^cb/, /^tb/
210 when "loadh", "loadhs"
212 when "loadi", "loadis", "storei", "addi", "andi", "lshifti", "muli", "negi",
213 "noti", "ori", "rshifti", "urshifti", "subi", "xori", /^bi/, /^bti/,
214 /^ci/, /^ti/, "addis", "subis", "mulis", "smulli", "leai"
216 when "loadp", "storep", "loadq", "storeq", "loadd", "stored", "lshiftp", "lshiftq", "negp", "negq", "rshiftp", "rshiftq",
217 "urshiftp", "urshiftq", "addp", "addq", "mulp", "mulq", "andp", "andq", "orp", "orq", "subp", "subq", "xorp", "xorq", "addd",
218 "divd", "subd", "muld", "sqrtd", /^bp/, /^bq/, /^btp/, /^btq/, /^cp/, /^cq/, /^tp/, /^tq/, /^bd/,
219 "jmp", "call", "leap", "leaq"
222 raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
225 if address.is_a? BaseIndex
226 address.offset.value == 0 and
227 (node.opcode =~ /^lea/ or address.scale == 1 or address.scale == size)
228 elsif address.is_a? Address
229 (-255..4095).include? address.offset.value
234 result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"])
235 result = riscLowerMalformedImmediates(result, 0..4095)
236 result = riscLowerMisplacedAddresses(result)
237 result = riscLowerMalformedAddresses(result) {
243 not (address.is_a? Address and address.offset.value < 0)
247 raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
250 result = riscLowerTest(result)
251 result = assignRegistersToTemporaries(result, :gpr, ARM64_EXTRA_GPRS)
252 result = assignRegistersToTemporaries(result, :fpr, ARM64_EXTRA_FPRS)
257 def arm64Operands(operands, kinds)
259 raise "Mismatched operand lists: #{operands.inspect} and #{kinds.inspect}" if operands.size != kinds.size
261 kinds = operands.map{ kinds }
263 (0...operands.size).map {
265 operands[index].arm64Operand(kinds[index])
269 def arm64FlippedOperands(operands, kinds)
271 kinds = [kinds[-1]] + kinds[0..-2]
273 arm64Operands([operands[-1]] + operands[0..-2], kinds)
276 # TAC = three address code.
277 def arm64TACOperands(operands, kind)
278 if operands.size == 3
279 return arm64FlippedOperands(operands, kind)
282 raise unless operands.size == 2
284 return operands[1].arm64Operand(kind) + ", " + arm64FlippedOperands(operands, kind)
287 def emitARM64Add(opcode, operands, kind)
288 if operands.size == 3
289 raise unless operands[1].register?
290 raise unless operands[2].register?
292 if operands[0].immediate?
293 if operands[0].value == 0 and flag !~ /s$/
294 unless operands[1] == operands[2]
295 $asm.puts "mov #{arm64FlippedOperands(operands[1..2], kind)}"
298 $asm.puts "#{opcode} #{arm64Operands(operands.reverse, kind)}"
303 raise unless operands[0].register?
304 $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}"
308 raise unless operands.size == 2
310 if operands[0].immediate? and operands[0].value == 0 and opcode !~ /s$/
314 $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}"
317 def emitARM64Unflipped(opcode, operands, kind)
318 $asm.puts "#{opcode} #{arm64Operands(operands, kind)}"
321 def emitARM64TAC(opcode, operands, kind)
322 $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}"
325 def emitARM64(opcode, operands, kind)
326 $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}"
329 def emitARM64Access(opcode, opcodeNegativeOffset, register, memory, kind)
330 if memory.is_a? Address and memory.offset.value < 0
331 $asm.puts "#{opcodeNegativeOffset} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
335 $asm.puts "#{opcode} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
338 def emitARM64Shift(opcodeRegs, opcodeImmediate, operands, kind)
339 if operands.size == 3 and operands[1].immediate?
340 magicNumbers = yield operands[1].value
341 $asm.puts "#{opcodeImmediate} #{operands[2].arm64Operand(kind)}, #{operands[0].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
345 if operands.size == 2 and operands[0].immediate?
346 magicNumbers = yield operands[0].value
347 $asm.puts "#{opcodeImmediate} #{operands[1].arm64Operand(kind)}, #{operands[1].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
351 emitARM64TAC(opcodeRegs, operands, kind)
354 def emitARM64Branch(opcode, operands, kind, branchOpcode)
355 emitARM64Unflipped(opcode, operands[0..-2], kind)
356 $asm.puts "#{branchOpcode} #{operands[-1].asmLabel}"
359 def emitARM64Compare(operands, kind, compareCode)
360 emitARM64Unflipped("subs #{arm64GPRName('xzr', kind)}, ", operands[0..-2], kind)
361 $asm.puts "csinc #{operands[-1].arm64Operand(:int)}, wzr, wzr, #{compareCode}"
364 def emitARM64MoveImmediate(value, target)
366 isNegative = value < 0
367 [48, 32, 16, 0].each {
369 currentValue = (value >> shift) & 0xffff
370 next if currentValue == (isNegative ? 0xffff : 0) and shift != 0
373 $asm.puts "movn #{target.arm64Operand(:ptr)}, \##{(~currentValue) & 0xffff}, lsl \##{shift}"
375 $asm.puts "movz #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
379 $asm.puts "movk #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
386 $asm.comment codeOriginString
387 $asm.annotation annotation if $enableInstrAnnotations
391 emitARM64Add("add", operands, :int)
393 emitARM64Add("adds", operands, :int)
395 emitARM64Add("add", operands, :ptr)
397 emitARM64Add("adds", operands, :ptr)
399 emitARM64Add("add", operands, :ptr)
401 emitARM64TAC("and", operands, :int)
403 emitARM64TAC("and", operands, :ptr)
405 emitARM64TAC("and", operands, :ptr)
407 emitARM64TAC("orr", operands, :int)
409 emitARM64TAC("orr", operands, :ptr)
411 emitARM64TAC("orr", operands, :ptr)
413 emitARM64TAC("eor", operands, :int)
415 emitARM64TAC("eor", operands, :ptr)
417 emitARM64TAC("eor", operands, :ptr)
419 emitARM64Shift("lslv", "ubfm", operands, :int) {
421 [32 - value, 31 - value]
424 emitARM64Shift("lslv", "ubfm", operands, :ptr) {
426 [64 - value, 63 - value]
429 emitARM64Shift("lslv", "ubfm", operands, :ptr) {
431 [64 - value, 63 - value]
434 emitARM64Shift("asrv", "sbfm", operands, :int) {
439 emitARM64Shift("asrv", "sbfm", operands, :ptr) {
444 emitARM64Shift("asrv", "sbfm", operands, :ptr) {
449 emitARM64Shift("lsrv", "ubfm", operands, :int) {
454 emitARM64Shift("lsrv", "ubfm", operands, :ptr) {
459 emitARM64Shift("lsrv", "ubfm", operands, :ptr) {
464 $asm.puts "madd #{arm64TACOperands(operands, :int)}, wzr"
466 $asm.puts "madd #{arm64TACOperands(operands, :ptr)}, xzr"
468 $asm.puts "madd #{arm64TACOperands(operands, :ptr)}, xzr"
470 emitARM64TAC("sub", operands, :int)
472 emitARM64TAC("sub", operands, :ptr)
474 emitARM64TAC("sub", operands, :ptr)
476 emitARM64TAC("subs", operands, :int)
478 $asm.puts "sub #{operands[0].arm64Operand(:int)}, wzr, #{operands[0].arm64Operand(:int)}"
480 $asm.puts "sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
482 $asm.puts "sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
484 emitARM64Access("ldr", "ldur", operands[1], operands[0], :int)
486 emitARM64Access("ldrsw", "ldursw", operands[1], operands[0], :ptr)
488 emitARM64Access("ldr", "ldur", operands[1], operands[0], :ptr)
490 emitARM64Access("ldr", "ldur", operands[1], operands[0], :ptr)
492 emitARM64Unflipped("str", operands, :int)
494 emitARM64Unflipped("str", operands, :ptr)
496 emitARM64Unflipped("str", operands, :ptr)
498 emitARM64Access("ldrb", "ldurb", operands[1], operands[0], :int)
500 emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :int)
502 emitARM64Unflipped("strb", operands, :int)
504 emitARM64Access("ldrh", "ldurh", operands[1], operands[0], :int)
506 emitARM64Access("ldrsh", "ldursh", operands[1], operands[0], :int)
508 emitARM64Unflipped("strh", operands, :int)
510 emitARM64Access("ldr", "ldur", operands[1], operands[0], :double)
512 emitARM64Unflipped("str", operands, :double)
514 emitARM64TAC("fadd", operands, :double)
516 emitARM64TAC("fdiv", operands, :double)
518 emitARM64TAC("fsub", operands, :double)
520 emitARM64TAC("fmul", operands, :double)
522 emitARM64("fsqrt", operands, :double)
524 emitARM64("scvtf", operands, [:int, :double])
526 emitARM64Branch("fcmp", operands, :double, "b.eq")
528 emitARM64Unflipped("fcmp", operands[0..1], :double)
529 isUnordered = LocalLabel.unique("bdneq")
530 $asm.puts "b.vs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}"
531 $asm.puts "b.ne #{operands[2].asmLabel}"
532 isUnordered.lower("ARM64")
534 emitARM64Branch("fcmp", operands, :double, "b.gt")
536 emitARM64Branch("fcmp", operands, :double, "b.ge")
538 emitARM64Branch("fcmp", operands, :double, "b.mi")
540 emitARM64Branch("fcmp", operands, :double, "b.ls")
542 emitARM64Unflipped("fcmp", operands[0..1], :double)
543 $asm.puts "b.vs #{operands[2].asmLabel}"
544 $asm.puts "b.eq #{operands[2].asmLabel}"
546 emitARM64Branch("fcmp", operands, :double, "b.ne")
548 emitARM64Branch("fcmp", operands, :double, "b.hi")
550 emitARM64Branch("fcmp", operands, :double, "b.pl")
552 emitARM64Branch("fcmp", operands, :double, "b.lt")
554 emitARM64Branch("fcmp", operands, :double, "b.le")
556 # FIXME: May be a good idea to just get rid of this instruction, since the interpreter
557 # currently does not use it.
558 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
560 emitARM64("fcvtzs", operands, [:double, :int])
562 # FIXME: Remove this instruction, or use it and implement it. Currently it's not
564 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
566 # FIXME: Remove it or support it.
567 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
569 operands.each_slice(2) {
571 # Note that the operands are in the reverse order of the case for push.
572 # This is due to the fact that order matters for pushing and popping, and
573 # on platforms that only push/pop one slot at a time they pop their
574 # arguments in the reverse order that they were pushed. In order to remain
575 # compatible with those platforms we assume here that that's what has been done.
577 # So for example, if we did push(A, B, C, D), we would then pop(D, C, B, A).
578 # But since the ordering of arguments doesn't change on arm64 between the stp and ldp
579 # instructions we need to flip flop the argument positions that were passed to us.
580 $asm.puts "ldp #{ops[1].arm64Operand(:ptr)}, #{ops[0].arm64Operand(:ptr)}, [sp], #16"
583 operands.each_slice(2) {
585 $asm.puts "stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!"
588 $asm.puts "ldp fp, lr, [sp], #16"
590 $asm.puts "stp fp, lr, [sp, #-16]!"
591 when "popCalleeSaves"
592 $asm.puts "ldp x28, x27, [sp], #16"
593 $asm.puts "ldp x26, x25, [sp], #16"
594 $asm.puts "ldp x24, x23, [sp], #16"
595 $asm.puts "ldp x22, x21, [sp], #16"
596 $asm.puts "ldp x20, x19, [sp], #16"
597 when "pushCalleeSaves"
598 $asm.puts "stp x20, x19, [sp, #-16]!"
599 $asm.puts "stp x22, x21, [sp, #-16]!"
600 $asm.puts "stp x24, x23, [sp, #-16]!"
601 $asm.puts "stp x26, x25, [sp, #-16]!"
602 $asm.puts "stp x28, x27, [sp, #-16]!"
604 if operands[0].immediate?
605 emitARM64MoveImmediate(operands[0].value, operands[1])
607 emitARM64("mov", operands, :ptr)
610 emitARM64("sxtw", operands, :ptr)
612 emitARM64("sxtw", operands, :ptr)
614 emitARM64("uxtw", operands, :ptr)
616 emitARM64("uxtw", operands, :ptr)
620 if operands[0].immediate? and operands[0].value == 0
621 $asm.puts "cbz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
622 elsif operands[1].immediate? and operands[1].value == 0
623 $asm.puts "cbz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
625 emitARM64Branch("subs wzr, ", operands, :int, "b.eq")
628 if operands[0].immediate? and operands[0].value == 0
629 $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
630 elsif operands[1].immediate? and operands[1].value == 0
631 $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
633 emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq")
636 if operands[0].immediate? and operands[0].value == 0
637 $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
638 elsif operands[1].immediate? and operands[1].value == 0
639 $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
641 emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq")
643 when "bineq", "bbneq"
644 if operands[0].immediate? and operands[0].value == 0
645 $asm.puts "cbnz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
646 elsif operands[1].immediate? and operands[1].value == 0
647 $asm.puts "cbnz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
649 emitARM64Branch("subs wzr, ", operands, :int, "b.ne")
652 if operands[0].immediate? and operands[0].value == 0
653 $asm.puts "cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
654 elsif operands[1].immediate? and operands[1].value == 0
655 $asm.puts "cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
657 emitARM64Branch("subs xzr, ", operands, :ptr, "b.ne")
660 if operands[0].immediate? and operands[0].value == 0
661 $asm.puts "cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
662 elsif operands[1].immediate? and operands[1].value == 0
663 $asm.puts "cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
665 emitARM64Branch("subs xzr, ", operands, :ptr, "b.ne")
668 emitARM64Branch("subs wzr, ", operands, :int, "b.hi")
670 emitARM64Branch("subs xzr, ", operands, :ptr, "b.hi")
672 emitARM64Branch("subs xzr, ", operands, :ptr, "b.hi")
673 when "biaeq", "bbaeq"
674 emitARM64Branch("subs wzr, ", operands, :int, "b.hs")
676 emitARM64Branch("subs xzr, ", operands, :ptr, "b.hs")
678 emitARM64Branch("subs xzr, ", operands, :ptr, "b.hs")
680 emitARM64Branch("subs wzr, ", operands, :int, "b.lo")
682 emitARM64Branch("subs xzr, ", operands, :ptr, "b.lo")
684 emitARM64Branch("subs xzr, ", operands, :ptr, "b.lo")
685 when "bibeq", "bbbeq"
686 emitARM64Branch("subs wzr, ", operands, :int, "b.ls")
688 emitARM64Branch("subs xzr, ", operands, :ptr, "b.ls")
690 emitARM64Branch("subs xzr, ", operands, :ptr, "b.ls")
692 emitARM64Branch("subs wzr, ", operands, :int, "b.gt")
694 emitARM64Branch("subs xzr, ", operands, :ptr, "b.gt")
696 emitARM64Branch("subs xzr, ", operands, :ptr, "b.gt")
697 when "bigteq", "bbgteq"
698 emitARM64Branch("subs wzr, ", operands, :int, "b.ge")
700 emitARM64Branch("subs xzr, ", operands, :ptr, "b.ge")
702 emitARM64Branch("subs xzr, ", operands, :ptr, "b.ge")
704 emitARM64Branch("subs wzr, ", operands, :int, "b.lt")
706 emitARM64Branch("subs xzr, ", operands, :ptr, "b.lt")
708 emitARM64Branch("subs xzr, ", operands, :ptr, "b.lt")
709 when "bilteq", "bblteq"
710 emitARM64Branch("subs wzr, ", operands, :int, "b.le")
712 emitARM64Branch("subs xzr, ", operands, :ptr, "b.le")
714 emitARM64Branch("subs xzr, ", operands, :ptr, "b.le")
716 if operands[0].label?
717 $asm.puts "b #{operands[0].asmLabel}"
719 emitARM64Unflipped("br", operands, :ptr)
722 if operands[0].label?
723 $asm.puts "bl #{operands[0].asmLabel}"
725 emitARM64Unflipped("blr", operands, :ptr)
732 emitARM64Compare(operands, :int, "ne")
734 emitARM64Compare(operands, :ptr, "ne")
736 emitARM64Compare(operands, :ptr, "ne")
737 when "cineq", "cbneq"
738 emitARM64Compare(operands, :int, "eq")
740 emitARM64Compare(operands, :ptr, "eq")
742 emitARM64Compare(operands, :ptr, "eq")
744 emitARM64Compare(operands, :int, "ls")
746 emitARM64Compare(operands, :ptr, "ls")
748 emitARM64Compare(operands, :ptr, "ls")
749 when "ciaeq", "cbaeq"
750 emitARM64Compare(operands, :int, "lo")
752 emitARM64Compare(operands, :ptr, "lo")
754 emitARM64Compare(operands, :ptr, "lo")
756 emitARM64Compare(operands, :int, "hs")
758 emitARM64Compare(operands, :ptr, "hs")
760 emitARM64Compare(operands, :ptr, "hs")
761 when "cibeq", "cbbeq"
762 emitARM64Compare(operands, :int, "hi")
764 emitARM64Compare(operands, :ptr, "hi")
766 emitARM64Compare(operands, :ptr, "hi")
768 emitARM64Compare(operands, :int, "ge")
770 emitARM64Compare(operands, :ptr, "ge")
772 emitARM64Compare(operands, :ptr, "ge")
773 when "cilteq", "cblteq"
774 emitARM64Compare(operands, :int, "gt")
776 emitARM64Compare(operands, :ptr, "gt")
778 emitARM64Compare(operands, :ptr, "gt")
780 emitARM64Compare(operands, :int, "le")
782 emitARM64Compare(operands, :ptr, "le")
784 emitARM64Compare(operands, :ptr, "le")
785 when "cigteq", "cbgteq"
786 emitARM64Compare(operands, :int, "lt")
788 emitARM64Compare(operands, :ptr, "lt")
790 emitARM64Compare(operands, :ptr, "lt")
792 $asm.puts "ldr #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
794 $asm.puts "str #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
796 emitARM64("fmov", operands, [:ptr, :double])
798 emitARM64("fmov", operands, [:ptr, :double])
800 emitARM64("fmov", operands, [:double, :ptr])
802 emitARM64("fmov", operands, [:double, :ptr])
804 $asm.puts "b.vs #{operands[0].asmLabel}"
806 $asm.puts "b.mi #{operands[0].asmLabel}"
808 $asm.puts "b.eq #{operands[0].asmLabel}"
810 $asm.puts "b.ne #{operands[0].asmLabel}"
812 operands[0].arm64EmitLea(operands[1], :int)
814 operands[0].arm64EmitLea(operands[1], :ptr)
816 operands[0].arm64EmitLea(operands[1], :ptr)
818 $asm.puts "smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"