/*
 * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

#define REPATCH_OFFSET_CALL_R11 3
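// 3 is presumably the encoded length of the indirect `call`/`jmp` through r11 (REX.B, 0xFF, ModRM):
// the patchable 64-bit immediate that feeds r11 ends exactly this many bytes before the call's label,
// so repatching code can reach it via dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).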
inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
public:
    static const unsigned numGPRs = 16;
    static const unsigned numFPRs = 16;

    static const Scale ScalePtr = TimesEight;
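    // Pointers are eight bytes on x86-64, so pointer-sized BaseIndex accesses scale the index by eight.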
    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branch32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load8;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;
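    // The AbsoluteAddress and void* overloads below all follow the same pattern: materialize the 64-bit
    // address into the scratch register and then defer to the register-addressed forms inherited from
    // MacroAssemblerX86Common, since x86-64 has no general 64-bit absolute addressing mode (only rax has
    // the special moffs encoding, hence the eax fast paths).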
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        add32(imm, Address(scratchRegister()));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        and32(imm, Address(scratchRegister()));
    }

    void add32(AbsoluteAddress address, RegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        add32(Address(scratchRegister()), dest);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        or32(imm, Address(scratchRegister()));
    }

    void or32(RegisterID reg, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        or32(reg, Address(scratchRegister()));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        sub32(imm, Address(scratchRegister()));
    }

    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        load8(dest, dest);
    }

    void load32(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load32(dest, dest);
        }
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        m_assembler.addsd_mr(0, scratchRegister(), dest);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister());
        m_assembler.cvtsi2sd_rr(scratchRegister(), dest);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister());
        store32(imm, scratchRegister());
    }

    void store32(RegisterID source, void* address)
    {
        if (source == X86Registers::eax)
            m_assembler.movl_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister());
            store32(source, scratchRegister());
        }
    }

    void store8(TrustedImm32 imm, void* address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        move(TrustedImmPtr(address), scratchRegister());
        store8(imm8, Address(scratchRegister()));
    }

    void store8(RegisterID reg, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister());
        store8(reg, Address(scratchRegister()));
    }
    Call callWithSlowPathReturnType()
    {
        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
        // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument.
        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
        // We then need to copy the 16-byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
        // It is assumed that the parameters are already shifted to the right when entering this method.
        // Note: this implementation supports up to 3 parameters.

        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
        // In addition, we need to allocate 16 bytes for the return value.
        // Also, we should allocate 16 bytes for the frame pointer and return address (not populated).
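        // Rough picture of the frame set up by the sub64 below (8-byte slots, rsp-relative):
        //   [0]..[3]  shadow space for the four register parameters
        //   [4]..[5]  16-byte slot for the returned value (rcx is pointed here below)
        //   [6]       saved rbp (stored above at rsp - 16 before the adjustment)
        //   [7]       slot reserved for a return address (not populated)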
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // The first parameter register should contain a pointer to the stack-allocated space for the return value.
        move(X86Registers::esp, X86Registers::ecx);
        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);

        DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister());
        Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable);

        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // Copy the return value into rax and rdx.
        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
        load64(Address(X86Registers::eax), X86Registers::eax);

        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }
    Call call()
    {
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them.
        // We don't know the number of arguments at this point, so the arguments (5, 6, ...) should always be copied.

        // Copy argument 5.
        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister());
        store64(scratchRegister(), Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t))));

        // Copy argument 6.
        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister());
        store64(scratchRegister(), Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t))));

        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
        // Also, we should allocate 16 bytes for the frame pointer and return address (not populated).
        // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters.
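        // Rough picture after the sub64 below (8-byte slots, rsp-relative):
        //   [0]..[3]  shadow space for the four register parameters
        //   [4]..[5]  the copies of arguments 5 and 6 made above
        //   [6]       saved rbp
        //   [7]       slot reserved for a return address (not populated)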
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister());
        Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable);

        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }
    // Address is a memory location containing the address to jump to
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        jump(Address(scratchRegister()));
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister());
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister()));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister());
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister()));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }
    Call threadSafePatchableNearCall()
    {
        const size_t nearCallOpcodeSize = 1;
        const size_t nearCallRelativeLocationSize = sizeof(int32_t);
        // We want to make sure the 32-bit near call immediate is 32-bit aligned.
        size_t codeSize = m_assembler.codeSize();
        size_t alignedSize = WTF::roundUpToMultipleOf<nearCallRelativeLocationSize>(codeSize + nearCallOpcodeSize);
        emitNops(alignedSize - (codeSize + nearCallOpcodeSize));
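        // For example, if codeSize is 10, then codeSize + nearCallOpcodeSize rounds up from 11 to 12:
        // one nop is emitted, the E8 opcode lands at offset 11, and the rel32 immediate starts at the
        // 4-byte-aligned offset 12, so it can later be repatched with a single aligned 32-bit write.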
        DataLabelPtr label = DataLabelPtr(this);
        Call result = nearCall();
        ASSERT_UNUSED(label, differenceBetween(label, result) == (nearCallOpcodeSize + nearCallRelativeLocationSize));
        return result;
    }
    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), scratchRegister());
        add32(src, Address(scratchRegister()));
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void add64(Address src, RegisterID dest)
    {
        m_assembler.addq_mr(src.offset, src.base, dest);
    }

    void add64(BaseIndex src, RegisterID dest)
    {
        m_assembler.addq_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void add64(RegisterID src, Address dest)
    {
        m_assembler.addq_rm(src, dest.offset, dest.base);
    }

    void add64(RegisterID src, BaseIndex dest)
    {
        m_assembler.addq_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), scratchRegister());
        add64(Address(scratchRegister()), dest);
    }

    void add64(TrustedImm32 imm, RegisterID srcDest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(srcDest);
        else
            m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(dest);
        else {
            move(imm, scratchRegister());
            add64(scratchRegister(), dest);
        }
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        if (imm.m_value == 1)
            m_assembler.incq_m(address.offset, address.base);
        else
            m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void add64(TrustedImm32 imm, BaseIndex address)
    {
        if (imm.m_value == 1)
            m_assembler.incq_m(address.offset, address.base, address.index, address.scale);
        else
            m_assembler.addq_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        add64(imm, Address(scratchRegister()));
    }

    void add64(RegisterID a, RegisterID b, RegisterID dest)
    {
        x86Lea64(BaseIndex(a, b, TimesOne), dest);
    }
    void x86Lea64(BaseIndex index, RegisterID dest)
    {
        if (!index.scale && !index.offset) {
            if (index.base == dest) {
                add64(index.index, dest);
                return;
            }
            if (index.index == dest) {
                add64(index.base, dest);
                return;
            }
        }
        m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest);
    }

    void getEffectiveAddress(BaseIndex address, RegisterID dest)
    {
        return x86Lea64(address, dest);
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void and64(RegisterID src, Address dest)
    {
        m_assembler.andq_rm(src, dest.offset, dest.base);
    }

    void and64(RegisterID src, BaseIndex dest)
    {
        m_assembler.andq_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void and64(Address src, RegisterID dest)
    {
        m_assembler.andq_mr(src.offset, src.base, dest);
    }

    void and64(BaseIndex src, RegisterID dest)
    {
        m_assembler.andq_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void and64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void and64(TrustedImm32 imm, Address dest)
    {
        m_assembler.andq_im(imm.m_value, dest.offset, dest.base);
    }

    void and64(TrustedImm32 imm, BaseIndex dest)
    {
        m_assembler.andq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale);
    }

    void and64(TrustedImmPtr imm, RegisterID srcDest)
    {
        intptr_t intValue = imm.asIntptr();
        if (intValue <= std::numeric_limits<int32_t>::max()
            && intValue >= std::numeric_limits<int32_t>::min()) {
            and64(TrustedImm32(static_cast<int32_t>(intValue)), srcDest);
            return;
        }
        move(imm, scratchRegister());
        and64(scratchRegister(), srcDest);
    }

    void and64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2 && op1 != dest && op2 != dest)
            move(op1, dest);
        else if (op1 == dest)
            and64(op2, dest);
        else {
            move(op2, dest);
            and64(op1, dest);
        }
    }
    void countLeadingZeros64(RegisterID src, RegisterID dst)
    {
        if (supportsLZCNT()) {
            m_assembler.lzcntq_rr(src, dst);
            return;
        }
        m_assembler.bsrq_rr(src, dst);
        clz64AfterBsr(dst);
    }

    void countLeadingZeros64(Address src, RegisterID dst)
    {
        if (supportsLZCNT()) {
            m_assembler.lzcntq_mr(src.offset, src.base, dst);
            return;
        }
        m_assembler.bsrq_mr(src.offset, src.base, dst);
        clz64AfterBsr(dst);
    }

    void countTrailingZeros64(RegisterID src, RegisterID dst)
    {
        if (supportsBMI1()) {
            m_assembler.tzcntq_rr(src, dst);
            return;
        }
        m_assembler.bsfq_rr(src, dst);
        ctzAfterBsf<64>(dst);
    }
    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shlq_i8r(imm.m_value, dest);
    }

    void lshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.shlq_CLr(dest);
        else {
            // Can only shift by ecx, so we do some swapping if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.shlq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarq_i8r(imm.m_value, dest);
    }

    void rshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.sarq_CLr(dest);
        else {
            // Can only shift by ecx, so we do some swapping if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.sarq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrq_i8r(imm.m_value, dest);
    }

    void urshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.shrq_CLr(dest);
        else {
            // Can only shift by ecx, so we do some swapping if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.shrq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void rotateRight64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.rorq_i8r(imm.m_value, dest);
    }

    void rotateRight64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.rorq_CLr(dest);
        else {
            // Can only rotate by ecx, so we do some swapping if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.rorq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void rotateLeft64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.rolq_i8r(imm.m_value, dest);
    }

    void rotateLeft64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.rolq_CLr(dest);
        else {
            // Can only rotate by ecx, so we do some swapping if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.rolq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }
    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.imulq_rr(src, dest);
    }

    void mul64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src2 == dest) {
            m_assembler.imulq_rr(src1, dest);
            return;
        }
        move(src1, dest);
        m_assembler.imulq_rr(src2, dest);
    }

    void x86ConvertToQuadWord64()
    {
        m_assembler.cqo();
    }

    void x86ConvertToQuadWord64(RegisterID rax, RegisterID rdx)
    {
        ASSERT_UNUSED(rax, rax == X86Registers::eax);
        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
        x86ConvertToQuadWord64();
    }

    void x86Div64(RegisterID denominator)
    {
        m_assembler.idivq_r(denominator);
    }

    void x86Div64(RegisterID rax, RegisterID rdx, RegisterID denominator)
    {
        ASSERT_UNUSED(rax, rax == X86Registers::eax);
        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
        x86Div64(denominator);
    }

    void x86UDiv64(RegisterID denominator)
    {
        m_assembler.divq_r(denominator);
    }

    void x86UDiv64(RegisterID rax, RegisterID rdx, RegisterID denominator)
    {
        ASSERT_UNUSED(rax, rax == X86Registers::eax);
        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
        x86UDiv64(denominator);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.negq_r(dest);
    }

    void neg64(RegisterID src, RegisterID dest)
    {
        move(src, dest);
        m_assembler.negq_r(dest);
    }

    void neg64(Address dest)
    {
        m_assembler.negq_m(dest.offset, dest.base);
    }

    void neg64(BaseIndex dest)
    {
        m_assembler.negq_m(dest.offset, dest.base, dest.index, dest.scale);
    }
    void or64(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void or64(RegisterID src, Address dest)
    {
        m_assembler.orq_rm(src, dest.offset, dest.base);
    }

    void or64(RegisterID src, BaseIndex dest)
    {
        m_assembler.orq_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void or64(Address src, RegisterID dest)
    {
        m_assembler.orq_mr(src.offset, src.base, dest);
    }

    void or64(BaseIndex src, RegisterID dest)
    {
        m_assembler.orq_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void or64(TrustedImm32 imm, Address dest)
    {
        m_assembler.orq_im(imm.m_value, dest.offset, dest.base);
    }

    void or64(TrustedImm32 imm, BaseIndex dest)
    {
        m_assembler.orq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale);
    }

    void or64(TrustedImm64 imm, RegisterID srcDest)
    {
        if (imm.m_value <= std::numeric_limits<int32_t>::max()
            && imm.m_value >= std::numeric_limits<int32_t>::min()) {
            or64(TrustedImm32(static_cast<int32_t>(imm.m_value)), srcDest);
            return;
        }
        move(imm, scratchRegister());
        or64(scratchRegister(), srcDest);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            or64(op2, dest);
        else {
            move(op2, dest);
            or64(op1, dest);
        }
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or64(imm, dest);
    }
    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else
            m_assembler.subq_ir(imm.m_value, dest);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else {
            move(imm, scratchRegister());
            sub64(scratchRegister(), dest);
        }
    }

    void sub64(TrustedImm32 imm, Address address)
    {
        m_assembler.subq_im(imm.m_value, address.offset, address.base);
    }

    void sub64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.subq_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void sub64(Address src, RegisterID dest)
    {
        m_assembler.subq_mr(src.offset, src.base, dest);
    }

    void sub64(BaseIndex src, RegisterID dest)
    {
        m_assembler.subq_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void sub64(RegisterID src, Address dest)
    {
        m_assembler.subq_rm(src, dest.offset, dest.base);
    }

    void sub64(RegisterID src, BaseIndex dest)
    {
        m_assembler.subq_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }
    void xor64(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor64(op2, dest);
        else {
            move(op2, dest);
            xor64(op1, dest);
        }
    }

    void xor64(RegisterID src, Address dest)
    {
        m_assembler.xorq_rm(src, dest.offset, dest.base);
    }

    void xor64(RegisterID src, BaseIndex dest)
    {
        m_assembler.xorq_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor64(Address src, RegisterID dest)
    {
        m_assembler.xorq_mr(src.offset, src.base, dest);
    }

    void xor64(BaseIndex src, RegisterID dest)
    {
        m_assembler.xorq_mr(src.offset, src.base, src.index, src.scale, dest);
    }

    void xor64(TrustedImm32 imm, Address dest)
    {
        m_assembler.xorq_im(imm.m_value, dest.offset, dest.base);
    }

    void xor64(TrustedImm32 imm, BaseIndex dest)
    {
        m_assembler.xorq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale);
    }

    void xor64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }

    void xor64(TrustedImm64 imm, RegisterID srcDest)
    {
        move(imm, scratchRegister());
        xor64(scratchRegister(), srcDest);
    }

    void not64(RegisterID srcDest)
    {
        m_assembler.notq_r(srcDest);
    }

    void not64(Address dest)
    {
        m_assembler.notq_m(dest.offset, dest.base);
    }

    void not64(BaseIndex dest)
    {
        m_assembler.notq_m(dest.offset, dest.base, dest.index, dest.scale);
    }
    void load64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load64(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load64(dest, dest);
        }
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }
    void store64(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store64(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister());
            store64(src, scratchRegister());
        }
    }

    void store64(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movq_i32m(imm.m_value, address.offset, address.base);
    }

    void store64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movq_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (CAN_SIGN_EXTEND_32_64(imm.m_value)) {
            store64(TrustedImm32(static_cast<int32_t>(imm.m_value)), address);
            return;
        }
        move(imm, scratchRegister());
        store64(scratchRegister(), address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        move(imm, scratchRegister());
        m_assembler.movq_rm(scratchRegister(), address.offset, address.base, address.index, address.scale);
    }

    void storeZero64(ImplicitAddress address)
    {
        store64(TrustedImm32(0), address);
    }

    void storeZero64(BaseIndex address)
    {
        store64(TrustedImm32(0), address);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }
    void swap64(RegisterID src, RegisterID dest)
    {
        m_assembler.xchgq_rr(src, dest);
    }

    void swap64(RegisterID src, Address dest)
    {
        m_assembler.xchgq_rm(src, dest.offset, dest.base);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                test64(*resultCondition, left, left, dest);
                return;
            }
        }

        m_assembler.cmpq_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        set32(x86Condition(cond), dest);
    }
    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            if (left == right) {
                m_assembler.setnp_r(dest);
                return;
            }

            Jump isUnordered(m_assembler.jp());
            m_assembler.sete_r(dest);
            isUnordered.link(this);
            return;
        }

        if (cond == DoubleNotEqualOrUnordered) {
            if (left == right) {
                m_assembler.setp_r(dest);
                return;
            }

            m_assembler.setp_r(dest);
            m_assembler.setne_r(dest);
            return;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        m_assembler.setCC_r(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), dest);
    }
    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
                return branchTest64(*resultCondition, left, left);
        }
        m_assembler.cmpq_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
            m_assembler.testq_rr(left, left);
            return Jump(m_assembler.jCC(x86Condition(cond)));
        }
        move(right, scratchRegister());
        return branch64(cond, left, scratchRegister());
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        move(TrustedImmPtr(left.m_ptr), scratchRegister());
        return branch64(cond, Address(scratchRegister()), right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpq_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        move(right, scratchRegister());
        return branch64(cond, left, scratchRegister());
    }

    Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
    {
        m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, scratchRegister());
        return branch32(cond, scratchRegister(), right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
    {
        move(right, scratchRegister());
        return branchPtr(cond, left, scratchRegister());
    }
    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
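        // (Presumably seven bits rather than eight so that the byte-wide TEST cannot set the sign flag
        // differently from what the full 64-bit TEST would have produced.)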
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
    {
        move(mask, scratchRegister());
        return branchTest64(cond, reg, scratchRegister());
    }

    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        set32(x86Condition(cond), dest);
    }

    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        m_assembler.testq_rr(reg, mask);
        set32(x86Condition(cond), dest);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, scratchRegister());
        return branchTest64(cond, scratchRegister(), mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
    {
        m_assembler.testq_rm(reg, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd64(cond, src2, dest);
        move(src2, dest);
        return branchAdd64(cond, src1, dest);
    }

    Jump branchAdd64(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest)
            return branchAdd64(cond, op1, dest);
        if (op1.base == dest) {
            load64(op1, dest);
            return branchAdd64(cond, op2, dest);
        }
        move(op2, dest);
        return branchAdd64(cond, op1, dest);
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest)
    {
        return branchAdd64(cond, src2, src1, dest);
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, Address src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul64(src, dest);
        if (cond != Overflow)
            m_assembler.testq_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul64(cond, src2, dest);
        move(src2, dest);
        return branchMul64(cond, src1, dest);
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub64(cond, src2, dest);
    }

    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        neg64(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        cmov(x86Condition(cond), src, dest);
    }

    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }

    void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        m_assembler.cmpq_ir(right.m_value, left);

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }

    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
    {
        m_assembler.testq_rr(testReg, mask);
        cmov(x86Condition(cond), src, dest);
    }

    void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        ASSERT(isInvertible(cond));
        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");

        m_assembler.testq_rr(right, left);

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }

    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest)
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testq_rr(testReg, testReg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, testReg);
        else
            m_assembler.testq_i32r(mask.m_value, testReg);
        cmov(x86Condition(cond), src, dest);
    }

    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        ASSERT(isInvertible(cond));
        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");

        if (mask.m_value == -1)
            m_assembler.testq_rr(testReg, testReg);
        else if (!(mask.m_value & ~0x7f))
            m_assembler.testb_i8r(mask.m_value, testReg);
        else
            m_assembler.testq_i32r(mask.m_value, testReg);

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }
    template<typename LeftType, typename RightType>
    void moveDoubleConditionally64(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. Use moveDoubleConditionallyDouble().");

        if (thenCase != dest && elseCase != dest) {
            moveDouble(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest) {
            Jump falseCase = branch64(invert(cond), left, right);
            moveDouble(thenCase, dest);
            falseCase.link(this);
        } else {
            Jump trueCase = branch64(cond, left, right);
            moveDouble(elseCase, dest);
            trueCase.link(this);
        }
    }

    template<typename TestType, typename MaskType>
    void moveDoubleConditionallyTest64(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. Use moveDoubleConditionallyDouble().");

        if (elseCase == dest && isInvertible(cond)) {
            Jump falseCase = branchTest64(invert(cond), test, mask);
            moveDouble(thenCase, dest);
            falseCase.link(this);
        } else if (thenCase == dest) {
            Jump trueCase = branchTest64(cond, test, mask);
            moveDouble(elseCase, dest);
            trueCase.link(this);
        } else {
            Jump trueCase = branchTest64(cond, test, mask);
            moveDouble(elseCase, dest);
            Jump falseCase = jump();
            trueCase.link(this);
            moveDouble(thenCase, dest);
            falseCase.link(this);
        }
    }
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), X86Registers::r11);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), X86Registers::r10);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movq_mr(address.offset, address.base, dest);
        return result;
    }

    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.m_value, dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister());
        return branch64(cond, left, scratchRegister());
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister());
        return branch64(cond, left, scratchRegister());
    }

    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        padBeforePatch();
        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister());
        dataLabel = DataLabel32(this);
        return branch32(cond, left, scratchRegister());
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister());
        store64(scratchRegister(), address);
        return label;
    }
    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
    {
        return PatchableJump(branch64(cond, reg, imm));
    }

    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return PatchableJump(branch64(cond, left, right));
    }

    using MacroAssemblerX86Common::branch8;
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister());
        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister()), right);
    }

    using MacroAssemblerX86Common::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister());
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister(), address.base, TimesOne), mask8);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister());
        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister()), mask8);
    }

    void xchg64(RegisterID reg, Address address)
    {
        m_assembler.xchgq_rm(reg, address.offset, address.base);
    }

    void xchg64(RegisterID reg, BaseIndex address)
    {
        m_assembler.xchgq_rm(reg, address.offset, address.base, address.index, address.scale);
    }

    void atomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address, RegisterID result)
    {
        atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); });
    }

    void atomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address, RegisterID result)
    {
        atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); });
    }

    void atomicStrongCAS64(RegisterID expectedAndResult, RegisterID newValue, Address address)
    {
        atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); });
    }

    void atomicStrongCAS64(RegisterID expectedAndResult, RegisterID newValue, BaseIndex address)
    {
        atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); });
    }

    Jump branchAtomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address)
    {
        return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); });
    }

    Jump branchAtomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address)
    {
        return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); });
    }

    void atomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
    {
        atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result);
    }

    void atomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result)
    {
        atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result);
    }

    Jump branchAtomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address)
    {
        return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address);
    }

    Jump branchAtomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address)
    {
        return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address);
    }

    void atomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
    {
        atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result);
    }

    void atomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result)
    {
        atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result);
    }

    Jump branchAtomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address)
    {
        return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address);
    }

    Jump branchAtomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address)
    {
        return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address);
    }
    void atomicAdd64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        add64(imm, address);
    }

    void atomicAdd64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        add64(imm, address);
    }

    void atomicAdd64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        add64(reg, address);
    }

    void atomicAdd64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        add64(reg, address);
    }

    void atomicSub64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        sub64(imm, address);
    }

    void atomicSub64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        sub64(imm, address);
    }

    void atomicSub64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        sub64(reg, address);
    }

    void atomicSub64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        sub64(reg, address);
    }

    void atomicAnd64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        and64(imm, address);
    }

    void atomicAnd64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        and64(imm, address);
    }

    void atomicAnd64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        and64(reg, address);
    }

    void atomicAnd64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        and64(reg, address);
    }

    void atomicOr64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        or64(imm, address);
    }

    void atomicOr64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        or64(imm, address);
    }

    void atomicOr64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        or64(reg, address);
    }

    void atomicOr64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        or64(reg, address);
    }

    void atomicXor64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        xor64(imm, address);
    }

    void atomicXor64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        xor64(imm, address);
    }

    void atomicXor64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        xor64(reg, address);
    }

    void atomicXor64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        xor64(reg, address);
    }

    void atomicNeg64(Address address)
    {
        m_assembler.lock();
        neg64(address);
    }

    void atomicNeg64(BaseIndex address)
    {
        m_assembler.lock();
        neg64(address);
    }

    void atomicNot64(Address address)
    {
        m_assembler.lock();
        not64(address);
    }

    void atomicNot64(BaseIndex address)
    {
        m_assembler.lock();
        not64(address);
    }

    void atomicXchgAdd64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        m_assembler.xaddq_rm(reg, address.offset, address.base);
    }

    void atomicXchgAdd64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        m_assembler.xaddq_rm(reg, address.offset, address.base, address.index, address.scale);
    }

    void atomicXchg64(RegisterID reg, Address address)
    {
        m_assembler.xchgq_rm(reg, address.offset, address.base);
    }

    void atomicXchg64(RegisterID reg, BaseIndex address)
    {
        m_assembler.xchgq_rm(reg, address.offset, address.base, address.index, address.scale);
    }
#if ENABLE(FAST_TLS_JIT)
    void loadFromTLS64(uint32_t offset, RegisterID dst)
    {
        m_assembler.gs();
        m_assembler.movq_mr(offset, dst);
    }

    void storeToTLS64(RegisterID src, uint32_t offset)
    {
        m_assembler.gs();
        m_assembler.movq_rm(src, offset);
    }
#endif

    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttsd2siq_rr(src, dest);
    }

    void truncateDoubleToInt64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttsd2siq_rr(src, dest);
    }
    // int64Min should contain exactly 0x43E0000000000000 == 2^63, the magnitude of int64_t::min(). scratch may
    // be the same FPR as src.
    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min)
    {
        ASSERT(scratch != int64Min);

        // Since X86 does not have a floating point to unsigned integer instruction, we need to use the signed
        // integer conversion instruction. If the src is less than int64_t::min() then the results of the two
        // instructions are the same. Otherwise, we need to: subtract int64_t::min(); truncate double to
        // uint64_t; then add back int64_t::min() in the destination gpr.
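        // For example, with src = 2^63 + 42.0: the branch below is taken, scratch becomes 42.0 after the
        // subtraction, cvttsd2si yields 42, and OR-ing in 0x8000000000000000 restores 2^63 + 42 in dest
        // (the OR acts as an add because the truncated value is below 2^63, so its top bit is clear).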
        Jump large = branchDouble(DoubleGreaterThanOrEqual, src, int64Min);
        m_assembler.cvttsd2siq_rr(src, dest);
        Jump done = jump();
        large.link(this);
        moveDouble(src, scratch);
        m_assembler.subsd_rr(int64Min, scratch);
        m_assembler.movq_i64r(0x8000000000000000, scratchRegister());
        m_assembler.cvttsd2siq_rr(scratch, dest);
        m_assembler.orq_rr(scratchRegister(), dest);
        done.link(this);
    }
    void truncateFloatToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttss2siq_rr(src, dest);
    }

    void truncateFloatToInt64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttss2siq_rr(src, dest);
    }

    // int64Min should contain exactly 0x5f000000 == 2^63 as a float, the magnitude of int64_t::min(). scratch may be the
    // same FPR as src.
    void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min)
    {
        ASSERT(scratch != int64Min);

        // Since X86 does not have a floating point to unsigned integer instruction, we need to use the signed
        // integer conversion instruction. If the src is less than int64_t::min() then the results of the two
        // instructions are the same. Otherwise, we need to: subtract int64_t::min(); truncate the value to
        // uint64_t; then add back int64_t::min() in the destination gpr.

        Jump large = branchFloat(DoubleGreaterThanOrEqual, src, int64Min);
        m_assembler.cvttss2siq_rr(src, dest);
        Jump done = jump();
        large.link(this);
        moveDouble(src, scratch);
        m_assembler.subss_rr(int64Min, scratch);
        m_assembler.movq_i64r(0x8000000000000000, scratchRegister());
        m_assembler.cvttss2siq_rr(scratch, dest);
        m_assembler.orq_rr(scratchRegister(), dest);
        done.link(this);
    }
    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_rr(src, dest);
    }

    void convertInt64ToDouble(Address src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_mr(src.offset, src.base, dest);
    }

    void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2ssq_rr(src, dest);
    }

    void convertInt64ToFloat(Address src, FPRegisterID dest)
    {
        m_assembler.cvtsi2ssq_mr(src.offset, src.base, dest);
    }

    // One of scratch or scratch2 may be the same as src
    void convertUInt64ToDouble(RegisterID src, FPRegisterID dest, RegisterID scratch)
    {
        RegisterID scratch2 = scratchRegister();

        m_assembler.testq_rr(src, src);
        AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed));
        m_assembler.cvtsi2sdq_rr(src, dest);
        AssemblerLabel done = m_assembler.jmp();
        m_assembler.linkJump(signBitSet, m_assembler.label());
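        // The sign bit is set, so the value cannot be converted directly as a signed 64-bit integer.
        // Presumably the standard trick: halve the value while folding the shifted-out bit back into
        // bit 0 ("round to odd"), convert the halved value, then double the result with addsd below.
        // Keeping the low bit sticky avoids a double rounding in the final result.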
        m_assembler.movq_rr(src, scratch);
        m_assembler.movq_rr(src, scratch2);
        m_assembler.shrq_i8r(1, scratch);
        m_assembler.andq_ir(1, scratch2);
        m_assembler.orq_rr(scratch, scratch2);
        m_assembler.cvtsi2sdq_rr(scratch2, dest);
        m_assembler.addsd_rr(dest, dest);
        m_assembler.linkJump(done, m_assembler.label());
    }
    // One of scratch or scratch2 may be the same as src
    void convertUInt64ToFloat(RegisterID src, FPRegisterID dest, RegisterID scratch)
    {
        RegisterID scratch2 = scratchRegister();

        m_assembler.testq_rr(src, src);
        AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed));
        m_assembler.cvtsi2ssq_rr(src, dest);
        AssemblerLabel done = m_assembler.jmp();
        m_assembler.linkJump(signBitSet, m_assembler.label());
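        // Same halving ("round to odd") trick as convertUInt64ToDouble above, in single precision.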
        m_assembler.movq_rr(src, scratch);
        m_assembler.movq_rr(src, scratch2);
        m_assembler.shrq_i8r(1, scratch);
        m_assembler.andq_ir(1, scratch2);
        m_assembler.orq_rr(scratch, scratch2);
        m_assembler.cvtsi2ssq_rr(scratch2, dest);
        m_assembler.addss_rr(dest, dest);
        m_assembler.linkJump(done, m_assembler.label());
    }
    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
    }

    bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; }
    RegisterID scratchRegisterForBlinding() { return scratchRegister(); }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 8;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
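        // i.e. the ten bytes of the REX.W movq-immediate emitted by moveWithPatch(); the DataLabelPtr marks
        // the end of that instruction, so the patchable instruction starts totalBytes before the label.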
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        return startOfBranchPtrWithPatchOnRegister(label);
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
    {
        return startOfBranch32WithPatchOnRegister(label);
    }
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
    {
        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, s_scratchRegister);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }
private:
    // If lzcnt is not available, use this after BSR
    // to count the leading zeros.
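    // BSR leaves the bit index of the highest set bit in dst (0-63 for a nonzero source); the number of
    // leading zeros is then 63 - index, which for that range equals index ^ 0x3f, hence the xor below.
    // A zero source leaves the Zero flag set (and dst undefined), so 64 is materialized directly instead.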
    void clz64AfterBsr(RegisterID dst)
    {
        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
        move(TrustedImm32(64), dst);

        Jump skipNonZeroCase = jump();
        srcIsNonZero.link(this);
        xor64(TrustedImm32(0x3f), dst);
        skipNonZeroCase.link(this);
    }
    friend class LinkBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value());
        else if (call.isFlagSet(Call::Tail))
            X86Assembler::linkJump(code, call.m_label, function.value());
        else
            X86Assembler::linkCall(code, call.m_label, function.value());
    }
};

#endif // ENABLE(ASSEMBLER) && CPU(X86_64)