2 * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
31 #include "AssemblerBuffer.h"
32 #include "JITCompilationEffort.h"
35 #include <wtf/Assertions.h>
36 #include <wtf/Vector.h>
39 #include <xmmintrin.h>
44 inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
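// For illustration: CAN_SIGN_EXTEND_8_32(127) and CAN_SIGN_EXTEND_8_32(-128) are true,
// while CAN_SIGN_EXTEND_8_32(128) is false. The emitters below use this to choose
// between the short imm8 and the full imm32 forms of an instruction.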
46 namespace X86Registers {
92 #define FOR_EACH_CPU_REGISTER(V) \
93 FOR_EACH_CPU_GPREGISTER(V) \
94 FOR_EACH_CPU_SPECIAL_REGISTER(V) \
95 FOR_EACH_CPU_FPREGISTER(V)
97 #define FOR_EACH_CPU_GPREGISTER(V) \
106 FOR_EACH_X86_64_CPU_GPREGISTER(V)
108 #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
112 #define FOR_EACH_CPU_FPREGISTER(V) \
123 #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
125 #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
134 #endif // CPU(X86_64)
135 #endif // USE(MASM_PROBE)
140 typedef X86Registers::RegisterID RegisterID;
142 static RegisterID firstRegister() { return X86Registers::eax; }
143 static RegisterID lastRegister()
146 return X86Registers::r15;
148 return X86Registers::edi;
152 typedef X86Registers::XMMRegisterID XMMRegisterID;
153 typedef XMMRegisterID FPRegisterID;
155 static FPRegisterID firstFPRegister() { return X86Registers::xmm0; }
156 static FPRegisterID lastFPRegister()
159 return X86Registers::xmm15;
161 return X86Registers::xmm7;
183 ConditionC = ConditionB,
184 ConditionNC = ConditionAE,
193 OP_2BYTE_ESCAPE = 0x0F,
198 PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
209 OP_MOVSXD_GvEv = 0x63,
211 PRE_OPERAND_SIZE = 0x66,
214 OP_IMUL_GvEvIz = 0x69,
215 OP_GROUP1_EbIb = 0x80,
216 OP_GROUP1_EvIz = 0x81,
217 OP_GROUP1_EvIb = 0x83,
225 OP_GROUP1A_Ev = 0x8F,
232 OP_GROUP2_EvIb = 0xC1,
234 OP_GROUP11_EvIb = 0xC6,
235 OP_GROUP11_EvIz = 0xC7,
237 OP_GROUP2_Ev1 = 0xD1,
238 OP_GROUP2_EvCL = 0xD3,
240 OP_CALL_rel32 = 0xE8,
245 OP_GROUP3_EbIb = 0xF6,
    OP_GROUP3_EvIz = 0xF7, // Like OP_GROUP3_Ev, but carries an immediate when the group opcode is TEST.
252 OP2_MOVSD_VsdWsd = 0x10,
253 OP2_MOVSD_WsdVsd = 0x11,
254 OP2_MOVSS_VsdWsd = 0x10,
255 OP2_MOVSS_WsdVsd = 0x11,
256 OP2_CVTSI2SD_VsdEd = 0x2A,
257 OP2_CVTTSD2SI_GdWsd = 0x2C,
258 OP2_UCOMISD_VsdWsd = 0x2E,
259 OP2_ADDSD_VsdWsd = 0x58,
260 OP2_MULSD_VsdWsd = 0x59,
261 OP2_CVTSD2SS_VsdWsd = 0x5A,
262 OP2_CVTSS2SD_VsdWsd = 0x5A,
263 OP2_SUBSD_VsdWsd = 0x5C,
264 OP2_DIVSD_VsdWsd = 0x5E,
265 OP2_SQRTSD_VsdWsd = 0x51,
266 OP2_ANDNPD_VpdWpd = 0x55,
267 OP2_XORPD_VpdWpd = 0x57,
268 OP2_MOVD_VdEd = 0x6E,
269 OP2_MOVD_EdVd = 0x7E,
270 OP2_JCC_rel32 = 0x80,
272 OP2_3BYTE_ESCAPE = 0xAE,
273 OP2_IMUL_GvEv = 0xAF,
274 OP2_MOVZX_GvEb = 0xB6,
275 OP2_MOVSX_GvEb = 0xBE,
276 OP2_MOVZX_GvEw = 0xB7,
277 OP2_MOVSX_GvEw = 0xBF,
278 OP2_PEXTRW_GdUdIb = 0xC5,
279 OP2_PSLLQ_UdqIb = 0x73,
280 OP2_PSRLQ_UdqIb = 0x73,
    OP2_POR_VdqWdq = 0xEB,
288 TwoByteOpcodeID jccRel32(Condition cond)
290 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
293 TwoByteOpcodeID setccOpcode(Condition cond)
295 return (TwoByteOpcodeID)(OP_SETCC + cond);
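    // For illustration: the Jcc and SETcc opcodes are laid out so that the condition code
    // can simply be added to a base opcode; e.g. jccRel32(ConditionE) yields 0x84 (JE rel32,
    // after the 0F escape) and setccOpcode(ConditionE) yields 0x94 (SETE r/m8).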
329 GROUP14_OP_PSLLQ = 6,
330 GROUP14_OP_PSRLQ = 2,
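        // Note: PSLLQ and PSRLQ share opcode 66 0F 73 and are distinguished only by the
        // /6 vs /2 opcode-extension field in ModRM, hence the two group constants above.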
332 ESCAPE_DD_FSTP_doubleReal = 3,
335 class X86InstructionFormatter;
339 : m_indexOfLastWatchpoint(INT_MIN)
340 , m_indexOfTailOfLastWatchpoint(INT_MIN)
344 AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
348 void push_r(RegisterID reg)
350 m_formatter.oneByteOp(OP_PUSH_EAX, reg);
353 void pop_r(RegisterID reg)
355 m_formatter.oneByteOp(OP_POP_EAX, reg);
358 void push_i32(int imm)
360 m_formatter.oneByteOp(OP_PUSH_Iz);
361 m_formatter.immediate32(imm);
364 void push_m(int offset, RegisterID base)
366 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
369 void pop_m(int offset, RegisterID base)
371 m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
374 // Arithmetic operations:
377 void adcl_im(int imm, const void* addr)
379 if (CAN_SIGN_EXTEND_8_32(imm)) {
380 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
381 m_formatter.immediate8(imm);
383 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
384 m_formatter.immediate32(imm);
389 void addl_rr(RegisterID src, RegisterID dst)
391 m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
394 void addl_mr(int offset, RegisterID base, RegisterID dst)
396 m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
400 void addl_mr(const void* addr, RegisterID dst)
402 m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
406 void addl_rm(RegisterID src, int offset, RegisterID base)
408 m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
411 void addl_ir(int imm, RegisterID dst)
413 if (CAN_SIGN_EXTEND_8_32(imm)) {
414 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
415 m_formatter.immediate8(imm);
417 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
418 m_formatter.immediate32(imm);
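    // For illustration (a sketch of the bytes produced, under the encodings above):
    // addl_ir(1, X86Registers::eax) emits 83 C0 01 (imm8 form), whereas
    // addl_ir(1000, X86Registers::eax) emits 81 C0 E8 03 00 00 (imm32 form).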
422 void addl_im(int imm, int offset, RegisterID base)
424 if (CAN_SIGN_EXTEND_8_32(imm)) {
425 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
426 m_formatter.immediate8(imm);
428 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
429 m_formatter.immediate32(imm);
434 void addq_rr(RegisterID src, RegisterID dst)
436 m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
439 void addq_mr(int offset, RegisterID base, RegisterID dst)
441 m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
444 void addq_ir(int imm, RegisterID dst)
446 if (CAN_SIGN_EXTEND_8_32(imm)) {
447 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
448 m_formatter.immediate8(imm);
450 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
451 m_formatter.immediate32(imm);
455 void addq_im(int imm, int offset, RegisterID base)
457 if (CAN_SIGN_EXTEND_8_32(imm)) {
458 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
459 m_formatter.immediate8(imm);
461 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
462 m_formatter.immediate32(imm);
466 void addl_im(int imm, const void* addr)
468 if (CAN_SIGN_EXTEND_8_32(imm)) {
469 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
470 m_formatter.immediate8(imm);
472 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
473 m_formatter.immediate32(imm);
478 void andl_rr(RegisterID src, RegisterID dst)
480 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
483 void andl_mr(int offset, RegisterID base, RegisterID dst)
485 m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
488 void andl_rm(RegisterID src, int offset, RegisterID base)
490 m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
493 void andl_ir(int imm, RegisterID dst)
495 if (CAN_SIGN_EXTEND_8_32(imm)) {
496 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
497 m_formatter.immediate8(imm);
499 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
500 m_formatter.immediate32(imm);
504 void andl_im(int imm, int offset, RegisterID base)
506 if (CAN_SIGN_EXTEND_8_32(imm)) {
507 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
508 m_formatter.immediate8(imm);
510 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
511 m_formatter.immediate32(imm);
516 void andq_rr(RegisterID src, RegisterID dst)
518 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
521 void andq_ir(int imm, RegisterID dst)
523 if (CAN_SIGN_EXTEND_8_32(imm)) {
524 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
525 m_formatter.immediate8(imm);
527 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
528 m_formatter.immediate32(imm);
532 void andl_im(int imm, const void* addr)
534 if (CAN_SIGN_EXTEND_8_32(imm)) {
535 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
536 m_formatter.immediate8(imm);
538 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
539 m_formatter.immediate32(imm);
544 void dec_r(RegisterID dst)
546 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_OR, dst);
550 void decq_r(RegisterID dst)
552 m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_OR, dst);
554 #endif // CPU(X86_64)
556 void inc_r(RegisterID dst)
558 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
562 void incq_r(RegisterID dst)
564 m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
566 #endif // CPU(X86_64)
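    // Note: the inc/dec emitters above reuse GROUP1 enumerators purely for their numeric
    // values: within group 5, /0 (the value of GROUP1_OP_ADD) selects INC and /1 (the
    // value of GROUP1_OP_OR) selects DEC.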
568 void negl_r(RegisterID dst)
570 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
574 void negq_r(RegisterID dst)
576 m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
580 void negl_m(int offset, RegisterID base)
582 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
585 void notl_r(RegisterID dst)
587 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
590 void notl_m(int offset, RegisterID base)
592 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
595 void orl_rr(RegisterID src, RegisterID dst)
597 m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
600 void orl_mr(int offset, RegisterID base, RegisterID dst)
602 m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
605 void orl_rm(RegisterID src, int offset, RegisterID base)
607 m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
610 void orl_ir(int imm, RegisterID dst)
612 if (CAN_SIGN_EXTEND_8_32(imm)) {
613 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
614 m_formatter.immediate8(imm);
616 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
617 m_formatter.immediate32(imm);
621 void orl_im(int imm, int offset, RegisterID base)
623 if (CAN_SIGN_EXTEND_8_32(imm)) {
624 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
625 m_formatter.immediate8(imm);
627 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
628 m_formatter.immediate32(imm);
633 void orq_rr(RegisterID src, RegisterID dst)
635 m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
638 void orq_ir(int imm, RegisterID dst)
640 if (CAN_SIGN_EXTEND_8_32(imm)) {
641 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
642 m_formatter.immediate8(imm);
644 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
645 m_formatter.immediate32(imm);
649 void orl_im(int imm, const void* addr)
651 if (CAN_SIGN_EXTEND_8_32(imm)) {
652 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
653 m_formatter.immediate8(imm);
655 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
656 m_formatter.immediate32(imm);
660 void orl_rm(RegisterID src, const void* addr)
662 m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
666 void subl_rr(RegisterID src, RegisterID dst)
668 m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
671 void subl_mr(int offset, RegisterID base, RegisterID dst)
673 m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
676 void subl_rm(RegisterID src, int offset, RegisterID base)
678 m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
681 void subl_ir(int imm, RegisterID dst)
683 if (CAN_SIGN_EXTEND_8_32(imm)) {
684 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
685 m_formatter.immediate8(imm);
687 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
688 m_formatter.immediate32(imm);
692 void subl_im(int imm, int offset, RegisterID base)
694 if (CAN_SIGN_EXTEND_8_32(imm)) {
695 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
696 m_formatter.immediate8(imm);
698 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
699 m_formatter.immediate32(imm);
704 void subq_rr(RegisterID src, RegisterID dst)
706 m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
709 void subq_ir(int imm, RegisterID dst)
711 if (CAN_SIGN_EXTEND_8_32(imm)) {
712 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
713 m_formatter.immediate8(imm);
715 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
716 m_formatter.immediate32(imm);
720 void subl_im(int imm, const void* addr)
722 if (CAN_SIGN_EXTEND_8_32(imm)) {
723 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
724 m_formatter.immediate8(imm);
726 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
727 m_formatter.immediate32(imm);
732 void xorl_rr(RegisterID src, RegisterID dst)
734 m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
737 void xorl_mr(int offset, RegisterID base, RegisterID dst)
739 m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
742 void xorl_rm(RegisterID src, int offset, RegisterID base)
744 m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
747 void xorl_im(int imm, int offset, RegisterID base)
749 if (CAN_SIGN_EXTEND_8_32(imm)) {
750 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
751 m_formatter.immediate8(imm);
753 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
754 m_formatter.immediate32(imm);
758 void xorl_ir(int imm, RegisterID dst)
760 if (CAN_SIGN_EXTEND_8_32(imm)) {
761 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
762 m_formatter.immediate8(imm);
764 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
765 m_formatter.immediate32(imm);
770 void xorq_rr(RegisterID src, RegisterID dst)
772 m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
775 void xorq_ir(int imm, RegisterID dst)
777 if (CAN_SIGN_EXTEND_8_32(imm)) {
778 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
779 m_formatter.immediate8(imm);
781 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
782 m_formatter.immediate32(imm);
786 void xorq_rm(RegisterID src, int offset, RegisterID base)
788 m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
791 void rorq_i8r(int imm, RegisterID dst)
794 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
796 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
797 m_formatter.immediate8(imm);
803 void sarl_i8r(int imm, RegisterID dst)
806 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
808 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
809 m_formatter.immediate8(imm);
813 void sarl_CLr(RegisterID dst)
815 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
818 void shrl_i8r(int imm, RegisterID dst)
821 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
823 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
824 m_formatter.immediate8(imm);
828 void shrl_CLr(RegisterID dst)
830 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
833 void shll_i8r(int imm, RegisterID dst)
836 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
838 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
839 m_formatter.immediate8(imm);
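    // For illustration: shll_i8r(1, X86Registers::eax) emits D1 E0 (the shift-by-one
    // form), while shll_i8r(4, X86Registers::eax) emits C1 E0 04.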
843 void shll_CLr(RegisterID dst)
845 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
849 void sarq_CLr(RegisterID dst)
851 m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
854 void sarq_i8r(int imm, RegisterID dst)
857 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
859 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
860 m_formatter.immediate8(imm);
864 void shlq_i8r(int imm, RegisterID dst)
867 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
869 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
870 m_formatter.immediate8(imm);
873 #endif // CPU(X86_64)
875 void imull_rr(RegisterID src, RegisterID dst)
877 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
881 void imulq_rr(RegisterID src, RegisterID dst)
883 m_formatter.twoByteOp64(OP2_IMUL_GvEv, dst, src);
885 #endif // CPU(X86_64)
887 void imull_mr(int offset, RegisterID base, RegisterID dst)
889 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
892 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
894 m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
895 m_formatter.immediate32(value);
898 void idivl_r(RegisterID dst)
900 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
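    // Note: idivl divides the 64-bit dividend in edx:eax by the operand, leaving the
    // quotient in eax and the remainder in edx; callers normally emit cdq() first to
    // sign-extend eax into edx.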
905 void cmpl_rr(RegisterID src, RegisterID dst)
907 m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
910 void cmpl_rm(RegisterID src, int offset, RegisterID base)
912 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
915 void cmpl_mr(int offset, RegisterID base, RegisterID src)
917 m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
920 void cmpl_ir(int imm, RegisterID dst)
922 if (CAN_SIGN_EXTEND_8_32(imm)) {
923 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
924 m_formatter.immediate8(imm);
926 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
927 m_formatter.immediate32(imm);
931 void cmpl_ir_force32(int imm, RegisterID dst)
933 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
934 m_formatter.immediate32(imm);
937 void cmpl_im(int imm, int offset, RegisterID base)
939 if (CAN_SIGN_EXTEND_8_32(imm)) {
940 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
941 m_formatter.immediate8(imm);
943 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
944 m_formatter.immediate32(imm);
948 void cmpb_im(int imm, int offset, RegisterID base)
950 m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
951 m_formatter.immediate8(imm);
954 void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
956 m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
957 m_formatter.immediate8(imm);
961 void cmpb_im(int imm, const void* addr)
963 m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
964 m_formatter.immediate8(imm);
968 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
970 if (CAN_SIGN_EXTEND_8_32(imm)) {
971 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
972 m_formatter.immediate8(imm);
974 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
975 m_formatter.immediate32(imm);
979 void cmpl_im_force32(int imm, int offset, RegisterID base)
981 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
982 m_formatter.immediate32(imm);
986 void cmpq_rr(RegisterID src, RegisterID dst)
988 m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
991 void cmpq_rm(RegisterID src, int offset, RegisterID base)
993 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
996 void cmpq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
998 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, index, scale, offset);
1001 void cmpq_mr(int offset, RegisterID base, RegisterID src)
1003 m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
1006 void cmpq_ir(int imm, RegisterID dst)
1008 if (CAN_SIGN_EXTEND_8_32(imm)) {
1009 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
1010 m_formatter.immediate8(imm);
1012 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
1013 m_formatter.immediate32(imm);
1017 void cmpq_im(int imm, int offset, RegisterID base)
1019 if (CAN_SIGN_EXTEND_8_32(imm)) {
1020 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
1021 m_formatter.immediate8(imm);
1023 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
1024 m_formatter.immediate32(imm);
1028 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
1030 if (CAN_SIGN_EXTEND_8_32(imm)) {
1031 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
1032 m_formatter.immediate8(imm);
1034 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
1035 m_formatter.immediate32(imm);
1039 void cmpl_rm(RegisterID reg, const void* addr)
1041 m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
1044 void cmpl_im(int imm, const void* addr)
1046 if (CAN_SIGN_EXTEND_8_32(imm)) {
1047 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
1048 m_formatter.immediate8(imm);
1050 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
1051 m_formatter.immediate32(imm);
1056 void cmpw_ir(int imm, RegisterID dst)
1058 if (CAN_SIGN_EXTEND_8_32(imm)) {
1059 m_formatter.prefix(PRE_OPERAND_SIZE);
1060 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
1061 m_formatter.immediate8(imm);
1063 m_formatter.prefix(PRE_OPERAND_SIZE);
1064 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
1065 m_formatter.immediate16(imm);
1069 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1071 m_formatter.prefix(PRE_OPERAND_SIZE);
1072 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
1075 void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
1077 if (CAN_SIGN_EXTEND_8_32(imm)) {
1078 m_formatter.prefix(PRE_OPERAND_SIZE);
1079 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
1080 m_formatter.immediate8(imm);
1082 m_formatter.prefix(PRE_OPERAND_SIZE);
1083 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
1084 m_formatter.immediate16(imm);
1088 void testl_rr(RegisterID src, RegisterID dst)
1090 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
1093 void testl_i32r(int imm, RegisterID dst)
1095 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
1096 m_formatter.immediate32(imm);
1099 void testl_i32m(int imm, int offset, RegisterID base)
1101 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
1102 m_formatter.immediate32(imm);
1105 void testb_rr(RegisterID src, RegisterID dst)
1107 m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
1110 void testb_im(int imm, int offset, RegisterID base)
1112 m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
1113 m_formatter.immediate8(imm);
1116 void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
1118 m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
1119 m_formatter.immediate8(imm);
1123 void testb_im(int imm, const void* addr)
1125 m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
1126 m_formatter.immediate8(imm);
1130 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
1132 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
1133 m_formatter.immediate32(imm);
1137 void testq_rr(RegisterID src, RegisterID dst)
1139 m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
1142 void testq_rm(RegisterID src, int offset, RegisterID base)
1144 m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
1147 void testq_i32r(int imm, RegisterID dst)
1149 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
1150 m_formatter.immediate32(imm);
1153 void testq_i32m(int imm, int offset, RegisterID base)
1155 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
1156 m_formatter.immediate32(imm);
1159 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
1161 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
1162 m_formatter.immediate32(imm);
1166 void testw_rr(RegisterID src, RegisterID dst)
1168 m_formatter.prefix(PRE_OPERAND_SIZE);
1169 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
1172 void testb_i8r(int imm, RegisterID dst)
1174 m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
1175 m_formatter.immediate8(imm);
1178 void setCC_r(Condition cond, RegisterID dst)
1180 m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
1183 void sete_r(RegisterID dst)
1185 m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
1188 void setz_r(RegisterID dst)
1193 void setne_r(RegisterID dst)
1195 m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
1198 void setnz_r(RegisterID dst)
1203 // Various move ops:
1207 m_formatter.oneByteOp(OP_CDQ);
1210 void fstpl(int offset, RegisterID base)
1212 m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
1215 void xchgl_rr(RegisterID src, RegisterID dst)
1217 if (src == X86Registers::eax)
1218 m_formatter.oneByteOp(OP_XCHG_EAX, dst);
1219 else if (dst == X86Registers::eax)
1220 m_formatter.oneByteOp(OP_XCHG_EAX, src);
1222 m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
1226 void xchgq_rr(RegisterID src, RegisterID dst)
1228 if (src == X86Registers::eax)
1229 m_formatter.oneByteOp64(OP_XCHG_EAX, dst);
1230 else if (dst == X86Registers::eax)
1231 m_formatter.oneByteOp64(OP_XCHG_EAX, src);
1233 m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
1237 void movl_rr(RegisterID src, RegisterID dst)
1239 m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
1242 void movl_rm(RegisterID src, int offset, RegisterID base)
1244 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
1247 void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
1249 m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
1252 void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1254 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
1257 void movl_mEAX(const void* addr)
1259 m_formatter.oneByteOp(OP_MOV_EAXOv);
1261 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1263 m_formatter.immediate32(reinterpret_cast<int>(addr));
1267 void movl_mr(int offset, RegisterID base, RegisterID dst)
1269 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
1272 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
1274 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
1277 void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
1279 m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
1282 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1284 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
1287 void movl_i32r(int imm, RegisterID dst)
1289 m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
1290 m_formatter.immediate32(imm);
1293 void movl_i32m(int imm, int offset, RegisterID base)
1295 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
1296 m_formatter.immediate32(imm);
1299 void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
1301 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
1302 m_formatter.immediate32(imm);
1306 void movb_i8m(int imm, const void* addr)
1308 ASSERT(-128 <= imm && imm < 128);
1309 m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
1310 m_formatter.immediate8(imm);
1314 void movb_i8m(int imm, int offset, RegisterID base)
1316 ASSERT(-128 <= imm && imm < 128);
1317 m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
1318 m_formatter.immediate8(imm);
1321 void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
1323 ASSERT(-128 <= imm && imm < 128);
1324 m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
1325 m_formatter.immediate8(imm);
1329 void movb_rm(RegisterID src, const void* addr)
1331 m_formatter.oneByteOp(OP_MOV_EbGb, src, addr);
1335 void movb_rm(RegisterID src, int offset, RegisterID base)
1337 m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, offset);
1340 void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1342 m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
1345 void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1347 m_formatter.prefix(PRE_OPERAND_SIZE);
1348 m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
1351 void movl_EAXm(const void* addr)
1353 m_formatter.oneByteOp(OP_MOV_OvEAX);
1355 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1357 m_formatter.immediate32(reinterpret_cast<int>(addr));
1362 void movq_rr(RegisterID src, RegisterID dst)
1364 m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
1367 void movq_rm(RegisterID src, int offset, RegisterID base)
1369 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
1372 void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
1374 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
1377 void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1379 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
1382 void movq_mEAX(const void* addr)
1384 m_formatter.oneByteOp64(OP_MOV_EAXOv);
1385 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1388 void movq_EAXm(const void* addr)
1390 m_formatter.oneByteOp64(OP_MOV_OvEAX);
1391 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1394 void movq_mr(int offset, RegisterID base, RegisterID dst)
1396 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
1399 void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
1401 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
1404 void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
1406 m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
1409 void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1411 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
1414 void movq_i32m(int imm, int offset, RegisterID base)
1416 m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
1417 m_formatter.immediate32(imm);
1420 void movq_i64r(int64_t imm, RegisterID dst)
1422 m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
1423 m_formatter.immediate64(imm);
1426 void movsxd_rr(RegisterID src, RegisterID dst)
1428 m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
1433 void movl_rm(RegisterID src, const void* addr)
1435 if (src == X86Registers::eax)
1438 m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
1441 void movl_mr(const void* addr, RegisterID dst)
1443 if (dst == X86Registers::eax)
1446 m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
1449 void movl_i32m(int imm, const void* addr)
1451 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
1452 m_formatter.immediate32(imm);
1456 void movzwl_mr(int offset, RegisterID base, RegisterID dst)
1458 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
1461 void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1463 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
1466 void movswl_mr(int offset, RegisterID base, RegisterID dst)
1468 m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
1471 void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1473 m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
1476 void movzbl_mr(int offset, RegisterID base, RegisterID dst)
1478 m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
1481 void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1483 m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
1487 void movzbl_mr(const void* address, RegisterID dst)
1489 m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, address);
1493 void movsbl_mr(int offset, RegisterID base, RegisterID dst)
1495 m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
1498 void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1500 m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
1503 void movzbl_rr(RegisterID src, RegisterID dst)
1505 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1506 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1507 // REX prefixes are defined to be silently ignored by the processor.
1508 m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
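    // For illustration: on x86-64, movzbl_rr(X86Registers::eax, X86Registers::edi) emits
    // 40 0F B6 F8, i.e. "movzbl %al, %edi" with a redundant (but harmless) REX prefix.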
1511 void leal_mr(int offset, RegisterID base, RegisterID dst)
1513 m_formatter.oneByteOp(OP_LEA, dst, base, offset);
1516 void leaq_mr(int offset, RegisterID base, RegisterID dst)
1518 m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
1524 AssemblerLabel call()
1526 m_formatter.oneByteOp(OP_CALL_rel32);
1527 return m_formatter.immediateRel32();
1530 AssemblerLabel call(RegisterID dst)
1532 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
1533 return m_formatter.label();
1536 void call_m(int offset, RegisterID base)
1538 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
1541 AssemblerLabel jmp()
1543 m_formatter.oneByteOp(OP_JMP_rel32);
1544 return m_formatter.immediateRel32();
    // Return an AssemblerLabel so we have a label to the jump, which lets us use this
    // to make a tail-recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1550 AssemblerLabel jmp_r(RegisterID dst)
1552 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
1553 return m_formatter.label();
1556 void jmp_m(int offset, RegisterID base)
1558 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
1562 void jmp_m(const void* address)
1564 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
1568 AssemblerLabel jne()
1570 m_formatter.twoByteOp(jccRel32(ConditionNE));
1571 return m_formatter.immediateRel32();
    AssemblerLabel jnz() { return jne(); }

    AssemblerLabel je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
1592 m_formatter.twoByteOp(jccRel32(ConditionL));
1593 return m_formatter.immediateRel32();
1598 m_formatter.twoByteOp(jccRel32(ConditionB));
1599 return m_formatter.immediateRel32();
1602 AssemblerLabel jle()
1604 m_formatter.twoByteOp(jccRel32(ConditionLE));
1605 return m_formatter.immediateRel32();
1608 AssemblerLabel jbe()
1610 m_formatter.twoByteOp(jccRel32(ConditionBE));
1611 return m_formatter.immediateRel32();
1614 AssemblerLabel jge()
1616 m_formatter.twoByteOp(jccRel32(ConditionGE));
1617 return m_formatter.immediateRel32();
1622 m_formatter.twoByteOp(jccRel32(ConditionG));
1623 return m_formatter.immediateRel32();
1628 m_formatter.twoByteOp(jccRel32(ConditionA));
1629 return m_formatter.immediateRel32();
1632 AssemblerLabel jae()
1634 m_formatter.twoByteOp(jccRel32(ConditionAE));
1635 return m_formatter.immediateRel32();
1640 m_formatter.twoByteOp(jccRel32(ConditionO));
1641 return m_formatter.immediateRel32();
1644 AssemblerLabel jnp()
1646 m_formatter.twoByteOp(jccRel32(ConditionNP));
1647 return m_formatter.immediateRel32();
1652 m_formatter.twoByteOp(jccRel32(ConditionP));
1653 return m_formatter.immediateRel32();
1658 m_formatter.twoByteOp(jccRel32(ConditionS));
1659 return m_formatter.immediateRel32();
1662 AssemblerLabel jCC(Condition cond)
1664 m_formatter.twoByteOp(jccRel32(cond));
1665 return m_formatter.immediateRel32();
1670 void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
1672 m_formatter.prefix(PRE_SSE_F2);
1673 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
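    // For illustration: addsd_rr(X86Registers::xmm1, X86Registers::xmm0) emits
    // F2 0F 58 C1 - the F2 prefix, the 0F escape, OP2_ADDSD_VsdWsd, and a register-form
    // ModRM byte. The other SSE helpers below follow the same prefix + twoByteOp pattern.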
1676 void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1678 m_formatter.prefix(PRE_SSE_F2);
1679 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
1683 void addsd_mr(const void* address, XMMRegisterID dst)
1685 m_formatter.prefix(PRE_SSE_F2);
1686 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
1690 void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
1692 m_formatter.prefix(PRE_SSE_F2);
1693 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
1697 void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
1699 m_formatter.prefix(PRE_SSE_F2);
1700 m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
1704 void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
1706 m_formatter.prefix(PRE_SSE_F2);
1707 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
1711 void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
1713 m_formatter.prefix(PRE_SSE_F2);
1714 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
1718 void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
1720 m_formatter.prefix(PRE_SSE_F2);
1721 m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1724 void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
1726 m_formatter.prefix(PRE_SSE_F2);
1727 m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
1730 void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
1732 m_formatter.prefix(PRE_SSE_F3);
1733 m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
1737 void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
1739 m_formatter.prefix(PRE_SSE_F2);
1740 m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1744 void movd_rr(XMMRegisterID src, RegisterID dst)
1746 m_formatter.prefix(PRE_SSE_66);
1747 m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
1750 void movd_rr(RegisterID src, XMMRegisterID dst)
1752 m_formatter.prefix(PRE_SSE_66);
1753 m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
1757 void movq_rr(XMMRegisterID src, RegisterID dst)
1759 m_formatter.prefix(PRE_SSE_66);
1760 m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
1763 void movq_rr(RegisterID src, XMMRegisterID dst)
1765 m_formatter.prefix(PRE_SSE_66);
1766 m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
1770 void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
1772 m_formatter.prefix(PRE_SSE_F2);
1773 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1776 void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
1778 m_formatter.prefix(PRE_SSE_F2);
1779 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
1782 void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1784 m_formatter.prefix(PRE_SSE_F2);
1785 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
1788 void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1790 m_formatter.prefix(PRE_SSE_F3);
1791 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
1794 void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1796 m_formatter.prefix(PRE_SSE_F2);
1797 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
1800 void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
1802 m_formatter.prefix(PRE_SSE_F2);
1803 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
1806 void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
1808 m_formatter.prefix(PRE_SSE_F3);
1809 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
1813 void movsd_mr(const void* address, XMMRegisterID dst)
1815 m_formatter.prefix(PRE_SSE_F2);
1816 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
1818 void movsd_rm(XMMRegisterID src, const void* address)
1820 m_formatter.prefix(PRE_SSE_F2);
1821 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
1825 void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
1827 m_formatter.prefix(PRE_SSE_F2);
1828 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1831 void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1833 m_formatter.prefix(PRE_SSE_F2);
1834 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
1837 void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
1839 m_formatter.prefix(PRE_SSE_66);
1840 m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
1841 m_formatter.immediate8(whichWord);
1844 void psllq_i8r(int imm, XMMRegisterID dst)
1846 m_formatter.prefix(PRE_SSE_66);
1847 m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
1848 m_formatter.immediate8(imm);
1851 void psrlq_i8r(int imm, XMMRegisterID dst)
1853 m_formatter.prefix(PRE_SSE_66);
1854 m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
1855 m_formatter.immediate8(imm);
1858 void por_rr(XMMRegisterID src, XMMRegisterID dst)
1860 m_formatter.prefix(PRE_SSE_66);
1861 m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
1864 void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
1866 m_formatter.prefix(PRE_SSE_F2);
1867 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1870 void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1872 m_formatter.prefix(PRE_SSE_F2);
1873 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
1876 void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
1878 m_formatter.prefix(PRE_SSE_66);
1879 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1882 void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
1884 m_formatter.prefix(PRE_SSE_66);
1885 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
1888 void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
1890 m_formatter.prefix(PRE_SSE_F2);
1891 m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1894 void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1896 m_formatter.prefix(PRE_SSE_F2);
1897 m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
1900 void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
1902 m_formatter.prefix(PRE_SSE_66);
1903 m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
1906 void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
1908 m_formatter.prefix(PRE_SSE_66);
1909 m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
1912 void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
1914 m_formatter.prefix(PRE_SSE_F2);
1915 m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1918 // Misc instructions:
1922 m_formatter.oneByteOp(OP_INT3);
1927 m_formatter.oneByteOp(OP_RET);
1930 void predictNotTaken()
1932 m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
1937 m_formatter.threeByteOp(OP3_MFENCE);
1940 // Assembler admin methods:
1942 size_t codeSize() const
1944 return m_formatter.codeSize();
1947 AssemblerLabel labelForWatchpoint()
1949 AssemblerLabel result = m_formatter.label();
1950 if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
1952 m_indexOfLastWatchpoint = result.m_offset;
1953 m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
1957 AssemblerLabel labelIgnoringWatchpoints()
1959 return m_formatter.label();
1962 AssemblerLabel label()
1964 AssemblerLabel result = m_formatter.label();
1965 while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
1967 result = m_formatter.label();
1972 AssemblerLabel align(int alignment)
1974 while (!m_formatter.isAligned(alignment))
1975 m_formatter.oneByteOp(OP_HLT);
1980 // Linking & patching:
1982 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1983 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1984 // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool, the 'repatch' and 'relink' methods should be used.
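    // A minimal usage sketch (assuming the buffer is still writable):
    //
    //     AssemblerLabel branch = jne();   // emits 0F 85 followed by a zero rel32
    //     ...                              // emit the not-taken path
    //     linkJump(branch, label());       // back-patch the rel32 to target this point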
1988 void linkJump(AssemblerLabel from, AssemblerLabel to)
1990 ASSERT(from.isSet());
1993 char* code = reinterpret_cast<char*>(m_formatter.data());
1994 ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
1995 setRel32(code + from.m_offset, code + to.m_offset);
1998 static void linkJump(void* code, AssemblerLabel from, void* to)
2000 ASSERT(from.isSet());
2002 setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
2005 static void linkCall(void* code, AssemblerLabel from, void* to)
2007 ASSERT(from.isSet());
2009 setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
2012 static void linkPointer(void* code, AssemblerLabel where, void* value)
2014 ASSERT(where.isSet());
2016 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
2019 static void relinkJump(void* from, void* to)
2024 static void relinkCall(void* from, void* to)
2029 static void repatchCompact(void* where, int32_t value)
2031 ASSERT(value >= std::numeric_limits<int8_t>::min());
2032 ASSERT(value <= std::numeric_limits<int8_t>::max());
2033 setInt8(where, value);
2036 static void repatchInt32(void* where, int32_t value)
2038 setInt32(where, value);
2041 static void repatchPointer(void* where, void* value)
2043 setPointer(where, value);
2046 static void* readPointer(void* where)
2048 return reinterpret_cast<void**>(where)[-1];
2051 static void replaceWithJump(void* instructionStart, void* to)
2053 uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
2054 uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
2055 intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
2056 ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
2057 *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
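        // The rel32 written above is relative to the end of the 5-byte jump (ptr + 5),
        // matching the E9 (JMP rel32) encoding.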
2060 static ptrdiff_t maxJumpReplacementSize()
2066 static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
2068 const unsigned instructionSize = 10; // REX.W MOV IMM64
2069 const int rexBytes = 1;
2070 const int opcodeBytes = 1;
2071 uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
2072 ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
2073 ptr[1] = OP_MOV_EAXIv | (dst & 7);
2080 for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
2081 ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
2084 static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst)
        // We only revert jumps on inline caches, and inline caches always use the scratch register (r11).
        // FIXME: If the above is ever false then we need to make this smarter with respect to emitting
        // the REX prefix.
2089 ASSERT(dst == X86Registers::r11);
2090 const unsigned instructionSize = 6; // REX MOV IMM32
2091 const int rexBytes = 1;
2092 const int opcodeBytes = 1;
2093 uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
2094 ptr[0] = PRE_REX | (dst >> 3);
2095 ptr[1] = OP_MOV_EAXIv | (dst & 7);
2102 for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
2103 ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
2107 static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
2109 const int opcodeBytes = 1;
2110 const int modRMBytes = 1;
2111 ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
2112 uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
2113 ptr[0] = OP_GROUP1_EvIz;
2114 ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst;
2120 for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
2121 ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
2124 static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
2126 ASSERT_UNUSED(offset, !offset);
2127 const int opcodeBytes = 1;
2128 const int modRMBytes = 1;
2129 ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
2130 uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
2131 ptr[0] = OP_GROUP1_EvIz;
2132 ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst;
2138 for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
2139 ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
2142 static void replaceWithLoad(void* instructionStart)
2144 uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
2146 if ((*ptr & ~15) == PRE_REX)
2156 RELEASE_ASSERT_NOT_REACHED();
2160 static void replaceWithAddressComputation(void* instructionStart)
2162 uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
2164 if ((*ptr & ~15) == PRE_REX)
2174 RELEASE_ASSERT_NOT_REACHED();
2178 static unsigned getCallReturnOffset(AssemblerLabel call)
2180 ASSERT(call.isSet());
2181 return call.m_offset;
2184 static void* getRelocatedAddress(void* code, AssemblerLabel label)
2186 ASSERT(label.isSet());
2187 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
2190 static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
2192 return b.m_offset - a.m_offset;
2195 unsigned debugOffset() { return m_formatter.debugOffset(); }
2199 m_formatter.oneByteOp(OP_NOP);
2202 static void fillNops(void* base, size_t size)
2204 memset(base, OP_NOP, size);
2207 // This is a no-op on x86
2208 ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
2212 static void setPointer(void* where, void* value)
2214 reinterpret_cast<void**>(where)[-1] = value;
2217 static void setInt32(void* where, int32_t value)
2219 reinterpret_cast<int32_t*>(where)[-1] = value;
2222 static void setInt8(void* where, int8_t value)
2224 reinterpret_cast<int8_t*>(where)[-1] = value;
2227 static void setRel32(void* from, void* to)
2229 intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
2230 ASSERT(offset == static_cast<int32_t>(offset));
2232 setInt32(from, offset);
2235 class X86InstructionFormatter {
2237 static const int maxInstructionSize = 16;
2248 // Legacy prefix bytes:
    // These are emitted prior to the instruction.
2252 void prefix(OneByteOpcodeID pre)
2254 m_buffer.putByte(pre);
2257 // Word-sized operands / no operand instruction formatters.
2259 // In addition to the opcode, the following operand permutations are supported:
2260 // * None - instruction takes no operands.
2261 // * One register - the low three bits of the RegisterID are added into the opcode.
    // * Two registers - encodes a register-form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
    // * Three argument ModRM - a register, plus a base register and an offset describing a memory operand.
    // * Five argument ModRM - a register, plus a base register, an index, scale, and offset describing a memory operand.
    // For 32-bit x86 targets, the address operand may also be provided as a void*.
    // On 64-bit targets REX prefixes will be planted as necessary when high-numbered registers are used.
    // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
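    // For illustration: oneByteOp(OP_ADD_EvGv, X86Registers::ecx, X86Registers::ebx, 8)
    // emits 01 4B 08, i.e. "addl %ecx, 8(%ebx)" - the opcode, a ModRM byte selecting a
    // base-plus-disp8 memory operand, and the 8-bit displacement.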
2271 void oneByteOp(OneByteOpcodeID opcode)
2273 m_buffer.ensureSpace(maxInstructionSize);
2274 m_buffer.putByteUnchecked(opcode);
2277 void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
2279 m_buffer.ensureSpace(maxInstructionSize);
2280 emitRexIfNeeded(0, 0, reg);
2281 m_buffer.putByteUnchecked(opcode + (reg & 7));
2284 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
2286 m_buffer.ensureSpace(maxInstructionSize);
2287 emitRexIfNeeded(reg, 0, rm);
2288 m_buffer.putByteUnchecked(opcode);
2289 registerModRM(reg, rm);
2292 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2294 m_buffer.ensureSpace(maxInstructionSize);
2295 emitRexIfNeeded(reg, 0, base);
2296 m_buffer.putByteUnchecked(opcode);
2297 memoryModRM(reg, base, offset);
2300 void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2302 m_buffer.ensureSpace(maxInstructionSize);
2303 emitRexIfNeeded(reg, 0, base);
2304 m_buffer.putByteUnchecked(opcode);
2305 memoryModRM_disp32(reg, base, offset);
2308 void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2310 m_buffer.ensureSpace(maxInstructionSize);
2311 emitRexIfNeeded(reg, 0, base);
2312 m_buffer.putByteUnchecked(opcode);
2313 memoryModRM_disp8(reg, base, offset);
2316 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2318 m_buffer.ensureSpace(maxInstructionSize);
2319 emitRexIfNeeded(reg, index, base);
2320 m_buffer.putByteUnchecked(opcode);
2321 memoryModRM(reg, base, index, scale, offset);
2325 void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
2327 m_buffer.ensureSpace(maxInstructionSize);
2328 m_buffer.putByteUnchecked(opcode);
2329 memoryModRM(reg, address);
2333 void twoByteOp(TwoByteOpcodeID opcode)
2335 m_buffer.ensureSpace(maxInstructionSize);
2336 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2337 m_buffer.putByteUnchecked(opcode);
2340 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
2342 m_buffer.ensureSpace(maxInstructionSize);
2343 emitRexIfNeeded(reg, 0, rm);
2344 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2345 m_buffer.putByteUnchecked(opcode);
2346 registerModRM(reg, rm);
2349 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
2351 m_buffer.ensureSpace(maxInstructionSize);
2352 emitRexIfNeeded(reg, 0, base);
2353 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2354 m_buffer.putByteUnchecked(opcode);
2355 memoryModRM(reg, base, offset);
2358 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2360 m_buffer.ensureSpace(maxInstructionSize);
2361 emitRexIfNeeded(reg, index, base);
2362 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2363 m_buffer.putByteUnchecked(opcode);
2364 memoryModRM(reg, base, index, scale, offset);
2368 void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
2370 m_buffer.ensureSpace(maxInstructionSize);
2371 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2372 m_buffer.putByteUnchecked(opcode);
2373 memoryModRM(reg, address);
2377 void threeByteOp(ThreeByteOpcodeID opcode)
2379 m_buffer.ensureSpace(maxInstructionSize);
2380 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2381 m_buffer.putByteUnchecked(OP2_3BYTE_ESCAPE);
2382 m_buffer.putByteUnchecked(opcode);
2386 // Quad-word-sized operands:
    // Used to format 64-bit operations, planting a REX.W prefix.
    // When planting d64 or f64 instructions, which do not require a REX.W prefix,
    // the normal (non-'64'-postfixed) formatters should be used.
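    // For illustration: oneByteOp64(OP_ADD_EvGv, X86Registers::eax, X86Registers::ecx)
    // emits 48 01 C1, i.e. "addq %rax, %rcx" - a REX.W prefix followed by the normal
    // 32-bit encoding.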
2392 void oneByteOp64(OneByteOpcodeID opcode)
2394 m_buffer.ensureSpace(maxInstructionSize);
2396 m_buffer.putByteUnchecked(opcode);
2399 void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
2401 m_buffer.ensureSpace(maxInstructionSize);
2402 emitRexW(0, 0, reg);
2403 m_buffer.putByteUnchecked(opcode + (reg & 7));
2406 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
2408 m_buffer.ensureSpace(maxInstructionSize);
2409 emitRexW(reg, 0, rm);
2410 m_buffer.putByteUnchecked(opcode);
2411 registerModRM(reg, rm);
2414 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2416 m_buffer.ensureSpace(maxInstructionSize);
2417 emitRexW(reg, 0, base);
2418 m_buffer.putByteUnchecked(opcode);
2419 memoryModRM(reg, base, offset);
2422 void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2424 m_buffer.ensureSpace(maxInstructionSize);
2425 emitRexW(reg, 0, base);
2426 m_buffer.putByteUnchecked(opcode);
2427 memoryModRM_disp32(reg, base, offset);
2430 void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2432 m_buffer.ensureSpace(maxInstructionSize);
2433 emitRexW(reg, 0, base);
2434 m_buffer.putByteUnchecked(opcode);
2435 memoryModRM_disp8(reg, base, offset);
2438 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2440 m_buffer.ensureSpace(maxInstructionSize);
2441 emitRexW(reg, index, base);
2442 m_buffer.putByteUnchecked(opcode);
2443 memoryModRM(reg, base, index, scale, offset);
2446 void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
2448 m_buffer.ensureSpace(maxInstructionSize);
2449 emitRexW(reg, 0, rm);
2450 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2451 m_buffer.putByteUnchecked(opcode);
2452 registerModRM(reg, rm);
2458 // These methods format byte operations. Byte operations differ from the normal
2459 // formatters in the circumstances under which they will decide to emit REX prefixes.
2460 // These should be used where any register operand signifies a byte register.
    // The distinction is due to the handling of register numbers in the range 4..7 on
2463 // x86-64. These register numbers may either represent the second byte of the first
2464 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
2466 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
2467 // be accessed where a REX prefix is present), these are likely best treated as
2468 // deprecated. In order to ensure the correct registers spl..dil are selected a
2469 // REX prefix will be emitted for any byte register operand in the range 4..15.
2471 // These formatters may also be used in instructions that mix operand sizes, in which
2472 // case an unnecessary REX prefix will be emitted, for example:
2473 //     movzbl %al, %edi
2474 // In this case a REX will be planted since edi is 7 (and were this a byte operand
2475 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
2476 // be silently ignored by the processor.
2478 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
2479 // is provided to check byte register operands.
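// For example, a byte move such as "movb %al, %sil" (opcode 0x88, MOV Eb,Gb) needs a REX prefix even
// though no extended (r8..r15) register is involved: register number 6 is sil here, byteRegRequiresRex()
// returns true for it, and an otherwise-empty REX (0x40) is planted, giving 40 88 C6. Without the REX
// prefix, rm=6 in the ModRM byte would select %dh instead.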
2481 void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
2483 m_buffer.ensureSpace(maxInstructionSize);
2484 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
2485 m_buffer.putByteUnchecked(opcode);
2486 registerModRM(groupOp, rm);
2489 void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
2491 m_buffer.ensureSpace(maxInstructionSize);
2492 emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
2493 m_buffer.putByteUnchecked(opcode);
2494 registerModRM(reg, rm);
2497 void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2499 m_buffer.ensureSpace(maxInstructionSize);
2500 emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(base), reg, 0, base);
2501 m_buffer.putByteUnchecked(opcode);
2502 memoryModRM(reg, base, offset);
2505 void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2507 m_buffer.ensureSpace(maxInstructionSize);
2508 emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
2509 m_buffer.putByteUnchecked(opcode);
2510 memoryModRM(reg, base, index, scale, offset);
2513 void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
2515 m_buffer.ensureSpace(maxInstructionSize);
2516 emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
2517 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2518 m_buffer.putByteUnchecked(opcode);
2519 registerModRM(reg, rm);
2522 void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
2524 m_buffer.ensureSpace(maxInstructionSize);
2525 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
2526 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2527 m_buffer.putByteUnchecked(opcode);
2528 registerModRM(groupOp, rm);
2531 // Immediates:
2533 // An immediate should be appended where appropriate after an op has been emitted.
2534 // The writes are unchecked since the opcode formatters above will have ensured space.
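// For example (assuming the GROUP1_OP_ADD (/0) group opcode defined earlier in this header, and the
// oneByteOp(opcode, reg, rm) formatter above), "addl $5, %ecx" could be planted as:
//     oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, X86Registers::ecx);
//     immediate8(5);
// producing the byte sequence 83 C1 05.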
2536 void immediate8(int imm)
2538 m_buffer.putByteUnchecked(imm);
2541 void immediate16(int imm)
2543 m_buffer.putShortUnchecked(imm);
2546 void immediate32(int imm)
2548 m_buffer.putIntUnchecked(imm);
2551 void immediate64(int64_t imm)
2553 m_buffer.putInt64Unchecked(imm);
2556 AssemblerLabel immediateRel32()
2558 m_buffer.putIntUnchecked(0);
2559 return label();
2562 // Administrative methods:
2564 size_t codeSize() const { return m_buffer.codeSize(); }
2565 AssemblerLabel label() const { return m_buffer.label(); }
2566 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2567 void* data() const { return m_buffer.data(); }
2569 unsigned debugOffset() { return m_buffer.debugOffset(); }
2573 // Internals: ModRm and REX formatters.
2575 static const RegisterID noBase = X86Registers::ebp;
2576 static const RegisterID hasSib = X86Registers::esp;
2577 static const RegisterID noIndex = X86Registers::esp;
2578 #if CPU(X86_64)
2579 static const RegisterID noBase2 = X86Registers::r13;
2580 static const RegisterID hasSib2 = X86Registers::r12;
2582 // Registers r8 and above require a REX prefix.
2583 inline bool regRequiresRex(int reg)
2585 return (reg >= X86Registers::r8);
2588 // Byte operand registers spl and above require a REX prefix (to prevent the 'H' registers from being accessed).
2589 inline bool byteRegRequiresRex(int reg)
2591 return (reg >= X86Registers::esp);
2594 // Format a REX prefix byte.
2595 inline void emitRex(bool w, int r, int x, int b)
2600 m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
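// For example, emitRex(true, 0, 0, X86Registers::r9) plants 0x49 = 0b01001001: the fixed 0100 pattern,
// W=1 (64-bit operand size), R=0, X=0 and B=1 (the rm/base register is r9, an extended register whose
// high bit cannot be encoded in the ModRM byte itself).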
2603 // Used to plant a REX byte with REX.w set (for 64-bit operations).
2604 inline void emitRexW(int r, int x, int b)
2606 emitRex(true, r, x, b);
2609 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
2610 // regRequiresRex() to check other registers (i.e. address base & index).
2611 inline void emitRexIf(bool condition, int r, int x, int b)
2613 if (condition) emitRex(false, r, x, b);
2616 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
2617 inline void emitRexIfNeeded(int r, int x, int b)
2619 emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
2621 #else
2622 // No REX prefix bytes on 32-bit x86.
2623 inline bool regRequiresRex(int) { return false; }
2624 inline bool byteRegRequiresRex(int) { return false; }
2625 inline void emitRexIf(bool, int, int, int) {}
2626 inline void emitRexIfNeeded(int, int, int) {}
2627 #endif // CPU(X86_64)
2629 void putModRm(ModRmMode mode, int reg, RegisterID rm)
2631 m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
2634 void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
2636 ASSERT(mode != ModRmRegister);
2638 putModRm(mode, reg, hasSib);
2639 m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
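// For example, putModRmSib(ModRmMemoryDisp8, 0 /* eax */, X86Registers::esp, noIndex, 0) plants
// ModRM 0x44 (mod=01, reg=000, rm=100 meaning "SIB follows") and SIB 0x24 (scale=00, index=100 = none,
// base=100 = esp); the caller then appends the 8-bit displacement, yielding an operand such as disp8(%esp).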
2642 void registerModRM(int reg, RegisterID rm)
2644 putModRm(ModRmRegister, reg, rm);
2647 void memoryModRM(int reg, RegisterID base, int offset)
2649 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2650 #if CPU(X86_64)
2651 if ((base == hasSib) || (base == hasSib2)) {
2652 #else
2653 if (base == hasSib) {
2654 #endif
2655 if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
2656 putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
2657 else if (CAN_SIGN_EXTEND_8_32(offset)) {
2658 putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
2659 m_buffer.putByteUnchecked(offset);
2661 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
2662 m_buffer.putIntUnchecked(offset);
2665 #if CPU(X86_64)
2666 if (!offset && (base != noBase) && (base != noBase2))
2667 #else
2668 if (!offset && (base != noBase))
2669 #endif
2670 putModRm(ModRmMemoryNoDisp, reg, base);
2671 else if (CAN_SIGN_EXTEND_8_32(offset)) {
2672 putModRm(ModRmMemoryDisp8, reg, base);
2673 m_buffer.putByteUnchecked(offset);
2675 putModRm(ModRmMemoryDisp32, reg, base);
2676 m_buffer.putIntUnchecked(offset);
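// For example, memoryModRM(0 /* eax */, X86Registers::ecx, 8) plants 0x41 (mod=01, reg=000, rm=001)
// followed by the disp8 0x08, encoding the operand 8(%ecx). A zero offset with a non-ebp base uses the
// shorter mod=00 form, and an offset outside the int8_t range falls back to mod=10 with a disp32.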
2681 void memoryModRM_disp8(int reg, RegisterID base, int offset)
2683 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2684 ASSERT(CAN_SIGN_EXTEND_8_32(offset));
2685 #if CPU(X86_64)
2686 if ((base == hasSib) || (base == hasSib2)) {
2687 #else
2688 if (base == hasSib) {
2689 #endif
2690 putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
2691 m_buffer.putByteUnchecked(offset);
2693 putModRm(ModRmMemoryDisp8, reg, base);
2694 m_buffer.putByteUnchecked(offset);
2698 void memoryModRM_disp32(int reg, RegisterID base, int offset)
2700 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2701 #if CPU(X86_64)
2702 if ((base == hasSib) || (base == hasSib2)) {
2703 #else
2704 if (base == hasSib) {
2705 #endif
2706 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
2707 m_buffer.putIntUnchecked(offset);
2709 putModRm(ModRmMemoryDisp32, reg, base);
2710 m_buffer.putIntUnchecked(offset);
2714 void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
2716 ASSERT(index != noIndex);
2718 #if CPU(X86_64)
2719 if (!offset && (base != noBase) && (base != noBase2))
2720 #else
2721 if (!offset && (base != noBase))
2722 #endif
2723 putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
2724 else if (CAN_SIGN_EXTEND_8_32(offset)) {
2725 putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
2726 m_buffer.putByteUnchecked(offset);
2728 putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
2729 m_buffer.putIntUnchecked(offset);
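// For example, memoryModRM(0 /* eax */, X86Registers::ebx, X86Registers::ecx, 2, 0) plants ModRM 0x04
// (mod=00, reg=000, rm=100) and SIB 0x8B (scale=10, index=001, base=011), encoding the operand
// (%ebx,%ecx,4); note the scale argument is the shift amount (0..3), not the byte multiplier.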
2734 void memoryModRM(int reg, const void* address)
2736 // noBase + ModRmMemoryNoDisp actually means noBase + ModRmMemoryDisp32: there is no displacement-free encoding with an ebp base, so this form is an absolute 32-bit address.
2737 putModRm(ModRmMemoryNoDisp, reg, noBase);
2738 m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
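// For example, memoryModRM(0 /* eax */, reinterpret_cast<const void*>(0x1000)) plants 0x05 (mod=00,
// reg=000, rm=101) followed by the 32-bit address, encoding the absolute operand 0x1000. This form only
// makes sense on 32-bit x86, where a pointer fits in the int32_t displacement; on x86-64 the
// mod=00/rm=101 encoding is interpreted as RIP-relative instead.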
2743 AssemblerBuffer m_buffer;
2745 int m_indexOfLastWatchpoint;
2746 int m_indexOfTailOfLastWatchpoint;
2751 #endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
2753 #endif // X86Assembler_h