/*
 * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h
#include "AbortReason.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"

#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>
#include <wtf/SharedTask.h>
#include <wtf/WeakRandom.h>

#if ENABLE(ASSEMBLER)
inline bool isARMv7IDIVSupported()
{
#if HAVE(ARM_IDIV_INSTRUCTIONS)
    return true;
#else
    return false;
#endif
}

inline bool isX86()
{
#if CPU(X86_64) || CPU(X86)
    return true;
#else
    return false;
#endif
}

inline bool isX86_64()
{
#if CPU(X86_64)
    return true;
#else
    return false;
#endif
}

inline bool optimizeForARMv7IDIVSupported()
{
    return isARMv7IDIVSupported() && Options::useArchitectureSpecificOptimizations();
}

inline bool optimizeForARM64()
{
    return isARM64() && Options::useArchitectureSpecificOptimizations();
}

inline bool optimizeForX86()
{
    return isX86() && Options::useArchitectureSpecificOptimizations();
}

inline bool optimizeForX86_64()
{
    return isX86_64() && Options::useArchitectureSpecificOptimizations();
}
class AllowMacroScratchRegisterUsage;
class DisallowMacroScratchRegisterUsage;
template <class AssemblerType, class MacroAssemblerType>
class AbstractMacroAssembler {
public:
    friend class JITWriteBarrierBase;
    typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType;
    typedef AssemblerType AssemblerType_T;

    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssemblerCodeRef CodeRef;
    typedef typename AssemblerType::RegisterID RegisterID;
    typedef typename AssemblerType::FPRegisterID FPRegisterID;

    static constexpr RegisterID firstRegister() { return AssemblerType::firstRegister(); }
    static constexpr RegisterID lastRegister() { return AssemblerType::lastRegister(); }

    static constexpr FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
    static constexpr FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    static Scale timesPtr()
    {
        if (sizeof(void*) == 4)
            return TimesFour;
        return TimesEight;
    }
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        Address withOffset(int32_t additionalOffset)
        {
            return Address(base, offset + additionalOffset);
        }

        BaseIndex indexedBy(RegisterID index, Scale) const;
    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // Are equivalent, and the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }
    // Describes a complex addressing mode.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)

        BaseIndex withOffset(int32_t additionalOffset)
        {
            return BaseIndex(base, index, scale, offset + additionalOffset);
        }
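        // Illustrative sketch, not part of the original header: a BaseIndex computes its
        // effective address as base + (index << scale) + offset, so loading the i-th
        // pointer-sized element of an array could look like the following (arrayBase,
        // indexReg and destReg are hypothetical registers):
        //
        //     masm.loadPtr(BaseIndex(arrayBase, indexReg, MacroAssembler::timesPtr()), destReg);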
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
    // A pointer sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // from pointers used as absolute addresses to memory operations.
    struct TrustedImmPtr {
        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        // This is only here so that TrustedImmPtr(0) does not confuse the C++
        // overload handling rules.
        explicit TrustedImmPtr(int value)
        {
            ASSERT_UNUSED(value, !value);
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }
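    // Illustrative sketch, not part of the original header: wrapping a pointer in
    // TrustedImmPtr marks it as an immediate operand rather than an address, e.g.
    // storing a known constant pointer into a field (cellReg and fieldOffset are
    // hypothetical):
    //
    //     masm.storePtr(TrustedImmPtr(knownPointer), Address(cellReg, fieldOffset));
    //
    // ImmPtr below carries the same kind of value but marks it as untrusted, so a
    // concrete MacroAssembler may blind it before emitting it into the code stream.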
    struct ImmPtr : private TrustedImmPtr
    {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    // A 32bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediates.
    struct TrustedImm32 {
        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
    struct Imm32 : private TrustedImm32 {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }

        explicit Imm32(TrustedImmPtr ptr)

        const TrustedImm32& asTrustedImm32() const { return *this; }
    // A 64bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediates.
    struct TrustedImm64 {
        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif
    struct Imm64 : private TrustedImm64
    {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
#endif

        const TrustedImm64& asTrustedImm64() const { return *this; }
    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.

    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;
        friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

        Label(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
            masm->invalidateAllTempRegisters();
        }

        bool operator==(const Label& other) const { return m_label == other.m_label; }

        bool isSet() const { return m_label.isSet(); }

        AssemblerLabel m_label;
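        // Illustrative sketch, not part of the original header: a Label bound at the top
        // of a loop can later serve as the target of a backward branch (counterReg is a
        // hypothetical register):
        //
        //     Label loopStart(&masm);
        //     ...
        //     masm.branch32(MacroAssembler::NotEqual, counterReg, TrustedImm32(0)).linkTo(loopStart, &masm);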
    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }

        AssemblerLabel m_label;
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

        DataLabelPtr(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

        AssemblerLabel m_label;
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
    // patched after the code has been generated.
    class DataLabel32 {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

        DataLabel32(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

        AssemblerLabel m_label;
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

        DataLabelCompact(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

        AssemblerLabel label() const { return m_label; }

        AssemblerLabel m_label;
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;

        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            Tail = 0x4,
            LinkableNear = 0x3,
            LinkableNearTail = 0x7,
        };

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    // A jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    class Jump {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;
#if CPU(ARM_THUMB2)
        // Fixme: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#elif CPU(SH4)
        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
            : m_label(jmp)
            , m_type(type)
        {
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif
        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssemblerType* masm) const
        {
            masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(SH4)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }
        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }
        bool isSet() const { return m_label.isSet(); }

        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#elif CPU(SH4)
        SH4Assembler::JumpType m_type;
#endif
    struct PatchableJump {
        explicit PatchableJump(Jump jump)

        operator Jump&() { return m_jump; }
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
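    //
    // Illustrative sketch, not part of the original header: a typical use is to collect
    // several failure branches and link them all to a common slow path (regA and regB
    // are hypothetical registers):
    //
    //     JumpList failureCases;
    //     failureCases.append(masm.branch32(MacroAssembler::Equal, regA, TrustedImm32(0)));
    //     failureCases.append(masm.branch32(MacroAssembler::LessThan, regA, regB));
    //     ...
    //     failureCases.link(&masm); // everything appended above now jumps here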
        typedef Vector<Jump, 2> JumpVector;
        void link(AbstractMacroAssemblerType* masm) const
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
        }

        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

        const JumpVector& jumps() const { return m_jumps; }
    // Section 3: Misc admin methods

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        (void)label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return label();
    }
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void checkOffsets(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
    }
#endif
    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
    {
        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
    }

    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }
#if ENABLE(MASM_PROBE)
    struct CPUState {
        #define DECLARE_REGISTER(_type, _regName) \
            _type _regName;
        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER

        static const char* gprName(RegisterID regID)
        {
            switch (regID) {
            #define DECLARE_REGISTER(_type, _regName) \
            case RegisterID::_regName: \
                return #_regName;
            FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
            #undef DECLARE_REGISTER
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        static const char* fprName(FPRegisterID regID)
        {
            switch (regID) {
            #define DECLARE_REGISTER(_type, _regName) \
            case FPRegisterID::_regName: \
                return #_regName;
            FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
            #undef DECLARE_REGISTER
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        void*& gpr(RegisterID regID)
        {
            switch (regID) {
            #define DECLARE_REGISTER(_type, _regName) \
            case RegisterID::_regName: \
                return _regName;
            FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
            #undef DECLARE_REGISTER
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        double& fpr(FPRegisterID regID)
        {
            switch (regID) {
            #define DECLARE_REGISTER(_type, _regName) \
            case FPRegisterID::_regName: \
                return _regName;
            FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
            #undef DECLARE_REGISTER
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    };
    typedef void (*ProbeFunction)(struct ProbeContext*);

    struct ProbeContext {
        ProbeFunction probeFunction;
        void* arg1;
        void* arg2;
        CPUState cpu;

        // Convenience methods:
        void*& gpr(RegisterID regID) { return cpu.gpr(regID); }
        double& fpr(FPRegisterID regID) { return cpu.fpr(regID); }
        const char* gprName(RegisterID regID) { return cpu.gprName(regID); }
        const char* fprName(FPRegisterID regID) { return cpu.fprName(regID); }
    };
    // This function emits code to preserve the CPUState (e.g. registers),
    // call a user supplied probe function, and restore the CPUState before
    // continuing with other JIT generated code.
    //
    // The user supplied probe function will be called with a single pointer to
    // a ProbeContext struct (defined above) which contains, among other things,
    // the preserved CPUState. This allows the user probe function to inspect
    // the CPUState at that point in the JIT generated code.
    //
    // If the user probe function alters the register values in the ProbeContext,
    // the altered values will be loaded into the CPU registers when the probe
    // function returns.
    //
    // The ProbeContext is stack allocated and is only valid for the duration
    // of the call to the user probe function.
    //
    // Note: probe() should be implemented by the target specific MacroAssembler.
    // This prototype is only provided here to document the interface.
    void probe(ProbeFunction, void* arg1, void* arg2);
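
    // Illustrative sketch, not part of the original header: a probe can be used to inspect
    // register state from inside JIT code, e.g. with a hypothetical callback that dumps one
    // GPR (the register named here is x86-specific):
    //
    //     static void dumpSomeState(ProbeContext* context)
    //     {
    //         dataLogF("%s = %p\n", context->gprName(X86Registers::eax), context->gpr(X86Registers::eax));
    //     }
    //     ...
    //     masm.probe(dumpSomeState, nullptr, nullptr);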
#endif // ENABLE(MASM_PROBE)

    AssemblerType m_assembler;
    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return AssemblerType::getRelocatedAddress(code, label);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }
    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        switch (nearCall.callMode()) {
        case NearCallMode::Tail:
            AssemblerType::relinkJump(nearCall.dataLocation(), destination.dataLocation());
            return;
        case NearCallMode::Regular:
            AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
    }
    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }
    template<typename Functor>
    void addLinkTask(const Functor& functor)
    {
        m_linkTasks.append(createSharedTask<void(LinkBuffer&)>(functor));
    }
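
    // Illustrative sketch, not part of the original header: a link task runs when the
    // LinkBuffer for this assembler is finalized, which is a convenient point to record
    // final code locations (someLabel is a hypothetical Label):
    //
    //     masm.addLinkTask([=] (LinkBuffer& linkBuffer) {
    //         CodeLocationLabel location = linkBuffer.locationOf(someLabel);
    //         ...
    //     });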
    void emitNops(size_t memoryToFillWithNopsInBytes)
    {
        AssemblerBuffer& buffer = m_assembler.buffer();
        size_t startCodeSize = buffer.codeSize();
        size_t targetCodeSize = startCodeSize + memoryToFillWithNopsInBytes;
        buffer.ensureSpace(memoryToFillWithNopsInBytes);
        bool isCopyingToExecutableMemory = false;
        AssemblerType::fillNops(static_cast<char*>(buffer.data()) + startCodeSize, memoryToFillWithNopsInBytes, isCopyingToExecutableMemory);
        buffer.setCodeSize(targetCodeSize);
    }
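
    // Illustrative sketch, not part of the original header: emitting a run of NOPs pads
    // the instruction stream, for example to reserve space that is rewritten later:
    //
    //     masm.emitNops(4); // plant 4 bytes of NOPs at the current offset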
    AbstractMacroAssembler()
    {
        invalidateAllTempRegisters();
    }

    uint32_t random()
    {
        if (!m_randomSourceIsInitialized) {
            m_randomSourceIsInitialized = true;
            m_randomSource.setSeed(cryptographicallyRandomNumber());
        }
        return m_randomSource.getUint32();
    }
    bool m_randomSourceIsInitialized { false };
    WeakRandom m_randomSource;

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif
    static bool haveScratchRegisterForBlinding()
    {
        return false;
    }
    static RegisterID scratchRegisterForBlinding()
    {
        UNREACHABLE_FOR_PLATFORM();
        return firstRegister();
    }
    static bool canBlind() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
    static bool shouldBlindForSpecificArch(uint64_t) { return false; }
    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;

        CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

        AbstractMacroAssemblerType* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };
    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }
    friend class AllowMacroScratchRegisterUsage;
    friend class DisallowMacroScratchRegisterUsage;
    unsigned m_tempRegistersValidBits;
    bool m_allowScratchRegister { true };

    Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;

    friend class LinkBuffer;
}; // class AbstractMacroAssembler
template <class AssemblerType, class MacroAssemblerType>
inline typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::BaseIndex
AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::Address::indexedBy(
    typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::RegisterID index,
    typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::Scale scale) const
{
    return BaseIndex(base, index, scale, offset);
}
#endif // ENABLE(ASSEMBLER)

#endif // AbstractMacroAssembler_h