+2016-02-17 Benjamin Poulain <bpoulain@apple.com>
+
+ [JSC] ARM64: Support the immediate format used for bit operations in Air
+ https://bugs.webkit.org/show_bug.cgi?id=154327
+
+ Reviewed by Filip Pizlo.
+
+ ARM64 supports a pretty rich set of immediates for bit operations.
+ There are two formats, used to encode repeating patterns and common
+ values in a dense form.
+
+ In this patch, I add two new types of Arg: BitImm and BitImm64.
+ Those represent the valid immediate forms for bit operations.
+ On x86, any 32-bit value is valid. On ARM64, all the encoding
+ forms are tried and the immediate is used when possible.
+
+ The arg type Imm64 is renamed to BigImm to better represent what
+ it is: an immediate that does not fit into Imm.
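+
+ For example (an illustrative sketch, not output from this patch):
+ 0x5555555555555555 is a repeating 2-bit pattern, so it is a valid
+ ARM64 logical immediate and Air can now emit
+
+     And64 $0x5555555555555555, %x0, %x1
+
+ directly, where it previously had to materialize the constant first:
+
+     Move $0x5555555555555555, %x2
+     And64 %x2, %x0, %x1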
+
+ * assembler/ARM64Assembler.h:
+ (JSC::LogicalImmediate::create32): Deleted.
+ (JSC::LogicalImmediate::create64): Deleted.
+ (JSC::LogicalImmediate::value): Deleted.
+ (JSC::LogicalImmediate::isValid): Deleted.
+ (JSC::LogicalImmediate::is64bit): Deleted.
+ (JSC::LogicalImmediate::LogicalImmediate): Deleted.
+ (JSC::LogicalImmediate::mask): Deleted.
+ (JSC::LogicalImmediate::partialHSB): Deleted.
+ (JSC::LogicalImmediate::highestSetBit): Deleted.
+ (JSC::LogicalImmediate::findBitRange): Deleted.
+ (JSC::LogicalImmediate::encodeLogicalImmediate): Deleted.
+ * assembler/AssemblerCommon.h:
+ (JSC::ARM64LogicalImmediate::create32):
+ (JSC::ARM64LogicalImmediate::create64):
+ (JSC::ARM64LogicalImmediate::value):
+ (JSC::ARM64LogicalImmediate::isValid):
+ (JSC::ARM64LogicalImmediate::is64bit):
+ (JSC::ARM64LogicalImmediate::ARM64LogicalImmediate):
+ (JSC::ARM64LogicalImmediate::mask):
+ (JSC::ARM64LogicalImmediate::partialHSB):
+ (JSC::ARM64LogicalImmediate::highestSetBit):
+ (JSC::ARM64LogicalImmediate::findBitRange):
+ (JSC::ARM64LogicalImmediate::encodeLogicalImmediate):
+ * assembler/MacroAssemblerARM64.h:
+ (JSC::MacroAssemblerARM64::and64):
+ (JSC::MacroAssemblerARM64::or64):
+ (JSC::MacroAssemblerARM64::xor64):
+ * b3/B3LowerToAir.cpp:
+ (JSC::B3::Air::LowerToAir::bitImm):
+ (JSC::B3::Air::LowerToAir::bitImm64):
+ (JSC::B3::Air::LowerToAir::appendBinOp):
+ * b3/air/AirArg.cpp:
+ (JSC::B3::Air::Arg::dump):
+ (WTF::printInternal):
+ * b3/air/AirArg.h:
+ (JSC::B3::Air::Arg::bitImm):
+ (JSC::B3::Air::Arg::bitImm64):
+ (JSC::B3::Air::Arg::isBitImm):
+ (JSC::B3::Air::Arg::isBitImm64):
+ (JSC::B3::Air::Arg::isSomeImm):
+ (JSC::B3::Air::Arg::value):
+ (JSC::B3::Air::Arg::isGP):
+ (JSC::B3::Air::Arg::isFP):
+ (JSC::B3::Air::Arg::hasType):
+ (JSC::B3::Air::Arg::isValidBitImmForm):
+ (JSC::B3::Air::Arg::isValidBitImm64Form):
+ (JSC::B3::Air::Arg::isValidForm):
+ (JSC::B3::Air::Arg::asTrustedImm32):
+ (JSC::B3::Air::Arg::asTrustedImm64):
+ * b3/air/AirOpcode.opcodes:
+ * b3/air/opcode_generator.rb:
+
2016-02-17 Keith Miller <keith_miller@apple.com>
Spread operator should be allowed when not the first argument of parameter list
int m_value;
};
-class LogicalImmediate {
-public:
- static LogicalImmediate create32(uint32_t value)
- {
- // Check for 0, -1 - these cannot be encoded.
- if (!value || !~value)
- return InvalidLogicalImmediate;
-
- // First look for a 32-bit pattern, then for repeating 16-bit
- // patterns, 8-bit, 4-bit, and finally 2-bit.
-
- unsigned hsb, lsb;
- bool inverted;
- if (findBitRange<32>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<32>(hsb, lsb, inverted);
-
- if ((value & 0xffff) != (value >> 16))
- return InvalidLogicalImmediate;
- value &= 0xffff;
-
- if (findBitRange<16>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<16>(hsb, lsb, inverted);
-
- if ((value & 0xff) != (value >> 8))
- return InvalidLogicalImmediate;
- value &= 0xff;
-
- if (findBitRange<8>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<8>(hsb, lsb, inverted);
-
- if ((value & 0xf) != (value >> 4))
- return InvalidLogicalImmediate;
- value &= 0xf;
-
- if (findBitRange<4>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<4>(hsb, lsb, inverted);
-
- if ((value & 0x3) != (value >> 2))
- return InvalidLogicalImmediate;
- value &= 0x3;
-
- if (findBitRange<2>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<2>(hsb, lsb, inverted);
-
- return InvalidLogicalImmediate;
- }
-
- static LogicalImmediate create64(uint64_t value)
- {
- // Check for 0, -1 - these cannot be encoded.
- if (!value || !~value)
- return InvalidLogicalImmediate;
-
- // Look for a contiguous bit range.
- unsigned hsb, lsb;
- bool inverted;
- if (findBitRange<64>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<64>(hsb, lsb, inverted);
-
- // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
- if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
- return create32(static_cast<uint32_t>(value));
- return InvalidLogicalImmediate;
- }
-
- int value() const
- {
- ASSERT(isValid());
- return m_value;
- }
-
- bool isValid() const
- {
- return m_value != InvalidLogicalImmediate;
- }
-
- bool is64bit() const
- {
- return m_value & (1 << 12);
- }
-
-private:
- LogicalImmediate(int value)
- : m_value(value)
- {
- }
-
- // Generate a mask with bits in the range hsb..0 set, for example:
- // hsb:63 = 0xffffffffffffffff
- // hsb:42 = 0x000007ffffffffff
- // hsb: 0 = 0x0000000000000001
- static uint64_t mask(unsigned hsb)
- {
- ASSERT(hsb < 64);
- return 0xffffffffffffffffull >> (63 - hsb);
- }
-
- template<unsigned N>
- static void partialHSB(uint64_t& value, unsigned&result)
- {
- if (value & (0xffffffffffffffffull << N)) {
- result += N;
- value >>= N;
- }
- }
-
- // Find the bit number of the highest bit set in a non-zero value, for example:
- // 0x8080808080808080 = hsb:63
- // 0x0000000000000001 = hsb: 0
- // 0x000007ffffe00000 = hsb:42
- static unsigned highestSetBit(uint64_t value)
- {
- ASSERT(value);
- unsigned hsb = 0;
- partialHSB<32>(value, hsb);
- partialHSB<16>(value, hsb);
- partialHSB<8>(value, hsb);
- partialHSB<4>(value, hsb);
- partialHSB<2>(value, hsb);
- partialHSB<1>(value, hsb);
- return hsb;
- }
-
- // This function takes a value and a bit width, where value obeys the following constraints:
- // * bits outside of the width of the value must be zero.
- // * bits within the width of value must neither be all clear or all set.
- // The input is inspected to detect values that consist of either two or three contiguous
- // ranges of bits. The output range hsb..lsb will describe the second range of the value.
- // if the range is set, inverted will be false, and if the range is clear, inverted will
- // be true. For example (with width 8):
- // 00001111 = hsb:3, lsb:0, inverted:false
- // 11110000 = hsb:3, lsb:0, inverted:true
- // 00111100 = hsb:5, lsb:2, inverted:false
- // 11000011 = hsb:5, lsb:2, inverted:true
- template<unsigned width>
- static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
- {
- ASSERT(value & mask(width - 1));
- ASSERT(value != mask(width - 1));
- ASSERT(!(value & ~mask(width - 1)));
-
- // Detect cases where the top bit is set; if so, flip all the bits & set invert.
- // This halves the number of patterns we need to look for.
- const uint64_t msb = 1ull << (width - 1);
- if ((inverted = (value & msb)))
- value ^= mask(width - 1);
-
- // Find the highest set bit in value, generate a corresponding mask & flip all
- // bits under it.
- hsb = highestSetBit(value);
- value ^= mask(hsb);
- if (!value) {
- // If this cleared the value, then the range hsb..0 was all set.
- lsb = 0;
- return true;
- }
-
- // Try making one more mask, and flipping the bits!
- lsb = highestSetBit(value);
- value ^= mask(lsb);
- if (!value) {
- // Success - but lsb actually points to the hsb of a third range - add one
- // to get to the lsb of the mid range.
- ++lsb;
- return true;
- }
-
- return false;
- }
-
- // Encodes the set of immN:immr:imms fields found in a logical immediate.
- template<unsigned width>
- static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
- {
- // Check width is a power of 2!
- ASSERT(!(width & (width -1)));
- ASSERT(width <= 64 && width >= 2);
- ASSERT(hsb >= lsb);
- ASSERT(hsb < width);
-
- int immN = 0;
- int imms = 0;
- int immr = 0;
-
- // For 64-bit values this is easy - just set immN to true, and imms just
- // contains the bit number of the highest set bit of the set range. For
- // values with narrower widths, these are encoded by a leading set of
- // one bits, followed by a zero bit, followed by the remaining set of bits
- // being the high bit of the range. For a 32-bit immediate there are no
- // leading one bits, just a zero followed by a five bit number. For a
- // 16-bit immediate there is one one bit, a zero bit, and then a four bit
- // bit-position, etc.
- if (width == 64)
- immN = 1;
- else
- imms = 63 & ~(width + width - 1);
-
- if (inverted) {
- // if width is 64 & hsb is 62, then we have a value something like:
- // 0x80000000ffffffff (in this case with lsb 32).
- // The ror should be by 1, imms (effectively set width minus 1) is
- // 32. Set width is full width minus cleared width.
- immr = (width - 1) - hsb;
- imms |= (width - ((hsb - lsb) + 1)) - 1;
- } else {
- // if width is 64 & hsb is 62, then we have a value something like:
- // 0x7fffffff00000000 (in this case with lsb 32).
- // The value is effectively rol'ed by lsb, which is equivalent to
- // a ror by width - lsb (or 0, in the case where lsb is 0). imms
- // is hsb - lsb.
- immr = (width - lsb) & (width - 1);
- imms |= hsb - lsb;
- }
-
- return immN << 12 | immr << 6 | imms;
- }
-
- static const int InvalidLogicalImmediate = -1;
-
- int m_value;
-};
+typedef ARM64LogicalImmediate LogicalImmediate;
inline uint16_t getHalfword(uint64_t value, int which)
{
    return value >> (which << 4);
}
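+// Encodes ARM64's "logical immediate" operand format: a 2- to 64-bit-wide
+// pattern containing a single contiguous, possibly rotated, run of set bits,
+// replicated to fill the register. Moved here from ARM64Assembler.h so that
+// Air can test encodability without depending on the full assembler.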
+class ARM64LogicalImmediate {
+public:
+ static ARM64LogicalImmediate create32(uint32_t value)
+ {
+ // Check for 0, -1 - these cannot be encoded.
+ if (!value || !~value)
+ return InvalidLogicalImmediate;
+
+ // First look for a 32-bit pattern, then for repeating 16-bit
+ // patterns, 8-bit, 4-bit, and finally 2-bit.
+
+ unsigned hsb, lsb;
+ bool inverted;
+ if (findBitRange<32>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<32>(hsb, lsb, inverted);
+
+ if ((value & 0xffff) != (value >> 16))
+ return InvalidLogicalImmediate;
+ value &= 0xffff;
+
+ if (findBitRange<16>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<16>(hsb, lsb, inverted);
+
+ if ((value & 0xff) != (value >> 8))
+ return InvalidLogicalImmediate;
+ value &= 0xff;
+
+ if (findBitRange<8>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<8>(hsb, lsb, inverted);
+
+ if ((value & 0xf) != (value >> 4))
+ return InvalidLogicalImmediate;
+ value &= 0xf;
+
+ if (findBitRange<4>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<4>(hsb, lsb, inverted);
+
+ if ((value & 0x3) != (value >> 2))
+ return InvalidLogicalImmediate;
+ value &= 0x3;
+
+ if (findBitRange<2>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<2>(hsb, lsb, inverted);
+
+ return InvalidLogicalImmediate;
+ }
+
+ static ARM64LogicalImmediate create64(uint64_t value)
+ {
+ // Check for 0, -1 - these cannot be encoded.
+ if (!value || !~value)
+ return InvalidLogicalImmediate;
+
+ // Look for a contiguous bit range.
+ unsigned hsb, lsb;
+ bool inverted;
+ if (findBitRange<64>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<64>(hsb, lsb, inverted);
+
+ // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
+ if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
+ return create32(static_cast<uint32_t>(value));
+ return InvalidLogicalImmediate;
+ }
+
+ int value() const
+ {
+ ASSERT(isValid());
+ return m_value;
+ }
+
+ bool isValid() const
+ {
+ return m_value != InvalidLogicalImmediate;
+ }
+
+ bool is64bit() const
+ {
+ return m_value & (1 << 12);
+ }
+
+private:
+ ARM64LogicalImmediate(int value)
+ : m_value(value)
+ {
+ }
+
+ // Generate a mask with bits in the range hsb..0 set, for example:
+ // hsb:63 = 0xffffffffffffffff
+ // hsb:42 = 0x000007ffffffffff
+ // hsb: 0 = 0x0000000000000001
+ static uint64_t mask(unsigned hsb)
+ {
+ ASSERT(hsb < 64);
+ return 0xffffffffffffffffull >> (63 - hsb);
+ }
+
+ template<unsigned N>
+ static void partialHSB(uint64_t& value, unsigned& result)
+ {
+ if (value & (0xffffffffffffffffull << N)) {
+ result += N;
+ value >>= N;
+ }
+ }
+
+ // Find the bit number of the highest bit set in a non-zero value, for example:
+ // 0x8080808080808080 = hsb:63
+ // 0x0000000000000001 = hsb: 0
+ // 0x000007ffffe00000 = hsb:42
+ static unsigned highestSetBit(uint64_t value)
+ {
+ ASSERT(value);
+ unsigned hsb = 0;
+ partialHSB<32>(value, hsb);
+ partialHSB<16>(value, hsb);
+ partialHSB<8>(value, hsb);
+ partialHSB<4>(value, hsb);
+ partialHSB<2>(value, hsb);
+ partialHSB<1>(value, hsb);
+ return hsb;
+ }
+
+ // This function takes a value and a bit width, where value obeys the following constraints:
+ // * bits outside of the width of the value must be zero.
+ // * bits within the width of value must neither be all clear nor all set.
+ // The input is inspected to detect values that consist of either two or three contiguous
+ // ranges of bits. The output range hsb..lsb will describe the second range of the value.
+ // If the range is set, inverted will be false, and if the range is clear, inverted will
+ // be true. For example (with width 8):
+ // 00001111 = hsb:3, lsb:0, inverted:false
+ // 11110000 = hsb:3, lsb:0, inverted:true
+ // 00111100 = hsb:5, lsb:2, inverted:false
+ // 11000011 = hsb:5, lsb:2, inverted:true
+ template<unsigned width>
+ static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
+ {
+ ASSERT(value & mask(width - 1));
+ ASSERT(value != mask(width - 1));
+ ASSERT(!(value & ~mask(width - 1)));
+
+ // Detect cases where the top bit is set; if so, flip all the bits & set invert.
+ // This halves the number of patterns we need to look for.
+ const uint64_t msb = 1ull << (width - 1);
+ if ((inverted = (value & msb)))
+ value ^= mask(width - 1);
+
+ // Find the highest set bit in value, generate a corresponding mask & flip all
+ // bits under it.
+ hsb = highestSetBit(value);
+ value ^= mask(hsb);
+ if (!value) {
+ // If this cleared the value, then the range hsb..0 was all set.
+ lsb = 0;
+ return true;
+ }
+
+ // Try making one more mask, and flipping the bits!
+ lsb = highestSetBit(value);
+ value ^= mask(lsb);
+ if (!value) {
+ // Success - but lsb actually points to the hsb of a third range - add one
+ // to get to the lsb of the mid range.
+ ++lsb;
+ return true;
+ }
+
+ return false;
+ }
+
+ // Encodes the set of immN:immr:imms fields found in a logical immediate.
+ template<unsigned width>
+ static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
+ {
+ // Check width is a power of 2!
+ ASSERT(!(width & (width - 1)));
+ ASSERT(width <= 64 && width >= 2);
+ ASSERT(hsb >= lsb);
+ ASSERT(hsb < width);
+
+ int immN = 0;
+ int imms = 0;
+ int immr = 0;
+
+ // For 64-bit values this is easy - just set immN to true, and imms just
+ // contains the bit number of the highest set bit of the set range. For
+ // values with narrower widths, these are encoded by a leading set of
+ // one bits, followed by a zero bit, followed by the remaining set of bits
+ // being the high bit of the range. For a 32-bit immediate there are no
+ // leading one bits, just a zero followed by a five bit number. For a
+ // 16-bit immediate there is one one bit, a zero bit, and then a four bit
+ // bit-position, etc.
+ if (width == 64)
+ immN = 1;
+ else
+ imms = 63 & ~(width + width - 1);
+
+ if (inverted) {
+ // if width is 64 & hsb is 62, then we have a value something like:
+ // 0x80000000ffffffff (in this case with lsb 32).
+ // The ror should be by 1, imms (effectively set width minus 1) is
+ // 32. Set width is full width minus cleared width.
+ immr = (width - 1) - hsb;
+ imms |= (width - ((hsb - lsb) + 1)) - 1;
+ } else {
+ // if width is 64 & hsb is 62, then we have a value something like:
+ // 0x7fffffff00000000 (in this case with lsb 32).
+ // The value is effectively rol'ed by lsb, which is equivalent to
+ // a ror by width - lsb (or 0, in the case where lsb is 0). imms
+ // is hsb - lsb.
+ immr = (width - lsb) & (width - 1);
+ imms |= hsb - lsb;
+ }
+
+ return immN << 12 | immr << 6 | imms;
+ }
+
+ static const int InvalidLogicalImmediate = -1;
+
+ int m_value;
+};
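+// Usage sketch (illustrative): create64(0x00ff00ff00ff00ffull) yields a valid
+// encoding (a repeating 16-bit pattern), while create64(0x12345678) does not,
+// so callers must check isValid() before passing the result to the assembler.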
+
+
} // namespace JSC.
#endif // AssemblerCommon_h
m_assembler.and_<64>(dest, src1, src2);
}
+ void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
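+ // The logical-immediate encoding covers only certain bit patterns; when
+ // it fails, materialize the constant in the data temp register instead.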
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.and_<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.and_<64>(dest, src, dataTempRegister);
+ }
+
void and64(RegisterID src, RegisterID dest)
{
m_assembler.and_<64>(dest, dest, src);
signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.orr<64>(dest, src, dataTempRegister);
}
-
+
+ void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.orr<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.orr<64>(dest, src, dataTempRegister);
+ }
+
void or64(TrustedImm64 imm, RegisterID dest)
{
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
xor64(imm, dest, dest);
}
+ void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
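+ // XOR with all-ones is a bitwise NOT; mvn avoids both the immediate
+ // encoding (all-ones is not encodable) and the data temp register.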
+ if (imm.m_value == -1)
+ m_assembler.mvn<64>(dest, src);
+ else {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.eor<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.eor<64>(dest, src, dataTempRegister);
+ }
+ }
+
void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
if (imm.m_value == -1)
return Arg();
}
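+ // Returns a BitImm Arg if the value is an integer constant that the
+ // target's bit instructions can encode directly; otherwise an empty Arg.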
+ Arg bitImm(Value* value)
+ {
+ if (value->hasInt()) {
+ int64_t intValue = value->asInt();
+ if (Arg::isValidBitImmForm(intValue))
+ return Arg::bitImm(intValue);
+ }
+ return Arg();
+ }
+
+ Arg bitImm64(Value* value)
+ {
+ if (value->hasInt()) {
+ int64_t intValue = value->asInt();
+ if (Arg::isValidBitImm64Form(intValue))
+ return Arg::bitImm64(intValue);
+ }
+ return Arg();
+ }
+
Arg immOrTmp(Value* value)
{
if (Arg result = imm(value))
}
}
+ if (isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) {
+ if (commutativity == Commutative) {
+ if (Arg rightArg = bitImm(right)) {
+ append(opcode, rightArg, tmp(left), result);
+ return;
+ }
+ } else {
+ // A non-commutative operation could have an immediate in left.
+ if (Arg leftArg = bitImm(left)) {
+ append(opcode, leftArg, tmp(right), result);
+ return;
+ }
+ }
+ }
+
+ if (isValidForm(opcode, Arg::BitImm64, Arg::Tmp, Arg::Tmp)) {
+ if (commutativity == Commutative) {
+ if (Arg rightArg = bitImm64(right)) {
+ append(opcode, rightArg, tmp(left), result);
+ return;
+ }
+ } else {
+ // A non-commutative operation could have an immediate in left.
+ if (Arg leftArg = bitImm64(left)) {
+ append(opcode, leftArg, tmp(right), result);
+ return;
+ }
+ }
+ }
+
if (imm(right) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
append(opcode, tmp(left), imm(right), result);
return;
if (imm(value.value()))
arg = imm(value.value());
else if (value.value()->hasInt64())
- arg = Arg::imm64(value.value()->asInt64());
+ arg = Arg::bigImm(value.value()->asInt64());
else if (value.value()->hasDouble() && canBeInternal(value.value())) {
commitInternal(value.value());
- arg = Arg::imm64(bitwise_cast<int64_t>(value.value()->asDouble()));
+ arg = Arg::bigImm(bitwise_cast<int64_t>(value.value()->asDouble()));
} else
arg = tmp(value.value());
break;
if (imm(m_value))
append(Move, imm(m_value), tmp(m_value));
else
- append(Move, Arg::imm64(m_value->asInt()), tmp(m_value));
+ append(Move, Arg::bigImm(m_value->asInt()), tmp(m_value));
return;
}
switch (arg.kind()) {
case Arg::Tmp:
case Arg::Imm:
- case Arg::Imm64:
+ case Arg::BigImm:
break;
default:
if (!arg.isStackMemory())
return ValueRep::reg(arg.reg());
break;
case Arg::Imm:
- case Arg::Imm64:
+ case Arg::BigImm:
return ValueRep::constant(arg.value());
break;
case Arg::Addr:
case Imm:
out.print("$", m_offset);
return;
- case Imm64:
+ case BigImm:
+ out.printf("$0x%llx", static_cast<long long unsigned>(m_offset));
+ return;
+ case BitImm:
+ out.print("$", m_offset);
+ return;
+ case BitImm64:
out.printf("$0x%llx", static_cast<long long unsigned>(m_offset));
return;
case Addr:
case Arg::Imm:
out.print("Imm");
return;
- case Arg::Imm64:
- out.print("Imm64");
+ case Arg::BigImm:
+ out.print("BigImm");
+ return;
+ case Arg::BitImm:
+ out.print("BitImm");
+ return;
+ case Arg::BitImm64:
+ out.print("BitImm64");
return;
case Arg::Addr:
out.print("Addr");
Tmp,
// This is an immediate that the instruction will materialize. Imm is the immediate that can be
- // inlined into most instructions, while Imm64 indicates a constant materialization and is
+ // inlined into most instructions, while BigImm indicates a constant materialization and is
// usually only usable with Move. Specials may also admit it, for example for stackmaps used for
// OSR exit and tail calls.
+ // BitImm is an immediate usable by bitwise operations (And, Or, Xor, etc.);
+ // BitImm64 is its 64-bit counterpart.
Imm,
- Imm64,
+ BigImm,
+ BitImm,
+ BitImm64,
// These are the addresses. Instructions may load from (Use), store to (Def), or evaluate
// (UseAddr) addresses.
return result;
}
- static Arg imm64(int64_t value)
+ static Arg bigImm(int64_t value)
{
Arg result;
- result.m_kind = Imm64;
+ result.m_kind = BigImm;
+ result.m_offset = value;
+ return result;
+ }
+
+ static Arg bitImm(int64_t value)
+ {
+ Arg result;
+ result.m_kind = BitImm;
+ result.m_offset = value;
+ return result;
+ }
+
+ static Arg bitImm64(int64_t value)
+ {
+ Arg result;
+ result.m_kind = BitImm64;
result.m_offset = value;
return result;
}
static Arg immPtr(const void* address)
{
- return imm64(bitwise_cast<intptr_t>(address));
+ return bigImm(bitwise_cast<intptr_t>(address));
}
static Arg addr(Air::Tmp base, int32_t offset = 0)
return kind() == Imm;
}
- bool isImm64() const
+ bool isBigImm() const
+ {
+ return kind() == BigImm;
+ }
+
+ bool isBitImm() const
+ {
+ return kind() == BitImm;
+ }
+
+ bool isBitImm64() const
{
- return kind() == Imm64;
+ return kind() == BitImm64;
}
bool isSomeImm() const
{
- return isImm() || isImm64();
+ return isImm() || isBigImm() || isBitImm() || isBitImm64();
}
bool isAddr() const
int64_t value() const
{
- ASSERT(kind() == Imm || kind() == Imm64);
+ ASSERT(isSomeImm());
return m_offset;
}
void* pointerValue() const
{
- ASSERT(kind() == Imm64);
+ ASSERT(kind() == BigImm);
return bitwise_cast<void*>(static_cast<intptr_t>(m_offset));
}
{
switch (kind()) {
case Imm:
- case Imm64:
+ case BigImm:
+ case BitImm:
+ case BitImm64:
case Addr:
case Index:
case Stack:
{
switch (kind()) {
case Imm:
+ case BitImm:
+ case BitImm64:
case RelCond:
case ResCond:
case DoubleCond:
case Index:
case Stack:
case CallArg:
- case Imm64: // Yes, we allow Imm64 as a double immediate. We use this for implementing stackmaps.
+ case BigImm: // Yes, we allow BigImm as a double immediate. We use this for implementing stackmaps.
return true;
case Tmp:
return isFPTmp();
{
switch (kind()) {
case Imm:
+ case BitImm:
+ case BitImm64:
case Special:
case Tmp:
return true;
return false;
}
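+ // On x86, bit instructions accept any immediate representable as int32;
+ // on ARM64 the value must be encodable as a logical immediate.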
+ static bool isValidBitImmForm(int64_t value)
+ {
+ if (isX86())
+ return B3::isRepresentableAs<int32_t>(value);
+ if (isARM64())
+ return ARM64LogicalImmediate::create32(value).isValid();
+ return false;
+ }
+
+ static bool isValidBitImm64Form(int64_t value)
+ {
+ if (isX86())
+ return B3::isRepresentableAs<int32_t>(value);
+ if (isARM64())
+ return ARM64LogicalImmediate::create64(value).isValid();
+ return false;
+ }
+
static bool isValidAddrForm(int32_t offset, Optional<Width> width = Nullopt)
{
if (isX86())
return true;
case Imm:
return isValidImmForm(value());
- case Imm64:
+ case BigImm:
return true;
+ case BitImm:
+ return isValidBitImmForm(value());
+ case BitImm64:
+ return isValidBitImm64Form(value());
case Addr:
case Stack:
case CallArg:
MacroAssembler::TrustedImm32 asTrustedImm32() const
{
- ASSERT(isImm());
+ ASSERT(isImm() || isBitImm());
return MacroAssembler::TrustedImm32(static_cast<int32_t>(m_offset));
}
#if USE(JSVALUE64)
MacroAssembler::TrustedImm64 asTrustedImm64() const
{
- ASSERT(isImm64());
+ ASSERT(isBigImm() || isBitImm64());
return MacroAssembler::TrustedImm64(value());
}
#endif
MacroAssembler::TrustedImmPtr asTrustedImmPtr() const
{
if (is64Bit())
- ASSERT(isImm64());
+ ASSERT(isBigImm());
else
ASSERT(isImm());
return MacroAssembler::TrustedImmPtr(pointerValue());
if (is32Bit())
break;
return false;
- case Arg::Imm64:
+ case Arg::BigImm:
if (is64Bit())
break;
return false;
{
switch (inst.args[calleeArgOffset].kind()) {
case Arg::Imm:
- case Arg::Imm64:
+ case Arg::BigImm:
jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
jit.call(scratchRegister);
break;
else {
ASSERT(pair.src().isSomeImm());
ASSERT(move == Move32);
- result.append(Inst(Move, origin, Arg::imm64(pair.src().value()), scratch));
+ result.append(Inst(Move, origin, Arg::bigImm(pair.src().value()), scratch));
}
result.append(Inst(moveForWidth(pair.width()), origin, scratch, pair.dst()));
returnScratch(scratchIndex, scratch);
if (Arg::isValidImmForm(alias->constant))
arg = Arg::imm(alias->constant);
else
- arg = Arg::imm64(alias->constant);
+ arg = Arg::bigImm(alias->constant);
didThings = true;
return;
}
# Argument kinds:
# Tmp => temporary or register
# Imm => 32-bit immediate int
-# Imm64 => TrustedImm64
+# BigImm => TrustedImm64
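+# BitImm => an immediate usable by bit operations (any int32 on x86, a logical immediate on ARM64)
+# BitImm64 => 64-bit version of BitImm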
# Addr => address as temporary/register+offset
# Index => BaseIndex address
# Abs => AbsoluteAddress
And32 U:G:32, U:G:32, ZD:G:32
Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
x86: Tmp, Addr, Tmp
x86: Addr, Tmp, Tmp
64: And64 U:G:64, U:G:64, D:G:64
Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
x86_64: And64 U:G:64, UD:G:64
Tmp, Tmp
Or32 U:G:32, U:G:32, ZD:G:32
Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
x86: Tmp, Addr, Tmp
x86: Addr, Tmp, Tmp
64: Or64 U:G:64, U:G:64, D:G:64
Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
64: Or64 U:G:64, UD:G:64
Tmp, Tmp
Xor32 U:G:32, U:G:32, ZD:G:32
Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
x86: Tmp, Addr, Tmp
x86: Addr, Tmp, Tmp
64: Xor64 U:G:64, U:G:64, D:G:64
Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
64: Xor64 U:G:64, UD:G:64
Tmp, Tmp
Move U:G:Ptr, D:G:Ptr
Tmp, Tmp
Imm, Tmp as signExtend32ToPtr
- Imm64, Tmp
+ BigImm, Tmp
Addr, Tmp as loadPtr # This means that "Move Addr, Tmp" is code-generated as "load" not "move".
Index, Tmp as loadPtr
Tmp, Addr as storePtr
end
def isKind(token)
- token =~ /\A((Tmp)|(Imm)|(Imm64)|(Addr)|(Index)|(RelCond)|(ResCond)|(DoubleCond))\Z/
+ token =~ /\A((Tmp)|(Imm)|(BigImm)|(BitImm)|(BitImm64)|(Addr)|(Index)|(RelCond)|(ResCond)|(DoubleCond))\Z/
end
def isArch(token)
def consumeKind
result = token.string
- parseError("Expected kind (Imm, Imm64, Tmp, Addr, Index, RelCond, ResCond, or DoubleCond)") unless isKind(result)
+ parseError("Expected kind (Imm, BigImm, BitImm, BitImm64, Tmp, Addr, Index, RelCond, ResCond, or DoubleCond)") unless isKind(result)
advance
result
end
parseError("Form has wrong number of arguments for overload") unless kinds.length == signature.length
kinds.each_with_index {
| kind, index |
- if kind.name == "Imm" or kind.name == "Imm64"
+ if kind.name == "Imm" or kind.name == "BigImm" or kind.name == "BitImm" or kind.name == "BitImm64"
if signature[index].role != "U"
parseError("Form has an immediate for a non-use argument")
end
outp.puts "switch (#{columnGetter[columnIndex]}) {"
groups.each_pair {
| key, value |
- outp.puts "#if USE(JSVALUE64)" if key == "Imm64"
+ outp.puts "#if USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
Kind.argKinds(key).each {
| argKind |
outp.puts "case Arg::#{argKind}:"
}
matchForms(outp, speed, value, columnIndex + 1, columnGetter, filter, callback)
outp.puts "break;"
- outp.puts "#endif // USE(JSVALUE64)" if key == "Imm64"
+ outp.puts "#endif // USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
}
outp.puts "default:"
outp.puts "break;"
when "Imm"
outp.puts "if (!Arg::isValidImmForm(args[#{index}].value()))"
outp.puts "OPGEN_RETURN(false);"
+ when "BitImm"
+ outp.puts "if (!Arg::isValidBitImmForm(args[#{index}].value()))"
+ outp.puts "OPGEN_RETURN(false);"
+ when "BitImm64"
+ outp.puts "if (!Arg::isValidBitImm64Form(args[#{index}].value()))"
+ outp.puts "OPGEN_RETURN(false);"
when "Addr"
if arg.role == "UA"
outp.puts "if (args[#{index}].isStack() && args[#{index}].stackSlot()->isSpill())"
when "Index"
outp.puts "if (!Arg::isValidIndexForm(args[#{index}].scale(), args[#{index}].offset(), #{arg.widthCode}))"
outp.puts "OPGEN_RETURN(false);"
- when "Imm64"
+ when "BigImm"
when "RelCond"
when "ResCond"
when "DoubleCond"
else
outp.print "args[#{index}].fpr()"
end
- when "Imm"
+ when "Imm", "BitImm"
outp.print "args[#{index}].asTrustedImm32()"
- when "Imm64"
+ when "BigImm", "BitImm64"
outp.print "args[#{index}].asTrustedImm64()"
when "Addr"
outp.print "args[#{index}].asAddress()"
(*map)[value] = new T(value);
T* ptr = (*map)[value];
- block->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(ptr)), scratch);
+ block->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(ptr)), scratch);
block->append(move, nullptr, Arg::addr(scratch), tmp);
}
int32_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[5];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
Vector<int32_t> things(regs.size(), 666);
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
Vector<int32_t> things(regs.size(), 666);
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
Vector<int32_t> things(regs.size(), 666);
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
int64_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
int64_t things[5];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
int64_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
int64_t things[5];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
int32_t things[2];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
int32_t things[3];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
Vector<int32_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move32, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int32_t)));
shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
Vector<int64_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
}
Vector<int64_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
int32_t things[2];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
int64_t things[2];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
int64_t things[2];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
Vector<int64_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
Vector<int64_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
double things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
double things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));