+2010-01-04 Maciej Stachowiak <mjs@apple.com>
+
+ Reviewed by Adam Barth.
+
+ Reorganize, document and rename CPU() platform macros.
+ https://bugs.webkit.org/show_bug.cgi?id=33145
+ ExecutableAllocatorSymbian appears to have buggy ARM version check
+ https://bugs.webkit.org/show_bug.cgi?id=33138
+
+ * wtf/Platform.h:
+ Rename all macros related to detection of particular CPUs or
+ classes of CPUs to CPU(), reorganize and document them.
+
+ All remaining changes adapt to the renames and fix the
+ second bug cited above.
+
+ * assembler/ARMAssembler.cpp:
+ * assembler/ARMAssembler.h:
+ * assembler/ARMv7Assembler.h:
+ * assembler/AbstractMacroAssembler.h:
+ (JSC::AbstractMacroAssembler::Imm32::Imm32):
+ * assembler/MacroAssembler.h:
+ * assembler/MacroAssemblerARM.cpp:
+ * assembler/MacroAssemblerARM.h:
+ * assembler/MacroAssemblerCodeRef.h:
+ (JSC::MacroAssemblerCodePtr::MacroAssemblerCodePtr):
+ * assembler/MacroAssemblerX86.h:
+ * assembler/MacroAssemblerX86Common.h:
+ * assembler/MacroAssemblerX86_64.h:
+ * assembler/X86Assembler.h:
+ (JSC::X86Registers::):
+ (JSC::X86Assembler::):
+ (JSC::X86Assembler::movl_mEAX):
+ (JSC::X86Assembler::movl_EAXm):
+ (JSC::X86Assembler::repatchLoadPtrToLEA):
+ (JSC::X86Assembler::X86InstructionFormatter::memoryModRM):
+ * jit/ExecutableAllocator.h:
+ * jit/ExecutableAllocatorFixedVMPool.cpp:
+ * jit/ExecutableAllocatorPosix.cpp:
+ * jit/ExecutableAllocatorSymbian.cpp:
+ (JSC::ExecutableAllocator::intializePageSize):
+ * jit/JIT.cpp:
+ * jit/JIT.h:
+ * jit/JITArithmetic.cpp:
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::beginUninterruptedSequence):
+ (JSC::JIT::restoreArgumentReferenceForTrampoline):
+ (JSC::JIT::emitCount):
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::privateCompileGetByIdProto):
+ (JSC::JIT::privateCompileGetByIdProtoList):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ * jit/JITStubs.cpp:
+ (JSC::JITThunks::JITThunks):
+ * jit/JITStubs.h:
+ * runtime/Collector.cpp:
+ (JSC::currentThreadStackBase):
+ (JSC::getPlatformThreadRegisters):
+ (JSC::otherThreadStackPointer):
+ * wrec/WREC.h:
+ * wrec/WRECGenerator.cpp:
+ (JSC::WREC::Generator::generateEnter):
+ (JSC::WREC::Generator::generateReturnSuccess):
+ (JSC::WREC::Generator::generateReturnFailure):
+ * wrec/WRECGenerator.h:
+ * wtf/FastMalloc.cpp:
+ * wtf/TCSpinLock.h:
+ (TCMalloc_SpinLock::Lock):
+ (TCMalloc_SpinLock::Unlock):
+ (TCMalloc_SlowLock):
+ * wtf/Threading.h:
+ * wtf/dtoa.cpp:
+ * yarr/RegexJIT.cpp:
+ (JSC::Yarr::RegexGenerator::generateEnter):
+ (JSC::Yarr::RegexGenerator::generateReturn):
+ * yarr/RegexJIT.h:
+
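For context on the convention being renamed to: the CPU() query macro follows the same function-macro pattern as the existing PLATFORM() and ENABLE() macros. A minimal sketch, assuming the definition matches that pattern (the actual define sits near the top of wtf/Platform.h, outside this excerpt):

    #define CPU(WTF_FEATURE) (defined(WTF_CPU_##WTF_FEATURE) && WTF_CPU_##WTF_FEATURE)

    /* A port defines the raw flag once... */
    #define WTF_CPU_X86_64 1

    /* ...and every call site queries it uniformly: */
    #if CPU(X86_64)
    /* 64-bit x86 code path */
    #endif
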
2010-01-04 Maciej Stachowiak <mjs@apple.com>
Reviewed by Adam Barth.
#include "config.h"
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "ARMAssembler.h"
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "AssemblerBufferWithConstantPool.h"
#include <wtf/Assertions.h>
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#endif // ARMAssembler_h
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
#include "AssemblerBuffer.h"
#include <wtf/Assertions.h>
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
#endif // ARMAssembler_h
struct Imm32 {
explicit Imm32(int32_t value)
: m_value(value)
-#if PLATFORM(ARM)
+#if CPU(ARM)
, m_isPointer(false)
#endif
{
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
explicit Imm32(ImmPtr ptr)
: m_value(ptr.asIntptr())
-#if PLATFORM(ARM)
+#if CPU(ARM)
, m_isPointer(true)
#endif
{
#endif
int32_t m_value;
-#if PLATFORM(ARM)
+#if CPU(ARM)
// We rely on being able to regenerate code to recover exception handling
// information. Since ARMv7 supports 16-bit immediates, there is a danger
// that, if pointer values change, the layout of the generated code will change.
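A sketch of the rationale for m_isPointer (the emit helpers below are hypothetical, not the real assembler API): if a pointer happened to fit in a short ARMv7 immediate form, the assembler could pick a shorter encoding, and regenerated code would change layout; tagging pointer immediates forces the fixed-length sequence regardless of the current value.

    if (imm.m_isPointer) {
        // Always the full-width form (e.g. a movw/movt pair), even if the
        // current pointer value would fit in 16 bits.
        emitFullWidthMove(dest, imm.m_value);   // hypothetical helper
    } else {
        emitShortestMove(dest, imm.m_value);    // hypothetical helper; may pick a 16-bit form
    }
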
#if ENABLE(ASSEMBLER)
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
-#elif PLATFORM(X86)
+#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
-#elif PLATFORM(X86_64)
+#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
using MacroAssemblerBase::jump;
using MacroAssemblerBase::branch32;
using MacroAssemblerBase::branch16;
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
using MacroAssemblerBase::branchPtr;
using MacroAssemblerBase::branchTestPtr;
#endif
// Ptr methods
// On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
-#if !PLATFORM(X86_64)
+ // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !CPU(X86_64)
void addPtr(RegisterID src, RegisterID dest)
{
add32(src, dest);
#include "config.h"
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
-#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT
+#if CPU(ARMV5_OR_LOWER)
+/* On ARMv5 and below, natural alignment is required. */
void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
{
ARMWord op2;
}
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "ARMAssembler.h"
#include "AbstractMacroAssembler.h"
m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
-#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT
+#if CPU(ARMV5_OR_LOWER)
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
}
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#endif // MacroAssemblerARM_h
// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
// into the processor are decorated with the bottom bit set, indicating that this is
// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
}
explicit MacroAssemblerCodePtr(void* value)
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
// Decorate the pointer as a thumb code pointer.
: m_value(reinterpret_cast<char*>(value) + 1)
#else
}
void* executableAddress() const { return m_value; }
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
// To use this pointer as a data address remove the decoration.
void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
#else
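An illustration of the decoration described above, assuming a Thumb-2 target: the executable address carries the low bit set so that an interworking branch (BX/BLX) enters Thumb state, and the data address is recovered by stripping that bit.

    char thumbCode[64];                                  // stand-in for 16-bit-aligned Thumb code
    void* executable = thumbCode + 1;                    // decorated: low bit set for BX/BLX
    void* data = static_cast<char*>(executable) - 1;     // decoration removed; equals thumbCode
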
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(X86)
+#if ENABLE(ASSEMBLER) && CPU(X86)
#include "MacroAssemblerX86Common.h"
m_assembler.movl_i32r(imm.m_value, dest);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void move(RegisterID src, RegisterID dest)
{
// Note: on 64-bit this is a full register move; perhaps it would be
// x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
friend class MacroAssemblerX86;
-#if PLATFORM(X86)
+#if CPU(X86)
#if PLATFORM(MAC)
// All X86 Macs are guaranteed to support at least SSE2,
static SSE2CheckState s_sse2CheckState;
#endif // PLATFORM(MAC)
-#elif !defined(NDEBUG) // PLATFORM(X86)
+#elif !defined(NDEBUG) // CPU(X86)
// On x86-64 we should never be checking for SSE2 in a non-debug build,
// but in debug builds we add this method to keep the asserts above happy.
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(X86_64)
+#if ENABLE(ASSEMBLER) && CPU(X86_64)
#include "MacroAssemblerX86Common.h"
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
#include "AssemblerBuffer.h"
#include <stdint.h>
esi,
edi,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
r8,
r9,
r10,
OP_XOR_GvEv = 0x33,
OP_CMP_EvGv = 0x39,
OP_CMP_GvEv = 0x3B,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
PRE_REX = 0x40,
#endif
OP_PUSH_EAX = 0x50,
OP_POP_EAX = 0x58,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
OP_MOVSXD_GvEv = 0x63,
#endif
PRE_OPERAND_SIZE = 0x66,
// Arithmetic operations:
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void adcl_im(int imm, void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void addq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void andq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void orq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void subq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void xorq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void sarq_CLr(RegisterID dst)
{
m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
m_formatter.immediate32(imm);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void cmpq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
m_formatter.immediate32(imm);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void testq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void xchgq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
void movl_mEAX(void* addr)
{
m_formatter.oneByteOp(OP_MOV_EAXOv);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
m_formatter.immediate32(reinterpret_cast<int>(addr));
void movl_EAXm(void* addr)
{
m_formatter.oneByteOp(OP_MOV_OvEAX);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void movq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
{
m_formatter.oneByteOp(OP_LEA, dst, base, offset);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void leaq_mr(int offset, RegisterID base, RegisterID dst)
{
m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void cvtsi2sd_mr(void* address, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void movq_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void movsd_mr(void* address, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
static void repatchLoadPtrToLEA(void* where)
{
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
// On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
// Skip over the prefix byte.
where = reinterpret_cast<char*>(where) + 1;
memoryModRM(reg, base, index, scale, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
memoryModRM(reg, base, index, scale, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
}
#endif
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
// Quad-word-sized operands:
//
// Used to format 64-bit operations, planting a REX.w prefix.
static const RegisterID noBase = X86Registers::ebp;
static const RegisterID hasSib = X86Registers::esp;
static const RegisterID noIndex = X86Registers::esp;
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
static const RegisterID noBase2 = X86Registers::r13;
static const RegisterID hasSib2 = X86Registers::r12;
void memoryModRM(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if ((base == hasSib) || (base == hasSib2)) {
#else
if (base == hasSib) {
m_buffer.putIntUnchecked(offset);
}
} else {
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))
#else
if (!offset && (base != noBase))
void memoryModRM_disp32(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if ((base == hasSib) || (base == hasSib2)) {
#else
if (base == hasSib) {
{
ASSERT(index != noIndex);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))
#else
if (!offset && (base != noBase))
}
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void memoryModRM(int reg, void* address)
{
// noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
#endif // X86Assembler_h
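A minimal sketch of the register-number special cases memoryModRM works around (the helper names are illustrative, not part of X86Assembler): in the ModRM byte an r/m field of 4 (esp, and r12 under REX.B) means "a SIB byte follows", and a base field of 5 (ebp, r13) with mod == 00 means "disp32 with no base", so those registers always need the longer encodings.

    static int lowThreeBits(int reg) { return reg & 7; }

    // esp (4) and r12 (12) share low bits 100: that r/m value is the SIB escape.
    static bool forcesSib(int base) { return lowThreeBits(base) == 4; }

    // ebp (5) and r13 (13) share low bits 101: mod == 00 would mean "no base, disp32",
    // so an explicit (possibly zero) displacement must be emitted.
    static bool forcesDisplacement(int base, int offset) { return offset || lowThreeBits(base) == 5; }
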
#endif
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
static void cacheFlush(void*, size_t)
{
}
-#elif PLATFORM(ARM_THUMB2) && PLATFORM(IPHONE)
+#elif CPU(ARM_THUMB2) && PLATFORM(IPHONE)
static void cacheFlush(void* code, size_t size)
{
sys_dcache_flush(code, size);
sys_icache_invalidate(code, size);
}
-#elif PLATFORM(ARM_THUMB2) && PLATFORM(LINUX)
+#elif CPU(ARM_THUMB2) && PLATFORM(LINUX)
static void cacheFlush(void* code, size_t size)
{
asm volatile (
{
User::IMB_Range(code, static_cast<char*>(code) + size);
}
-#elif PLATFORM(ARM_TRADITIONAL) && PLATFORM(LINUX)
+#elif CPU(ARM_TRADITIONAL) && PLATFORM(LINUX)
static void cacheFlush(void* code, size_t size)
{
asm volatile (
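The per-port cacheFlush variants exist because ARM cores have split instruction and data caches that are not coherent with freshly JIT-written memory. As a point of comparison only (not something this patch uses), GCC-compatible compilers expose a portable primitive for the same job:

    // Illustrative alternative: flush a freshly written code range via the GCC builtin.
    static void flushCodeRange(void* code, size_t size)
    {
        char* begin = static_cast<char*>(code);
        __builtin___clear_cache(begin, begin + size);
    }
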
#include <errno.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)
+#if ENABLE(ASSEMBLER) && PLATFORM(MAC) && CPU(X86_64)
#include "TCSpinLock.h"
#include <mach/mach_init.h>
namespace JSC {
-#if !(PLATFORM(MAC) && PLATFORM(X86_64))
+#if !(PLATFORM(MAC) && CPU(X86_64))
void ExecutableAllocator::intializePageSize()
{
ASSERT_UNUSED(result, !result);
}
-#endif // !(PLATFORM(MAC) && PLATFORM(X86_64))
+#endif // !(PLATFORM(MAC) && CPU(X86_64))
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
void ExecutableAllocator::intializePageSize()
{
-#if PLATFORM_ARM_ARCH(5)
+#if CPU(ARMV5_OR_LOWER)
// The moving memory model (as used in ARMv5 and earlier platforms)
// on Symbian OS limits the number of chunks for each process to 16.
// To mitigate this limitation increase the pagesize to
#include "JIT.h"
// This probably does not belong here; adding here for now as a quick Windows build fix.
-#if ENABLE(ASSEMBLER) && PLATFORM(X86) && !PLATFORM(MAC)
+#if ENABLE(ASSEMBLER) && CPU(X86) && !PLATFORM(MAC)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
// on x86/x86-64 it is ecx for performance reasons, since the
// MacroAssembler will need to plant register swaps if it is not -
// however the code will still function correctly.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
static const RegisterID returnValueRegister = X86Registers::eax;
static const RegisterID cachedResultRegister = X86Registers::eax;
static const RegisterID firstArgumentRegister = X86Registers::edi;
static const FPRegisterID fpRegT0 = X86Registers::xmm0;
static const FPRegisterID fpRegT1 = X86Registers::xmm1;
static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif PLATFORM(X86)
+#elif CPU(X86)
static const RegisterID returnValueRegister = X86Registers::eax;
static const RegisterID cachedResultRegister = X86Registers::eax;
// On x86 we always use fastcall conventions, but on
static const FPRegisterID fpRegT0 = X86Registers::xmm0;
static const FPRegisterID fpRegT1 = X86Registers::xmm1;
static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
static const RegisterID returnValueRegister = ARMRegisters::r0;
static const RegisterID cachedResultRegister = ARMRegisters::r0;
static const RegisterID firstArgumentRegister = ARMRegisters::r0;
static const FPRegisterID fpRegT0 = ARMRegisters::d0;
static const FPRegisterID fpRegT1 = ARMRegisters::d1;
static const FPRegisterID fpRegT2 = ARMRegisters::d2;
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
static const RegisterID returnValueRegister = ARMRegisters::r0;
static const RegisterID cachedResultRegister = ARMRegisters::r0;
static const RegisterID firstArgumentRegister = ARMRegisters::r0;
void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
-#if PLATFORM(X86)
+#if CPU(X86)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 7;
static const int patchOffsetPutByIdExternalLoad = 13;
static const int patchOffsetMethodCheckProtoObj = 11;
static const int patchOffsetMethodCheckProtoStruct = 18;
static const int patchOffsetMethodCheckPutFunction = 29;
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 4;
static const int patchOffsetPutByIdExternalLoad = 16;
void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch);
void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 10;
static const int patchOffsetPutByIdExternalLoad = 20;
static const int patchOffsetMethodCheckProtoObj = 20;
static const int patchOffsetMethodCheckProtoStruct = 30;
static const int patchOffsetMethodCheckPutFunction = 50;
-#elif PLATFORM(X86)
+#elif CPU(X86)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 7;
static const int patchOffsetPutByIdExternalLoad = 13;
static const int patchOffsetMethodCheckProtoObj = 11;
static const int patchOffsetMethodCheckProtoStruct = 18;
static const int patchOffsetMethodCheckPutFunction = 29;
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 10;
static const int patchOffsetPutByIdExternalLoad = 26;
static const int patchOffsetMethodCheckProtoObj = 24;
static const int patchOffsetMethodCheckProtoStruct = 34;
static const int patchOffsetMethodCheckPutFunction = 58;
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 4;
static const int patchOffsetPutByIdExternalLoad = 16;
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
stubCall.call(dst);
}
-#else // PLATFORM(X86) || PLATFORM(X86_64)
+#else // CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
{
}
-#endif // PLATFORM(X86) || PLATFORM(X86_64)
+#endif // CPU(X86) || CPU(X86_64)
/* ------------------------------ END: OP_MOD ------------------------------ */
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
stubCall.call(result);
}
-#else // PLATFORM(X86) || PLATFORM(X86_64)
+#else // CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
ASSERT_NOT_REACHED();
}
-#endif // PLATFORM(X86) || PLATFORM(X86_64)
+#endif // CPU(X86) || CPU(X86_64)
/* ------------------------------ END: OP_MOD ------------------------------ */
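Why op_mod is emitted inline only on x86/x86-64: the idiv instruction has a fixed register contract that the x86 register layout here can satisfy, and no equivalent mapping exists in the other backends. A sketch of that contract:

    // Illustrative: x86 signed division. The dividend occupies edx:eax (set up
    // with cdq); after "idiv divisor" the quotient is in eax and the
    // remainder - the value op_mod wants - is in edx.
    int remainderOf(int numerator, int denominator)
    {
        return numerator % denominator;   // roughly: mov eax,num; cdq; idiv den; result in edx
    }
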
ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
-#if PLATFORM(ARM_TRADITIONAL)
+#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
// Ensure the label after the sequence can also fit
insnSpace += sizeof(ARMWord);
#endif
-#if PLATFORM(ARM)
+#if CPU(ARM)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
loadPtr(address, linkRegister);
}
-#else // PLATFORM(X86) || PLATFORM(X86_64)
+#else // CPU(X86) || CPU(X86_64)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
-#if PLATFORM(X86)
+#if CPU(X86)
// Within a trampoline the return address will be on the stack at this point.
addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
move(stackPointerRegister, firstArgumentRegister);
#endif
// In the trampoline on x86-64, the first argument register is not overwritten.
#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
-#if PLATFORM(X86_64) // Or any other 64-bit plattform.
+#if CPU(X86_64) // Or any other 64-bit platform.
addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
-#elif PLATFORM(X86) // Or any other little-endian 32-bit plattform.
+#elif CPU(X86) // Or any other little-endian 32-bit platform.
intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#endif
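The little-endian 32-bit branch of emitCount, written out in plain C (a sketch, assuming a little-endian target where the low word of the counter comes first):

    #include <stdint.h>

    // Bump a 64-bit counter using only 32-bit memory operations: add to the
    // low word, then propagate the carry into the high word.
    static void addToCounter(int64_t* counter, uint32_t count)
    {
        uint32_t* lo = reinterpret_cast<uint32_t*>(counter);  // low word first (little-endian)
        uint32_t* hi = lo + 1;
        uint32_t before = *lo;
        *lo += count;
        *hi += (*lo < before) ? 1 : 0;                        // the addWithCarry32 step
    }
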
#if ENABLE(OPCODE_SAMPLING)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
#endif
#if ENABLE(CODEBLOCK_SAMPLING)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
jump(regT0);
-#if PLATFORM(X86) || PLATFORM(ARM_TRADITIONAL)
+#if CPU(X86) || CPU(ARM_TRADITIONAL)
Label nativeCallThunk = align();
preserveReturnAddressAfterCall(regT0);
emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-#if PLATFORM(X86)
+#if CPU(X86)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
/* We have two structs that we use to describe the stackframe we set up for our
// so pull them off now
addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
// Allocate stack space for our arglist
emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86Registers::ecx);
// Allocate stack space for our arglist
call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-#elif PLATFORM(X86)
+#elif CPU(X86)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
/* We have two structs that we use to describe the stackframe we set up for our
// so pull them off now
addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
// Allocate stack space for our arglist
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(currStructure), regT3);
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(currStructure), regT3);
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(currStructure), regT3);
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(currStructure), regT3);
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
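The repeated shape above reflects an instruction-set limit: x86-64 compare instructions accept at most a 32-bit, sign-extended immediate, and only movq/movabs can carry a full 64-bit immediate, and only into a register. So the Structure* is materialized in regT3 first, while on 32-bit targets the pointer fits directly in the compare. Roughly, in illustrative AT&T-style pseudo-assembly:

    /* 32-bit: the pointer fits in the instruction                            */
    /*   cmpl $prototypeStructure, (prototypeStructureAddress)                */
    /* 64-bit: materialize the pointer, then compare                          */
    /*   movq $prototypeStructure, %regT3      -- move(ImmPtr(...), regT3)    */
    /*   cmpq %regT3, (prototypeStructureAddress)                             */
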
#define THUMB_FUNC_PARAM(name)
#endif
-#if PLATFORM(LINUX) && PLATFORM(X86_64)
+#if PLATFORM(LINUX) && CPU(X86_64)
#define SYMBOL_STRING_RELOCATION(name) #name "@plt"
#else
#define SYMBOL_STRING_RELOCATION(name) SYMBOL_STRING(name)
#elif PLATFORM(AIX)
// IBM's own file format
#define HIDE_SYMBOL(name) ".lglobl " #name
-#elif PLATFORM(LINUX) || PLATFORM(FREEBSD) || PLATFORM(OPENBSD) || PLATFORM(SOLARIS) || (PLATFORM(HPUX) && PLATFORM(IA64)) || PLATFORM(SYMBIAN) || PLATFORM(NETBSD)
+#elif PLATFORM(LINUX) || PLATFORM(FREEBSD) || PLATFORM(OPENBSD) || PLATFORM(SOLARIS) || (PLATFORM(HPUX) && CPU(IA64)) || PLATFORM(SYMBIAN) || PLATFORM(NETBSD)
// ELF platform
#define HIDE_SYMBOL(name) ".hidden " #name
#else
#if USE(JSVALUE32_64)
-#if COMPILER(GCC) && PLATFORM(X86)
+#if COMPILER(GCC) && CPU(X86)
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(X86_64)
+#elif COMPILER(GCC) && CPU(X86_64)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
"bx lr" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM_TRADITIONAL)
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
asm volatile (
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
}
}
-#endif // COMPILER(GCC) && PLATFORM(X86)
+#endif // COMPILER(GCC) && CPU(X86)
#else // USE(JSVALUE32_64)
-#if COMPILER(GCC) && PLATFORM(X86)
+#if COMPILER(GCC) && CPU(X86)
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(X86_64)
+#elif COMPILER(GCC) && CPU(X86_64)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
"bx lr" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM_TRADITIONAL)
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
asm volatile (
".text\n"
}
}
-#endif // COMPILER(GCC) && PLATFORM(X86)
+#endif // COMPILER(GCC) && CPU(X86)
#endif // USE(JSVALUE32_64)
{
JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
// Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
// and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
// macros.
} \
} while (0)
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
#define DEFINE_STUB_FUNCTION(rtype, op) \
extern "C" { \
); \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
-#elif PLATFORM(ARM_TRADITIONAL) && COMPILER(GCC)
+#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
#if USE(JSVALUE32_64)
#define THUNK_RETURN_ADDRESS_OFFSET 64
ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
};
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
struct JITStackFrame {
void* reserved; // Unused
JITStubArg args[6];
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
-#elif PLATFORM(X86)
+#elif CPU(X86)
#if COMPILER(MSVC)
#pragma pack(push)
#pragma pack(4)
#if COMPILER(MSVC)
#pragma pack(pop)
#endif // COMPILER(MSVC)
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
struct JITStackFrame {
void* reserved; // Unused
JITStubArg args[6];
ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
};
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
struct JITStackFrame {
JITStubArg padding; // Unused
JITStubArg args[7];
#define STUB_ARGS_DECLARATION void** args
#define STUB_ARGS (args)
- #if PLATFORM(X86) && COMPILER(MSVC)
+ #if CPU(X86) && COMPILER(MSVC)
#define JIT_STUB __fastcall
- #elif PLATFORM(X86) && COMPILER(GCC)
+ #elif CPU(X86) && COMPILER(GCC)
#define JIT_STUB __attribute__ ((fastcall))
#else
#define JIT_STUB
#endif
#endif
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
struct VoidPtrPair {
void* first;
void* second;
#if PLATFORM(DARWIN)
pthread_t thread = pthread_self();
return pthread_get_stackaddr_np(thread);
-#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
+#elif PLATFORM(WIN_OS) && CPU(X86) && COMPILER(MSVC)
// offset 0x18 from the FS segment register gives a pointer to
// the thread information block for the current thread
NT_TIB* pTib;
MOV pTib, EAX
}
return static_cast<void*>(pTib->StackBase);
-#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
+#elif PLATFORM(WIN_OS) && CPU(X86_64) && COMPILER(MSVC)
PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
return reinterpret_cast<void*>(pTib->StackBase);
-#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
+#elif PLATFORM(WIN_OS) && CPU(X86) && COMPILER(GCC)
// offset 0x18 from the FS segment register gives a pointer to
// the thread information block for the current thread
NT_TIB* pTib;
#if PLATFORM(DARWIN)
-#if PLATFORM(X86)
+#if CPU(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
-#elif PLATFORM(X86_64)
+#elif CPU(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
-#elif PLATFORM(PPC)
+#elif CPU(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
-#elif PLATFORM(PPC64)
+#elif CPU(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif
-#elif PLATFORM(WIN_OS)&& PLATFORM(X86)
+#elif PLATFORM(WIN_OS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
{
#if PLATFORM(DARWIN)
-#if PLATFORM(X86)
+#if CPU(X86)
unsigned user_count = sizeof(regs)/sizeof(int);
thread_state_flavor_t flavor = i386_THREAD_STATE;
-#elif PLATFORM(X86_64)
+#elif CPU(X86_64)
unsigned user_count = x86_THREAD_STATE64_COUNT;
thread_state_flavor_t flavor = x86_THREAD_STATE64;
-#elif PLATFORM(PPC)
+#elif CPU(PPC)
unsigned user_count = PPC_THREAD_STATE_COUNT;
thread_state_flavor_t flavor = PPC_THREAD_STATE;
-#elif PLATFORM(PPC64)
+#elif CPU(PPC64)
unsigned user_count = PPC_THREAD_STATE64_COUNT;
thread_state_flavor_t flavor = PPC_THREAD_STATE64;
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
unsigned user_count = ARM_THREAD_STATE_COUNT;
thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)
-#elif PLATFORM(WIN_OS) && PLATFORM(X86)
+#elif PLATFORM(WIN_OS) && CPU(X86)
regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
GetThreadContext(platformThread, &regs);
return sizeof(CONTEXT);
#if __DARWIN_UNIX03
-#if PLATFORM(X86)
+#if CPU(X86)
return reinterpret_cast<void*>(regs.__esp);
-#elif PLATFORM(X86_64)
+#elif CPU(X86_64)
return reinterpret_cast<void*>(regs.__rsp);
-#elif PLATFORM(PPC) || PLATFORM(PPC64)
+#elif CPU(PPC) || CPU(PPC64)
return reinterpret_cast<void*>(regs.__r1);
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#else // !__DARWIN_UNIX03
-#if PLATFORM(X86)
+#if CPU(X86)
return reinterpret_cast<void*>(regs.esp);
-#elif PLATFORM(X86_64)
+#elif CPU(X86_64)
return reinterpret_cast<void*>(regs.rsp);
-#elif (PLATFORM(PPC) || PLATFORM(PPC64))
+#elif CPU(PPC) || CPU(PPC64)
return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif // __DARWIN_UNIX03
// end PLATFORM(DARWIN)
-#elif PLATFORM(X86) && PLATFORM(WIN_OS)
+#elif CPU(X86) && PLATFORM(WIN_OS)
return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#include <wtf/unicode/Unicode.h>
-#if COMPILER(GCC) && PLATFORM(X86)
+#if COMPILER(GCC) && CPU(X86)
#define WREC_CALL __attribute__ ((regparm (3)))
#else
#define WREC_CALL
void Generator::generateEnter()
{
-#if PLATFORM(X86)
+#if CPU(X86)
// On x86 edi & esi are callee preserved registers.
push(X86Registers::edi);
push(X86Registers::esi);
store32(index, Address(output, 4)); // match end
// Restore callee save registers.
-#if PLATFORM(X86)
+#if CPU(X86)
pop(X86Registers::esi);
pop(X86Registers::edi);
#endif
pop();
move(Imm32(-1), returnRegister);
-#if PLATFORM(X86)
+#if CPU(X86)
pop(X86Registers::esi);
pop(X86Registers::edi);
#endif
{
}
-#if PLATFORM(X86)
+#if CPU(X86)
static const RegisterID input = X86Registers::eax;
static const RegisterID index = X86Registers::edx;
static const RegisterID length = X86Registers::ecx;
static const RegisterID returnRegister = X86Registers::eax;
#endif
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
static const RegisterID input = X86Registers::edi;
static const RegisterID index = X86Registers::esi;
static const RegisterID length = X86Registers::edx;
};
#if defined(WTF_CHANGES)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
// can be excluded from the PageMap key.
// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
#endif
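A sketch of the key-narrowing trick the comment describes, assuming 4 KB pages and the 48-bit canonical addresses of contemporary x86-64 parts:

    #include <stdint.h>

    static const uintptr_t kPageShift = 12;    // assumption: 4 KB pages
    static const uintptr_t kAddressBits = 48;  // canonical x86-64 virtual addresses

    // Only 48 - 12 = 36 significant bits remain, shrinking the PageMap key space.
    static uintptr_t pageMapKey(uintptr_t address)
    {
        return (address & ((uintptr_t(1) << kAddressBits) - 1)) >> kPageShift;
    }
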
+
+/* ==== CPU() - the target CPU architecture ==== */
+
+/* This also defines CPU(BIG_ENDIAN) or CPU(MIDDLE_ENDIAN) or neither, as appropriate. */
+
+
+/* CPU(ALPHA) - DEC Alpha */
+#if defined(__alpha__)
+#define WTF_CPU_ALPHA 1
+#endif
+
+/* CPU(IA64) - Itanium / IA-64 */
+#if defined(__ia64__)
+#define WTF_CPU_IA64 1
+#endif
+
+/* CPU(PPC) - PowerPC 32-bit */
+#if defined(__ppc__) \
+ || defined(__PPC__) \
+ || defined(__powerpc__) \
+ || defined(__powerpc) \
+ || defined(__POWERPC__) \
+ || defined(_M_PPC) \
+ || defined(__PPC)
+#define WTF_CPU_PPC 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(PPC64) - PowerPC 64-bit */
+#if defined(__ppc64__) \
+ || defined(__PPC64__)
+#define WTF_CPU_PPC64 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SH4) - SuperH SH-4 */
+#if defined(__SH4__)
+#define WTF_CPU_SH4 1
+#endif
+
+/* CPU(SPARC32) - SPARC 32-bit */
+#if (defined(__sparc) && !defined(__arch64__)) || defined(__sparcv8)
+#define WTF_CPU_SPARC32 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SPARC64) - SPARC 64-bit */
+#if (defined(__sparc__) && defined(__arch64__)) || defined(__sparcv9)
+#define WTF_CPU_SPARC64 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SPARC) - any SPARC, true for CPU(SPARC32) and CPU(SPARC64) */
+#if CPU(SPARC32) || CPU(SPARC64)
+#define WTF_CPU_SPARC 1
+#endif
+
+/* CPU(X86) - i386 / x86 32-bit */
+#if defined(__i386__) \
+ || defined(i386) \
+ || defined(_M_IX86) \
+ || defined(_X86_) \
+ || defined(__THW_INTEL)
+#define WTF_CPU_X86 1
+#endif
+
+/* CPU(X86_64) - AMD64 / Intel64 / x86_64 64-bit */
+#if defined(__x86_64__) \
+ || defined(_M_X64)
+#define WTF_CPU_X86_64 1
+#endif
+
+/* CPU(ARM) - ARM, any version */
+#if defined(arm) \
+ || defined(__arm__)
+#define WTF_CPU_ARM 1
+
+#if defined(__ARMEB__)
+#define WTF_CPU_BIG_ENDIAN 1
+
+#elif !defined(__ARM_EABI__) \
+ && !defined(__EABI__) \
+ && !defined(__VFP_FP__) \
+ && !defined(ANDROID)
+#define WTF_CPU_MIDDLE_ENDIAN 1
+
+#endif
+
+#define WTF_ARM_ARCH_AT_LEAST(N) (CPU(ARM) && WTF_ARM_ARCH_VERSION >= N)
+
+/* Set WTF_ARM_ARCH_VERSION */
+#if defined(__ARM_ARCH_4__) \
+ || defined(__ARM_ARCH_4T__) \
+ || defined(__MARM_ARMV4__) \
+ || defined(_ARMV4I_)
+#define WTF_ARM_ARCH_VERSION 4
+
+#elif defined(__ARM_ARCH_5__) \
+ || defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5E__) \
+ || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__) \
+ || defined(__MARM_ARMV5__)
+#define WTF_ARM_ARCH_VERSION 5
+
+#elif defined(__ARM_ARCH_6__) \
+ || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) \
+ || defined(__ARM_ARCH_6T2__) \
+ || defined(__ARMV6__)
+#define WTF_ARM_ARCH_VERSION 6
+
+#elif defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__)
+#define WTF_ARM_ARCH_VERSION 7
+
+/* RVCT sets __TARGET_ARCH_ARM */
+#elif defined(__TARGET_ARCH_ARM)
+#define WTF_ARM_ARCH_VERSION __TARGET_ARCH_ARM
+
+#else
+#define WTF_ARM_ARCH_VERSION 0
+
+#endif
+
+/* Set WTF_THUMB_ARCH_VERSION */
+#if defined(__ARM_ARCH_4T__)
+#define WTF_THUMB_ARCH_VERSION 1
+
+#elif defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+#define WTF_THUMB_ARCH_VERSION 2
+
+#elif defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) \
+ || defined(__ARM_ARCH_6M__)
+#define WTF_THUMB_ARCH_VERSION 3
+
+#elif defined(__ARM_ARCH_6T2__) \
+ || defined(__ARM_ARCH_7__) \
+ || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) \
+ || defined(__ARM_ARCH_7M__)
+#define WTF_THUMB_ARCH_VERSION 4
+
+/* RVCT sets __TARGET_ARCH_THUMB */
+#elif defined(__TARGET_ARCH_THUMB)
+#define WTF_THUMB_ARCH_VERSION __TARGET_ARCH_THUMB
+
+#else
+#define WTF_THUMB_ARCH_VERSION 0
+#endif
+
+
+/* CPU(ARMV5_OR_LOWER) - ARM instruction set v5 or earlier */
+/* On ARMv5 and below, natural alignment is required,
+   and there are some other differences for v5 or earlier. */
+#if !defined(WTF_CPU_ARMV5_OR_LOWER) && !WTF_ARM_ARCH_AT_LEAST(6)
+#define WTF_CPU_ARMV5_OR_LOWER 1
+#endif
+
+
+/* CPU(ARM_TRADITIONAL) - Thumb2 is not available, only traditional ARM (v4 or greater) */
+/* CPU(ARM_THUMB2) - Thumb2 instruction set is available */
+/* Only one of these will be defined. */
+#if !defined(WTF_CPU_ARM_TRADITIONAL) && !defined(WTF_CPU_ARM_THUMB2)
+# if defined(thumb2) || defined(__thumb2__) \
+ || ((defined(__thumb) || defined(__thumb__)) && WTF_THUMB_ARCH_VERSION == 4)
+# define WTF_CPU_ARM_TRADITIONAL 0
+# define WTF_CPU_ARM_THUMB2 1
+# elif WTF_ARM_ARCH_AT_LEAST(4)
+# define WTF_CPU_ARM_TRADITIONAL 1
+# define WTF_CPU_ARM_THUMB2 0
+# else
+# error "Not supported ARM architecture"
+# endif
+#elif CPU(ARM_TRADITIONAL) && CPU(ARM_THUMB2) /* Sanity Check */
+# error "Cannot use both of WTF_CPU_ARM_TRADITIONAL and WTF_CPU_ARM_THUMB2 platforms"
+#endif // !defined(WTF_CPU_ARM_TRADITIONAL) && !defined(WTF_CPU_ARM_THUMB2)
+
+#endif /* ARM */
+
+
+
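To make the new vocabulary concrete, two illustrative uses of the macros defined above (examples, not lines from the patch):

    #if CPU(ARM) && WTF_ARM_ARCH_AT_LEAST(6)
    /* ARMv6 and later tolerate unaligned word loads; older cores need a byte-wise path. */
    #endif

    #if CPU(BIG_ENDIAN)
    /* byte-swap before writing little-endian wire formats */
    #endif
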
/* Operating systems - low-level dependencies */
/* PLATFORM(DARWIN) */
#define WTF_PLATFORM_CAIRO 1
#endif
-/* CPU */
-
-/* PLATFORM(PPC) */
-#if defined(__ppc__) \
- || defined(__PPC__) \
- || defined(__powerpc__) \
- || defined(__powerpc) \
- || defined(__POWERPC__) \
- || defined(_M_PPC) \
- || defined(__PPC)
-#define WTF_PLATFORM_PPC 1
-#define WTF_PLATFORM_BIG_ENDIAN 1
-#endif
-
-/* PLATFORM(SPARC32) */
-#if defined(__sparc) && !defined(__arch64__) || defined(__sparcv8)
-#define WTF_PLATFORM_SPARC32 1
-#define WTF_PLATFORM_BIG_ENDIAN 1
-#endif
-
-#if PLATFORM(SPARC32) || PLATFORM(SPARC64)
-#define WTF_PLATFORM_SPARC
-#endif
-
-/* PLATFORM(PPC64) */
-#if defined(__ppc64__) \
- || defined(__PPC64__)
-#define WTF_PLATFORM_PPC64 1
-#define WTF_PLATFORM_BIG_ENDIAN 1
-#endif
-
-/* PLATFORM(ARM) */
-#define PLATFORM_ARM_ARCH(N) (PLATFORM(ARM) && ARM_ARCH_VERSION >= N)
-
-#if defined(arm) \
- || defined(__arm__)
-#define WTF_PLATFORM_ARM 1
-
-#if defined(__ARMEB__)
-#define WTF_PLATFORM_BIG_ENDIAN 1
-
-#elif !defined(__ARM_EABI__) \
- && !defined(__EABI__) \
- && !defined(__VFP_FP__) \
- && !defined(ANDROID)
-#define WTF_PLATFORM_MIDDLE_ENDIAN 1
-
-#endif
-
-/* Set ARM_ARCH_VERSION */
-#if defined(__ARM_ARCH_4__) \
- || defined(__ARM_ARCH_4T__) \
- || defined(__MARM_ARMV4__) \
- || defined(_ARMV4I_)
-#define ARM_ARCH_VERSION 4
-
-#elif defined(__ARM_ARCH_5__) \
- || defined(__ARM_ARCH_5T__) \
- || defined(__ARM_ARCH_5E__) \
- || defined(__ARM_ARCH_5TE__) \
- || defined(__ARM_ARCH_5TEJ__) \
- || defined(__MARM_ARMV5__)
-#define ARM_ARCH_VERSION 5
-
-#elif defined(__ARM_ARCH_6__) \
- || defined(__ARM_ARCH_6J__) \
- || defined(__ARM_ARCH_6K__) \
- || defined(__ARM_ARCH_6Z__) \
- || defined(__ARM_ARCH_6ZK__) \
- || defined(__ARM_ARCH_6T2__) \
- || defined(__ARMV6__)
-#define ARM_ARCH_VERSION 6
-
-#elif defined(__ARM_ARCH_7A__) \
- || defined(__ARM_ARCH_7R__)
-#define ARM_ARCH_VERSION 7
-
-/* RVCT sets _TARGET_ARCH_ARM */
-#elif defined(__TARGET_ARCH_ARM)
-#define ARM_ARCH_VERSION __TARGET_ARCH_ARM
-
-#else
-#define ARM_ARCH_VERSION 0
-
-#endif
-
-/* Set THUMB_ARM_VERSION */
-#if defined(__ARM_ARCH_4T__)
-#define THUMB_ARCH_VERSION 1
-
-#elif defined(__ARM_ARCH_5T__) \
- || defined(__ARM_ARCH_5TE__) \
- || defined(__ARM_ARCH_5TEJ__)
-#define THUMB_ARCH_VERSION 2
-
-#elif defined(__ARM_ARCH_6J__) \
- || defined(__ARM_ARCH_6K__) \
- || defined(__ARM_ARCH_6Z__) \
- || defined(__ARM_ARCH_6ZK__) \
- || defined(__ARM_ARCH_6M__)
-#define THUMB_ARCH_VERSION 3
-
-#elif defined(__ARM_ARCH_6T2__) \
- || defined(__ARM_ARCH_7__) \
- || defined(__ARM_ARCH_7A__) \
- || defined(__ARM_ARCH_7R__) \
- || defined(__ARM_ARCH_7M__)
-#define THUMB_ARCH_VERSION 4
-
-/* RVCT sets __TARGET_ARCH_THUMB */
-#elif defined(__TARGET_ARCH_THUMB)
-#define THUMB_ARCH_VERSION __TARGET_ARCH_THUMB
-
-#else
-#define THUMB_ARCH_VERSION 0
-#endif
-
-/* On ARMv5 and below the natural alignment is required. */
-#if !defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_ARCH_VERSION <= 5
-#define ARM_REQUIRE_NATURAL_ALIGNMENT 1
-#endif
-
-/* Defines two pseudo-platforms for ARM and Thumb-2 instruction set. */
-#if !defined(WTF_PLATFORM_ARM_TRADITIONAL) && !defined(WTF_PLATFORM_ARM_THUMB2)
-# if defined(thumb2) || defined(__thumb2__) \
- || ((defined(__thumb) || defined(__thumb__)) && THUMB_ARCH_VERSION == 4)
-# define WTF_PLATFORM_ARM_TRADITIONAL 0
-# define WTF_PLATFORM_ARM_THUMB2 1
-# elif PLATFORM_ARM_ARCH(4)
-# define WTF_PLATFORM_ARM_TRADITIONAL 1
-# define WTF_PLATFORM_ARM_THUMB2 0
-# else
-# error "Not supported ARM architecture"
-# endif
-#elif PLATFORM(ARM_TRADITIONAL) && PLATFORM(ARM_THUMB2) /* Sanity Check */
-# error "Cannot use both of WTF_PLATFORM_ARM_TRADITIONAL and WTF_PLATFORM_ARM_THUMB2 platforms"
-#endif // !defined(ARM_TRADITIONAL) && !defined(ARM_THUMB2)
-#endif /* ARM */
-
-/* PLATFORM(X86) */
-#if defined(__i386__) \
- || defined(i386) \
- || defined(_M_IX86) \
- || defined(_X86_) \
- || defined(__THW_INTEL)
-#define WTF_PLATFORM_X86 1
-#endif
-
-/* PLATFORM(X86_64) */
-#if defined(__x86_64__) \
- || defined(_M_X64)
-#define WTF_PLATFORM_X86_64 1
-#endif
-
-/* PLATFORM(IA64) */
-#if defined(__ia64__)
-#define WTF_PLATFORM_IA64 1
-#endif
-
-/* PLATFORM(ALPHA) */
-#if defined(__alpha__)
-#define WTF_PLATFORM_ALPHA 1
-#endif
-
-/* PLATFORM(SH4) */
-#if defined(__SH4__)
-#define WTF_PLATFORM_SH4 1
-#endif
-
-/* PLATFORM(SPARC64) */
-#if defined(__sparc__) && defined(__arch64__) || defined (__sparcv9)
-#define WTF_PLATFORM_SPARC64 1
-#define WTF_PLATFORM_BIG_ENDIAN 1
-#endif
/* PLATFORM(WINCE) && PLATFORM(QT)
We cannot determine the endianness at compile time. For
#define WTF_PLATFORM_CF 1
#define WTF_USE_PTHREADS 1
#define HAVE_PTHREAD_RWLOCK 1
-#if !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_TIGER) && defined(__x86_64__)
+#if !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_TIGER) && CPU(X86_64)
#define WTF_USE_PLUGIN_HOST_PROCESS 1
#endif
#if !defined(ENABLE_MAC_JAVA_BRIDGE)
#endif
#if !defined(WTF_USE_JSVALUE64) && !defined(WTF_USE_JSVALUE32) && !defined(WTF_USE_JSVALUE32_64)
-#if (PLATFORM(X86_64) && (PLATFORM(UNIX) || PLATFORM(WIN_OS))) || PLATFORM(IA64) || PLATFORM(ALPHA)
+#if (CPU(X86_64) && (PLATFORM(UNIX) || PLATFORM(WIN_OS))) || CPU(IA64) || CPU(ALPHA)
#define WTF_USE_JSVALUE64 1
-#elif PLATFORM(ARM) || PLATFORM(PPC64)
+#elif CPU(ARM) || CPU(PPC64)
#define WTF_USE_JSVALUE32 1
#elif PLATFORM(WIN_OS) && COMPILER(MINGW)
/* Using JSVALUE32_64 causes padding/alignment issues for JITStubArg
#if !defined(ENABLE_JIT)
/* The JIT is tested & working on x86_64 Mac */
-#if PLATFORM(X86_64) && PLATFORM(MAC)
+#if CPU(X86_64) && PLATFORM(MAC)
#define ENABLE_JIT 1
/* The JIT is tested & working on x86 Mac */
-#elif PLATFORM(X86) && PLATFORM(MAC)
+#elif CPU(X86) && PLATFORM(MAC)
#define ENABLE_JIT 1
#define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
-#elif PLATFORM(ARM_THUMB2) && PLATFORM(IPHONE)
+#elif CPU(ARM_THUMB2) && PLATFORM(IPHONE)
#define ENABLE_JIT 1
/* The JIT is tested & working on x86 Windows */
-#elif PLATFORM(X86) && PLATFORM(WIN)
+#elif CPU(X86) && PLATFORM(WIN)
#define ENABLE_JIT 1
#endif
#if PLATFORM(QT)
-#if PLATFORM(X86_64) && PLATFORM(DARWIN)
+#if CPU(X86_64) && PLATFORM(DARWIN)
#define ENABLE_JIT 1
-#elif PLATFORM(X86) && PLATFORM(DARWIN)
+#elif CPU(X86) && PLATFORM(DARWIN)
#define ENABLE_JIT 1
#define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
-#elif PLATFORM(X86) && PLATFORM(WIN_OS) && COMPILER(MINGW) && GCC_VERSION >= 40100
+#elif CPU(X86) && PLATFORM(WIN_OS) && COMPILER(MINGW) && GCC_VERSION >= 40100
#define ENABLE_JIT 1
#define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
-#elif PLATFORM(X86) && PLATFORM(WIN_OS) && COMPILER(MSVC)
+#elif CPU(X86) && PLATFORM(WIN_OS) && COMPILER(MSVC)
#define ENABLE_JIT 1
#define WTF_USE_JIT_STUB_ARGUMENT_REGISTER 1
-#elif PLATFORM(X86) && PLATFORM(LINUX) && GCC_VERSION >= 40100
+#elif CPU(X86) && PLATFORM(LINUX) && GCC_VERSION >= 40100
#define ENABLE_JIT 1
#define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
-#elif PLATFORM(ARM_TRADITIONAL) && PLATFORM(LINUX)
+#elif CPU(ARM_TRADITIONAL) && PLATFORM(LINUX)
#define ENABLE_JIT 1
#endif
#endif /* PLATFORM(QT) */
#endif
#endif
-#if PLATFORM(X86) && COMPILER(MSVC)
+#if CPU(X86) && COMPILER(MSVC)
#define JSC_HOST_CALL __fastcall
-#elif PLATFORM(X86) && COMPILER(GCC)
+#elif CPU(X86) && COMPILER(GCC)
#define JSC_HOST_CALL __attribute__ ((fastcall))
#else
#define JSC_HOST_CALL
#if !defined(ENABLE_YARR_JIT)
/* YARR supports x86 & x86-64, and has been tested on Mac and Windows. */
-#if (PLATFORM(X86) && PLATFORM(MAC)) \
- || (PLATFORM(X86_64) && PLATFORM(MAC)) \
- || (PLATFORM(ARM_THUMB2) && PLATFORM(IPHONE)) \
- || (PLATFORM(X86) && PLATFORM(WIN))
+#if (CPU(X86) && PLATFORM(MAC)) \
+ || (CPU(X86_64) && PLATFORM(MAC)) \
+ || (CPU(ARM_THUMB2) && PLATFORM(IPHONE)) \
+ || (CPU(X86) && PLATFORM(WIN))
#define ENABLE_YARR 1
#define ENABLE_YARR_JIT 1
#endif
#if PLATFORM(QT)
-#if (PLATFORM(X86) && PLATFORM(WIN_OS) && COMPILER(MINGW) && GCC_VERSION >= 40100) \
- || (PLATFORM(X86) && PLATFORM(WIN_OS) && COMPILER(MSVC)) \
- || (PLATFORM(X86) && PLATFORM(LINUX) && GCC_VERSION >= 40100) \
- || (PLATFORM(ARM_TRADITIONAL) && PLATFORM(LINUX))
+#if (CPU(X86) && PLATFORM(WIN_OS) && COMPILER(MINGW) && GCC_VERSION >= 40100) \
+ || (CPU(X86) && PLATFORM(WIN_OS) && COMPILER(MSVC)) \
+ || (CPU(X86) && PLATFORM(LINUX) && GCC_VERSION >= 40100) \
+ || (CPU(ARM_TRADITIONAL) && PLATFORM(LINUX))
#define ENABLE_YARR 1
#define ENABLE_YARR_JIT 1
#endif
#ifndef TCMALLOC_INTERNAL_SPINLOCK_H__
#define TCMALLOC_INTERNAL_SPINLOCK_H__
-#if (PLATFORM(X86) || PLATFORM(PPC)) && (COMPILER(GCC) || COMPILER(MSVC))
+#if (CPU(X86) || CPU(PPC)) && (COMPILER(GCC) || COMPILER(MSVC))
#include <time.h> /* For nanosleep() */
inline void Lock() {
int r;
#if COMPILER(GCC)
-#if PLATFORM(X86)
+#if CPU(X86)
__asm__ __volatile__
("xchgl %0, %1"
: "=r"(r), "=m"(lockword_)
inline void Unlock() {
#if COMPILER(GCC)
-#if PLATFORM(X86)
+#if CPU(X86)
__asm__ __volatile__
("movl $0, %0"
: "=m"(lockword_)
("isync\n\t"
"eieio\n\t"
"stw %1, %0"
-#if PLATFORM(DARWIN) || PLATFORM(PPC)
+#if PLATFORM(DARWIN) || CPU(PPC)
: "=o" (lockword_)
#else
: "=m" (lockword_)
while (true) {
int r;
#if COMPILER(GCC)
-#if PLATFORM(X86)
+#if CPU(X86)
__asm__ __volatile__
("xchgl %0, %1"
: "=r"(r), "=m"(*lockword)
inline int atomicIncrement(int volatile* addend) { return android_atomic_inc(addend); }
inline int atomicDecrement(int volatile* addend) { return android_atomic_dec(addend); }
-#elif COMPILER(GCC) && !PLATFORM(SPARC64) // sizeof(_Atomic_word) != sizeof(int) on sparc64 gcc
+#elif COMPILER(GCC) && !CPU(SPARC64) // sizeof(_Atomic_word) != sizeof(int) on sparc64 gcc
#define WTF_USE_LOCKFREE_THREADSAFESHARED 1
inline int atomicIncrement(int volatile* addend) { return __gnu_cxx::__exchange_and_add(addend, 1) + 1; }
#pragma warning(disable: 4554)
#endif
-#if PLATFORM(BIG_ENDIAN)
+#if CPU(BIG_ENDIAN)
#define IEEE_MC68k
-#elif PLATFORM(MIDDLE_ENDIAN)
+#elif CPU(MIDDLE_ENDIAN)
#define IEEE_ARM
#else
#define IEEE_8087
#define Pack_32
#endif
-#if PLATFORM(PPC64) || PLATFORM(X86_64)
+#if CPU(PPC64) || CPU(X86_64)
+// FIXME: should we enable this on all 64-bit CPUs?
// 64-bit emulation provided by the compiler is likely to be slower than dtoa's own code on 32-bit hardware.
#define USE_LONG_LONG
#endif
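What the three IEEE_* selections mean for dtoa, illustrated with the in-memory bytes of the double 1.0 (0x3FF0000000000000); the middle-endian case is the legacy ARM FPA layout, with 32-bit words in big-endian order but bytes within each word little-endian. A small probe, as a sketch:

    #include <string.h>

    // Classify the host's double layout: IEEE_8087 -> 00 .. f0 3f,
    // IEEE_MC68k -> 3f f0 00 .., IEEE_ARM -> 00 00 f0 3f 00 00 00 00.
    static const char* doubleLayout()
    {
        double one = 1.0;
        unsigned char b[sizeof(double)];
        memcpy(b, &one, sizeof b);
        if (b[7] == 0x3f) return "IEEE_8087 (little-endian)";
        if (b[0] == 0x3f) return "IEEE_MC68k (big-endian)";
        if (b[3] == 0x3f) return "IEEE_ARM (middle-endian)";
        return "unknown";
    }
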
class RegexGenerator : private MacroAssembler {
friend void jitCompileRegex(JSGlobalData* globalData, RegexCodeBlock& jitObject, const UString& pattern, unsigned& numSubpatterns, const char*& error, bool ignoreCase, bool multiline);
-#if PLATFORM(ARM)
+#if CPU(ARM)
static const RegisterID input = ARMRegisters::r0;
static const RegisterID index = ARMRegisters::r1;
static const RegisterID length = ARMRegisters::r2;
static const RegisterID regT1 = ARMRegisters::r6;
static const RegisterID returnRegister = ARMRegisters::r0;
-#elif PLATFORM(X86)
+#elif CPU(X86)
static const RegisterID input = X86Registers::eax;
static const RegisterID index = X86Registers::edx;
static const RegisterID length = X86Registers::ecx;
static const RegisterID regT1 = X86Registers::esi;
static const RegisterID returnRegister = X86Registers::eax;
-#elif PLATFORM(X86_64)
+#elif CPU(X86_64)
static const RegisterID input = X86Registers::edi;
static const RegisterID index = X86Registers::esi;
static const RegisterID length = X86Registers::edx;
void generateEnter()
{
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
push(X86Registers::ebp);
move(stackPointerRegister, X86Registers::ebp);
push(X86Registers::ebx);
-#elif PLATFORM(X86)
+#elif CPU(X86)
push(X86Registers::ebp);
move(stackPointerRegister, X86Registers::ebp);
// TODO: do we need spill registers to fill the output pointer if there are no sub captures?
#else
loadPtr(Address(X86Registers::ebp, 2 * sizeof(void*)), output);
#endif
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
push(ARMRegisters::r4);
push(ARMRegisters::r5);
push(ARMRegisters::r6);
void generateReturn()
{
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
pop(X86Registers::ebx);
pop(X86Registers::ebp);
-#elif PLATFORM(X86)
+#elif CPU(X86)
pop(X86Registers::esi);
pop(X86Registers::edi);
pop(X86Registers::ebx);
pop(X86Registers::ebp);
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
pop(ARMRegisters::r6);
pop(ARMRegisters::r5);
pop(ARMRegisters::r4);
#include <pcre.h>
struct JSRegExp; // temporary, remove when fallback is removed.
-#if PLATFORM(X86) && !COMPILER(MSVC)
+#if CPU(X86) && !COMPILER(MSVC)
#define YARR_CALL __attribute__ ((regparm (3)))
#else
#define YARR_CALL
+2010-01-04 Maciej Stachowiak <mjs@apple.com>
+
+ Reviewed by Adam Barth.
+
+ Reorganize, document and rename CPU() platform macros.
+ https://bugs.webkit.org/show_bug.cgi?id=33145
+
+ * page/NavigatorBase.cpp:
+ * platform/text/AtomicString.cpp:
+ (WebCore::equal):
+ * platform/text/StringHash.h:
+ (WebCore::StringHash::equal):
+
2009-12-22 Philippe Normand <pnormand@igalia.com>
Reviewed by Eric Seidel.
#endif
#ifndef WEBCORE_NAVIGATOR_PLATFORM
-#if PLATFORM(MAC) && (PLATFORM(PPC) || PLATFORM(PPC64))
+#if PLATFORM(MAC) && (CPU(PPC) || CPU(PPC64))
#define WEBCORE_NAVIGATOR_PLATFORM "MacPPC"
-#elif PLATFORM(MAC) && (PLATFORM(X86) || PLATFORM(X86_64))
+#elif PLATFORM(MAC) && (CPU(X86) || CPU(X86_64))
#define WEBCORE_NAVIGATOR_PLATFORM "MacIntel"
#elif PLATFORM(WIN_OS)
#define WEBCORE_NAVIGATOR_PLATFORM "Win32"
if (string->length() != length)
return false;
-#if PLATFORM(ARM) || PLATFORM(SH4)
+ // FIXME: perhaps we should have a more abstract macro that indicates when
+ // going 4 bytes at a time is unsafe
+#if CPU(ARM) || CPU(SH4)
const UChar* stringCharacters = string->characters();
for (unsigned i = 0; i != length; ++i) {
if (*stringCharacters++ != *characters++)
if (aLength != bLength)
return false;
-#if PLATFORM(ARM) || PLATFORM(SH4)
+ // FIXME: perhaps we should have a more abstract macro that indicates when
+ // going 4 bytes at a time is unsafe
+#if CPU(ARM) || CPU(SH4)
const UChar* aChars = a->characters();
const UChar* bChars = b->characters();
for (unsigned i = 0; i != aLength; ++i) {
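A sketch of the fast path the CPU(ARM) || CPU(SH4) guard disables (assuming UChar is 2 bytes and length counts characters): elsewhere, two characters are compared per 32-bit load, which is only safe where unaligned word loads are cheap and correct.

    #include <stdint.h>

    // Illustrative fast path: compare UChar buffers four bytes at a time.
    static bool equalFourBytesAtATime(const uint16_t* a, const uint16_t* b, unsigned length)
    {
        const uint32_t* aWords = reinterpret_cast<const uint32_t*>(a);
        const uint32_t* bWords = reinterpret_cast<const uint32_t*>(b);
        for (unsigned i = 0; i != length / 2; ++i) {
            if (*aWords++ != *bWords++)
                return false;
        }
        if ((length & 1) && a[length - 1] != b[length - 1])   // odd trailing character
            return false;
        return true;
    }
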
+2010-01-04 Maciej Stachowiak <mjs@apple.com>
+
+ Reviewed by Adam Barth.
+
+ Reorganize, document and rename CPU() platform macros.
+ https://bugs.webkit.org/show_bug.cgi?id=33145
+
+ * webkit/webkitwebsettings.cpp:
+ (webkit_get_user_agent):
+
2009-12-20 Gustavo Noronha Silva <gustavo.noronha@collabora.co.uk>
Reviewed by Xan Lopez.
// FIXME: platform/version detection can be shared.
#if PLATFORM(DARWIN)
-#if PLATFORM(X86)
+#if CPU(X86)
osVersion = g_strdup("Intel Mac OS X");
#else
osVersion = g_strdup("PPC Mac OS X");