Reviewed by Geoffrey Garen. (Patch by Cameron Zwarich <zwarich@apple.com>.)
Fixed https://bugs.webkit.org/show_bug.cgi?id=22482
REGRESSION (r37991): Occasionally see "Scene rendered incorrectly"
message when running the V8 Raytrace benchmark
Rolled out r37991. It didn't properly save xmm0, which is caller-save,
before calling helper functions.
SunSpider and v8 benchmarks show little change -- possibly a .2%
SunSpider regression, possibly a .2% v8 benchmark speedup.
* assembler/X86Assembler.h:
(JSC::X86Assembler::):
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::dump):
* bytecode/Instruction.h:
(JSC::Instruction::):
* bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::emitUnaryOp):
* bytecompiler/BytecodeGenerator.h:
(JSC::BytecodeGenerator::emitToJSNumber):
(JSC::BytecodeGenerator::emitTypeOf):
(JSC::BytecodeGenerator::emitGetPropertyNames):
* interpreter/Interpreter.cpp:
(JSC::Interpreter::privateExecute):
* interpreter/Interpreter.h:
* jit/JIT.cpp:
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
* jit/JIT.h:
* parser/Nodes.cpp:
(JSC::UnaryOpNode::emitBytecode):
(JSC::BinaryOpNode::emitBytecode):
(JSC::EqualNode::emitBytecode):
* parser/ResultType.h:
(JSC::ResultType::isReusable):
(JSC::ResultType::mightBeNumber):
* runtime/JSNumberCell.h:
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@38917 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+2008-12-02 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Geoffrey Garen. (Patch by Cameron Zwarich <zwarich@apple.com>.)
+
+ Fixed https://bugs.webkit.org/show_bug.cgi?id=22482
+ REGRESSION (r37991): Occasionally see "Scene rendered incorrectly"
+ message when running the V8 Raytrace benchmark
+
+ Rolled out r37991. It didn't properly save xmm0, which is caller-save,
+ before calling helper functions.
+
+ SunSpider and v8 benchmarks show little change -- possibly a .2%
+ SunSpider regression, possibly a .2% v8 benchmark speedup.
+
+ * assembler/X86Assembler.h:
+ (JSC::X86Assembler::):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::dump):
+ * bytecode/Instruction.h:
+ (JSC::Instruction::):
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::emitUnaryOp):
+ * bytecompiler/BytecodeGenerator.h:
+ (JSC::BytecodeGenerator::emitToJSNumber):
+ (JSC::BytecodeGenerator::emitTypeOf):
+ (JSC::BytecodeGenerator::emitGetPropertyNames):
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::privateExecute):
+ * interpreter/Interpreter.h:
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileMainPass):
+ (JSC::JIT::privateCompileSlowCases):
+ * jit/JIT.h:
+ * parser/Nodes.cpp:
+ (JSC::UnaryOpNode::emitBytecode):
+ (JSC::BinaryOpNode::emitBytecode):
+ (JSC::EqualNode::emitBytecode):
+ * parser/ResultType.h:
+ (JSC::ResultType::isReusable):
+ (JSC::ResultType::mightBeNumber):
+ * runtime/JSNumberCell.h:
+
2008-12-01 Gavin Barraclough <barraclough@apple.com>
Reviewed by Geoffrey Garen.
OP2_CVTSI2SD_VsdEd = 0x2A,
OP2_CVTTSD2SI_GdWsd = 0x2C,
OP2_UCOMISD_VsdWsd = 0x2E,
- OP2_XORPD_VsdWsd = 0x57,
OP2_ADDSD_VsdWsd = 0x58,
OP2_MULSD_VsdWsd = 0x59,
OP2_SUBSD_VsdWsd = 0x5C,
GROUP2_OP_SAR = 7,
GROUP3_OP_TEST = 0,
- GROUP3_OP_NEG = 3,
GROUP3_OP_IDIV = 7,
GROUP5_OP_CALLN = 2,
modRm_opr(GROUP3_OP_IDIV, dst);
}
- void negl_r(RegisterID dst)
- {
- m_buffer->putByte(OP_GROUP3_Ev);
- modRm_opr(GROUP3_OP_NEG, dst);
- }
-
void cdq()
{
m_buffer->putByte(OP_CDQ);
modRm_rm((RegisterID)dst, base, offset);
}
-#if !PLATFORM(X86_64)
- void xorpd_mr(void* addr, XMMRegisterID dst)
- {
- m_buffer->putByte(PRE_SSE_66);
- m_buffer->putByte(OP_2BYTE_ESCAPE);
- m_buffer->putByte(OP2_XORPD_VsdWsd);
- modRm_rm((RegisterID)dst, addr);
- }
-#endif
-
void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
{
m_buffer->putByte(PRE_SSE_F2);
}
case op_negate: {
printUnaryOp(location, it, "negate");
- ++it;
break;
}
case op_add: {
#define Instruction_h
#include "Opcode.h"
-#include "ResultType.h"
#include <wtf/VectorTraits.h>
#define POLYMORPHIC_LIST_CACHE_SIZE 4
Structure* structure;
StructureChain* structureChain;
JSCell* jsCell;
- ResultType::Type resultType;
PolymorphicAccessStructureList* polymorphicStructures;
} u;
};
return dst;
}
-RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src, ResultType type)
+RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src)
{
emitOpcode(opcodeID);
instructions().append(dst->index());
instructions().append(src->index());
- if (opcodeID == op_negate)
- instructions().append(type.toInt());
return dst;
}
RegisterID* emitUnexpectedLoad(RegisterID* dst, bool);
RegisterID* emitUnexpectedLoad(RegisterID* dst, double);
- RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src, ResultType);
+ RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src);
RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes);
RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2);
RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src);
RegisterID* emitMove(RegisterID* dst, RegisterID* src);
- RegisterID* emitToJSNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_jsnumber, dst, src, ResultType::unknown()); }
+ RegisterID* emitToJSNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_jsnumber, dst, src); }
RegisterID* emitPreInc(RegisterID* srcDst);
RegisterID* emitPreDec(RegisterID* srcDst);
RegisterID* emitPostInc(RegisterID* dst, RegisterID* srcDst);
RegisterID* emitPostDec(RegisterID* dst, RegisterID* srcDst);
RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype);
- RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src, ResultType::unknown()); }
+ RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); }
RegisterID* emitIn(RegisterID* dst, RegisterID* property, RegisterID* base) { return emitBinaryOp(op_in, dst, property, base, OperandTypes()); }
RegisterID* emitResolve(RegisterID* dst, const Identifier& property);
PassRefPtr<Label> emitJumpSubroutine(RegisterID* retAddrDst, Label*);
void emitSubroutineReturn(RegisterID* retAddrSrc);
- RegisterID* emitGetPropertyNames(RegisterID* dst, RegisterID* base) { return emitUnaryOp(op_get_pnames, dst, base, ResultType::unknown()); }
+ RegisterID* emitGetPropertyNames(RegisterID* dst, RegisterID* base) { return emitUnaryOp(op_get_pnames, dst, base); }
RegisterID* emitNextPropertyName(RegisterID* dst, RegisterID* iter, Label* target);
RegisterID* emitCatch(RegisterID*, Label* start, Label* end);
callFrame[dst] = result;
}
- ++vPC;
NEXT_INSTRUCTION();
}
DEFINE_OPCODE(op_add) {
static JSObject* SFX_CALL cti_op_new_error(CTI_ARGS);
static void SFX_CALL cti_op_debug(CTI_ARGS);
- static JSValue* SFX_CALL cti_allocate_number(CTI_ARGS);
-
static JSValue* SFX_CALL cti_vm_throw(CTI_ARGS);
static void* SFX_CALL cti_vm_dontLazyLinkCall(CTI_ARGS);
static void* SFX_CALL cti_vm_lazyLinkCall(CTI_ARGS);
#endif
-extern "C" {
- static JSValue* FASTCALL allocateNumber(JSGlobalData* globalData) {
- JSValue* result = new (globalData) JSNumberCell(globalData);
- ASSERT(result);
- return result;
- }
-}
-
-ALWAYS_INLINE void JIT::emitAllocateNumber(JSGlobalData* globalData, unsigned bytecodeIndex)
-{
- __ movl_i32r(reinterpret_cast<intptr_t>(globalData), X86::ecx);
- emitNakedFastCall(bytecodeIndex, (void*)allocateNumber);
-}
-
ALWAYS_INLINE JmpSrc JIT::emitNakedCall(unsigned bytecodeIndex, X86::RegisterID r)
{
JmpSrc call = __ call(r);
return call;
}
-ALWAYS_INLINE JmpSrc JIT::emitNakedFastCall(unsigned bytecodeIndex, void* function)
-{
- JmpSrc call = __ call();
- m_calls.append(CallRecord(call, reinterpret_cast<CTIHelper_v>(function), bytecodeIndex));
- return call;
-}
-
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_j helper)
{
#if ENABLE(OPCODE_SAMPLING)
emitFastArithReTagImmediate(reg);
}
-ALWAYS_INLINE JmpSrc JIT::emitArithIntToImmWithJump(RegisterID reg)
-{
- __ addl_rr(reg, reg);
- JmpSrc jmp = __ jo();
- emitFastArithReTagImmediate(reg);
- return jmp;
-}
-
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
__ shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
break;
}
case op_negate: {
- int srcVReg = instruction[i + 2].u.operand;
- emitGetVirtualRegister(srcVReg, X86::eax, i);
-
- __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
- JmpSrc notImmediate = __ je();
-
- __ cmpl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
- JmpSrc zeroImmediate = __ je();
- emitFastArithImmToInt(X86::eax);
- __ negl_r(X86::eax); // This can't overflow as we only have a 31bit int at this point
- JmpSrc overflow = emitArithIntToImmWithJump(X86::eax);
+ emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
+ emitCTICall(i, Interpreter::cti_op_negate);
emitPutVirtualRegister(instruction[i + 1].u.operand);
- JmpSrc immediateNegateSuccess = __ jmp();
-
- if (!isSSE2Present()) {
- __ link(zeroImmediate, __ label());
- __ link(overflow, __ label());
- __ link(notImmediate, __ label());
- emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
- emitCTICall(i, Interpreter::cti_op_negate);
- emitPutVirtualRegister(instruction[i + 1].u.operand);
- } else {
- // Slow case immediates
- m_slowCases.append(SlowCaseEntry(zeroImmediate, i));
- m_slowCases.append(SlowCaseEntry(overflow, i));
- __ link(notImmediate, __ label());
- ResultType resultType(instruction[i + 3].u.resultType);
- if (!resultType.definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(X86::eax, i, srcVReg);
- Structure* numberStructure = m_globalData->numberStructure.get();
- __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
- m_slowCases.append(SlowCaseEntry(__ jne(), i));
- }
- __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
- // We need 3 copies of the sign bit mask so we can assure alignment and pad for the 128bit load
- static double doubleSignBit[] = { -0.0, -0.0, -0.0 };
- __ xorpd_mr((void*)((((uintptr_t)doubleSignBit)+15)&~15), X86::xmm0);
- JmpSrc wasCell;
- if (!resultType.isReusableNumber())
- emitAllocateNumber(m_globalData, i);
-
- putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, instruction[i + 1].u.operand, &wasCell,
- X86::xmm1, X86::ecx, X86::edx);
- __ link(wasCell, __ label());
- }
- __ link(immediateNegateSuccess, __ label());
- i += 4;
+ i += 3;
break;
}
case op_resolve_skip: {
i += 5;
break;
}
- case op_negate: {
- __ link(iter->from, __ label());
- __ link((++iter)->from, __ label());
- ResultType resultType(instruction[i + 3].u.resultType);
- if (!resultType.definitelyIsNumber()) {
- if (linkSlowCaseIfNotJSCell(++iter, instruction[i + 2].u.operand))
- ++iter;
- __ link(iter->from, __ label());
- }
-
- emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
- emitCTICall(i, Interpreter::cti_op_negate);
- emitPutVirtualRegister(instruction[i + 1].u.operand);
- i += 4;
- break;
- }
case op_rshift: {
__ link(iter->from, __ label());
__ link((++iter)->from, __ label());
#define CTI_RETURN_ADDRESS_SLOT (ARGS[-1])
-#if COMPILER(MSVC)
-#define FASTCALL __fastcall
-#elif COMPILER(GCC)
-#define FASTCALL __attribute__ ((fastcall))
-#else
-#error Need to support fastcall calling convention in this compiler
-#endif
-
namespace JSC {
class CodeBlock;
void emitFastArithImmToInt(RegisterID);
void emitFastArithIntToImmOrSlowCase(RegisterID, unsigned bytecodeIndex);
void emitFastArithIntToImmNoCheck(RegisterID);
- JmpSrc emitArithIntToImmWithJump(RegisterID reg);
void emitTagAsBoolImmediate(RegisterID reg);
- void emitAllocateNumber(JSGlobalData*, unsigned);
-
JmpSrc emitNakedCall(unsigned bytecodeIndex, RegisterID);
JmpSrc emitNakedCall(unsigned bytecodeIndex, void* function);
- JmpSrc emitNakedFastCall(unsigned bytecodeIndex, void*);
JmpSrc emitCTICall(unsigned bytecodeIndex, CTIHelper_j);
JmpSrc emitCTICall(unsigned bytecodeIndex, CTIHelper_o);
JmpSrc emitCTICall(unsigned bytecodeIndex, CTIHelper_p);
RegisterID* UnaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
{
RegisterID* src = generator.emitNode(m_expr.get());
- return generator.emitUnaryOp(opcodeID(), generator.finalDestination(dst), src, m_expr->resultDescriptor());
+ return generator.emitUnaryOp(opcodeID(), generator.finalDestination(dst), src);
}
// ------------------------------ Binary Operation Nodes -----------------------------------
if (opcodeID == op_neq) {
if (m_expr1->isNull() || m_expr2->isNull()) {
RefPtr<RegisterID> src = generator.emitNode(dst, m_expr1->isNull() ? m_expr2.get() : m_expr1.get());
- return generator.emitUnaryOp(op_neq_null, generator.finalDestination(dst, src.get()), src.get(), ResultType::unknown());
+ return generator.emitUnaryOp(op_neq_null, generator.finalDestination(dst, src.get()), src.get());
}
}
{
if (m_expr1->isNull() || m_expr2->isNull()) {
RefPtr<RegisterID> src = generator.emitNode(dst, m_expr1->isNull() ? m_expr2.get() : m_expr1.get());
- return generator.emitUnaryOp(op_eq_null, generator.finalDestination(dst, src.get()), src.get(), ResultType::unknown());
+ return generator.emitUnaryOp(op_eq_null, generator.finalDestination(dst, src.get()), src.get());
}
RefPtr<RegisterID> src1 = generator.emitNodeForLeftHandSide(m_expr1.get(), m_rightHasAssignments, m_expr2->isPure(generator));
{
return (m_type & TypeReusable);
}
-
- bool isReusableNumber()
- {
- return isReusable() && definitelyIsNumber();
- }
bool definitelyIsNumber()
{
{
return !isNotNumber();
}
-
- int toInt()
- {
- return static_cast<int>(m_type);
- }
static ResultType nullType()
{
static PassRefPtr<Structure> createStructure(JSValue* proto) { return Structure::create(proto, TypeInfo(NumberType, NeedsThisConversion)); }
- JSNumberCell(JSGlobalData* globalData)
- : JSCell(globalData->numberStructure.get())
- {
- }
-
private:
JSNumberCell(JSGlobalData* globalData, double value)
: JSCell(globalData->numberStructure.get())