[Baseline] Merge loading functionalities
author: utatane.tea@gmail.com <utatane.tea@gmail.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 29 May 2018 20:04:14 +0000 (20:04 +0000)
committer: utatane.tea@gmail.com <utatane.tea@gmail.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 29 May 2018 20:04:14 +0000 (20:04 +0000)
https://bugs.webkit.org/show_bug.cgi?id=185907

Reviewed by Saam Barati.

This patch unifies the emitXXXLoad functions between the 32-bit and 64-bit JITs.

* jit/JITInlines.h:
(JSC::JIT::emitDoubleGetByVal):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emitDoubleLoad):
(JSC::JIT::emitContiguousLoad):
(JSC::JIT::emitArrayStorageLoad):
(JSC::JIT::emitIntTypedArrayGetByVal):
(JSC::JIT::emitFloatTypedArrayGetByVal):
Define the register usage first, then share the same code between 32-bit and 64-bit.
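
For illustration, the unified shape in condensed form (this mirrors the emitDoubleGetByVal change in the diff below): only the JSValueRegs layout differs per value representation; boxDouble hides the per-width boxing.

    inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
    {
    #if USE(JSVALUE64)
        JSValueRegs result = JSValueRegs(regT0);        // 64-bit: boxed value in a single GPR
    #else
        JSValueRegs result = JSValueRegs(regT1, regT0); // 32-bit: tag in regT1, payload in regT0
    #endif
        JumpList slowCases = emitDoubleLoad(instruction, badType);
        boxDouble(fpRegT0, result);                     // shared boxing step for both representations
        return slowCases;
    }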

* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emitSlow_op_put_by_val):
Now that the C stack is always enabled on JIT platforms, the number of temporary registers
on x86 has increased from 5 to 6, so we can remove this special handling (see the sketch below).

(JSC::JIT::emitContiguousLoad): Deleted.
(JSC::JIT::emitDoubleLoad): Deleted.
(JSC::JIT::emitArrayStorageLoad): Deleted.
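
For reference, the surviving 32-bit slow path (as it appears in the diff below): with six temporaries available, x86 can now use the ordinary callOperation form.

    // Register selection chosen to reduce register swapping on ARM.
    emitLoad(base, regT2, regT1);
    emitLoad(property, regT3, regT0);
    emitLoad(value, regT5, regT4);
    Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize,
        JSValueRegs(regT2, regT1), JSValueRegs(regT3, regT0), JSValueRegs(regT5, regT4), byValInfo);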

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@232271 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/jit/JITInlines.h
Source/JavaScriptCore/jit/JITPropertyAccess.cpp
Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp

diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index fdc5955..da2c960 100644
@@ -1,3 +1,31 @@
+2018-05-29  Yusuke Suzuki  <utatane.tea@gmail.com>
+
+        [Baseline] Merge loading functionalities
+        https://bugs.webkit.org/show_bug.cgi?id=185907
+
+        Reviewed by Saam Barati.
+
+        This patch unifies the emitXXXLoad functions between the 32-bit and 64-bit JITs.
+
+        * jit/JITInlines.h:
+        (JSC::JIT::emitDoubleGetByVal):
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::emitDoubleLoad):
+        (JSC::JIT::emitContiguousLoad):
+        (JSC::JIT::emitArrayStorageLoad):
+        (JSC::JIT::emitIntTypedArrayGetByVal):
+        (JSC::JIT::emitFloatTypedArrayGetByVal):
+        Define the register usage first, then share the same code between 32-bit and 64-bit.
+
+        * jit/JITPropertyAccess32_64.cpp:
+        (JSC::JIT::emitSlow_op_put_by_val):
+        Now that the C stack is always enabled on JIT platforms, the number of temporary
+        registers on x86 has increased from 5 to 6, so we can remove this special handling.
+
+        (JSC::JIT::emitContiguousLoad): Deleted.
+        (JSC::JIT::emitDoubleLoad): Deleted.
+        (JSC::JIT::emitArrayStorageLoad): Deleted.
+
 2018-05-29  Saam Barati  <sbarati@apple.com>
 
         JSC should put bmalloc's scavenger into mini mode
diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h
index a800a74..30b0ddd 100644
 
 namespace JSC {
 
-#if USE(JSVALUE64)
 inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
 {
-    JumpList slowCases = emitDoubleLoad(instruction, badType);
-    moveDoubleTo64(fpRegT0, regT0);
-    sub64(tagTypeNumberRegister, regT0);
-    return slowCases;
-}
+#if USE(JSVALUE64)
+    JSValueRegs result = JSValueRegs(regT0);
 #else
-inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
-{
+    JSValueRegs result = JSValueRegs(regT1, regT0);
+#endif
     JumpList slowCases = emitDoubleLoad(instruction, badType);
-    moveDoubleToInts(fpRegT0, regT0, regT1);
+    boxDouble(fpRegT0, result);
     return slowCases;
 }
-#endif // USE(JSVALUE64)
 
 ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)
 {
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 656e5fd..5df52dd 100644
@@ -125,48 +125,6 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
 }
 
-JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-    
-    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
-    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
-    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
-    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
-    
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
-{
-    JumpList slowCases;
-    
-    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
-    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
-    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
-    slowCases.append(branchTest64(Zero, regT0));
-    
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-
-    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
-    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
-
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
-    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
-
-    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
-    slowCases.append(branchTest64(Zero, regT0));
-    
-    return slowCases;
-}
-
 JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
 {
     // base: regT0
@@ -1119,9 +1077,6 @@ void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
     emitWriteBarrier(arguments, value, ShouldFilterValue);
 }
 
-#endif // USE(JSVALUE64)
-
-#if USE(JSVALUE64)
 void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
 {
     Jump valueNotCell;
@@ -1392,6 +1347,87 @@ void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddress
     MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
 }
 
+JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
+{
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID indexing = regT2;
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID indexing = regT1;
+    RegisterID scratch = regT3;
+#endif
+
+    JumpList slowCases;
+
+    badType = patchableBranch32(NotEqual, indexing, TrustedImm32(DoubleShape));
+    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength())));
+    loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
+    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+
+    return slowCases;
+}
+
+JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+{
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID indexing = regT2;
+    JSValueRegs result = JSValueRegs(regT0);
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID indexing = regT1;
+    JSValueRegs result = JSValueRegs(regT1, regT0);
+    RegisterID scratch = regT3;
+#endif
+
+    JumpList slowCases;
+
+    badType = patchableBranch32(NotEqual, indexing, TrustedImm32(expectedShape));
+    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength())));
+    loadValue(BaseIndex(scratch, property, TimesEight), result);
+    slowCases.append(branchIfEmpty(result));
+
+    return slowCases;
+}
+
+JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
+{
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID indexing = regT2;
+    JSValueRegs result = JSValueRegs(regT0);
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID indexing = regT1;
+    JSValueRegs result = JSValueRegs(regT1, regT0);
+    RegisterID scratch = regT3;
+#endif
+
+    JumpList slowCases;
+
+    add32(TrustedImm32(-ArrayStorageShape), indexing, scratch);
+    badType = patchableBranch32(Above, scratch, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
+
+    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, ArrayStorage::vectorLengthOffset())));
+
+    loadValue(BaseIndex(scratch, property, TimesEight, ArrayStorage::vectorOffset()), result);
+    slowCases.append(branchIfEmpty(result));
+
+    return slowCases;
+}
 
 JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType)
 {
@@ -1484,17 +1520,17 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
 #if USE(JSVALUE64)
     RegisterID base = regT0;
     RegisterID property = regT1;
-    RegisterID resultPayload = regT0;
+    JSValueRegs result = JSValueRegs(regT0);
     RegisterID scratch = regT3;
     RegisterID scratch2 = regT4;
 #else
     RegisterID base = regT0;
     RegisterID property = regT2;
-    RegisterID resultPayload = regT0;
-    RegisterID resultTag = regT1;
+    JSValueRegs result = JSValueRegs(regT1, regT0);
     RegisterID scratch = regT3;
     RegisterID scratch2 = regT4;
 #endif
+    RegisterID resultPayload = result.payloadGPR();
     
     JumpList slowCases;
     
@@ -1530,22 +1566,12 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
         
         convertInt32ToDouble(resultPayload, fpRegT0);
         addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
-#if USE(JSVALUE64)
-        moveDoubleTo64(fpRegT0, resultPayload);
-        sub64(tagTypeNumberRegister, resultPayload);
-#else
-        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
-#endif
-        
+        boxDouble(fpRegT0, result);
         done = jump();
         canBeInt.link(this);
     }
 
-#if USE(JSVALUE64)
-    or64(tagTypeNumberRegister, resultPayload);
-#else
-    move(TrustedImm32(JSValue::Int32Tag), resultTag);
-#endif
+    boxInt32(resultPayload, result);
     if (done.isSet())
         done.link(this);
     return slowCases;
@@ -1558,14 +1584,13 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
 #if USE(JSVALUE64)
     RegisterID base = regT0;
     RegisterID property = regT1;
-    RegisterID resultPayload = regT0;
+    JSValueRegs result = JSValueRegs(regT0);
     RegisterID scratch = regT3;
     RegisterID scratch2 = regT4;
 #else
     RegisterID base = regT0;
     RegisterID property = regT2;
-    RegisterID resultPayload = regT0;
-    RegisterID resultTag = regT1;
+    JSValueRegs result = JSValueRegs(regT1, regT0);
     RegisterID scratch = regT3;
     RegisterID scratch2 = regT4;
 #endif
@@ -1596,12 +1621,7 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
     loadDouble(TrustedImmPtr(&NaN), fpRegT0);
     notNaN.link(this);
     
-#if USE(JSVALUE64)
-    moveDoubleTo64(fpRegT0, resultPayload);
-    sub64(tagTypeNumberRegister, resultPayload);
-#else
-    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
-#endif
+    boxDouble(fpRegT0, result);
     return slowCases;    
 }
 
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index f973aef..693a546 100644
@@ -184,48 +184,6 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
 }
 
-JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
-{
-    JumpList slowCases;
-    
-    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
-    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
-    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
-    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
-    slowCases.append(branchIfEmpty(regT1));
-    
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-    
-    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
-    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
-    loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0);
-    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
-    
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-    
-    add32(TrustedImm32(-ArrayStorageShape), regT1, regT3);
-    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
-    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
-    load32(BaseIndex(regT3, regT2, TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
-    load32(BaseIndex(regT3, regT2, TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
-    slowCases.append(branchIfEmpty(regT1));
-    
-    return slowCases;
-}
-
 JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
 {
     int dst = currentInstruction[1].u.operand;
@@ -499,31 +457,12 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
     
     bool isDirect = Interpreter::getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
 
-#if CPU(X86)
-    // FIXME: We only have 5 temp registers, but need 6 to make this call, therefore we materialize
-    // our own call. When we finish moving JSC to the C call stack, we'll get another register so
-    // we can use the normal case.
-    unsigned pokeOffset = 0;
-    poke(GPRInfo::callFrameRegister, pokeOffset++);
-    emitLoad(base, regT0, regT1);
-    poke(regT1, pokeOffset++);
-    poke(regT0, pokeOffset++);
-    emitLoad(property, regT0, regT1);
-    poke(regT1, pokeOffset++);
-    poke(regT0, pokeOffset++);
-    emitLoad(value, regT0, regT1);
-    poke(regT1, pokeOffset++);
-    poke(regT0, pokeOffset++);
-    poke(TrustedImmPtr(byValInfo), pokeOffset++);
-    Call call = appendCallWithExceptionCheck(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize);
-#else
     // The register selection below is chosen to reduce register swapping on ARM.
     // Swapping shouldn't happen on other platforms.
     emitLoad(base, regT2, regT1);
     emitLoad(property, regT3, regT0);
     emitLoad(value, regT5, regT4);
     Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, JSValueRegs(regT2, regT1), JSValueRegs(regT3, regT0), JSValueRegs(regT5, regT4), byValInfo);
-#endif
 
     m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
     m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;