Rationalize the handling of PutById transitions a bit
author fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Thu, 7 Apr 2016 17:59:24 +0000 (17:59 +0000)
committer fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Thu, 7 Apr 2016 17:59:24 +0000 (17:59 +0000)
https://bugs.webkit.org/show_bug.cgi?id=156330

Reviewed by Mark Lam.

* bytecode/PolymorphicAccess.cpp:
(JSC::AccessCase::generate): Get rid of the specialized slow calls. We can just use the failAndIgnore jump target. We just need to make sure that we don't make any observable changes until we're done with all of the fast-path checks.
* bytecode/StructureStubInfo.cpp:
(JSC::StructureStubInfo::addAccessCase): MadeNoChanges indicates that we should keep trying to repatch. Currently, PutById transitions might trigger the case where addAccessCase() sees null if the transition involves an indexing header. Repatching in that case is probably not good, so return GaveUp instead. But we should just fix this the right way eventually.

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@199162 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
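
The gist of the PolymorphicAccess.cpp change (first code hunk below) is an ordering discipline: every store that a bailout could leave visible is deferred until all of the fast-path checks have passed. A minimal C++ sketch of that discipline follows; the names (Object, tryPutWithTransition, the flat uint64_t storage) are illustrative only, not JSC API, since the real code is emitted as machine code by AccessCase::generate().

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Hedged sketch, not JSC API: a transition that grows out-of-line storage.
    struct Object {
        uint32_t structureID;
        uint64_t* butterfly; // out-of-line property storage
    };

    bool tryPutWithTransition(Object& obj, uint64_t value, size_t offsetInButterfly,
                              size_t oldCapacity, uint32_t newStructureID,
                              uint64_t* freshStorage)
    {
        // Fast-path check: returning false models a branch to failAndIgnore.
        // Nothing observable has happened to obj at this point.
        if (!freshStorage)
            return false;

        // Writes into freshStorage stay invisible until obj.butterfly is swung,
        // so a bailout after this point would still leave obj untouched.
        if (oldCapacity)
            std::memcpy(freshStorage, obj.butterfly, oldCapacity * sizeof(uint64_t));
        freshStorage[offsetInButterfly] = value;

        // Publish last: first the new butterfly, then the structure ID. Only
        // these two stores make the transition observable.
        obj.butterfly = freshStorage;
        obj.structureID = newStructureID;
        return true;
    }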

diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index 2ffb4a1..3844310 100644
@@ -1,3 +1,15 @@
+2016-04-07  Filip Pizlo  <fpizlo@apple.com>
+
+        Rationalize the handling of PutById transitions a bit
+        https://bugs.webkit.org/show_bug.cgi?id=156330
+
+        Reviewed by Mark Lam.
+
+        * bytecode/PolymorphicAccess.cpp:
+        (JSC::AccessCase::generate): Get rid of the specialized slow calls. We can just use the failAndIgnore jump target. We just need to make sure that we don't make any observable changes until we're done with all of the fast-path checks.
+        * bytecode/StructureStubInfo.cpp:
+        (JSC::StructureStubInfo::addAccessCase): MadeNoChanges indicates that we should keep trying to repatch. Currently, PutById transitions might trigger the case where addAccessCase() sees null if the transition involves an indexing header. Repatching in that case is probably not good, so return GaveUp instead. But we should just fix this the right way eventually.
+
 2016-04-07  Per Arne Vollan  <peavo@outlook.com>
 
         [Win] Fix for JSC stress test failures.
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
index dea2e16..c51af8b 100644
@@ -1034,7 +1034,8 @@ void AccessCase::generate(AccessGenerationState& state)
         } else if (verbose)
             dataLog("Don't have type.\n");
         
-        CCallHelpers::JumpList slowPath;
+        bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
+        bool reallocating = allocating && structure()->outOfLineCapacity();
 
         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
         allocator.lock(baseGPR);
@@ -1046,25 +1047,23 @@ void AccessCase::generate(AccessGenerationState& state)
 
         GPRReg scratchGPR2 = allocator.allocateScratchGPR();
         GPRReg scratchGPR3;
-        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
-            && structure()->outOfLineCapacity())
+        if (allocating)
             scratchGPR3 = allocator.allocateScratchGPR();
         else
             scratchGPR3 = InvalidGPRReg;
 
         ScratchRegisterAllocator::PreservedState preservedState =
             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+        
+        CCallHelpers::JumpList slowPath;
 
         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
 
-        bool scratchGPRHasStorage = false;
-        bool needsToMakeRoomOnStackForCCall = !preservedState.numberOfBytesPreserved && codeBlock->jitType() == JITCode::FTLJIT;
-
-        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
+        if (allocating) {
             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
             CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
 
-            if (!structure()->outOfLineCapacity()) {
+            if (!reallocating) {
                 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
                 slowPath.append(
                     jit.branchSubPtr(
@@ -1104,16 +1103,8 @@ void AccessCase::generate(AccessGenerationState& state)
                             -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
                 }
             }
-
-            jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
-            scratchGPRHasStorage = true;
         }
 
-        uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
-        jit.store32(
-            CCallHelpers::TrustedImm32(structureBits),
-            CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
-
         if (isInlineOffset(m_offset)) {
             jit.storeValue(
                 valueRegs,
@@ -1122,100 +1113,55 @@ void AccessCase::generate(AccessGenerationState& state)
                     JSObject::offsetOfInlineStorage() +
                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
         } else {
-            if (!scratchGPRHasStorage)
+            if (!allocating)
                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
             jit.storeValue(
                 valueRegs,
                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
         }
-
-        ScratchBuffer* scratchBuffer = nullptr;
-        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity())
-            scratchBuffer = vm.scratchBufferForSize(allocator.desiredScratchBufferSizeForCall());
-
-        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
-            CCallHelpers::Call callFlushWriteBarrierBuffer;
+        
+        if (allocating) {
             CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
             WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
             jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
-            CCallHelpers::Jump needToFlush =
+            slowPath.append(
                 jit.branch32(
                     CCallHelpers::AboveOrEqual, scratchGPR2,
-                    CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity()));
+                    CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
 
             jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
             jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
 
-            jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR);
+            jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
             // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
             jit.storePtr(
                 baseGPR,
                 CCallHelpers::BaseIndex(
-                    scratchGPR, scratchGPR2, CCallHelpers::ScalePtr,
+                    scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
                     static_cast<int32_t>(-sizeof(void*))));
-
-            CCallHelpers::Jump doneWithBarrier = jit.jump();
-            needToFlush.link(&jit);
-
-            // FIXME: We should restoreReusedRegistersByPopping() before this. Then, we wouldn't need
-            // padding in preserveReusedRegistersByPushing(). Or, maybe it would be even better if the
-            // barrier slow path was just the normal slow path, below.
-            // https://bugs.webkit.org/show_bug.cgi?id=149030
-            allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR2);
-            if (needsToMakeRoomOnStackForCCall)
-                jit.makeSpaceOnStackForCCall();
-            jit.setupArgumentsWithExecState(baseGPR);
-            callFlushWriteBarrierBuffer = jit.call();
-            if (needsToMakeRoomOnStackForCCall)
-                jit.reclaimSpaceOnStackForCCall();
-            allocator.restoreUsedRegistersFromScratchBufferForCall(
-                jit, scratchBuffer, scratchGPR2);
-
-            doneWithBarrier.link(&jit);
             ownerIsRememberedOrInEden.link(&jit);
-
-            state.callbacks.append(
-                [=] (LinkBuffer& linkBuffer) {
-                    linkBuffer.link(callFlushWriteBarrierBuffer, operationFlushWriteBarrierBuffer);
-                });
         }
         
+        // We set the new butterfly and the structure last. Doing it this way ensures that whatever
+        // we had done up to this point is forgotten if we choose to branch to slow path.
+        
+        if (allocating)
+            jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
+        
+        uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
+        jit.store32(
+            CCallHelpers::TrustedImm32(structureBits),
+            CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
+
         allocator.restoreReusedRegistersByPopping(jit, preservedState);
         state.succeed();
-
-        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
+        
+        if (allocator.didReuseRegisters()) {
             slowPath.link(&jit);
             allocator.restoreReusedRegistersByPopping(jit, preservedState);
-            allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR);
-            if (needsToMakeRoomOnStackForCCall)
-                jit.makeSpaceOnStackForCCall();
-            jit.store32(
-                CCallHelpers::TrustedImm32(state.originalCallSiteIndex().bits()),
-                CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
-#if USE(JSVALUE64)
-            jit.setupArgumentsWithExecState(
-                baseGPR,
-                CCallHelpers::TrustedImmPtr(newStructure()),
-                CCallHelpers::TrustedImm32(m_offset),
-                valueRegs.gpr());
-#else
-            jit.setupArgumentsWithExecState(
-                baseGPR,
-                CCallHelpers::TrustedImmPtr(newStructure()),
-                CCallHelpers::TrustedImm32(m_offset),
-                valueRegs.payloadGPR(), valueRegs.tagGPR());
-#endif
-            CCallHelpers::Call operationCall = jit.call();
-            if (needsToMakeRoomOnStackForCCall)
-                jit.reclaimSpaceOnStackForCCall();
-            allocator.restoreUsedRegistersFromScratchBufferForCall(jit, scratchBuffer, scratchGPR);
-            state.succeed();
-
-            state.callbacks.append(
-                [=] (LinkBuffer& linkBuffer) {
-                    linkBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
-                });
-        }
+            state.failAndIgnore.append(jit.jump());
+        } else
+            state.failAndIgnore.append(slowPath);
         return;
     }
 
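
Expressed as plain C++, the write-barrier fast path the stub now emits behaves roughly like the sketch below (illustrative types; this WriteBarrierBuffer is a stand-in, not JSC's class). The design choice is that a full buffer no longer gets an inline flush call with its own register save/restore dance; it simply branches to the shared slow path, which is drained into state.failAndIgnore and re-runs the whole operation.

    #include <cstdint>

    // Hedged sketch of what the emitted barrier code does.
    struct WriteBarrierBuffer {
        uint32_t currentIndex;
        uint32_t capacity;
        void** buffer;
    };

    bool tryBarrierFastPath(void* object, bool ownerIsRememberedOrInEden,
                            WriteBarrierBuffer& wbb)
    {
        if (ownerIsRememberedOrInEden)
            return true; // no barrier needed for this object
        if (wbb.currentIndex >= wbb.capacity)
            return false; // buffer full: branch to the shared slow path
        wbb.currentIndex++;
        // The JIT addresses this as buffer + index * sizeof(void*) - sizeof(void*)
        // because the index was already incremented; same as buffer[index - 1].
        wbb.buffer[wbb.currentIndex - 1] = object;
        return true;
    }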
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
index 88e0416..ace61f4 100644
@@ -110,7 +110,7 @@ AccessGenerationResult StructureStubInfo::addAccessCase(
     VM& vm = *codeBlock->vm();
     
     if (!accessCase)
-        return AccessGenerationResult::MadeNoChanges;
+        return AccessGenerationResult::GaveUp;
     
     if (cacheType == CacheType::Stub)
         return u.stub->regenerateWithCase(vm, codeBlock, *this, ident, WTFMove(accessCase));
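
To see why the StructureStubInfo.cpp change matters, consider how a caller distinguishes the two results. The sketch below uses a hypothetical caller and a simplified enum; the real decision logic lives in JSC's repatching code:

    // Hedged sketch: onPutByIdMiss and siteIsGeneric are illustrative, not JSC API.
    enum class AccessGenerationResult { GaveUp, MadeNoChanges, GeneratedNewCode };

    void onPutByIdMiss(AccessGenerationResult result, bool& siteIsGeneric)
    {
        switch (result) {
        case AccessGenerationResult::MadeNoChanges:
            // Transient: leave the cache alone and try to repatch again on a
            // later miss. This is why addAccessCase() must not return it for
            // a case it can never cache.
            break;
        case AccessGenerationResult::GaveUp:
            // Permanent: route this site through the generic slow path.
            siteIsGeneric = true;
            break;
        case AccessGenerationResult::GeneratedNewCode:
            // A new stub was installed; future accesses take the fast path.
            break;
        }
    }

Returning MadeNoChanges for the null-accessCase path would mean retrying the repatch on every miss of a transition that involves an indexing header; GaveUp lets the site settle into the generic path until this is fixed the right way.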