[JSC] Use unalignedLoad for JSRopeString fiber accesses
authorysuzuki@apple.com <ysuzuki@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 26 Jul 2019 04:58:09 +0000 (04:58 +0000)
committerysuzuki@apple.com <ysuzuki@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 26 Jul 2019 04:58:09 +0000 (04:58 +0000)
https://bugs.webkit.org/show_bug.cgi?id=200148

Reviewed by Mark Lam.

JSRopeString always has some subsequent bytes that can be safely accessed because MarkedBlock has a Footer.
We use WTF::unalignedLoad to get fibers, and it will be converted to a single CPU load instruction.

* heap/MarkedBlock.h:
* runtime/JSString.h:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@247854 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/heap/MarkedBlock.h
Source/JavaScriptCore/runtime/JSString.h

index 8c01809..f9d12ac 100644 (file)
@@ -1,3 +1,16 @@
+2019-07-25  Yusuke Suzuki  <ysuzuki@apple.com>
+
+        [JSC] Use unalignedLoad for JSRopeString fiber accesses
+        https://bugs.webkit.org/show_bug.cgi?id=200148
+
+        Reviewed by Mark Lam.
+
+        JSRopeString always has some subsequent bytes that can be safely accessed because MarkedBlock has a Footer.
+        We use WTF::unalignedLoad to get fibers, and it will be converted to a single CPU load instruction.
+
+        * heap/MarkedBlock.h:
+        * runtime/JSString.h:
+
 2019-07-25  Ross Kirsling  <ross.kirsling@sony.com>
 
         Legacy numeric literals should not permit separators or BigInt
index 0c74c44..911533c 100644 (file)
@@ -302,6 +302,9 @@ public:
     static constexpr size_t footerSize = blockSize - payloadSize;
 
     static_assert(payloadSize == ((blockSize - sizeof(MarkedBlock::Footer)) & ~(atomSize - 1)), "Payload size computed the alternate way should give the same result");
+    // Some JSCell types assume that the last JSCell in a MarkedBlock has a subsequent memory region (Footer) that can still be safely accessed.
+    // For example, JSRopeString assumes that it can safely access up to 2 bytes beyond the JSRopeString cell.
+    static_assert(sizeof(Footer) >= sizeof(uint16_t));
     
     static MarkedBlock::Handle* tryCreate(Heap&, AlignedMemoryAllocator*);
         
index 6b5c6ef..a1644af 100644 (file)
@@ -270,9 +270,14 @@ public:
     static_assert(sizeof(uintptr_t) == sizeof(uint64_t), "");
     class CompactFibers {
     public:
+        static constexpr uintptr_t addressMask = (1ULL << WTF_CPU_EFFECTIVE_ADDRESS_WIDTH) - 1;
         JSString* fiber1() const
         {
+#if CPU(LITTLE_ENDIAN)
+            return bitwise_cast<JSString*>(WTF::unalignedLoad<uintptr_t>(&m_fiber1Lower) & addressMask);
+#else
             return bitwise_cast<JSString*>(static_cast<uintptr_t>(m_fiber1Lower) | (static_cast<uintptr_t>(m_fiber1Upper) << 32));
+#endif
         }
 
         void initializeFiber1(JSString* fiber)
@@ -284,7 +289,14 @@ public:
 
         JSString* fiber2() const
         {
+#if CPU(LITTLE_ENDIAN)
+            // This access exceeds the sizeof(JSRopeString). But this is OK because JSRopeString is always allocated in MarkedBlock,
+            // and the last JSRopeString cell in the block has some subsequent bytes which are used for MarkedBlock::Footer.
+            // So the following access does not step over a page boundary into a page that lacks read permission.
+            return bitwise_cast<JSString*>(WTF::unalignedLoad<uintptr_t>(&m_fiber2Lower) & addressMask);
+#else
             return bitwise_cast<JSString*>(static_cast<uintptr_t>(m_fiber2Lower) | (static_cast<uintptr_t>(m_fiber2Upper) << 16));
+#endif
         }
         void initializeFiber2(JSString* fiber)
         {