Eliminate ResourceBuffer and use SharedBuffer directly instead
diff --git a/Source/WebCore/loader/cache/CachedResource.cpp b/Source/WebCore/loader/cache/CachedResource.cpp
index 5a4e4a7..ceee58c 100644
--- a/Source/WebCore/loader/cache/CachedResource.cpp
+++ b/Source/WebCore/loader/cache/CachedResource.cpp
@@ -40,7 +40,6 @@
 #include "Logging.h"
 #include "MemoryCache.h"
 #include "PlatformStrategies.h"
-#include "ResourceBuffer.h"
 #include "ResourceHandle.h"
 #include "ResourceLoadScheduler.h"
 #include "SchemeRegistry.h"
@@ -108,10 +107,10 @@ DEFINE_DEBUG_ONLY_GLOBAL(RefCountedLeakCounter, cachedResourceLeakCounter, ("Cac
 
 CachedResource::CachedResource(const ResourceRequest& request, Type type, SessionID sessionID)
     : m_resourceRequest(request)
+    , m_decodedDataDeletionTimer(this, &CachedResource::decodedDataDeletionTimerFired, deadDecodedDataDeletionIntervalForResourceType(type))
     , m_sessionID(sessionID)
     , m_loadPriority(defaultPriorityForResourceType(type))
     , m_responseTimestamp(currentTime())
-    , m_decodedDataDeletionTimer(this, &CachedResource::decodedDataDeletionTimerFired, deadDecodedDataDeletionIntervalForResourceType(type))
     , m_lastDecodedAccessTime(0)
     , m_loadFinishTime(0)
     , m_encodedSize(0)
@@ -286,22 +285,22 @@ void CachedResource::checkNotify()
     if (isLoading() || stillNeedsLoad())
         return;
 
-    CachedResourceClientWalker<CachedResourceClient> w(m_clients);
-    while (CachedResourceClient* c = w.next())
-        c->notifyFinished(this);
+    CachedResourceClientWalker<CachedResourceClient> walker(m_clients);
+    while (CachedResourceClient* client = walker.next())
+        client->notifyFinished(this);
 }
 
-void CachedResource::addDataBuffer(ResourceBuffer*)
+void CachedResource::addDataBuffer(SharedBuffer&)
 {
-    ASSERT(m_options.dataBufferingPolicy() == BufferData);
+    ASSERT(dataBufferingPolicy() == BufferData);
 }
 
 void CachedResource::addData(const char*, unsigned)
 {
-    ASSERT(m_options.dataBufferingPolicy() == DoNotBufferData);
+    ASSERT(dataBufferingPolicy() == DoNotBufferData);
 }
 
-void CachedResource::finishLoading(ResourceBuffer*)
+void CachedResource::finishLoading(SharedBuffer*)
 {
     setLoading(false);
     checkNotify();
@@ -391,17 +390,15 @@ void CachedResource::addClient(CachedResourceClient* client)
         didAddClient(client);
 }
 
-void CachedResource::didAddClient(CachedResourceClient* c)
+void CachedResource::didAddClient(CachedResourceClient* client)
 {
     if (m_decodedDataDeletionTimer.isActive())
         m_decodedDataDeletionTimer.stop();
 
-    if (m_clientsAwaitingCallback.contains(c)) {
-        m_clients.add(c);
-        m_clientsAwaitingCallback.remove(c);
-    }
+    if (m_clientsAwaitingCallback.remove(client))
+        m_clients.add(client);
     if (!isLoading() && !stillNeedsLoad())
-        c->notifyFinished(this);
+        client->notifyFinished(this);
 }
 
 bool CachedResource::addClientToSet(CachedResourceClient* client)
@@ -423,7 +420,7 @@ bool CachedResource::addClientToSet(CachedResourceClient* client)
         // Therefore, rather than immediately sending callbacks on a cache hit like other CachedResources,
         // we schedule the callbacks and ensure we never finish synchronously.
         ASSERT(!m_clientsAwaitingCallback.contains(client));
-        m_clientsAwaitingCallback.add(client, CachedResourceCallback::schedule(this, client));
+        m_clientsAwaitingCallback.add(client, std::make_unique<Callback>(*this, *client));
         return false;
     }
 
@@ -731,27 +728,28 @@ void CachedResource::setLoadPriority(ResourceLoadPriority loadPriority)
     m_loadPriority = loadPriority;
 }
 
-CachedResource::CachedResourceCallback::CachedResourceCallback(CachedResource* resource, CachedResourceClient* client)
+inline CachedResource::Callback::Callback(CachedResource& resource, CachedResourceClient& client)
     : m_resource(resource)
     , m_client(client)
-    , m_callbackTimer(this, &CachedResourceCallback::timerFired)
+    , m_timer(this, &Callback::timerFired)
 {
-    m_callbackTimer.startOneShot(0);
+    m_timer.startOneShot(0);
 }
 
-void CachedResource::CachedResourceCallback::cancel()
+inline void CachedResource::Callback::cancel()
 {
-    if (m_callbackTimer.isActive())
-        m_callbackTimer.stop();
+    if (m_timer.isActive())
+        m_timer.stop();
 }
 
-void CachedResource::CachedResourceCallback::timerFired(Timer<CachedResourceCallback>&)
+void CachedResource::Callback::timerFired(Timer<Callback>&)
 {
-    m_resource->didAddClient(m_client);
+    m_resource.didAddClient(&m_client);
 }
 
 #if USE(FOUNDATION)
-void CachedResource::tryReplaceEncodedData(PassRefPtr<SharedBuffer> newBuffer)
+
+void CachedResource::tryReplaceEncodedData(SharedBuffer& newBuffer)
 {
     if (!m_data)
         return;
@@ -762,11 +760,12 @@ void CachedResource::tryReplaceEncodedData(PassRefPtr<SharedBuffer> newBuffer)
     // We have to do the memcmp because we can't tell if the replacement file backed data is for the
     // same resource or if we made a second request with the same URL which gave us a different
     // resource. We have seen this happen for cached POST resources.
-    if (m_data->size() != newBuffer->size() || memcmp(m_data->data(), newBuffer->data(), m_data->size()))
+    if (m_data->size() != newBuffer.size() || memcmp(m_data->data(), newBuffer.data(), m_data->size()))
         return;
 
-    m_data->tryReplaceSharedBufferContents(newBuffer.get());
+    m_data->tryReplaceContentsWithPlatformBuffer(newBuffer);
 }
+
 #endif
 
 }
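
The hunks above change only the base class: code that used to receive a ResourceBuffer wrapper now gets the SharedBuffer itself. As a rough sketch only (the subclass name and the m_data member shown here are assumptions for illustration, not part of this patch), overrides of the new virtuals might look like this:

// Illustrative sketch, not from this patch: a hypothetical CachedResource
// subclass adapting to the SharedBuffer-based signatures shown above.
void CachedExampleResource::addDataBuffer(SharedBuffer& data)
{
    ASSERT(dataBufferingPolicy() == BufferData);
    // The loader now hands over the SharedBuffer directly; there is no
    // ResourceBuffer wrapper left to unwrap.
    m_data = &data;                 // assumed RefPtr<SharedBuffer> member
    setEncodedSize(data.size());
}

void CachedExampleResource::finishLoading(SharedBuffer* data)
{
    if (data)
        setEncodedSize(data->size());
    CachedResource::finishLoading(data);
}

Delegating to CachedResource::finishLoading keeps the notification path from the diff intact: the base class clears the loading flag and runs checkNotify(), which walks the clients and calls notifyFinished().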