Network Cache: Add thread-safe accessors for storage paths
Source/WebKit2/NetworkProcess/cache/NetworkCacheStorage.cpp
index 9fb86f0..10fd155 100644
@@ -43,6 +43,9 @@ namespace NetworkCache {
 
 static const char networkCacheSubdirectory[] = "WebKitCache";
 static const char versionDirectoryPrefix[] = "Version ";
+static const char recordsDirectoryName[] = "Records";
+static const char blobsDirectoryName[] = "Blobs";
+static const char bodyPostfix[] = "-body";
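+// Records are stored under Records/<partition>/<hash>; each body lives next to
+// its record as "<hash>-body" and is managed by BlobStorage (Blobs directory).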
 
 static double computeRecordWorth(FileTimes);
 
@@ -62,17 +65,49 @@ static String makeVersionedDirectoryPath(const String& baseDirectoryPath)
     return WebCore::pathByAppendingComponent(baseDirectoryPath, versionSubdirectory);
 }
 
+static String makeRecordsDirectoryPath(const String& baseDirectoryPath)
+{
+    return WebCore::pathByAppendingComponent(makeVersionedDirectoryPath(baseDirectoryPath), recordsDirectoryName);
+}
+
+static String makeBlobDirectoryPath(const String& baseDirectoryPath)
+{
+    return WebCore::pathByAppendingComponent(makeVersionedDirectoryPath(baseDirectoryPath), blobsDirectoryName);
+}
+
 Storage::Storage(const String& baseDirectoryPath)
-    : m_baseDirectoryPath(baseDirectoryPath)
-    , m_directoryPath(makeVersionedDirectoryPath(baseDirectoryPath))
+    : m_basePath(baseDirectoryPath)
+    , m_recordsPath(makeRecordsDirectoryPath(baseDirectoryPath))
     , m_ioQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage", WorkQueue::Type::Concurrent))
     , m_backgroundIOQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage.background", WorkQueue::Type::Concurrent, WorkQueue::QOS::Background))
     , m_serialBackgroundIOQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage.serialBackground", WorkQueue::Type::Serial, WorkQueue::QOS::Background))
+    , m_blobStorage(makeBlobDirectoryPath(baseDirectoryPath))
 {
     deleteOldVersions();
     synchronize();
 }
 
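+// Path accessors return isolated copies so they can be used safely from any thread.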
+String Storage::basePath() const
+{
+    return m_basePath.isolatedCopy();
+}
+
+String Storage::versionPath() const
+{
+    return makeVersionedDirectoryPath(basePath());
+}
+
+String Storage::recordsPath() const
+{
+    return m_recordsPath.isolatedCopy();
+}
+
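+// The estimate covers both record files and blob storage.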
+size_t Storage::approximateSize() const
+{
+    return m_approximateSize + m_blobStorage.approximateSize();
+}
+
 void Storage::synchronize()
 {
     ASSERT(RunLoop::isMain());
@@ -83,14 +118,11 @@ void Storage::synchronize()
 
     LOG(NetworkCacheStorage, "(NetworkProcess) synchronizing cache");
 
-    StringCapture cachePathCapture(m_directoryPath);
-    backgroundIOQueue().dispatch([this, cachePathCapture] {
-        String cachePath = cachePathCapture.string();
-
+    backgroundIOQueue().dispatch([this] {
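+        // Rebuild the contents filter and the size estimate by scanning the records directory.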
         auto filter = std::make_unique<ContentsFilter>();
         size_t size = 0;
         unsigned count = 0;
-        traverseCacheFiles(cachePath, [&filter, &size, &count](const String& fileName, const String& partitionPath) {
+        traverseCacheFiles(recordsPath(), [&filter, &size, &count](const String& fileName, const String& partitionPath) {
             Key::HashType hash;
             if (!Key::stringToHash(fileName, hash))
                 return;
@@ -99,7 +131,7 @@ void Storage::synchronize()
             WebCore::getFileSize(filePath, fileSize);
             if (!fileSize)
                 return;
-            filter->add(Key::toShortHash(hash));
+            filter->add(hash);
             size += fileSize;
             ++count;
         });
@@ -117,7 +149,9 @@ void Storage::synchronize()
             m_synchronizationInProgress = false;
         });
 
-        LOG(NetworkCacheStorage, "(NetworkProcess) cache synchronization completed approximateSize=%zu count=%d", size, count);
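+        // Synchronize the blob storage as well; this prunes unreferenced blobs.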
+        m_blobStorage.synchronize();
+
+        LOG(NetworkCacheStorage, "(NetworkProcess) cache synchronization completed size=%zu count=%u", size, count);
     });
 }
 
@@ -126,20 +160,20 @@ void Storage::addToContentsFilter(const Key& key)
     ASSERT(RunLoop::isMain());
 
     if (m_contentsFilter)
-        m_contentsFilter->add(key.shortHash());
+        m_contentsFilter->add(key.hash());
 
     // If we get new entries during filter synchronization, take care to add them to the new filter as well.
     if (m_synchronizationInProgress)
-        m_contentsFilterHashesAddedDuringSynchronization.append(key.shortHash());
+        m_contentsFilterHashesAddedDuringSynchronization.append(key.hash());
 }
 
 bool Storage::mayContain(const Key& key) const
 {
     ASSERT(RunLoop::isMain());
-    return !m_contentsFilter || m_contentsFilter->mayContain(key.shortHash());
+    return !m_contentsFilter || m_contentsFilter->mayContain(key.hash());
 }
 
-static String directoryPathForKey(const Key& key, const String& cachePath)
+static String partitionPathForKey(const Key& key, const String& cachePath)
 {
     ASSERT(!key.partition().isEmpty());
     return WebCore::pathByAppendingComponent(cachePath, key.partition());
@@ -150,18 +184,19 @@ static String fileNameForKey(const Key& key)
     return key.hashAsString();
 }
 
-static String filePathForKey(const Key& key, const String& cachePath)
+static String recordPathForKey(const Key& key, const String& cachePath)
 {
-    return WebCore::pathByAppendingComponent(directoryPathForKey(key, cachePath), fileNameForKey(key));
+    return WebCore::pathByAppendingComponent(partitionPathForKey(key, cachePath), fileNameForKey(key));
 }
 
-static Ref<IOChannel> openFileForKey(const Key& key, IOChannel::Type type, const String& cachePath)
+static String bodyPathForRecordPath(const String& recordPath)
 {
-    auto directoryPath = directoryPathForKey(key, cachePath);
-    auto filePath = WebCore::pathByAppendingComponent(directoryPath, fileNameForKey(key));
-    if (type == IOChannel::Type::Create)
-        WebCore::makeAllDirectories(directoryPath);
-    return IOChannel::open(filePath, type);
+    return recordPath + bodyPostfix;
+}
+
+static String bodyPathForKey(const Key& key, const String& cachePath)
+{
+    return bodyPathForRecordPath(recordPathForKey(key, cachePath));
 }
 
 static unsigned hashData(const Data& data)
@@ -188,8 +223,7 @@ struct RecordMetaData {
     unsigned headerChecksum;
     uint64_t headerOffset;
     uint64_t headerSize;
-    unsigned bodyChecksum;
-    uint64_t bodyOffset;
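+    // The body is stored as a separate blob; the record keeps its SHA-1 hash for validation.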
+    SHA1::Digest bodyHash;
     uint64_t bodySize;
 };
 
@@ -208,14 +242,13 @@ static bool decodeRecordMetaData(RecordMetaData& metaData, const Data& fileData)
             return false;
         if (!decoder.decode(metaData.headerSize))
             return false;
-        if (!decoder.decode(metaData.bodyChecksum))
+        if (!decoder.decode(metaData.bodyHash))
             return false;
         if (!decoder.decode(metaData.bodySize))
             return false;
         if (!decoder.verifyChecksum())
             return false;
         metaData.headerOffset = decoder.currentOffset();
-        metaData.bodyOffset = WTF::roundUpToMultipleOf(pageSize(), metaData.headerOffset + metaData.headerSize);
         success = true;
         return false;
     });
@@ -233,10 +266,6 @@ static bool decodeRecordHeader(const Data& fileData, RecordMetaData& metaData, D
         LOG(NetworkCacheStorage, "(NetworkProcess) version mismatch");
         return false;
     }
-    if (metaData.headerOffset + metaData.headerSize > metaData.bodyOffset) {
-        LOG(NetworkCacheStorage, "(NetworkProcess) body offset mismatch");
-        return false;
-    }
 
     auto headerData = fileData.subrange(metaData.headerOffset, metaData.headerSize);
     if (metaData.headerChecksum != hashData(headerData)) {
@@ -247,11 +276,11 @@ static bool decodeRecordHeader(const Data& fileData, RecordMetaData& metaData, D
     return true;
 }
 
-static std::unique_ptr<Storage::Record> decodeRecord(const Data& fileData, int fd, const Key& key)
+static std::unique_ptr<Storage::Record> createRecord(const Data& recordData, const BlobStorage::Blob& bodyBlob, const Key& key)
 {
     RecordMetaData metaData;
     Data headerData;
-    if (!decodeRecordHeader(fileData, metaData, headerData))
+    if (!decodeRecordHeader(recordData, metaData, headerData))
         return nullptr;
 
     if (metaData.key != key)
@@ -261,29 +290,16 @@ static std::unique_ptr<Storage::Record> decodeRecord(const Data& fileData, int f
     auto timeStamp = std::chrono::system_clock::time_point(metaData.epochRelativeTimeStamp);
     if (timeStamp > std::chrono::system_clock::now())
         return nullptr;
-
-    Data bodyData;
-    if (metaData.bodySize) {
-        if (metaData.bodyOffset + metaData.bodySize != fileData.size())
-            return nullptr;
-
-        bodyData = mapFile(fd, metaData.bodyOffset, metaData.bodySize);
-        if (bodyData.isNull()) {
-            LOG(NetworkCacheStorage, "(NetworkProcess) map failed");
-            return nullptr;
-        }
-
-        if (metaData.bodyChecksum != hashData(bodyData)) {
-            LOG(NetworkCacheStorage, "(NetworkProcess) data checksum mismatch");
-            return nullptr;
-        }
-    }
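+    // Validate the blob against the size and hash recorded in the metadata.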
+    if (metaData.bodySize != bodyBlob.data.size())
+        return nullptr;
+    if (metaData.bodyHash != bodyBlob.hash)
+        return nullptr;
 
     return std::make_unique<Storage::Record>(Storage::Record {
         metaData.key,
         timeStamp,
         headerData,
-        bodyData
+        bodyBlob.data
     });
 }
 
@@ -296,7 +312,7 @@ static Data encodeRecordMetaData(const RecordMetaData& metaData)
     encoder << metaData.epochRelativeTimeStamp;
     encoder << metaData.headerChecksum;
     encoder << metaData.headerSize;
-    encoder << metaData.bodyChecksum;
+    encoder << metaData.bodyHash;
     encoder << metaData.bodySize;
 
     encoder.encodeChecksum();
@@ -304,25 +320,18 @@ static Data encodeRecordMetaData(const RecordMetaData& metaData)
     return Data(encoder.buffer(), encoder.bufferSize());
 }
 
-static Data encodeRecordHeader(const Storage::Record& record)
+static Data encodeRecordHeader(const Storage::Record& record, SHA1::Digest bodyHash)
 {
     RecordMetaData metaData(record.key);
     metaData.epochRelativeTimeStamp = std::chrono::duration_cast<std::chrono::milliseconds>(record.timeStamp.time_since_epoch());
     metaData.headerChecksum = hashData(record.header);
     metaData.headerSize = record.header.size();
-    metaData.bodyChecksum = hashData(record.body);
+    metaData.bodyHash = bodyHash;
     metaData.bodySize = record.body.size();
 
     auto encodedMetaData = encodeRecordMetaData(metaData);
     auto headerData = concatenate(encodedMetaData, record.header);
-    if (!record.body.size())
-        return { headerData };
-
-    size_t dataOffset = WTF::roundUpToMultipleOf(pageSize(), headerData.size());
-    Vector<uint8_t, 4096> filler(dataOffset - headerData.size(), 0);
-    Data alignmentData(filler.data(), filler.size());
-
-    return concatenate(headerData, alignmentData);
+    return { headerData };
 }
 
 void Storage::remove(const Key& key)
@@ -333,15 +342,16 @@ void Storage::remove(const Key& key)
     // For simplicity we also don't reduce m_approximateSize on removals.
     // The next synchronization will update everything.
 
-    StringCapture filePathCapture(filePathForKey(key, m_directoryPath));
-    serialBackgroundIOQueue().dispatch([this, filePathCapture] {
-        WebCore::deleteFile(filePathCapture.string());
+    serialBackgroundIOQueue().dispatch([this, key] {
+        auto recordsPath = this->recordsPath();
+        WebCore::deleteFile(recordPathForKey(key, recordsPath));
+        m_blobStorage.remove(bodyPathForKey(key, recordsPath));
     });
 }
 
-void Storage::updateFileModificationTime(IOChannel& channel)
+void Storage::updateFileModificationTime(const String& path)
 {
-    StringCapture filePathCapture(channel.path());
+    StringCapture filePathCapture(path);
     serialBackgroundIOQueue().dispatch([filePathCapture] {
         updateFileModificationTimeIfNeeded(filePathCapture.string());
     });
@@ -352,31 +362,37 @@ void Storage::dispatchReadOperation(const ReadOperation& read)
     ASSERT(RunLoop::isMain());
     ASSERT(m_activeReadOperations.contains(&read));
 
-    StringCapture cachePathCapture(m_directoryPath);
-    ioQueue().dispatch([this, &read, cachePathCapture] {
-        RefPtr<IOChannel> channel = openFileForKey(read.key, IOChannel::Type::Read, cachePathCapture.string());
-        channel->read(0, std::numeric_limits<size_t>::max(), [this, channel, &read](Data& fileData, int error) {
-            if (error) {
-                remove(read.key);
-                read.completionHandler(nullptr);
-            } else {
-                auto record = decodeRecord(fileData, channel->fileDescriptor(), read.key);
-                bool success = read.completionHandler(WTF::move(record));
-                if (success)
-                    updateFileModificationTime(*channel);
-                else
-                    remove(read.key);
-            }
-
-            ASSERT(m_activeReadOperations.contains(&read));
-            m_activeReadOperations.remove(&read);
-            dispatchPendingReadOperations();
-
-            LOG(NetworkCacheStorage, "(NetworkProcess) read complete error=%d", error);
+    ioQueue().dispatch([this, &read] {
+        auto recordsPath = this->recordsPath();
+        auto recordPath = recordPathForKey(read.key, recordsPath);
+        auto bodyPath = bodyPathForKey(read.key, recordsPath);
+        // FIXME: Body and header retrievals can be done in parallel.
+        auto bodyBlob = m_blobStorage.get(bodyPath);
+
+        RefPtr<IOChannel> channel = IOChannel::open(recordPath, IOChannel::Type::Read);
+        channel->read(0, std::numeric_limits<size_t>::max(), [this, &read, bodyBlob](Data& fileData, int error) {
+            auto record = error ? nullptr : createRecord(fileData, bodyBlob, read.key);
+            finishReadOperation(read, WTF::move(record));
         });
     });
 }
 
+void Storage::finishReadOperation(const ReadOperation& read, std::unique_ptr<Record> record)
+{
+    ASSERT(RunLoop::isMain());
+
+    bool success = read.completionHandler(WTF::move(record));
+    if (success)
+        updateFileModificationTime(recordPathForKey(read.key, recordsPath()));
+    else
+        remove(read.key);
+    ASSERT(m_activeReadOperations.contains(&read));
+    m_activeReadOperations.remove(&read);
+    dispatchPendingReadOperations();
+
+    LOG(NetworkCacheStorage, "(NetworkProcess) read complete success=%d", success);
+}
+
 void Storage::dispatchPendingReadOperations()
 {
     ASSERT(RunLoop::isMain());
@@ -413,13 +429,90 @@ template <class T> bool retrieveFromMemory(const T& operations, const Key& key,
     return false;
 }
 
+void Storage::dispatchPendingWriteOperations()
+{
+    ASSERT(RunLoop::isMain());
+
+    const int maximumActiveWriteOperationCount { 3 };
+
+    while (!m_pendingWriteOperations.isEmpty()) {
+        if (m_activeWriteOperations.size() >= maximumActiveWriteOperationCount) {
+            LOG(NetworkCacheStorage, "(NetworkProcess) limiting parallel writes");
+            return;
+        }
+        auto writeOperation = m_pendingWriteOperations.takeFirst();
+        auto& write = *writeOperation;
+        m_activeWriteOperations.add(WTF::move(writeOperation));
+
+        dispatchWriteOperation(write);
+    }
+}
+
+void Storage::dispatchWriteOperation(const WriteOperation& write)
+{
+    ASSERT(RunLoop::isMain());
+    ASSERT(m_activeWriteOperations.contains(&write));
+
+    // The key was already added when the store operation began, but the filter may have been wiped since.
+    addToContentsFilter(write.record.key);
+
+    backgroundIOQueue().dispatch([this, &write] {
+        auto recordsPath = this->recordsPath();
+        auto partitionPath = partitionPathForKey(write.record.key, recordsPath);
+        auto recordPath = recordPathForKey(write.record.key, recordsPath);
+        auto bodyPath = bodyPathForKey(write.record.key, recordsPath);
+
+        WebCore::makeAllDirectories(partitionPath);
+
+        // Store the body.
+        auto blob = m_blobStorage.add(bodyPath, write.record.body);
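+        // A null blob means storing the body failed; finish without writing the record.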
+        if (blob.data.isNull()) {
+            RunLoop::main().dispatch([this, &write] {
+                finishWriteOperation(write);
+            });
+            return;
+        }
+
+        // Tell the client we now have a disk-backed map for this data.
+        size_t minimumMapSize = pageSize();
+        if (blob.data.size() >= minimumMapSize && blob.data.isMap() && write.mappedBodyHandler) {
+            auto& mappedBodyHandler = write.mappedBodyHandler;
+            RunLoop::main().dispatch([blob, mappedBodyHandler] {
+                mappedBodyHandler(blob.data);
+            });
+        }
+
+        // Store the header and meta data.
+        auto encodedHeader = encodeRecordHeader(write.record, blob.hash);
+        auto channel = IOChannel::open(recordPath, IOChannel::Type::Create);
+        int fd = channel->fileDescriptor();
+        size_t headerSize = encodedHeader.size();
+        channel->write(0, encodedHeader, [this, &write, headerSize, fd](int error) {
+            // On error the entry still stays in the contents filter until the next synchronization.
+            m_approximateSize += headerSize;
+            finishWriteOperation(write);
+
+            LOG(NetworkCacheStorage, "(NetworkProcess) write complete error=%d", error);
+        });
+    });
+}
+
+void Storage::finishWriteOperation(const WriteOperation& write)
+{
+    ASSERT(m_activeWriteOperations.contains(&write));
+    m_activeWriteOperations.remove(&write);
+    dispatchPendingWriteOperations();
+
+    shrinkIfNeeded();
+}
+
 void Storage::retrieve(const Key& key, unsigned priority, RetrieveCompletionHandler&& completionHandler)
 {
     ASSERT(RunLoop::isMain());
     ASSERT(priority <= maximumRetrievePriority);
     ASSERT(!key.isNull());
 
-    if (!m_maximumSize) {
+    if (!m_capacity) {
         completionHandler(nullptr);
         return;
     }
@@ -438,17 +531,15 @@ void Storage::retrieve(const Key& key, unsigned priority, RetrieveCompletionHand
     dispatchPendingReadOperations();
 }
 
-void Storage::store(const Record& record, StoreCompletionHandler&& completionHandler)
+void Storage::store(const Record& record, MappedBodyHandler&& mappedBodyHandler)
 {
     ASSERT(RunLoop::isMain());
     ASSERT(!record.key.isNull());
 
-    if (!m_maximumSize) {
-        completionHandler(false, { });
+    if (!m_capacity)
         return;
-    }
 
-    m_pendingWriteOperations.append(new WriteOperation { record, { }, WTF::move(completionHandler) });
+    m_pendingWriteOperations.append(new WriteOperation { record, WTF::move(mappedBodyHandler) });
 
     // Add key to the filter already here as we do lookups from the pending operations too.
     addToContentsFilter(record.key);
@@ -456,43 +547,27 @@ void Storage::store(const Record& record, StoreCompletionHandler&& completionHan
     dispatchPendingWriteOperations();
 }
 
-void Storage::update(const Record& updateRecord, const Record& existingRecord, StoreCompletionHandler&& completionHandler)
-{
-    ASSERT(RunLoop::isMain());
-    ASSERT(!existingRecord.key.isNull());
-    ASSERT(existingRecord.key == updateRecord.key);
-
-    if (!m_maximumSize) {
-        completionHandler(false, { });
-        return;
-    }
-
-    m_pendingWriteOperations.append(new WriteOperation { updateRecord, existingRecord, WTF::move(completionHandler) });
-
-    dispatchPendingWriteOperations();
-}
-
 void Storage::traverse(TraverseFlags flags, std::function<void (const Record*, const RecordInfo&)>&& traverseHandler)
 {
-    StringCapture cachePathCapture(m_directoryPath);
-    ioQueue().dispatch([this, flags, cachePathCapture, traverseHandler] {
-        String cachePath = cachePathCapture.string();
-        traverseCacheFiles(cachePath, [this, flags, &traverseHandler](const String& fileName, const String& partitionPath) {
-            auto filePath = WebCore::pathByAppendingComponent(partitionPath, fileName);
+    ioQueue().dispatch([this, flags, traverseHandler] {
+        traverseCacheFiles(recordsPath(), [this, flags, &traverseHandler](const String& fileName, const String& partitionPath) {
+            auto recordPath = WebCore::pathByAppendingComponent(partitionPath, fileName);
 
             RecordInfo info;
             if (flags & TraverseFlag::ComputeWorth)
-                info.worth = computeRecordWorth(fileTimes(filePath));
+                info.worth = computeRecordWorth(fileTimes(recordPath));
+            if (flags & TraverseFlag::ShareCount)
+                info.bodyShareCount = m_blobStorage.shareCount(bodyPathForRecordPath(recordPath));
 
-            auto channel = IOChannel::open(filePath, IOChannel::Type::Read);
-            const size_t headerReadSize = 16 << 10;
+            auto channel = IOChannel::open(recordPath, IOChannel::Type::Read);
             // FIXME: Traversal is slower than it should be due to lack of parallelism.
-            channel->readSync(0, headerReadSize, [this, &traverseHandler, &info](Data& fileData, int) {
+            channel->readSync(0, std::numeric_limits<size_t>::max(), [this, &traverseHandler, &info](Data& fileData, int) {
                 RecordMetaData metaData;
                 Data headerData;
                 if (decodeRecordHeader(fileData, metaData, headerData)) {
                     Record record { metaData.key, std::chrono::system_clock::time_point(metaData.epochRelativeTimeStamp), headerData, { } };
                     info.bodySize = metaData.bodySize;
+                    info.bodyHash = String::fromUTF8(SHA1::hexDigest(metaData.bodyHash));
                     traverseHandler(&record, info);
                 }
             });
@@ -503,121 +578,20 @@ void Storage::traverse(TraverseFlags flags, std::function<void (const Record*, c
     });
 }
 
-void Storage::dispatchPendingWriteOperations()
-{
-    ASSERT(RunLoop::isMain());
-
-    const int maximumActiveWriteOperationCount { 3 };
-
-    while (!m_pendingWriteOperations.isEmpty()) {
-        if (m_activeWriteOperations.size() >= maximumActiveWriteOperationCount) {
-            LOG(NetworkCacheStorage, "(NetworkProcess) limiting parallel writes");
-            return;
-        }
-        auto writeOperation = m_pendingWriteOperations.takeFirst();
-        auto& write = *writeOperation;
-        m_activeWriteOperations.add(WTF::move(writeOperation));
-
-        if (write.existingRecord && mayContain(write.record.key)) {
-            dispatchHeaderWriteOperation(write);
-            continue;
-        }
-        dispatchFullWriteOperation(write);
-    }
-}
-
-void Storage::dispatchFullWriteOperation(const WriteOperation& write)
-{
-    ASSERT(RunLoop::isMain());
-    ASSERT(m_activeWriteOperations.contains(&write));
-
-    // This was added already when starting the store but filter might have been wiped.
-    addToContentsFilter(write.record.key);
-
-    StringCapture cachePathCapture(m_directoryPath);
-    backgroundIOQueue().dispatch([this, &write, cachePathCapture] {
-        auto encodedHeader = encodeRecordHeader(write.record);
-        auto headerAndBodyData = concatenate(encodedHeader, write.record.body);
-
-        auto channel = openFileForKey(write.record.key, IOChannel::Type::Create, cachePathCapture.string());
-        int fd = channel->fileDescriptor();
-        size_t bodyOffset = encodedHeader.size();
-
-        channel->write(0, headerAndBodyData, [this, &write, bodyOffset, fd](int error) {
-            size_t bodySize = write.record.body.size();
-            size_t totalSize = bodyOffset + bodySize;
-
-            // On error the entry still stays in the contents filter until next synchronization.
-            m_approximateSize += totalSize;
-
-            bool shouldMapBody = !error && bodySize >= pageSize();
-            auto bodyMap = shouldMapBody ? mapFile(fd, bodyOffset, bodySize) : Data();
-
-            write.completionHandler(!error, bodyMap);
-
-            ASSERT(m_activeWriteOperations.contains(&write));
-            m_activeWriteOperations.remove(&write);
-            dispatchPendingWriteOperations();
-
-            LOG(NetworkCacheStorage, "(NetworkProcess) write complete error=%d", error);
-        });
-    });
-
-    shrinkIfNeeded();
-}
-
-void Storage::dispatchHeaderWriteOperation(const WriteOperation& write)
-{
-    ASSERT(RunLoop::isMain());
-    ASSERT(write.existingRecord);
-    ASSERT(m_activeWriteOperations.contains(&write));
-    ASSERT(mayContain(write.record.key));
-
-    // Try to update the header of an existing entry.
-    StringCapture cachePathCapture(m_directoryPath);
-    backgroundIOQueue().dispatch([this, &write, cachePathCapture] {
-        auto headerData = encodeRecordHeader(write.record);
-        auto existingHeaderData = encodeRecordHeader(write.existingRecord.value());
-
-        bool pageRoundedHeaderSizeChanged = headerData.size() != existingHeaderData.size();
-        if (pageRoundedHeaderSizeChanged) {
-            LOG(NetworkCacheStorage, "(NetworkProcess) page-rounded header size changed, storing full entry");
-            RunLoop::main().dispatch([this, &write] {
-                dispatchFullWriteOperation(write);
-            });
-            return;
-        }
-
-        auto channel = openFileForKey(write.record.key, IOChannel::Type::Write, cachePathCapture.string());
-        channel->write(0, headerData, [this, &write](int error) {
-            LOG(NetworkCacheStorage, "(NetworkProcess) update complete error=%d", error);
-
-            if (error)
-                remove(write.record.key);
-
-            write.completionHandler(!error, { });
-
-            ASSERT(m_activeWriteOperations.contains(&write));
-            m_activeWriteOperations.remove(&write);
-            dispatchPendingWriteOperations();
-        });
-    });
-}
-
-void Storage::setMaximumSize(size_t size)
+void Storage::setCapacity(size_t capacity)
 {
     ASSERT(RunLoop::isMain());
 
 #if !ASSERT_DISABLED
-    const size_t assumedAverageRecordSize = 50 << 20;
-    size_t maximumRecordCount = size / assumedAverageRecordSize;
+    const size_t assumedAverageRecordSize = 50 << 10;
+    size_t maximumRecordCount = capacity / assumedAverageRecordSize;
     // ~10 bits per element are required for <1% false positive rate.
     size_t effectiveBloomFilterCapacity = ContentsFilter::tableSize / 10;
     // If this gets hit it might be time to increase the filter size.
     ASSERT(maximumRecordCount < effectiveBloomFilterCapacity);
 #endif
 
-    m_maximumSize = size;
+    m_capacity = capacity;
 
     shrinkIfNeeded();
 }
@@ -631,17 +605,18 @@ void Storage::clear()
         m_contentsFilter->clear();
     m_approximateSize = 0;
 
-    StringCapture directoryPathCapture(m_directoryPath);
-
-    ioQueue().dispatch([directoryPathCapture] {
-        String directoryPath = directoryPathCapture.string();
-        traverseDirectory(directoryPath, DT_DIR, [&directoryPath](const String& subdirName) {
-            String subdirPath = WebCore::pathByAppendingComponent(directoryPath, subdirName);
+    ioQueue().dispatch([this] {
+        auto recordsPath = this->recordsPath();
+        traverseDirectory(recordsPath, DT_DIR, [&recordsPath](const String& subdirName) {
+            String subdirPath = WebCore::pathByAppendingComponent(recordsPath, subdirName);
             traverseDirectory(subdirPath, DT_REG, [&subdirPath](const String& fileName) {
                 WebCore::deleteFile(WebCore::pathByAppendingComponent(subdirPath, fileName));
             });
             WebCore::deleteEmptyDirectory(subdirPath);
         });
+
+        // This cleans up unreferenced blobs.
+        m_blobStorage.synchronize();
     });
 }
 
@@ -660,24 +635,30 @@ static double computeRecordWorth(FileTimes times)
     return duration<double>(accessAge) / age;
 }
 
-
-static double deletionProbability(FileTimes times)
+static double deletionProbability(FileTimes times, unsigned bodyShareCount)
 {
     static const double maximumProbability { 0.33 };
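+    // Cap the effect of sharing so heavily shared bodies can still be evicted.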
+    static const unsigned maximumEffectiveShareCount { 5 };
 
     auto worth = computeRecordWorth(times);
 
     // Adjust a bit so the most valuable entries don't get deleted at all.
     auto effectiveWorth = std::min(1.1 * worth, 1.);
 
-    return (1 - effectiveWorth) * maximumProbability;
+    auto probability = (1 - effectiveWorth) * maximumProbability;
+
+    // It is less useful to remove an entry that shares its body data.
+    if (bodyShareCount)
+        probability /= std::min(bodyShareCount, maximumEffectiveShareCount);
+
+    return probability;
 }
 
 void Storage::shrinkIfNeeded()
 {
     ASSERT(RunLoop::isMain());
 
-    if (m_approximateSize > m_maximumSize)
+    if (approximateSize() > m_capacity)
         shrink();
 }
 
@@ -689,27 +670,31 @@ void Storage::shrink()
         return;
     m_shrinkInProgress = true;
 
-    LOG(NetworkCacheStorage, "(NetworkProcess) shrinking cache approximateSize=%zu, m_maximumSize=%zu", static_cast<size_t>(m_approximateSize), m_maximumSize);
+    LOG(NetworkCacheStorage, "(NetworkProcess) shrinking cache approximateSize=%zu capacity=%zu", approximateSize(), m_capacity);
 
-    StringCapture cachePathCapture(m_directoryPath);
-    backgroundIOQueue().dispatch([this, cachePathCapture] {
-        String cachePath = cachePathCapture.string();
-        traverseCacheFiles(cachePath, [](const String& fileName, const String& partitionPath) {
-            auto filePath = WebCore::pathByAppendingComponent(partitionPath, fileName);
+    backgroundIOQueue().dispatch([this] {
+        auto recordsPath = this->recordsPath();
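+        // Delete records probabilistically, weighted by age, access history, and body sharing.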
+        traverseCacheFiles(recordsPath, [this](const String& fileName, const String& partitionPath) {
+            auto recordPath = WebCore::pathByAppendingComponent(partitionPath, fileName);
+            auto bodyPath = bodyPathForRecordPath(recordPath);
+
+            auto times = fileTimes(recordPath);
+            unsigned bodyShareCount = m_blobStorage.shareCount(bodyPath);
+            auto probability = deletionProbability(times, bodyShareCount);
 
-            auto times = fileTimes(filePath);
-            auto probability = deletionProbability(times);
             bool shouldDelete = randomNumber() < probability;
 
-            LOG(NetworkCacheStorage, "Deletion probability=%f shouldDelete=%d", probability, shouldDelete);
+            LOG(NetworkCacheStorage, "Deletion probability=%f bodyShareCount=%u shouldDelete=%d", probability, bodyShareCount, shouldDelete);
 
-            if (shouldDelete)
-                WebCore::deleteFile(filePath);
+            if (shouldDelete) {
+                WebCore::deleteFile(recordPath);
+                m_blobStorage.remove(bodyPath);
+            }
         });
 
         // Let the system figure out if the partition directories are really empty.
-        traverseDirectory(cachePath, DT_DIR, [&cachePath](const String& subdirName) {
-            auto partitionPath = WebCore::pathByAppendingComponent(cachePath, subdirName);
+        traverseDirectory(recordsPath, DT_DIR, [&recordsPath](const String& subdirName) {
+            auto partitionPath = WebCore::pathByAppendingComponent(recordsPath, subdirName);
             WebCore::deleteEmptyDirectory(partitionPath);
         });
 
@@ -726,9 +711,8 @@ void Storage::shrink()
 void Storage::deleteOldVersions()
 {
     // Delete V1 cache.
-    StringCapture cachePathCapture(m_baseDirectoryPath);
-    backgroundIOQueue().dispatch([cachePathCapture] {
-        String cachePath = cachePathCapture.string();
+    backgroundIOQueue().dispatch([this] {
+        auto cachePath = basePath();
         traverseDirectory(cachePath, DT_DIR, [&cachePath](const String& subdirName) {
             if (subdirName.startsWith(versionDirectoryPrefix))
                 return;
@@ -739,6 +723,7 @@ void Storage::deleteOldVersions()
             WebCore::deleteEmptyDirectory(partitionPath);
         });
     });
+    // FIXME: Delete V2 cache.
 }
 
 }