[WK2] NetworkCache retrievals sometimes fail on browser startup
[WebKit-https.git] Source/WebKit2/NetworkProcess/cache/NetworkCacheStorage.cpp
/*
 * Copyright (C) 2014-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "NetworkCacheStorage.h"

#if ENABLE(NETWORK_CACHE)

#include "Logging.h"
#include "NetworkCacheCoders.h"
#include "NetworkCacheFileSystemPosix.h"
#include "NetworkCacheIOChannel.h"
#include <wtf/PageBlock.h>
#include <wtf/RandomNumber.h>
#include <wtf/RunLoop.h>
#include <wtf/text/CString.h>
#include <wtf/text/StringBuilder.h>

namespace WebKit {
namespace NetworkCache {

static const char networkCacheSubdirectory[] = "WebKitCache";
static const char versionDirectoryPrefix[] = "Version ";

std::unique_ptr<Storage> Storage::open(const String& cachePath)
{
    ASSERT(RunLoop::isMain());

    String networkCachePath = WebCore::pathByAppendingComponent(cachePath, networkCacheSubdirectory);
    if (!WebCore::makeAllDirectories(networkCachePath))
        return nullptr;
    return std::unique_ptr<Storage>(new Storage(networkCachePath));
}

static String makeVersionedDirectoryPath(const String& baseDirectoryPath)
{
    String versionSubdirectory = versionDirectoryPrefix + String::number(Storage::version);
    return WebCore::pathByAppendingComponent(baseDirectoryPath, versionSubdirectory);
}

Storage::Storage(const String& baseDirectoryPath)
    : m_baseDirectoryPath(baseDirectoryPath)
    , m_directoryPath(makeVersionedDirectoryPath(baseDirectoryPath))
    , m_ioQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage", WorkQueue::Type::Concurrent))
    , m_backgroundIOQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage.background", WorkQueue::Type::Concurrent, WorkQueue::QOS::Background))
    , m_serialBackgroundIOQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage.serialBackground", WorkQueue::Type::Serial, WorkQueue::QOS::Background))
{
    deleteOldVersions();
    initialize();
}

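// Builds the in-memory view of the cache contents on a background queue: every file name
// in the cache directory is parsed back into a key hash and added to m_contentsFilter
// (the additions bounce through the main run loop, which owns the filter), and file sizes
// are accumulated into m_approximateSize. The filter is therefore populated asynchronously
// after construction.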
void Storage::initialize()
{
    ASSERT(RunLoop::isMain());

    StringCapture cachePathCapture(m_directoryPath);

    backgroundIOQueue().dispatch([this, cachePathCapture] {
        String cachePath = cachePathCapture.string();
        traverseCacheFiles(cachePath, [this](const String& fileName, const String& partitionPath) {
            Key::HashType hash;
            if (!Key::stringToHash(fileName, hash))
                return;
            unsigned shortHash = Key::toShortHash(hash);
            RunLoop::main().dispatch([this, shortHash] {
                m_contentsFilter.add(shortHash);
            });
            auto filePath = WebCore::pathByAppendingComponent(partitionPath, fileName);
            long long fileSize = 0;
            WebCore::getFileSize(filePath, fileSize);
            m_approximateSize += fileSize;
        });
        m_hasPopulatedContentsFilter = true;
    });
}

static String directoryPathForKey(const Key& key, const String& cachePath)
{
    ASSERT(!key.partition().isEmpty());
    return WebCore::pathByAppendingComponent(cachePath, key.partition());
}

static String fileNameForKey(const Key& key)
{
    return key.hashAsString();
}

static String filePathForKey(const Key& key, const String& cachePath)
{
    return WebCore::pathByAppendingComponent(directoryPathForKey(key, cachePath), fileNameForKey(key));
}

static Ref<IOChannel> openFileForKey(const Key& key, IOChannel::Type type, const String& cachePath)
{
    auto directoryPath = directoryPathForKey(key, cachePath);
    auto filePath = WebCore::pathByAppendingComponent(directoryPath, fileNameForKey(key));
    if (type == IOChannel::Type::Create)
        WebCore::makeAllDirectories(directoryPath);
    return IOChannel::open(filePath, type);
}

static unsigned hashData(const Data& data)
{
    StringHasher hasher;
    data.apply([&hasher](const uint8_t* data, size_t size) {
        hasher.addCharacters(data, size);
        return true;
    });
    return hasher.hash();
}

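// On-disk layout of an entry: the encoded (and checksummed) EntryMetaData is followed
// immediately by the header bytes; the body starts at the next page boundary (bodyOffset)
// so it can be memory-mapped independently of the header. Separate checksums cover the
// header and the body.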
struct EntryMetaData {
    EntryMetaData() { }
    explicit EntryMetaData(const Key& key)
        : cacheStorageVersion(Storage::version)
        , key(key)
    { }

    unsigned cacheStorageVersion;
    Key key;
    std::chrono::milliseconds timeStamp;
    unsigned headerChecksum;
    uint64_t headerOffset;
    uint64_t headerSize;
    unsigned bodyChecksum;
    uint64_t bodyOffset;
    uint64_t bodySize;
};

static bool decodeEntryMetaData(EntryMetaData& metaData, const Data& fileData)
{
    bool success = false;
    fileData.apply([&metaData, &success](const uint8_t* data, size_t size) {
        Decoder decoder(data, size);
        if (!decoder.decode(metaData.cacheStorageVersion))
            return false;
        if (!decoder.decode(metaData.key))
            return false;
        if (!decoder.decode(metaData.timeStamp))
            return false;
        if (!decoder.decode(metaData.headerChecksum))
            return false;
        if (!decoder.decode(metaData.headerSize))
            return false;
        if (!decoder.decode(metaData.bodyChecksum))
            return false;
        if (!decoder.decode(metaData.bodySize))
            return false;
        if (!decoder.verifyChecksum())
            return false;
        metaData.headerOffset = decoder.currentOffset();
        metaData.bodyOffset = WTF::roundUpToMultipleOf(pageSize(), metaData.headerOffset + metaData.headerSize);
        success = true;
        return false;
    });
    return success;
}

static bool decodeEntryHeader(const Data& fileData, EntryMetaData& metaData, Data& data)
{
    if (!decodeEntryMetaData(metaData, fileData)) {
        LOG(NetworkCacheStorage, "(NetworkProcess) meta data decode failure");
        return false;
    }

    if (metaData.cacheStorageVersion != Storage::version) {
        LOG(NetworkCacheStorage, "(NetworkProcess) version mismatch");
        return false;
    }
    if (metaData.headerOffset + metaData.headerSize > metaData.bodyOffset) {
        LOG(NetworkCacheStorage, "(NetworkProcess) body offset mismatch");
        return false;
    }

    auto headerData = fileData.subrange(metaData.headerOffset, metaData.headerSize);
    if (metaData.headerChecksum != hashData(headerData)) {
        LOG(NetworkCacheStorage, "(NetworkProcess) header checksum mismatch");
        return false;
    }
    data = { headerData };
    return true;
}

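// Reconstructs a full Storage::Entry from a read file: the metadata and header are
// validated by decodeEntryHeader() above, the key and total file size are cross-checked
// against the metadata, and the body is memory-mapped from the already-open file
// descriptor and verified against its checksum.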
static std::unique_ptr<Storage::Entry> decodeEntry(const Data& fileData, int fd, const Key& key)
{
    EntryMetaData metaData;
    Data headerData;
    if (!decodeEntryHeader(fileData, metaData, headerData))
        return nullptr;

    if (metaData.key != key)
        return nullptr;
    if (metaData.bodyOffset + metaData.bodySize != fileData.size())
        return nullptr;

    auto bodyData = mapFile(fd, metaData.bodyOffset, metaData.bodySize);
    if (bodyData.isNull()) {
        LOG(NetworkCacheStorage, "(NetworkProcess) map failed");
        return nullptr;
    }

    if (metaData.bodyChecksum != hashData(bodyData)) {
        LOG(NetworkCacheStorage, "(NetworkProcess) data checksum mismatch");
        return nullptr;
    }

    return std::make_unique<Storage::Entry>(Storage::Entry {
        metaData.key,
        metaData.timeStamp,
        headerData,
        bodyData
    });
}

static Data encodeEntryMetaData(const EntryMetaData& entry)
{
    Encoder encoder;

    encoder << entry.cacheStorageVersion;
    encoder << entry.key;
    encoder << entry.timeStamp;
    encoder << entry.headerChecksum;
    encoder << entry.headerSize;
    encoder << entry.bodyChecksum;
    encoder << entry.bodySize;

    encoder.encodeChecksum();

    return Data(encoder.buffer(), encoder.bufferSize());
}

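// Produces the on-disk header block for an entry: the encoded metadata plus the header
// bytes, padded with zeros up to the next page boundary when there is a body, so that the
// body written right after it lands page-aligned and can later be memory-mapped.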
static Data encodeEntryHeader(const Storage::Entry& entry)
{
    EntryMetaData metaData(entry.key);
    metaData.timeStamp = entry.timeStamp;
    metaData.headerChecksum = hashData(entry.header);
    metaData.headerSize = entry.header.size();
    metaData.bodyChecksum = hashData(entry.body);
    metaData.bodySize = entry.body.size();

    auto encodedMetaData = encodeEntryMetaData(metaData);
    auto headerData = concatenate(encodedMetaData, entry.header);
    if (!entry.body.size())
        return { headerData };

    size_t dataOffset = WTF::roundUpToMultipleOf(pageSize(), headerData.size());
    Vector<uint8_t, 4096> filler(dataOffset - headerData.size(), 0);
    Data alignmentData(filler.data(), filler.size());

    return concatenate(headerData, alignmentData);
}

void Storage::remove(const Key& key)
{
    ASSERT(RunLoop::isMain());

    // For simplicity we don't reduce m_approximateSize on removals.
    // The next cache shrink will update the size.

    if (m_contentsFilter.mayContain(key.shortHash()))
        m_contentsFilter.remove(key.shortHash());

    StringCapture filePathCapture(filePathForKey(key, m_directoryPath));
    serialBackgroundIOQueue().dispatch([this, filePathCapture] {
        WebCore::deleteFile(filePathCapture.string());
    });
}

void Storage::updateFileAccessTime(IOChannel& channel)
{
    StringCapture filePathCapture(channel.path());
    serialBackgroundIOQueue().dispatch([filePathCapture] {
        updateFileAccessTimeIfNeeded(filePathCapture.string());
    });
}

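// Performs a single retrieve on the IO queue: the whole file is read, decoded and handed
// to the completion handler. A read error or a rejected entry removes the file; a
// successful retrieve refreshes the file access time that the shrink heuristic relies on.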
void Storage::dispatchReadOperation(const ReadOperation& read)
{
    ASSERT(RunLoop::isMain());
    ASSERT(m_activeReadOperations.contains(&read));

    StringCapture cachePathCapture(m_directoryPath);
    ioQueue().dispatch([this, &read, cachePathCapture] {
        RefPtr<IOChannel> channel = openFileForKey(read.key, IOChannel::Type::Read, cachePathCapture.string());
        channel->read(0, std::numeric_limits<size_t>::max(), [this, channel, &read](Data& fileData, int error) {
            if (error) {
                remove(read.key);
                read.completionHandler(nullptr);
            } else {
                auto entry = decodeEntry(fileData, channel->fileDescriptor(), read.key);
                bool success = read.completionHandler(WTF::move(entry));
                if (success)
                    updateFileAccessTime(*channel);
                else
                    remove(read.key);
            }

            ASSERT(m_activeReadOperations.contains(&read));
            m_activeReadOperations.remove(&read);
            dispatchPendingReadOperations();

            LOG(NetworkCacheStorage, "(NetworkProcess) read complete error=%d", error);
        });
    });
}

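// Starts queued retrieves in priority order (highest first) while keeping the number of
// in-flight read operations bounded.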
void Storage::dispatchPendingReadOperations()
{
    ASSERT(RunLoop::isMain());

    const int maximumActiveReadOperationCount = 5;

    for (int priority = maximumRetrievePriority; priority >= 0; --priority) {
        if (m_activeReadOperations.size() > maximumActiveReadOperationCount) {
            LOG(NetworkCacheStorage, "(NetworkProcess) limiting parallel retrieves");
            return;
        }
        auto& pendingRetrieveQueue = m_pendingReadOperationsByPriority[priority];
        if (pendingRetrieveQueue.isEmpty())
            continue;
        auto readOperation = pendingRetrieveQueue.takeFirst();
        auto& read = *readOperation;
        m_activeReadOperations.add(WTF::move(readOperation));
        dispatchReadOperation(read);
    }
}

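// Serves a retrieve directly from a pending or active write operation, so an entry that is
// still on its way to disk is found without touching storage.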
template <class T> bool retrieveFromMemory(const T& operations, const Key& key, Storage::RetrieveCompletionHandler& completionHandler)
{
    for (auto& operation : operations) {
        if (operation->entry.key == key) {
            LOG(NetworkCacheStorage, "(NetworkProcess) found write operation in progress");
            auto entry = operation->entry;
            RunLoop::main().dispatch([entry, completionHandler] {
                completionHandler(std::make_unique<Storage::Entry>(entry));
            });
            return true;
        }
    }
    return false;
}

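// Entry point for cache lookups. A retrieve is answered with nullptr immediately when the
// cache is disabled or when the contents filter says the key cannot be present; otherwise
// it is served from in-flight writes or queued as a ReadOperation. Note that the contents
// filter is only populated asynchronously by initialize(), so the behavior of
// cacheMayContain() (defined in the header) before population is relevant to the startup
// retrieval failures described in the bug title.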
void Storage::retrieve(const Key& key, unsigned priority, RetrieveCompletionHandler&& completionHandler)
{
    ASSERT(RunLoop::isMain());
    ASSERT(priority <= maximumRetrievePriority);
    ASSERT(!key.isNull());

    if (!m_maximumSize) {
        completionHandler(nullptr);
        return;
    }

    if (!cacheMayContain(key.shortHash())) {
        completionHandler(nullptr);
        return;
    }

    if (retrieveFromMemory(m_pendingWriteOperations, key, completionHandler))
        return;
    if (retrieveFromMemory(m_activeWriteOperations, key, completionHandler))
        return;

    m_pendingReadOperationsByPriority[priority].append(new ReadOperation { key, WTF::move(completionHandler) });
    dispatchPendingReadOperations();
}

void Storage::store(const Entry& entry, StoreCompletionHandler&& completionHandler)
{
    ASSERT(RunLoop::isMain());
    ASSERT(!entry.key.isNull());

    if (!m_maximumSize) {
        completionHandler(false, { });
        return;
    }

    m_pendingWriteOperations.append(new WriteOperation { entry, { }, WTF::move(completionHandler) });

    // Add the key to the filter already at this point: retrieves check the filter before they consult the pending write operations.
    m_contentsFilter.add(entry.key.shortHash());

    dispatchPendingWriteOperations();
}

void Storage::update(const Entry& updateEntry, const Entry& existingEntry, StoreCompletionHandler&& completionHandler)
{
    ASSERT(RunLoop::isMain());
    ASSERT(!existingEntry.key.isNull());
    ASSERT(existingEntry.key == updateEntry.key);

    if (!m_maximumSize) {
        completionHandler(false, { });
        return;
    }

    m_pendingWriteOperations.append(new WriteOperation { updateEntry, existingEntry, WTF::move(completionHandler) });

    dispatchPendingWriteOperations();
}

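// Enumerates all stored entries on the IO queue, reading only the first 16 KB of each file
// (assumed to cover the metadata and header), and invokes the handler with a body-less
// Entry for every decodable file. A final nullptr call on the main run loop signals
// completion.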
void Storage::traverse(std::function<void (const Entry*)>&& traverseHandler)
{
    StringCapture cachePathCapture(m_directoryPath);
    ioQueue().dispatch([this, cachePathCapture, traverseHandler] {
        String cachePath = cachePathCapture.string();
        traverseCacheFiles(cachePath, [this, &traverseHandler](const String& fileName, const String& partitionPath) {
            auto filePath = WebCore::pathByAppendingComponent(partitionPath, fileName);
            auto channel = IOChannel::open(filePath, IOChannel::Type::Read);
            const size_t headerReadSize = 16 << 10;
            // FIXME: Traversal is slower than it should be due to lack of parallelism.
            channel->readSync(0, headerReadSize, [this, &traverseHandler](Data& fileData, int) {
                EntryMetaData metaData;
                Data headerData;
                if (decodeEntryHeader(fileData, metaData, headerData)) {
                    Entry entry { metaData.key, metaData.timeStamp, headerData, { } };
                    traverseHandler(&entry);
                }
            });
        });
        RunLoop::main().dispatch([this, traverseHandler] {
            traverseHandler(nullptr);
        });
    });
}

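// Drains the pending write queue while keeping the number of in-flight writes bounded.
// An update to an entry that is already on disk goes through the cheaper header-only
// path; everything else is written in full.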
void Storage::dispatchPendingWriteOperations()
{
    ASSERT(RunLoop::isMain());

    const int maximumActiveWriteOperationCount { 3 };

    while (!m_pendingWriteOperations.isEmpty()) {
        if (m_activeWriteOperations.size() >= maximumActiveWriteOperationCount) {
            LOG(NetworkCacheStorage, "(NetworkProcess) limiting parallel writes");
            return;
        }
        auto writeOperation = m_pendingWriteOperations.takeFirst();
        auto& write = *writeOperation;
        m_activeWriteOperations.add(WTF::move(writeOperation));

        if (write.existingEntry && cacheMayContain(write.entry.key.shortHash())) {
            dispatchHeaderWriteOperation(write);
            continue;
        }
        dispatchFullWriteOperation(write);
    }
}

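// Writes a complete entry (metadata, header, page-aligned body) to a new file on the
// background IO queue. On success the freshly written body (when it is at least a page)
// is mapped back and handed to the completion handler; on failure the key is evicted
// from the contents filter again.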
void Storage::dispatchFullWriteOperation(const WriteOperation& write)
{
    ASSERT(RunLoop::isMain());
    ASSERT(m_activeWriteOperations.contains(&write));

    if (!m_contentsFilter.mayContain(write.entry.key.shortHash()))
        m_contentsFilter.add(write.entry.key.shortHash());

    StringCapture cachePathCapture(m_directoryPath);
    backgroundIOQueue().dispatch([this, &write, cachePathCapture] {
        auto encodedHeader = encodeEntryHeader(write.entry);
        auto headerAndBodyData = concatenate(encodedHeader, write.entry.body);

        auto channel = openFileForKey(write.entry.key, IOChannel::Type::Create, cachePathCapture.string());
        int fd = channel->fileDescriptor();
        size_t bodyOffset = encodedHeader.size();

        channel->write(0, headerAndBodyData, [this, &write, bodyOffset, fd](int error) {
            LOG(NetworkCacheStorage, "(NetworkProcess) write complete error=%d", error);
            if (error) {
                if (m_contentsFilter.mayContain(write.entry.key.shortHash()))
                    m_contentsFilter.remove(write.entry.key.shortHash());
            }
            size_t bodySize = write.entry.body.size();
            size_t totalSize = bodyOffset + bodySize;

            m_approximateSize += totalSize;

            bool shouldMapBody = !error && bodySize >= pageSize();
            auto bodyMap = shouldMapBody ? mapFile(fd, bodyOffset, bodySize) : Data();

            write.completionHandler(!error, bodyMap);

            ASSERT(m_activeWriteOperations.contains(&write));
            m_activeWriteOperations.remove(&write);
            dispatchPendingWriteOperations();
        });
    });

    shrinkIfNeeded();
}

void Storage::dispatchHeaderWriteOperation(const WriteOperation& write)
{
    ASSERT(RunLoop::isMain());
    ASSERT(write.existingEntry);
    ASSERT(m_activeWriteOperations.contains(&write));
    ASSERT(cacheMayContain(write.entry.key.shortHash()));

    // Try to update the header of an existing entry.
    StringCapture cachePathCapture(m_directoryPath);
    backgroundIOQueue().dispatch([this, &write, cachePathCapture] {
        auto headerData = encodeEntryHeader(write.entry);
        auto existingHeaderData = encodeEntryHeader(write.existingEntry.value());

        bool pageRoundedHeaderSizeChanged = headerData.size() != existingHeaderData.size();
        if (pageRoundedHeaderSizeChanged) {
            LOG(NetworkCacheStorage, "(NetworkProcess) page-rounded header size changed, storing full entry");
            RunLoop::main().dispatch([this, &write] {
                dispatchFullWriteOperation(write);
            });
            return;
        }

        auto channel = openFileForKey(write.entry.key, IOChannel::Type::Write, cachePathCapture.string());
        channel->write(0, headerData, [this, &write](int error) {
            LOG(NetworkCacheStorage, "(NetworkProcess) update complete error=%d", error);

            if (error)
                remove(write.entry.key);

            write.completionHandler(!error, { });

            ASSERT(m_activeWriteOperations.contains(&write));
            m_activeWriteOperations.remove(&write);
            dispatchPendingWriteOperations();
        });
    });
}

void Storage::setMaximumSize(size_t size)
{
    ASSERT(RunLoop::isMain());
    m_maximumSize = size;

    shrinkIfNeeded();
}

void Storage::clear()
{
    ASSERT(RunLoop::isMain());
    LOG(NetworkCacheStorage, "(NetworkProcess) clearing cache");

    m_contentsFilter.clear();
    m_approximateSize = 0;

    StringCapture directoryPathCapture(m_directoryPath);

    ioQueue().dispatch([directoryPathCapture] {
        String directoryPath = directoryPathCapture.string();
        traverseDirectory(directoryPath, DT_DIR, [&directoryPath](const String& subdirName) {
            String subdirPath = WebCore::pathByAppendingComponent(directoryPath, subdirName);
            traverseDirectory(subdirPath, DT_REG, [&subdirPath](const String& fileName) {
                WebCore::deleteFile(WebCore::pathByAppendingComponent(subdirPath, fileName));
            });
            WebCore::deleteEmptyDirectory(subdirPath);
        });
    });
}

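// Maps a file's creation and last-access times to a deletion probability in [0, 0.33]:
// entries whose access time is recent relative to their age are considered valuable and
// get a probability near zero, while entries never accessed since creation approach the
// maximum. Implausible timestamps fall back to the maximum probability.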
static double deletionProbability(FileTimes times)
{
    static const double maximumProbability { 0.33 };

    using namespace std::chrono;
    auto age = system_clock::now() - times.creation;
    auto accessAge = times.access - times.creation;

    // For sanity.
    if (age <= seconds::zero() || accessAge < seconds::zero() || accessAge > age)
        return maximumProbability;

    // We like old entries that have been accessed recently.
    auto relativeValue = duration<double>(accessAge) / age;

    // Adjust a bit so the most valuable entries don't get deleted at all.
    auto effectiveValue = std::min(1.1 * relativeValue, 1.);

    return (1 - effectiveValue) * maximumProbability;
}

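// Kicks off a single probabilistic shrink pass when the approximate size exceeds the
// maximum: every cache file is either deleted (with the probability computed above) or
// re-counted into a fresh m_approximateSize, and short hashes of deleted files are removed
// from the contents filter on the main thread.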
void Storage::shrinkIfNeeded()
{
    ASSERT(RunLoop::isMain());

    if (m_approximateSize <= m_maximumSize)
        return;
    if (m_shrinkInProgress)
        return;
    m_shrinkInProgress = true;

    LOG(NetworkCacheStorage, "(NetworkProcess) shrinking cache approximateSize=%zu, m_maximumSize=%zu", static_cast<size_t>(m_approximateSize), m_maximumSize);

    m_approximateSize = 0;

    StringCapture cachePathCapture(m_directoryPath);
    backgroundIOQueue().dispatch([this, cachePathCapture] {
        String cachePath = cachePathCapture.string();
        traverseCacheFiles(cachePath, [this](const String& fileName, const String& partitionPath) {
            auto filePath = WebCore::pathByAppendingComponent(partitionPath, fileName);

            auto times = fileTimes(filePath);
            auto probability = deletionProbability(times);
            bool shouldDelete = randomNumber() < probability;

            LOG(NetworkCacheStorage, "Deletion probability=%f shouldDelete=%d", probability, shouldDelete);

            if (!shouldDelete) {
                long long fileSize = 0;
                WebCore::getFileSize(filePath, fileSize);
                m_approximateSize += fileSize;
                return;
            }

            WebCore::deleteFile(filePath);
            Key::HashType hash;
            if (!Key::stringToHash(fileName, hash))
                return;
            unsigned shortHash = Key::toShortHash(hash);
            RunLoop::main().dispatch([this, shortHash] {
                if (m_contentsFilter.mayContain(shortHash))
                    m_contentsFilter.remove(shortHash);
            });
        });

        // Try to delete the partition directories too; let the system decide which ones are really empty.
        traverseDirectory(cachePath, DT_DIR, [&cachePath](const String& subdirName) {
            auto partitionPath = WebCore::pathByAppendingComponent(cachePath, subdirName);
            WebCore::deleteEmptyDirectory(partitionPath);
        });

        m_shrinkInProgress = false;

        LOG(NetworkCacheStorage, "(NetworkProcess) cache shrink completed approximateSize=%zu", static_cast<size_t>(m_approximateSize));
    });
}

void Storage::deleteOldVersions()
{
    // Delete V1 cache.
    StringCapture cachePathCapture(m_baseDirectoryPath);
    backgroundIOQueue().dispatch([cachePathCapture] {
        String cachePath = cachePathCapture.string();
        traverseDirectory(cachePath, DT_DIR, [&cachePath](const String& subdirName) {
            if (subdirName.startsWith(versionDirectoryPrefix))
                return;
            String partitionPath = WebCore::pathByAppendingComponent(cachePath, subdirName);
            traverseDirectory(partitionPath, DT_REG, [&partitionPath](const String& fileName) {
                WebCore::deleteFile(WebCore::pathByAppendingComponent(partitionPath, fileName));
            });
            WebCore::deleteEmptyDirectory(partitionPath);
        });
    });
}

}
}

#endif