6916d02872e829e6ef735e17bdf9a0df8626b5b0
[WebKit-https.git] / Source / WebKit2 / NetworkProcess / cache / NetworkCacheStorage.cpp
1 /*
2  * Copyright (C) 2014-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23  * THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "NetworkCacheStorage.h"
28
29 #if ENABLE(NETWORK_CACHE)
30
31 #include "Logging.h"
32 #include "NetworkCacheCoders.h"
33 #include "NetworkCacheFileSystemPosix.h"
34 #include "NetworkCacheIOChannel.h"
35 #include <wtf/PageBlock.h>
36 #include <wtf/RandomNumber.h>
37 #include <wtf/RunLoop.h>
38 #include <wtf/text/CString.h>
39 #include <wtf/text/StringBuilder.h>
40
41 namespace WebKit {
42 namespace NetworkCache {
43
// On-disk layout: <cachePath>/WebKitCache/Version <N>/<partition>/<key hash>.
static const char networkCacheSubdirectory[] = "WebKitCache";
static const char versionDirectoryPrefix[] = "Version ";

// Defined near the bottom of this file; forward-declared for traverse().
static double computeRecordWorth(FileTimes);
48
49 std::unique_ptr<Storage> Storage::open(const String& cachePath)
50 {
51     ASSERT(RunLoop::isMain());
52
53     String networkCachePath = WebCore::pathByAppendingComponent(cachePath, networkCacheSubdirectory);
54     if (!WebCore::makeAllDirectories(networkCachePath))
55         return nullptr;
56     return std::unique_ptr<Storage>(new Storage(networkCachePath));
57 }
58
59 static String makeVersionedDirectoryPath(const String& baseDirectoryPath)
60 {
61     String versionSubdirectory = versionDirectoryPrefix + String::number(Storage::version);
62     return WebCore::pathByAppendingComponent(baseDirectoryPath, versionSubdirectory);
63 }
64
Storage::Storage(const String& baseDirectoryPath)
    : m_baseDirectoryPath(baseDirectoryPath)
    , m_directoryPath(makeVersionedDirectoryPath(baseDirectoryPath))
    // Concurrent queue for latency-sensitive reads/traversals.
    , m_ioQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage", WorkQueue::Type::Concurrent))
    // Concurrent background-QoS queue for writes and bulk maintenance.
    , m_backgroundIOQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage.background", WorkQueue::Type::Concurrent, WorkQueue::QOS::Background))
    // Serial queue for operations that must not race each other (deletes, mtime updates).
    , m_serialBackgroundIOQueue(WorkQueue::create("com.apple.WebKit.Cache.Storage.serialBackground", WorkQueue::Type::Serial, WorkQueue::QOS::Background))
{
    // Clean up obsolete cache formats first, then scan the current version
    // directory to populate the contents filter and size estimate.
    deleteOldVersions();
    initialize();
}
75
// Scans the cache directory on a background queue, seeding the in-memory
// short-hash contents filter and the approximate total size from the files
// already on disk.
void Storage::initialize()
{
    ASSERT(RunLoop::isMain());

    StringCapture cachePathCapture(m_directoryPath);

    backgroundIOQueue().dispatch([this, cachePathCapture] {
        String cachePath = cachePathCapture.string();
        traverseCacheFiles(cachePath, [this](const String& fileName, const String& partitionPath) {
            // File names are the textual key hash; skip anything that doesn't parse.
            Key::HashType hash;
            if (!Key::stringToHash(fileName, hash))
                return;
            unsigned shortHash = Key::toShortHash(hash);
            // The filter is only touched on the main thread; hop over for the add.
            RunLoop::main().dispatch([this, shortHash] {
                m_contentsFilter.add(shortHash);
            });
            auto filePath = WebCore::pathByAppendingComponent(partitionPath, fileName);
            long long fileSize = 0;
            WebCore::getFileSize(filePath, fileSize);
            // NOTE(review): incremented from this background thread while the main
            // thread reads it — presumably m_approximateSize is atomic; confirm.
            m_approximateSize += fileSize;
        });
        // Until this flips, cacheMayContain() cannot trust the filter for misses.
        m_hasPopulatedContentsFilter = true;
    });
}
100
101 static String directoryPathForKey(const Key& key, const String& cachePath)
102 {
103     ASSERT(!key.partition().isEmpty());
104     return WebCore::pathByAppendingComponent(cachePath, key.partition());
105 }
106
// A record's file name is the full textual hash of its key.
static String fileNameForKey(const Key& key)
{
    return key.hashAsString();
}
111
112 static String filePathForKey(const Key& key, const String& cachePath)
113 {
114     return WebCore::pathByAppendingComponent(directoryPathForKey(key, cachePath), fileNameForKey(key));
115 }
116
117 static Ref<IOChannel> openFileForKey(const Key& key, IOChannel::Type type, const String& cachePath)
118 {
119     auto directoryPath = directoryPathForKey(key, cachePath);
120     auto filePath = WebCore::pathByAppendingComponent(directoryPath, fileNameForKey(key));
121     if (type == IOChannel::Type::Create)
122         WebCore::makeAllDirectories(directoryPath);
123     return IOChannel::open(filePath, type);
124 }
125
126 static unsigned hashData(const Data& data)
127 {
128     StringHasher hasher;
129     data.apply([&hasher](const uint8_t* data, size_t size) {
130         hasher.addCharacters(data, size);
131         return true;
132     });
133     return hasher.hash();
134 }
135
// Fixed metadata stored at the front of every record file, followed by the
// header bytes and then the page-aligned body.
struct RecordMetaData {
    RecordMetaData() { }
    explicit RecordMetaData(const Key& key)
        : cacheStorageVersion(Storage::version)
        , key(key)
    { }

    unsigned cacheStorageVersion;       // Must match Storage::version on read.
    Key key;
    // FIXME: Add encoder/decoder for time_point.
    std::chrono::milliseconds epochRelativeTimeStamp;   // Record creation time, ms since epoch.
    unsigned headerChecksum;            // hashData() over the header bytes.
    uint64_t headerOffset;              // Derived on decode; not serialized.
    uint64_t headerSize;
    unsigned bodyChecksum;              // hashData() over the body bytes.
    uint64_t bodyOffset;                // Derived on decode: header end rounded up to page size.
    uint64_t bodySize;
};
154
// Decodes the serialized metadata at the front of a record file.
// Returns false on any decode or checksum failure.
static bool decodeRecordMetaData(RecordMetaData& metaData, const Data& fileData)
{
    bool success = false;
    // NOTE(review): only the first contiguous segment of fileData is decoded —
    // the callback always returns false to stop apply() after one segment.
    // Presumably metadata always fits in the first segment; confirm.
    fileData.apply([&metaData, &success](const uint8_t* data, size_t size) {
        Decoder decoder(data, size);
        // Field order must mirror encodeRecordMetaData() exactly.
        if (!decoder.decode(metaData.cacheStorageVersion))
            return false;
        if (!decoder.decode(metaData.key))
            return false;
        if (!decoder.decode(metaData.epochRelativeTimeStamp))
            return false;
        if (!decoder.decode(metaData.headerChecksum))
            return false;
        if (!decoder.decode(metaData.headerSize))
            return false;
        if (!decoder.decode(metaData.bodyChecksum))
            return false;
        if (!decoder.decode(metaData.bodySize))
            return false;
        if (!decoder.verifyChecksum())
            return false;
        // Offsets are not serialized; derive them from the decode position.
        metaData.headerOffset = decoder.currentOffset();
        metaData.bodyOffset = WTF::roundUpToMultipleOf(pageSize(), metaData.headerOffset + metaData.headerSize);
        success = true;
        return false;
    });
    return success;
}
183
// Decodes and validates metadata + header portion of a record file.
// On success, `data` refers to the header bytes within fileData.
static bool decodeRecordHeader(const Data& fileData, RecordMetaData& metaData, Data& data)
{
    if (!decodeRecordMetaData(metaData, fileData)) {
        LOG(NetworkCacheStorage, "(NetworkProcess) meta data decode failure");
        return false;
    }

    // Reject records written by a different storage format version.
    if (metaData.cacheStorageVersion != Storage::version) {
        LOG(NetworkCacheStorage, "(NetworkProcess) version mismatch");
        return false;
    }
    // The header must end at or before the (page-aligned) body start.
    if (metaData.headerOffset + metaData.headerSize > metaData.bodyOffset) {
        LOG(NetworkCacheStorage, "(NetworkProcess) body offset mismatch");
        return false;
    }

    auto headerData = fileData.subrange(metaData.headerOffset, metaData.headerSize);
    if (metaData.headerChecksum != hashData(headerData)) {
        LOG(NetworkCacheStorage, "(NetworkProcess) header checksum mismatch");
        return false;
    }
    data = { headerData };
    return true;
}
208
// Fully decodes a record file into a Storage::Record, memory-mapping the body
// via the open file descriptor. Returns null on any validation failure;
// callers treat null as a corrupt/stale entry.
static std::unique_ptr<Storage::Record> decodeRecord(const Data& fileData, int fd, const Key& key)
{
    RecordMetaData metaData;
    Data headerData;
    if (!decodeRecordHeader(fileData, metaData, headerData))
        return nullptr;

    // Guard against short-hash collisions: the stored key must match exactly.
    if (metaData.key != key)
        return nullptr;

    // Sanity check against time stamps in future.
    auto timeStamp = std::chrono::system_clock::time_point(metaData.epochRelativeTimeStamp);
    if (timeStamp > std::chrono::system_clock::now())
        return nullptr;

    Data bodyData;
    if (metaData.bodySize) {
        // The body must run exactly to the end of the file.
        if (metaData.bodyOffset + metaData.bodySize != fileData.size())
            return nullptr;

        // bodyOffset is page-aligned (see decodeRecordMetaData), so it can be mapped.
        bodyData = mapFile(fd, metaData.bodyOffset, metaData.bodySize);
        if (bodyData.isNull()) {
            LOG(NetworkCacheStorage, "(NetworkProcess) map failed");
            return nullptr;
        }

        if (metaData.bodyChecksum != hashData(bodyData)) {
            LOG(NetworkCacheStorage, "(NetworkProcess) data checksum mismatch");
            return nullptr;
        }
    }

    return std::make_unique<Storage::Record>(Storage::Record {
        metaData.key,
        timeStamp,
        headerData,
        bodyData
    });
}
248
// Serializes record metadata. Field order must mirror decodeRecordMetaData();
// offsets are intentionally not written (they are derived on decode).
static Data encodeRecordMetaData(const RecordMetaData& metaData)
{
    Encoder encoder;

    encoder << metaData.cacheStorageVersion;
    encoder << metaData.key;
    encoder << metaData.epochRelativeTimeStamp;
    encoder << metaData.headerChecksum;
    encoder << metaData.headerSize;
    encoder << metaData.bodyChecksum;
    encoder << metaData.bodySize;

    // Trailing checksum over the fields above; verified by the decoder.
    encoder.encodeChecksum();

    return Data(encoder.buffer(), encoder.bufferSize());
}
265
// Builds the on-disk prefix of a record: metadata + header bytes, padded with
// zeros up to a page boundary when a body follows (so the body can be mmapped).
static Data encodeRecordHeader(const Storage::Record& record)
{
    RecordMetaData metaData(record.key);
    metaData.epochRelativeTimeStamp = std::chrono::duration_cast<std::chrono::milliseconds>(record.timeStamp.time_since_epoch());
    metaData.headerChecksum = hashData(record.header);
    metaData.headerSize = record.header.size();
    metaData.bodyChecksum = hashData(record.body);
    metaData.bodySize = record.body.size();

    auto encodedMetaData = encodeRecordMetaData(metaData);
    auto headerData = concatenate(encodedMetaData, record.header);
    // No body: no alignment padding needed.
    if (!record.body.size())
        return { headerData };

    // Zero-fill to the next page boundary; mirrors the bodyOffset computation
    // in decodeRecordMetaData().
    size_t dataOffset = WTF::roundUpToMultipleOf(pageSize(), headerData.size());
    Vector<uint8_t, 4096> filler(dataOffset - headerData.size(), 0);
    Data alignmentData(filler.data(), filler.size());

    return concatenate(headerData, alignmentData);
}
286
// Removes a record: drops its short hash from the contents filter immediately
// (main thread) and deletes the file on the serial background queue.
void Storage::remove(const Key& key)
{
    ASSERT(RunLoop::isMain());

    // For simplicity we don't reduce m_approximateSize on removals.
    // The next cache shrink will update the size.

    if (m_contentsFilter.mayContain(key.shortHash()))
        m_contentsFilter.remove(key.shortHash());

    StringCapture filePathCapture(filePathForKey(key, m_directoryPath));
    serialBackgroundIOQueue().dispatch([this, filePathCapture] {
        WebCore::deleteFile(filePathCapture.string());
    });
}
302
303 void Storage::updateFileModificationTime(IOChannel& channel)
304 {
305     StringCapture filePathCapture(channel.path());
306     serialBackgroundIOQueue().dispatch([filePathCapture] {
307         updateFileModificationTimeIfNeeded(filePathCapture.string());
308     });
309 }
310
// Starts the disk read for an active read operation. `read` is captured by
// reference; it stays alive because it is owned by m_activeReadOperations
// until removed in the completion lambda below.
void Storage::dispatchReadOperation(const ReadOperation& read)
{
    ASSERT(RunLoop::isMain());
    ASSERT(m_activeReadOperations.contains(&read));

    StringCapture cachePathCapture(m_directoryPath);
    ioQueue().dispatch([this, &read, cachePathCapture] {
        RefPtr<IOChannel> channel = openFileForKey(read.key, IOChannel::Type::Read, cachePathCapture.string());
        // Read the whole file; decodeRecord() validates and maps the body.
        channel->read(0, std::numeric_limits<size_t>::max(), [this, channel, &read](Data& fileData, int error) {
            if (error) {
                // Unreadable entry: purge it so we don't keep retrying.
                remove(read.key);
                read.completionHandler(nullptr);
            } else {
                auto record = decodeRecord(fileData, channel->fileDescriptor(), read.key);
                bool success = read.completionHandler(WTF::move(record));
                if (success)
                    // Successful use refreshes the eviction "last used" stamp.
                    updateFileModificationTime(*channel);
                else
                    remove(read.key);
            }

            // NOTE(review): this handler touches main-thread state
            // (m_activeReadOperations, remove()) — presumably IOChannel::read
            // invokes it on the main run loop; confirm.
            ASSERT(m_activeReadOperations.contains(&read));
            m_activeReadOperations.remove(&read);
            dispatchPendingReadOperations();

            LOG(NetworkCacheStorage, "(NetworkProcess) read complete error=%d", error);
        });
    });
}
340
// Moves queued reads to the active set, highest priority first, up to the
// parallelism limit.
void Storage::dispatchPendingReadOperations()
{
    ASSERT(RunLoop::isMain());

    const int maximumActiveReadOperationCount = 5;

    for (int priority = maximumRetrievePriority; priority >= 0; --priority) {
        // NOTE(review): `>` (not `>=`) lets the active count reach
        // maximumActiveReadOperationCount + 1; the write side uses `>=`.
        // Possibly intentional slack — confirm before changing.
        if (m_activeReadOperations.size() > maximumActiveReadOperationCount) {
            LOG(NetworkCacheStorage, "(NetworkProcess) limiting parallel retrieves");
            return;
        }
        auto& pendingRetrieveQueue = m_pendingReadOperationsByPriority[priority];
        if (pendingRetrieveQueue.isEmpty())
            continue;
        // Keep a plain reference before ownership moves into the active set.
        auto readOperation = pendingRetrieveQueue.takeFirst();
        auto& read = *readOperation;
        m_activeReadOperations.add(WTF::move(readOperation));
        dispatchReadOperation(read);
    }
}
361
// Serves a retrieve directly from a pending/active write operation's record,
// avoiding a disk round-trip for data that isn't (fully) written yet.
// Returns true if the completion handler was scheduled.
template <class T> bool retrieveFromMemory(const T& operations, const Key& key, Storage::RetrieveCompletionHandler& completionHandler)
{
    for (auto& operation : operations) {
        if (operation->record.key == key) {
            LOG(NetworkCacheStorage, "(NetworkProcess) found write operation in progress");
            // Copy the record: the write operation may complete and be
            // destroyed before the dispatched lambda runs.
            auto record = operation->record;
            RunLoop::main().dispatch([record, completionHandler] {
                completionHandler(std::make_unique<Storage::Record>(record));
            });
            return true;
        }
    }
    return false;
}
376
// Asynchronously looks up a record. Fast-fails via the contents filter,
// serves from in-flight writes when possible, otherwise queues a disk read
// at the given priority.
void Storage::retrieve(const Key& key, unsigned priority, RetrieveCompletionHandler&& completionHandler)
{
    ASSERT(RunLoop::isMain());
    ASSERT(priority <= maximumRetrievePriority);
    ASSERT(!key.isNull());

    // A zero maximum size means the cache is effectively disabled.
    if (!m_maximumSize) {
        completionHandler(nullptr);
        return;
    }

    // Definite miss per the (possibly false-positive) contents filter.
    if (!cacheMayContain(key.shortHash())) {
        completionHandler(nullptr);
        return;
    }

    // Records still being written are served from memory.
    if (retrieveFromMemory(m_pendingWriteOperations, key, completionHandler))
        return;
    if (retrieveFromMemory(m_activeWriteOperations, key, completionHandler))
        return;

    m_pendingReadOperationsByPriority[priority].append(new ReadOperation { key, WTF::move(completionHandler) });
    dispatchPendingReadOperations();
}
401
// Queues a full write of a new record. The completion handler is invoked
// with success flag and (for large bodies) a mapped view of the stored body.
void Storage::store(const Record& record, StoreCompletionHandler&& completionHandler)
{
    ASSERT(RunLoop::isMain());
    ASSERT(!record.key.isNull());

    // Cache disabled; report failure immediately.
    if (!m_maximumSize) {
        completionHandler(false, { });
        return;
    }

    m_pendingWriteOperations.append(new WriteOperation { record, { }, WTF::move(completionHandler) });

    // Add key to the filter already here as we do lookups from the pending operations too.
    m_contentsFilter.add(record.key.shortHash());

    dispatchPendingWriteOperations();
}
419
// Queues an update of an existing record (typically a header-only rewrite;
// see dispatchHeaderWriteOperation). Unlike store(), the filter is not
// touched here — the key was added when the existing record was stored.
void Storage::update(const Record& updateRecord, const Record& existingRecord, StoreCompletionHandler&& completionHandler)
{
    ASSERT(RunLoop::isMain());
    ASSERT(!existingRecord.key.isNull());
    ASSERT(existingRecord.key == updateRecord.key);

    // Cache disabled; report failure immediately.
    if (!m_maximumSize) {
        completionHandler(false, { });
        return;
    }

    m_pendingWriteOperations.append(new WriteOperation { updateRecord, existingRecord, WTF::move(completionHandler) });

    dispatchPendingWriteOperations();
}
435
// Enumerates all records, invoking traverseHandler once per record and then
// once with nullptr to signal completion. Per-record calls happen
// synchronously on the IO queue; the final null call is dispatched to the
// main run loop. Only record headers are read (bodies are skipped).
void Storage::traverse(TraverseFlags flags, std::function<void (const Record*, const RecordInfo&)>&& traverseHandler)
{
    StringCapture cachePathCapture(m_directoryPath);
    ioQueue().dispatch([this, flags, cachePathCapture, traverseHandler] {
        String cachePath = cachePathCapture.string();
        traverseCacheFiles(cachePath, [this, flags, &traverseHandler](const String& fileName, const String& partitionPath) {
            auto filePath = WebCore::pathByAppendingComponent(partitionPath, fileName);

            RecordInfo info;
            // Eviction-worth computation is optional since it costs extra stat calls.
            if (flags & TraverseFlag::ComputeWorth)
                info.worth = computeRecordWorth(fileTimes(filePath));

            auto channel = IOChannel::open(filePath, IOChannel::Type::Read);
            // 16KB is assumed to cover metadata + header for any record.
            const size_t headerReadSize = 16 << 10;
            // FIXME: Traversal is slower than it should be due to lack of parallelism.
            channel->readSync(0, headerReadSize, [this, &traverseHandler, &info](Data& fileData, int) {
                RecordMetaData metaData;
                Data headerData;
                if (decodeRecordHeader(fileData, metaData, headerData)) {
                    // Body is intentionally empty; only bodySize is reported.
                    Record record { metaData.key, std::chrono::system_clock::time_point(metaData.epochRelativeTimeStamp), headerData, { } };
                    info.bodySize = metaData.bodySize;
                    traverseHandler(&record, info);
                }
            });
        });
        // Null record marks the end of the traversal.
        RunLoop::main().dispatch([this, traverseHandler] {
            traverseHandler(nullptr, { });
        });
    });
}
466
// Drains the pending write queue into the active set, up to the parallelism
// limit, choosing a cheap header-only rewrite when the record already exists
// on disk.
void Storage::dispatchPendingWriteOperations()
{
    ASSERT(RunLoop::isMain());

    const int maximumActiveWriteOperationCount { 3 };

    while (!m_pendingWriteOperations.isEmpty()) {
        if (m_activeWriteOperations.size() >= maximumActiveWriteOperationCount) {
            LOG(NetworkCacheStorage, "(NetworkProcess) limiting parallel writes");
            return;
        }
        // Keep a plain reference before ownership moves into the active set.
        auto writeOperation = m_pendingWriteOperations.takeFirst();
        auto& write = *writeOperation;
        m_activeWriteOperations.add(WTF::move(writeOperation));

        // Existing on-disk record: only the header needs rewriting.
        if (write.existingRecord && cacheMayContain(write.record.key.shortHash())) {
            dispatchHeaderWriteOperation(write);
            continue;
        }
        dispatchFullWriteOperation(write);
    }
}
489
// Writes a complete record (metadata + header + padding + body) to disk on
// the background queue. `write` is kept alive by m_activeWriteOperations
// until the completion lambda removes it.
void Storage::dispatchFullWriteOperation(const WriteOperation& write)
{
    ASSERT(RunLoop::isMain());
    ASSERT(m_activeWriteOperations.contains(&write));

    // Ensure lookups can find this key while the write is in flight.
    if (!m_contentsFilter.mayContain(write.record.key.shortHash()))
        m_contentsFilter.add(write.record.key.shortHash());

    StringCapture cachePathCapture(m_directoryPath);
    backgroundIOQueue().dispatch([this, &write, cachePathCapture] {
        auto encodedHeader = encodeRecordHeader(write.record);
        auto headerAndBodyData = concatenate(encodedHeader, write.record.body);

        auto channel = openFileForKey(write.record.key, IOChannel::Type::Create, cachePathCapture.string());
        int fd = channel->fileDescriptor();
        // encodedHeader is page-padded when a body exists, so this offset is mappable.
        size_t bodyOffset = encodedHeader.size();

        channel->write(0, headerAndBodyData, [this, &write, bodyOffset, fd](int error) {
            LOG(NetworkCacheStorage, "(NetworkProcess) write complete error=%d", error);
            if (error) {
                // Roll back the optimistic filter add from above/store().
                if (m_contentsFilter.mayContain(write.record.key.shortHash()))
                    m_contentsFilter.remove(write.record.key.shortHash());
            }
            size_t bodySize = write.record.body.size();
            size_t totalSize = bodyOffset + bodySize;

            // NOTE(review): size is added even when the write failed — the
            // estimate self-corrects on the next shrink, but confirm this is
            // intentional.
            m_approximateSize += totalSize;

            // Hand back a mapped view of the body so the caller can keep using
            // it without holding the original copy; only worthwhile for bodies
            // of at least a page.
            bool shouldMapBody = !error && bodySize >= pageSize();
            auto bodyMap = shouldMapBody ? mapFile(fd, bodyOffset, bodySize) : Data();

            write.completionHandler(!error, bodyMap);

            // NOTE(review): mutates main-thread state — presumably this handler
            // runs on the main run loop; confirm IOChannel::write's dispatch target.
            ASSERT(m_activeWriteOperations.contains(&write));
            m_activeWriteOperations.remove(&write);
            dispatchPendingWriteOperations();
        });
    });

    // A new record may push the cache over its size budget.
    shrinkIfNeeded();
}
531
// Rewrites only the header of an existing on-disk record, leaving the
// page-aligned body untouched. Falls back to a full write if the
// page-rounded header size changed (the body would move).
void Storage::dispatchHeaderWriteOperation(const WriteOperation& write)
{
    ASSERT(RunLoop::isMain());
    ASSERT(write.existingRecord);
    ASSERT(m_activeWriteOperations.contains(&write));
    ASSERT(cacheMayContain(write.record.key.shortHash()));

    // Try to update the header of an existing entry.
    StringCapture cachePathCapture(m_directoryPath);
    backgroundIOQueue().dispatch([this, &write, cachePathCapture] {
        auto headerData = encodeRecordHeader(write.record);
        auto existingHeaderData = encodeRecordHeader(write.existingRecord.value());

        // encodeRecordHeader() pads to a page boundary, so equal sizes mean
        // the body offset is unchanged and an in-place rewrite is safe.
        bool pageRoundedHeaderSizeChanged = headerData.size() != existingHeaderData.size();
        if (pageRoundedHeaderSizeChanged) {
            LOG(NetworkCacheStorage, "(NetworkProcess) page-rounded header size changed, storing full entry");
            // dispatchFullWriteOperation must run on the main thread.
            RunLoop::main().dispatch([this, &write] {
                dispatchFullWriteOperation(write);
            });
            return;
        }

        auto channel = openFileForKey(write.record.key, IOChannel::Type::Write, cachePathCapture.string());
        channel->write(0, headerData, [this, &write](int error) {
            LOG(NetworkCacheStorage, "(NetworkProcess) update complete error=%d", error);

            // A failed partial rewrite may leave the file corrupt; drop it.
            if (error)
                remove(write.record.key);

            write.completionHandler(!error, { });

            ASSERT(m_activeWriteOperations.contains(&write));
            m_activeWriteOperations.remove(&write);
            dispatchPendingWriteOperations();
        });
    });
}
569
// Sets the cache size budget. Zero disables the cache (see retrieve/store).
// Shrinks immediately if the new budget is already exceeded.
void Storage::setMaximumSize(size_t size)
{
    ASSERT(RunLoop::isMain());
    m_maximumSize = size;

    shrinkIfNeeded();
}
577
// Wipes the cache: resets in-memory state immediately on the main thread,
// then deletes all record files and their partition directories on the IO queue.
void Storage::clear()
{
    ASSERT(RunLoop::isMain());
    LOG(NetworkCacheStorage, "(NetworkProcess) clearing cache");

    m_contentsFilter.clear();
    m_approximateSize = 0;

    StringCapture directoryPathCapture(m_directoryPath);

    ioQueue().dispatch([directoryPathCapture] {
        String directoryPath = directoryPathCapture.string();
        // One level of partition directories, each containing record files.
        traverseDirectory(directoryPath, DT_DIR, [&directoryPath](const String& subdirName) {
            String subdirPath = WebCore::pathByAppendingComponent(directoryPath, subdirName);
            traverseDirectory(subdirPath, DT_REG, [&subdirPath](const String& fileName) {
                WebCore::deleteFile(WebCore::pathByAppendingComponent(subdirPath, fileName));
            });
            WebCore::deleteEmptyDirectory(subdirPath);
        });
    });
}
599
600 static double computeRecordWorth(FileTimes times)
601 {
602     using namespace std::chrono;
603     auto age = system_clock::now() - times.creation;
604     // File modification time is updated manually on cache read. We don't use access time since OS may update it automatically.
605     auto accessAge = times.modification - times.creation;
606
607     // For sanity.
608     if (age <= 0_s || accessAge < 0_s || accessAge > age)
609         return 0;
610
611     // We like old entries that have been accessed recently.
612     return duration<double>(accessAge) / age;
613 }
614
615
616 static double deletionProbability(FileTimes times)
617 {
618     static const double maximumProbability { 0.33 };
619
620     auto worth = computeRecordWorth(times);
621
622     // Adjust a bit so the most valuable entries don't get deleted at all.
623     auto effectiveWorth = std::min(1.1 * worth, 1.);
624
625     return (1 - effectiveWorth) * maximumProbability;
626 }
627
// Probabilistically evicts records when the size estimate exceeds the budget.
// Rebuilds m_approximateSize from the surviving files as a side effect.
void Storage::shrinkIfNeeded()
{
    ASSERT(RunLoop::isMain());

    if (m_approximateSize <= m_maximumSize)
        return;
    // Only one shrink pass at a time.
    if (m_shrinkInProgress)
        return;
    m_shrinkInProgress = true;

    LOG(NetworkCacheStorage, "(NetworkProcess) shrinking cache approximateSize=%zu, m_maximumSize=%zu", static_cast<size_t>(m_approximateSize), m_maximumSize);

    // Reset here; the traversal below re-accumulates sizes of kept files.
    m_approximateSize = 0;

    StringCapture cachePathCapture(m_directoryPath);
    backgroundIOQueue().dispatch([this, cachePathCapture] {
        String cachePath = cachePathCapture.string();
        traverseCacheFiles(cachePath, [this](const String& fileName, const String& partitionPath) {
            auto filePath = WebCore::pathByAppendingComponent(partitionPath, fileName);

            // Each record is deleted with probability inversely related to
            // its worth; a single pass therefore shrinks by a bounded fraction.
            auto times = fileTimes(filePath);
            auto probability = deletionProbability(times);
            bool shouldDelete = randomNumber() < probability;

            LOG(NetworkCacheStorage, "Deletion probability=%f shouldDelete=%d", probability, shouldDelete);

            if (!shouldDelete) {
                // Kept: fold its size back into the estimate.
                long long fileSize = 0;
                WebCore::getFileSize(filePath, fileSize);
                m_approximateSize += fileSize;
                return;
            }

            WebCore::deleteFile(filePath);
            Key::HashType hash;
            if (!Key::stringToHash(fileName, hash))
                return;
            unsigned shortHash = Key::toShortHash(hash);
            // Filter updates happen on the main thread only.
            RunLoop::main().dispatch([this, shortHash] {
                if (m_contentsFilter.mayContain(shortHash))
                    m_contentsFilter.remove(shortHash);
            });
        });

        // Let system figure out if they are really empty.
        traverseDirectory(cachePath, DT_DIR, [&cachePath](const String& subdirName) {
            auto partitionPath = WebCore::pathByAppendingComponent(cachePath, subdirName);
            WebCore::deleteEmptyDirectory(partitionPath);
        });

        // NOTE(review): cleared from the background thread while the main
        // thread reads it in the guard above — presumably atomic; confirm.
        m_shrinkInProgress = false;

        LOG(NetworkCacheStorage, "(NetworkProcess) cache shrink completed approximateSize=%zu", static_cast<size_t>(m_approximateSize));
    });
}
683
// Deletes caches left behind by obsolete storage formats. Removes every
// top-level entry under the base directory that is NOT a "Version <N>"
// directory (i.e. the pre-versioned V1 layout, where partitions lived
// directly under the base path). Note: older "Version <M>" directories are
// intentionally not touched here.
void Storage::deleteOldVersions()
{
    // Delete V1 cache.
    StringCapture cachePathCapture(m_baseDirectoryPath);
    backgroundIOQueue().dispatch([cachePathCapture] {
        String cachePath = cachePathCapture.string();
        traverseDirectory(cachePath, DT_DIR, [&cachePath](const String& subdirName) {
            // Skip current/other versioned directories.
            if (subdirName.startsWith(versionDirectoryPrefix))
                return;
            String partitionPath = WebCore::pathByAppendingComponent(cachePath, subdirName);
            traverseDirectory(partitionPath, DT_REG, [&partitionPath](const String& fileName) {
                WebCore::deleteFile(WebCore::pathByAppendingComponent(partitionPath, fileName));
            });
            WebCore::deleteEmptyDirectory(partitionPath);
        });
    });
}
701
702 }
703 }
704
705 #endif