[JSC] Rename LargeAllocation to PreciseAllocation
[WebKit-https.git] / Source / JavaScriptCore / heap / CompleteSubspace.cpp
/*
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "Subspace.h"

#include "AlignedMemoryAllocator.h"
#include "AllocatorInlines.h"
#include "BlockDirectoryInlines.h"
#include "JSCInlines.h"
#include "LocalAllocatorInlines.h"
#include "MarkedBlockInlines.h"
#include "PreventCollectionScope.h"
#include "SubspaceInlines.h"

namespace JSC {

CompleteSubspace::CompleteSubspace(CString name, Heap& heap, HeapCellType* heapCellType, AlignedMemoryAllocator* alignedMemoryAllocator)
    : Subspace(name, heap)
{
    initialize(heapCellType, alignedMemoryAllocator);
}

CompleteSubspace::~CompleteSubspace()
{
}

Allocator CompleteSubspace::allocatorFor(size_t size, AllocatorForMode mode)
{
    return allocatorForNonVirtual(size, mode);
}

void* CompleteSubspace::allocate(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    return allocateNonVirtual(vm, size, deferralContext, failureMode);
}
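// Editorial note: allocatorFor() and allocate() are the virtual entry points; they simply
// forward to the NonVirtual fast paths used when the concrete subspace type is known. A
// minimal usage sketch, where the subspace reference and the size are purely hypothetical:
//
//     CompleteSubspace& subspace = ...; // e.g. an auxiliary-storage subspace owned by the VM
//     void* cell = subspace.allocate(vm, 64, nullptr, AllocationFailureMode::ReturnNull);
//     if (!cell) {
//         // With AllocationFailureMode::Assert the allocator would have crashed instead of
//         // returning null, so callers using ReturnNull must handle failure themselves.
//     }
//
// Sizes that already have a cached per-size-step allocator are served from MarkedBlocks;
// everything else reaches allocateSlow()/tryAllocateSlow() below, which either creates the
// allocator lazily (allocatorForSlow()) or takes a PreciseAllocation.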

Allocator CompleteSubspace::allocatorForSlow(size_t size)
{
    size_t index = MarkedSpace::sizeClassToIndex(size);
    size_t sizeClass = MarkedSpace::s_sizeClassForSizeStep[index];
    if (!sizeClass)
        return Allocator();
    
    // This is written in such a way that it's OK for the JIT threads to end up here if they want
    // to generate code that uses some allocator that hadn't been used yet. Note that a possibly-
    // just-as-good solution would be to return null if we're in the JIT since the JIT treats a null
    // allocator as "please always take the slow path". But, that could lead to performance
    // surprises and the algorithm here is pretty easy. Only this code has to hold the lock, to
    // prevent simultaneous BlockDirectory creations from multiple threads. This code ensures
    // that any "forEachAllocator" traversals will only see this allocator after it's initialized
    // enough: it will have been fully set up before it is published below.
    auto locker = holdLock(m_space.directoryLock());
    if (Allocator allocator = m_allocatorForSizeStep[index])
        return allocator;

    if (false)
        dataLog("Creating BlockDirectory/LocalAllocator for ", m_name, ", ", attributes(), ", ", sizeClass, ".\n");
    
    std::unique_ptr<BlockDirectory> uniqueDirectory = makeUnique<BlockDirectory>(m_space.heap(), sizeClass);
    BlockDirectory* directory = uniqueDirectory.get();
    m_directories.append(WTFMove(uniqueDirectory));
    
    directory->setSubspace(this);
    m_space.addBlockDirectory(locker, directory);
    
    std::unique_ptr<LocalAllocator> uniqueLocalAllocator =
        makeUnique<LocalAllocator>(directory);
    LocalAllocator* localAllocator = uniqueLocalAllocator.get();
    m_localAllocators.append(WTFMove(uniqueLocalAllocator));
    
    Allocator allocator(localAllocator);
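    // Editorial note: every size step that MarkedSpace rounds up to this size class shares the
    // allocator created above, so the loop below walks the size-step table downward from the
    // size class's own index and fills in each slot that maps to this size class.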
    
    index = MarkedSpace::sizeClassToIndex(sizeClass);
    for (;;) {
        if (MarkedSpace::s_sizeClassForSizeStep[index] != sizeClass)
            break;

        m_allocatorForSizeStep[index] = allocator;
        
        if (!index--)
            break;
    }
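    // Editorial note: publication order matters here. The new directory is linked into the
    // subspace's list and registered first, and the storeStoreFence() ensures those writes are
    // visible before the directory becomes reachable through m_firstDirectory.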
    
    directory->setNextDirectoryInSubspace(m_firstDirectory);
    m_alignedMemoryAllocator->registerDirectory(directory);
    WTF::storeStoreFence();
    m_firstDirectory = directory;
    return allocator;
}

void* CompleteSubspace::allocateSlow(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    void* result = tryAllocateSlow(vm, size, deferralContext);
    if (failureMode == AllocationFailureMode::Assert)
        RELEASE_ASSERT(result);
    return result;
}
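// Editorial note: tryAllocateSlow() below is the shared out-of-line slow path. It first retries
// through a per-size-class allocator, creating one on demand via AllocatorForMode::EnsureAllocator;
// requests that no MarkedBlock size class can serve are satisfied with an individually tracked
// PreciseAllocation instead.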

void* CompleteSubspace::tryAllocateSlow(VM& vm, size_t size, GCDeferralContext* deferralContext)
{
    if (validateDFGDoesGC)
        RELEASE_ASSERT(vm.heap.expectDoesGC());

    sanitizeStackForVM(vm);
    
    if (Allocator allocator = allocatorFor(size, AllocatorForMode::EnsureAllocator))
        return allocator.allocate(deferralContext, AllocationFailureMode::ReturnNull);
    
    if (size <= Options::preciseAllocationCutoff()
        && size <= MarkedSpace::largeCutoff) {
        dataLog("FATAL: attempting to allocate small object using precise allocation.\n");
        dataLog("Requested allocation size: ", size, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
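    // Editorial note: reaching this point means the request is too large for any MarkedBlock size
    // class (the check above rules out sizes small enough for one), so the object is allocated as
    // a PreciseAllocation that MarkedSpace tracks individually.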
    
    vm.heap.collectIfNecessaryOrDefer(deferralContext);
    
    size = WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(size);
    PreciseAllocation* allocation = PreciseAllocation::tryCreate(vm.heap, size, this, m_space.m_preciseAllocations.size());
    if (!allocation)
        return nullptr;
    
    m_space.m_preciseAllocations.append(allocation);
    if (auto* set = m_space.preciseAllocationSet())
        set->add(allocation->cell());
    ASSERT(allocation->indexInSpace() == m_space.m_preciseAllocations.size() - 1);
    vm.heap.didAllocate(size);
    m_space.m_capacity += size;
    
    m_preciseAllocations.append(allocation);
    
    return allocation->cell();
}
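// Editorial note: reallocatePreciseAllocationNonVirtual() below grows an existing precise
// allocation, e.g. a butterfly backing store. PreciseAllocation::tryReallocate() may hand back
// a different allocation, in which case the index slot in MarkedSpace and the precise-allocation
// HashSet are updated to refer to the new cell.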

void* CompleteSubspace::reallocatePreciseAllocationNonVirtual(VM& vm, HeapCell* oldCell, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    if (validateDFGDoesGC)
        RELEASE_ASSERT(vm.heap.expectDoesGC());

    // The following conditions are met by Butterfly allocations, for example.
    ASSERT(oldCell->isPreciseAllocation());

    PreciseAllocation* oldAllocation = &oldCell->preciseAllocation();
    ASSERT(oldAllocation->cellSize() <= size);
    ASSERT(oldAllocation->weakSet().isTriviallyDestructible());
    ASSERT(oldAllocation->attributes().destruction == DoesNotNeedDestruction);
    ASSERT(oldAllocation->attributes().cellKind == HeapCell::Auxiliary);
    ASSERT(size > MarkedSpace::largeCutoff);

    sanitizeStackForVM(vm);

    if (size <= Options::preciseAllocationCutoff()
        && size <= MarkedSpace::largeCutoff) {
        dataLog("FATAL: attempting to allocate small object using precise allocation.\n");
        dataLog("Requested allocation size: ", size, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    vm.heap.collectIfNecessaryOrDefer(deferralContext);

    size = WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(size);
    size_t difference = size - oldAllocation->cellSize();
    unsigned oldIndexInSpace = oldAllocation->indexInSpace();
    if (oldAllocation->isOnList())
        oldAllocation->remove();

    PreciseAllocation* allocation = oldAllocation->tryReallocate(size, this);
    if (!allocation) {
        RELEASE_ASSERT(failureMode != AllocationFailureMode::Assert);
        m_preciseAllocations.append(oldAllocation);
        return nullptr;
    }
    ASSERT(oldIndexInSpace == allocation->indexInSpace());

    // If reallocation changes the address, we should update the HashSet.
    if (oldAllocation != allocation) {
        if (auto* set = m_space.preciseAllocationSet()) {
            set->remove(oldAllocation->cell());
            set->add(allocation->cell());
        }
    }

    m_space.m_preciseAllocations[oldIndexInSpace] = allocation;
    vm.heap.didAllocate(difference);
    m_space.m_capacity += difference;

    m_preciseAllocations.append(allocation);

    return allocation->cell();
}
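// Editorial note: callers of the reallocation path above must not assume the cell address is
// stable. A minimal sketch of the calling pattern, with hypothetical names:
//
//     void* newBase = subspace.reallocatePreciseAllocationNonVirtual(vm, oldCell, newSize, nullptr, AllocationFailureMode::ReturnNull);
//     if (!newBase)
//         return false; // propagate the allocation failure
//     // newBase may differ from oldCell, so update any pointers into the old storage.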

} // namespace JSC