800527105519f2bd9671e61152223a1de280230f
[WebKit-https.git] / Source / JavaScriptCore / runtime / Structure.cpp
1 /*
2  * Copyright (C) 2008, 2009, 2013-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "Structure.h"
28
29 #include "CodeBlock.h"
30 #include "DumpContext.h"
31 #include "JSCInlines.h"
32 #include "JSObject.h"
33 #include "JSPropertyNameEnumerator.h"
34 #include "Lookup.h"
35 #include "PropertyMapHashTable.h"
36 #include "PropertyNameArray.h"
37 #include "StructureChain.h"
38 #include "StructureRareDataInlines.h"
39 #include "WeakGCMapInlines.h"
40 #include <wtf/CommaPrinter.h>
41 #include <wtf/NeverDestroyed.h>
42 #include <wtf/ProcessID.h>
43 #include <wtf/RefPtr.h>
44 #include <wtf/Threading.h>
45
46 #define DUMP_STRUCTURE_ID_STATISTICS 0
47
48 #ifndef NDEBUG
49 #define DO_PROPERTYMAP_CONSTENCY_CHECK 0
50 #else
51 #define DO_PROPERTYMAP_CONSTENCY_CHECK 0
52 #endif
53
54 using namespace std;
55 using namespace WTF;
56
57 namespace JSC {
58
59 #if DUMP_STRUCTURE_ID_STATISTICS
60 static HashSet<Structure*>& liveStructureSet = *(new HashSet<Structure*>);
61 #endif
62
63 class SingleSlotTransitionWeakOwner final : public WeakHandleOwner {
64     void finalize(Handle<Unknown>, void* context) override
65     {
66         StructureTransitionTable* table = reinterpret_cast<StructureTransitionTable*>(context);
67         ASSERT(table->isUsingSingleSlot());
68         WeakSet::deallocate(table->weakImpl());
69         table->m_data = StructureTransitionTable::UsingSingleSlotFlag;
70     }
71 };
72
73 static SingleSlotTransitionWeakOwner& singleSlotTransitionWeakOwner()
74 {
75     static NeverDestroyed<SingleSlotTransitionWeakOwner> owner;
76     return owner;
77 }
78
79 inline Structure* StructureTransitionTable::singleTransition() const
80 {
81     ASSERT(isUsingSingleSlot());
82     if (WeakImpl* impl = this->weakImpl()) {
83         if (impl->state() == WeakImpl::Live)
84             return jsCast<Structure*>(impl->jsValue().asCell());
85     }
86     return nullptr;
87 }
88
89 inline void StructureTransitionTable::setSingleTransition(Structure* structure)
90 {
91     ASSERT(isUsingSingleSlot());
92     if (WeakImpl* impl = this->weakImpl())
93         WeakSet::deallocate(impl);
94     WeakImpl* impl = WeakSet::allocate(structure, &singleSlotTransitionWeakOwner(), this);
95     m_data = reinterpret_cast<intptr_t>(impl) | UsingSingleSlotFlag;
96 }
97
98 bool StructureTransitionTable::contains(UniquedStringImpl* rep, unsigned attributes) const
99 {
100     if (isUsingSingleSlot()) {
101         Structure* transition = singleTransition();
102         return transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes;
103     }
104     return map()->get(std::make_pair(rep, attributes));
105 }
106
107 Structure* StructureTransitionTable::get(UniquedStringImpl* rep, unsigned attributes) const
108 {
109     if (isUsingSingleSlot()) {
110         Structure* transition = singleTransition();
111         return (transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes) ? transition : 0;
112     }
113     return map()->get(std::make_pair(rep, attributes));
114 }
115
// Records `structure` as a transition target. The table starts in an inline
// single-slot mode and inflates to a full hash map on the second insertion,
// re-adding the previously stored transition via a recursive call.
void StructureTransitionTable::add(VM& vm, Structure* structure)
{
    if (isUsingSingleSlot()) {
        Structure* existingTransition = singleTransition();

        // This handles the first transition being added.
        if (!existingTransition) {
            setSingleTransition(structure);
            return;
        }

        // This handles the second transition being added
        // (or the first transition being despecified!)
        // Note: after setMap() below, isUsingSingleSlot() is false, so the
        // recursive call and the fall-through both take the map path.
        setMap(new TransitionMap(vm));
        add(vm, existingTransition);
    }

    // Add the structure to the map.

    // Newer versions of the STL have an std::make_pair function that takes rvalue references.
    // When either of the parameters are bitfields, the C++ compiler will try to bind them as lvalues, which is invalid. To work around this, use unary "+" to make the parameter an rvalue.
    // See https://bugs.webkit.org/show_bug.cgi?id=59261 for more details
    map()->set(std::make_pair(structure->m_nameInPrevious.get(), +structure->attributesInPrevious()), structure);
}
140
141 void Structure::dumpStatistics()
142 {
143 #if DUMP_STRUCTURE_ID_STATISTICS
144     unsigned numberLeaf = 0;
145     unsigned numberUsingSingleSlot = 0;
146     unsigned numberSingletons = 0;
147     unsigned numberWithPropertyMaps = 0;
148     unsigned totalPropertyMapsSize = 0;
149
150     HashSet<Structure*>::const_iterator end = liveStructureSet.end();
151     for (HashSet<Structure*>::const_iterator it = liveStructureSet.begin(); it != end; ++it) {
152         Structure* structure = *it;
153
154         switch (structure->m_transitionTable.size()) {
155             case 0:
156                 ++numberLeaf;
157                 if (!structure->previousID())
158                     ++numberSingletons;
159                 break;
160
161             case 1:
162                 ++numberUsingSingleSlot;
163                 break;
164         }
165
166         if (structure->propertyTable()) {
167             ++numberWithPropertyMaps;
168             totalPropertyMapsSize += structure->propertyTable()->sizeInMemory();
169         }
170     }
171
172     dataLogF("Number of live Structures: %d\n", liveStructureSet.size());
173     dataLogF("Number of Structures using the single item optimization for transition map: %d\n", numberUsingSingleSlot);
174     dataLogF("Number of Structures that are leaf nodes: %d\n", numberLeaf);
175     dataLogF("Number of Structures that singletons: %d\n", numberSingletons);
176     dataLogF("Number of Structures with PropertyMaps: %d\n", numberWithPropertyMaps);
177
178     dataLogF("Size of a single Structures: %d\n", static_cast<unsigned>(sizeof(Structure)));
179     dataLogF("Size of sum of all property maps: %d\n", totalPropertyMapsSize);
180     dataLogF("Size of average of all property maps: %f\n", static_cast<double>(totalPropertyMapsSize) / static_cast<double>(liveStructureSet.size()));
181 #else
182     dataLogF("Dumping Structure statistics is not enabled.\n");
183 #endif
184 }
185
// Primary constructor: builds a fresh root Structure (no previous structure),
// allocating a new structure ID and initializing every bitfield flag explicitly.
Structure::Structure(VM& vm, JSGlobalObject* globalObject, JSValue prototype, const TypeInfo& typeInfo, const ClassInfo* classInfo, IndexingType indexingType, unsigned inlineCapacity)
    : JSCell(vm, vm.structureStructure.get())
    , m_blob(vm.heap.structureIDTable().allocateID(this), indexingType, typeInfo)
    , m_outOfLineTypeFlags(typeInfo.outOfLineTypeFlags())
    , m_globalObject(vm, this, globalObject, WriteBarrier<JSGlobalObject>::MayBeNull)
    , m_prototype(vm, this, prototype)
    , m_classInfo(classInfo)
    , m_transitionWatchpointSet(IsWatched)
    , m_offset(invalidOffset)
    , m_inlineCapacity(inlineCapacity)
    , m_bitField(0)
{
    setDictionaryKind(NoneDictionaryKind);
    setIsPinnedPropertyTable(false);
    // A class with static setter/readonly properties counts as having
    // getter/setter properties from the start.
    setHasGetterSetterProperties(classInfo->hasStaticSetterOrReadonlyProperties());
    setHasCustomGetterSetterProperties(false);
    setHasReadOnlyOrGetterSetterPropertiesExcludingProto(classInfo->hasStaticSetterOrReadonlyProperties());
    setIsQuickPropertyAccessAllowedForEnumeration(true);
    setAttributesInPrevious(0);
    setDidPreventExtensions(false);
    setDidTransition(false);
    setStaticPropertiesReified(false);
    setTransitionWatchpointIsLikelyToBeFired(false);
    setHasBeenDictionary(false);

    // Sanity-check capacity bounds and the flag invariants set above.
    ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
    ASSERT(static_cast<PropertyOffset>(inlineCapacity) < firstOutOfLineOffset);
    ASSERT(!hasRareData());
    ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
    ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
}
217
218 const ClassInfo Structure::s_info = { "Structure", 0, 0, CREATE_METHOD_TABLE(Structure) };
219
// Early-cell constructor: initializes the JSCell via CreatingEarlyCell rather
// than vm.structureStructure — presumably used during VM bootstrap before the
// structure-of-structures exists (verify against callers). m_blob and
// m_outOfLineTypeFlags are set in the body rather than the initializer list.
Structure::Structure(VM& vm)
    : JSCell(CreatingEarlyCell)
    , m_prototype(vm, this, jsNull())
    , m_classInfo(info())
    , m_transitionWatchpointSet(IsWatched)
    , m_offset(invalidOffset)
    , m_inlineCapacity(0)
    , m_bitField(0)
{
    setDictionaryKind(NoneDictionaryKind);
    setIsPinnedPropertyTable(false);
    setHasGetterSetterProperties(m_classInfo->hasStaticSetterOrReadonlyProperties());
    setHasCustomGetterSetterProperties(false);
    setHasReadOnlyOrGetterSetterPropertiesExcludingProto(m_classInfo->hasStaticSetterOrReadonlyProperties());
    setIsQuickPropertyAccessAllowedForEnumeration(true);
    setAttributesInPrevious(0);
    setDidPreventExtensions(false);
    setDidTransition(false);
    setStaticPropertiesReified(false);
    setTransitionWatchpointIsLikelyToBeFired(false);
    setHasBeenDictionary(false);

    // Allocate the structure ID and record type info now that `this` is valid.
    TypeInfo typeInfo = TypeInfo(CellType, StructureFlags);
    m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), 0, typeInfo);
    m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags();

    ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
    ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
}
249
// Transition constructor: clones `previous` with a fresh structure ID, copies
// its flags, links back to it via setPreviousID, and notifies `previous` that
// a transition away from it occurred (firing watchpoints unless deferred).
Structure::Structure(VM& vm, Structure* previous, DeferredStructureTransitionWatchpointFire* deferred)
    : JSCell(vm, vm.structureStructure.get())
    , m_prototype(vm, this, previous->storedPrototype())
    , m_classInfo(previous->m_classInfo)
    , m_transitionWatchpointSet(IsWatched)
    , m_offset(invalidOffset)
    , m_inlineCapacity(previous->m_inlineCapacity)
    , m_bitField(0)
{
    setDictionaryKind(previous->dictionaryKind());
    // A structure that was flattened before keeps its table pinned.
    setIsPinnedPropertyTable(previous->hasBeenFlattenedBefore());
    setHasGetterSetterProperties(previous->hasGetterSetterProperties());
    setHasCustomGetterSetterProperties(previous->hasCustomGetterSetterProperties());
    setHasReadOnlyOrGetterSetterPropertiesExcludingProto(previous->hasReadOnlyOrGetterSetterPropertiesExcludingProto());
    setIsQuickPropertyAccessAllowedForEnumeration(previous->isQuickPropertyAccessAllowedForEnumeration());
    setAttributesInPrevious(0);
    setDidPreventExtensions(previous->didPreventExtensions());
    setDidTransition(true);
    setStaticPropertiesReified(previous->staticPropertiesReified());
    setHasBeenDictionary(previous->hasBeenDictionary());

    TypeInfo typeInfo = previous->typeInfo();
    m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), previous->indexingTypeIncludingHistory(), typeInfo);
    m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags();

    ASSERT(!previous->typeInfo().structureIsImmortal());
    setPreviousID(vm, previous);

    previous->didTransitionFromThisStructure(deferred);
    
    // Copy this bit now, in case previous was being watched.
    setTransitionWatchpointIsLikelyToBeFired(previous->transitionWatchpointIsLikelyToBeFired());

    if (previous->m_globalObject)
        m_globalObject.set(vm, this, previous->m_globalObject.get());
    ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
    ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
}
288
289 Structure::~Structure()
290 {
291     if (typeInfo().structureIsImmortal())
292         return;
293     Heap::heap(this)->structureIDTable().deallocateID(this, m_blob.structureID());
294 }
295
296 void Structure::destroy(JSCell* cell)
297 {
298     static_cast<Structure*>(cell)->Structure::~Structure();
299 }
300
301 void Structure::findStructuresAndMapForMaterialization(Vector<Structure*, 8>& structures, Structure*& structure, PropertyTable*& table)
302 {
303     ASSERT(structures.isEmpty());
304     table = 0;
305
306     for (structure = this; structure; structure = structure->previousID()) {
307         structure->m_lock.lock();
308         
309         table = structure->propertyTable().get();
310         if (table) {
311             // Leave the structure locked, so that the caller can do things to it atomically
312             // before it loses its property table.
313             return;
314         }
315         
316         structures.append(structure);
317         structure->m_lock.unlock();
318     }
319     
320     ASSERT(!structure);
321     ASSERT(!table);
322 }
323
// Rebuilds this structure's property table by copying the nearest ancestor's
// table (if any) and replaying the transitions recorded since that ancestor.
void Structure::materializePropertyMap(VM& vm)
{
    ASSERT(structure()->classInfo() == info());
    ASSERT(!propertyTable());

    Vector<Structure*, 8> structures;
    Structure* structure;
    PropertyTable* table;
    
    findStructuresAndMapForMaterialization(structures, structure, table);
    
    if (table) {
        // The ancestor that provided `table` was left locked by
        // findStructuresAndMapForMaterialization; copy and release it.
        table = table->copy(vm, numberOfSlotsForLastOffset(m_offset, m_inlineCapacity));
        structure->m_lock.unlock();
    }
    
    // Must hold the lock on this structure, since we will be modifying this structure's
    // property map. We don't want getConcurrently() to see the property map in a half-baked
    // state.
    GCSafeConcurrentJITLocker locker(m_lock, vm.heap);
    if (!table)
        createPropertyMap(locker, vm, numberOfSlotsForLastOffset(m_offset, m_inlineCapacity));
    else
        propertyTable().set(vm, this, table);

    InferredTypeTable* typeTable = m_inferredTypeTable.get();

    // Replay collected transitions oldest-first, re-adding each named property
    // at its recorded offset (offsets must not change during materialization).
    for (size_t i = structures.size(); i--;) {
        structure = structures[i];
        if (!structure->m_nameInPrevious)
            continue;
        PropertyMapEntry entry(structure->m_nameInPrevious.get(), structure->m_offset, structure->attributesInPrevious());
        if (typeTable && typeTable->get(structure->m_nameInPrevious.get()))
            entry.hasInferredType = true;
        propertyTable()->add(entry, m_offset, PropertyTable::PropertyOffsetMustNotChange);
    }
    
    checkOffsetConsistency();
}
363
364 Structure* Structure::addPropertyTransitionToExistingStructureImpl(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset)
365 {
366     ASSERT(!structure->isDictionary());
367     ASSERT(structure->isObject());
368
369     if (Structure* existingTransition = structure->m_transitionTable.get(uid, attributes)) {
370         validateOffset(existingTransition->m_offset, existingTransition->inlineCapacity());
371         offset = existingTransition->m_offset;
372         return existingTransition;
373     }
374
375     return 0;
376 }
377
// Mutator-thread entry point for the cached-transition lookup; the
// *Concurrently variant below takes the structure's lock instead.
Structure* Structure::addPropertyTransitionToExistingStructure(Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset)
{
    ASSERT(!isCompilationThread());
    return addPropertyTransitionToExistingStructureImpl(structure, propertyName.uid(), attributes, offset);
}
383
// Compiler-thread-safe variant: holds the structure's lock so the transition
// table cannot change during the lookup.
Structure* Structure::addPropertyTransitionToExistingStructureConcurrently(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset)
{
    ConcurrentJITLocker locker(structure->m_lock);
    return addPropertyTransitionToExistingStructureImpl(structure, uid, attributes, offset);
}
389
390 bool Structure::anyObjectInChainMayInterceptIndexedAccesses() const
391 {
392     for (const Structure* current = this; ;) {
393         if (current->mayInterceptIndexedAccesses())
394             return true;
395         
396         JSValue prototype = current->storedPrototype();
397         if (prototype.isNull())
398             return false;
399         
400         current = asObject(prototype)->structure();
401     }
402 }
403
404 bool Structure::holesMustForwardToPrototype(VM& vm) const
405 {
406     if (this->mayInterceptIndexedAccesses())
407         return true;
408
409     JSValue prototype = this->storedPrototype();
410     if (!prototype.isObject())
411         return false;
412     JSObject* object = asObject(prototype);
413
414     while (true) {
415         Structure& structure = *object->structure(vm);
416         if (hasIndexedProperties(object->indexingType()) || structure.mayInterceptIndexedAccesses())
417             return true;
418         prototype = structure.storedPrototype();
419         if (!prototype.isObject())
420             return false;
421         object = asObject(prototype);
422     }
423
424     RELEASE_ASSERT_NOT_REACHED();
425     return false;
426 }
427
// Slow-put indexing is needed when indexed accesses may be intercepted
// anywhere on the prototype chain, or when the global object is in its
// conservative "having a bad time" mode.
bool Structure::needsSlowPutIndexing() const
{
    return anyObjectInChainMayInterceptIndexedAccesses()
        || globalObject()->isHavingABadTime();
}
433
434 NonPropertyTransition Structure::suggestedArrayStorageTransition() const
435 {
436     if (needsSlowPutIndexing())
437         return NonPropertyTransition::AllocateSlowPutArrayStorage;
438     
439     return NonPropertyTransition::AllocateArrayStorage;
440 }
441
442 Structure* Structure::addPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset)
443 {
444     Structure* newStructure = addPropertyTransitionToExistingStructure(
445         structure, propertyName, attributes, offset);
446     if (newStructure)
447         return newStructure;
448
449     return addNewPropertyTransition(
450         vm, structure, propertyName, attributes, offset, PutPropertySlot::UnknownContext);
451 }
452
// Creates a brand-new transition adding (propertyName, attributes); callers
// must already have checked the transition cache. If the transition chain has
// grown past the context-dependent limit, falls back to a cacheable
// dictionary instead.
Structure* Structure::addNewPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset, PutPropertySlot::Context context, DeferredStructureTransitionWatchpointFire* deferred)
{
    ASSERT(!structure->isDictionary());
    ASSERT(structure->isObject());
    ASSERT(!Structure::addPropertyTransitionToExistingStructure(structure, propertyName, attributes, offset));
    
    // PutById sites tolerate a shorter chain before we give up and go to
    // dictionary mode.
    int maxTransitionLength;
    if (context == PutPropertySlot::PutById)
        maxTransitionLength = s_maxTransitionLengthForNonEvalPutById;
    else
        maxTransitionLength = s_maxTransitionLength;
    if (structure->transitionCount() > maxTransitionLength) {
        Structure* transition = toCacheableDictionaryTransition(vm, structure, deferred);
        ASSERT(structure != transition);
        offset = transition->add(vm, propertyName, attributes);
        return transition;
    }
    
    Structure* transition = create(vm, structure, deferred);

    transition->m_cachedPrototypeChain.setMayBeNull(vm, transition, structure->m_cachedPrototypeChain.get());
    transition->m_nameInPrevious = propertyName.uid();
    transition->setAttributesInPrevious(attributes);
    // Steal (or clone, if pinned) the parent's property table, then add the
    // new property to it.
    transition->propertyTable().set(vm, transition, structure->takePropertyTableOrCloneIfPinned(vm));
    transition->m_offset = structure->m_offset;
    transition->m_inferredTypeTable.setMayBeNull(vm, transition, structure->m_inferredTypeTable.get());

    offset = transition->add(vm, propertyName, attributes);

    checkOffset(transition->m_offset, transition->inlineCapacity());
    // Publish the transition under the lock so concurrent readers never see
    // a partially-constructed structure in the table.
    {
        ConcurrentJITLocker locker(structure->m_lock);
        structure->m_transitionTable.add(vm, transition);
    }
    transition->checkOffsetConsistency();
    structure->checkOffsetConsistency();
    return transition;
}
491
// Removing a property always transitions to an uncacheable dictionary; the
// rationale is spelled out in the comment below.
Structure* Structure::removePropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, PropertyOffset& offset)
{
    // NOTE: There are some good reasons why this goes directly to uncacheable dictionary rather than
    // caching the removal. We can fix all of these things, but we must remember to do so, if we ever try
    // to optimize this case.
    //
    // - Cached transitions usually steal the property table, and assume that this is possible because they
    //   can just rebuild the table by looking at past transitions. That code assumes that the table only
    //   grew and never shrank. To support removals, we'd have to change the property table materialization
    //   code to handle deletions. Also, we have logic to get the list of properties on a structure that
    //   lacks a property table by just looking back through the set of transitions since the last
    //   structure that had a pinned table. That logic would also have to be changed to handle cached
    //   removals.
    //
    // - InferredTypeTable assumes that removal has never happened. This is important since if we could
    //   remove a property and then re-add it later, then the "absence means top" optimization wouldn't
    //   work anymore, unless removal also either poisoned type inference (by doing something equivalent to
    //   hasBeenDictionary) or by strongly marking the entry as Top by ensuring that it is not absent, but
    //   instead, has a null entry.
    
    ASSERT(!structure->isUncacheableDictionary());

    Structure* transition = toUncacheableDictionaryTransition(vm, structure);

    // The removed property's old offset is reported through `offset`.
    offset = transition->remove(propertyName);

    transition->checkOffsetConsistency();
    return transition;
}
521
// Creates a transition whose only difference is the stored [[Prototype]].
// The new structure gets its own pinned copy of the property table and is
// not registered in the parent's transition table.
Structure* Structure::changePrototypeTransition(VM& vm, Structure* structure, JSValue prototype)
{
    Structure* transition = create(vm, structure);

    transition->m_prototype.set(vm, transition, prototype);

    DeferGC deferGC(vm.heap);
    structure->materializePropertyMapIfNecessary(vm, deferGC);
    transition->propertyTable().set(vm, transition, structure->copyPropertyTableForPinning(vm));
    transition->m_offset = structure->m_offset;
    transition->pin();

    transition->checkOffsetConsistency();
    return transition;
}
537
// Changes the attributes of an existing property. Non-dictionary structures
// first get a new pinned transition; uncacheable dictionaries are mutated
// in place.
Structure* Structure::attributeChangeTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes)
{
    DeferGC deferGC(vm.heap);
    if (!structure->isUncacheableDictionary()) {
        Structure* transition = create(vm, structure);

        structure->materializePropertyMapIfNecessary(vm, deferGC);
        transition->propertyTable().set(vm, transition, structure->copyPropertyTableForPinning(vm));
        transition->m_offset = structure->m_offset;
        transition->pin();
        
        structure = transition;
    }

    // The property must already exist; only its attributes are rewritten.
    ASSERT(structure->propertyTable());
    PropertyMapEntry* entry = structure->propertyTable()->get(propertyName.uid());
    ASSERT(entry);
    entry->attributes = attributes;

    structure->checkOffsetConsistency();
    return structure;
}
560
// Creates the transition that turns `structure` into a dictionary of the
// given kind, with its own pinned copy of the property table; the result is
// permanently marked as having been a dictionary.
Structure* Structure::toDictionaryTransition(VM& vm, Structure* structure, DictionaryKind kind, DeferredStructureTransitionWatchpointFire* deferred)
{
    ASSERT(!structure->isUncacheableDictionary());
    
    Structure* transition = create(vm, structure, deferred);

    DeferGC deferGC(vm.heap);
    structure->materializePropertyMapIfNecessary(vm, deferGC);
    transition->propertyTable().set(vm, transition, structure->copyPropertyTableForPinning(vm));
    transition->m_offset = structure->m_offset;
    transition->setDictionaryKind(kind);
    transition->pin();
    transition->setHasBeenDictionary(true);

    transition->checkOffsetConsistency();
    return transition;
}
578
// Convenience wrapper: dictionary transition that remains cacheable.
Structure* Structure::toCacheableDictionaryTransition(VM& vm, Structure* structure, DeferredStructureTransitionWatchpointFire* deferred)
{
    return toDictionaryTransition(vm, structure, CachedDictionaryKind, deferred);
}
583
// Convenience wrapper: dictionary transition that is not cacheable.
Structure* Structure::toUncacheableDictionaryTransition(VM& vm, Structure* structure)
{
    return toDictionaryTransition(vm, structure, UncachedDictionaryKind);
}
588
// Transition implementing Object.seal semantics for this structure.
Structure* Structure::sealTransition(VM& vm, Structure* structure)
{
    return nonPropertyTransition(vm, structure, NonPropertyTransition::Seal);
}
593
// Transition implementing Object.freeze semantics for this structure.
Structure* Structure::freezeTransition(VM& vm, Structure* structure)
{
    return nonPropertyTransition(vm, structure, NonPropertyTransition::Freeze);
}
598
// Transition implementing Object.preventExtensions for this structure.
Structure* Structure::preventExtensionsTransition(VM& vm, Structure* structure)
{
    return nonPropertyTransition(vm, structure, NonPropertyTransition::PreventExtensions);
}
603
// Produces a property table for a new transition: a copy (sized for one more
// entry) when this structure's table is pinned, otherwise the table itself,
// stolen from this structure under the lock.
PropertyTable* Structure::takePropertyTableOrCloneIfPinned(VM& vm)
{
    DeferGC deferGC(vm.heap);
    materializePropertyMapIfNecessaryForPinning(vm, deferGC);
    
    if (isPinnedPropertyTable())
        return propertyTable()->copy(vm, propertyTable()->size() + 1);
    
    // Hold the lock while stealing the table - so that getConcurrently() on another thread
    // will either have to bypass this structure, or will get to use the property table
    // before it is stolen.
    ConcurrentJITLocker locker(m_lock);
    PropertyTable* takenPropertyTable = propertyTable().get();
    propertyTable().clear();
    return takenPropertyTable;
}
620
// Creates (or finds) the transition for a non-property change: seal, freeze,
// preventExtensions, or an indexing-type change.
Structure* Structure::nonPropertyTransition(VM& vm, Structure* structure, NonPropertyTransition transitionKind)
{
    unsigned attributes = toAttributes(transitionKind);
    IndexingType indexingType = newIndexingType(structure->indexingTypeIncludingHistory(), transitionKind);
    
    // For indexing-type changes on the global object's original array
    // structures, reuse the pre-built structure for the new indexing type.
    if (changesIndexingType(transitionKind)) {
        if (JSGlobalObject* globalObject = structure->m_globalObject.get()) {
            if (globalObject->isOriginalArrayStructure(structure)) {
                Structure* result = globalObject->originalArrayStructureForIndexingType(indexingType);
                if (result->indexingTypeIncludingHistory() == indexingType) {
                    structure->didTransitionFromThisStructure();
                    return result;
                }
            }
        }
    }
    
    // Non-property transitions are cached in the transition table under a
    // null name, keyed only by the attributes.
    Structure* existingTransition;
    if (!structure->isDictionary() && (existingTransition = structure->m_transitionTable.get(0, attributes))) {
        ASSERT(existingTransition->attributesInPrevious() == attributes);
        ASSERT(existingTransition->indexingTypeIncludingHistory() == indexingType);
        return existingTransition;
    }
    
    DeferGC deferGC(vm.heap);
    
    Structure* transition = create(vm, structure);
    transition->setAttributesInPrevious(attributes);
    transition->m_blob.setIndexingType(indexingType);
    
    if (preventsExtensions(transitionKind))
        transition->setDidPreventExtensions(true);
    
    if (setsDontDeleteOnAllProperties(transitionKind)
        || setsReadOnlyOnNonAccessorProperties(transitionKind)) {
        // We pin the property table on transitions that do wholesale editing of the property
        // table, since our logic for walking the property transition chain to rematerialize the
        // table doesn't know how to take into account such wholesale edits.
        
        structure->materializePropertyMapIfNecessary(vm, deferGC);
        transition->propertyTable().set(vm, transition, structure->copyPropertyTableForPinning(vm));
        transition->m_offset = structure->m_offset;
        transition->pinForCaching();
        
        // Apply the wholesale attribute edits implied by seal/freeze.
        if (transition->propertyTable()) {
            for (auto& entry : *transition->propertyTable().get()) {
                if (setsDontDeleteOnAllProperties(transitionKind))
                    entry.attributes |= DontDelete;
                if (setsReadOnlyOnNonAccessorProperties(transitionKind) && !(entry.attributes & Accessor))
                    entry.attributes |= ReadOnly;
            }
        }
    } else {
        transition->propertyTable().set(vm, transition, structure->takePropertyTableOrCloneIfPinned(vm));
        transition->m_offset = structure->m_offset;
        checkOffset(transition->m_offset, transition->inlineCapacity());
    }
    
    if (setsReadOnlyOnNonAccessorProperties(transitionKind)
        && transition->propertyTable()
        && !transition->propertyTable()->isEmpty())
        transition->setHasReadOnlyOrGetterSetterPropertiesExcludingProto(true);
    
    // Dictionaries don't cache transitions; pin the new structure's table
    // instead of registering it in the (locked) transition table.
    if (structure->isDictionary())
        transition->pin();
    else {
        ConcurrentJITLocker locker(structure->m_lock);
        structure->m_transitionTable.add(vm, transition);
    }

    transition->checkOffsetConsistency();
    return transition;
}
694
695 // In future we may want to cache this property.
696 bool Structure::isSealed(VM& vm)
697 {
698     if (isStructureExtensible())
699         return false;
700
701     DeferGC deferGC(vm.heap);
702     materializePropertyMapIfNecessary(vm, deferGC);
703     if (!propertyTable())
704         return true;
705
706     PropertyTable::iterator end = propertyTable()->end();
707     for (PropertyTable::iterator iter = propertyTable()->begin(); iter != end; ++iter) {
708         if ((iter->attributes & DontDelete) != DontDelete)
709             return false;
710     }
711     return true;
712 }
713
714 // In future we may want to cache this property.
715 bool Structure::isFrozen(VM& vm)
716 {
717     if (isStructureExtensible())
718         return false;
719
720     DeferGC deferGC(vm.heap);
721     materializePropertyMapIfNecessary(vm, deferGC);
722     if (!propertyTable())
723         return true;
724
725     PropertyTable::iterator end = propertyTable()->end();
726     for (PropertyTable::iterator iter = propertyTable()->begin(); iter != end; ++iter) {
727         if (!(iter->attributes & DontDelete))
728             return false;
729         if (!(iter->attributes & (ReadOnly | Accessor)))
730             return false;
731     }
732     return true;
733 }
734
// Converts a dictionary structure back to a normal (non-dictionary) structure,
// compacting the object's property storage in the process. For uncacheable
// dictionaries, deleted offsets may have left holes, so values are copied out
// and re-stored at densely packed offsets. Returns `this`.
Structure* Structure::flattenDictionaryStructure(VM& vm, JSObject* object)
{
    checkOffsetConsistency();
    ASSERT(isDictionary());

    size_t beforeOutOfLineCapacity = this->outOfLineCapacity();
    if (isUncacheableDictionary()) {
        ASSERT(propertyTable());

        size_t propertyCount = propertyTable()->size();

        // Holds our values compacted by insertion order.
        Vector<JSValue> values(propertyCount);

        // Copies out our values from their hashed locations, compacting property table offsets as we go.
        unsigned i = 0;
        PropertyTable::iterator end = propertyTable()->end();
        m_offset = invalidOffset;
        // Note: this loop rewrites each entry's offset in place while also
        // tracking the last assigned offset in m_offset.
        for (PropertyTable::iterator iter = propertyTable()->begin(); iter != end; ++iter, ++i) {
            values[i] = object->getDirect(iter->offset);
            m_offset = iter->offset = offsetForPropertyNumber(i, m_inlineCapacity);
        }
        
        // Copies in our values to their compacted locations.
        for (unsigned i = 0; i < propertyCount; i++)
            object->putDirect(vm, offsetForPropertyNumber(i, m_inlineCapacity), values[i]);

        propertyTable()->clearDeletedOffsets();
        checkOffsetConsistency();
    }

    setDictionaryKind(NoneDictionaryKind);
    setHasBeenFlattenedBefore(true);

    size_t afterOutOfLineCapacity = this->outOfLineCapacity();

    // Compaction can only shrink (or keep) the out-of-line capacity.
    if (beforeOutOfLineCapacity != afterOutOfLineCapacity) {
        ASSERT(beforeOutOfLineCapacity > afterOutOfLineCapacity);
        // If the object had a Butterfly but after flattening/compacting we no longer have need of it,
        // we need to zero it out because the collector depends on the Structure to know the size for copying.
        if (object->butterfly() && !afterOutOfLineCapacity && !this->hasIndexingHeader(object))
            object->setStructureAndButterfly(vm, this, 0);
        // If the object was down-sized to the point where the base of the Butterfly is no longer within the 
        // first CopiedBlock::blockSize bytes, we'll get the wrong answer if we try to mask the base back to 
        // the CopiedBlock header. To prevent this case we need to memmove the Butterfly down.
        else if (object->butterfly())
            object->shiftButterflyAfterFlattening(vm, beforeOutOfLineCapacity, afterOutOfLineCapacity);
    }

    return this;
}
786
// Adds a property directly to this structure (no transition structure is
// created). The property table is materialized and pinned first, because a
// structure mutated in place can no longer rely on replaying transitions to
// rebuild its table.
PropertyOffset Structure::addPropertyWithoutTransition(VM& vm, PropertyName propertyName, unsigned attributes)
{
    DeferGC deferGC(vm.heap);
    materializePropertyMapIfNecessaryForPinning(vm, deferGC);
    
    pin();

    return add(vm, propertyName, attributes);
}
796
// Removes a property directly from this structure (no transition). Only legal
// on uncacheable dictionaries; the table is materialized and pinned before the
// removal for the same reason as addPropertyWithoutTransition().
PropertyOffset Structure::removePropertyWithoutTransition(VM& vm, PropertyName propertyName)
{
    ASSERT(isUncacheableDictionary());

    DeferGC deferGC(vm.heap);
    materializePropertyMapIfNecessaryForPinning(vm, deferGC);

    pin();
    return remove(propertyName);
}
807
// Pins the property table so it is retained by GC, and clears the link to the
// previous structure (previous ID and the transition property name).
void Structure::pin()
{
    ASSERT(propertyTable());
    setIsPinnedPropertyTable(true);
    clearPreviousID();
    m_nameInPrevious = nullptr;
}
815
// Like pin(), but does not clear the previous-structure ID — only the cached
// transition property name is dropped.
void Structure::pinForCaching()
{
    ASSERT(propertyTable());
    setIsPinnedPropertyTable(true);
    m_nameInPrevious = nullptr;
}
822
// Allocates the rare data object, migrating the current previousID into it
// (m_previousOrRareData holds either the previous structure or the rare data).
void Structure::allocateRareData(VM& vm)
{
    ASSERT(!hasRareData());
    StructureRareData* rareData = StructureRareData::create(vm, previousID());
    // Ensure the rare data is fully initialized before it becomes visible to
    // concurrent readers through m_previousOrRareData.
    WTF::storeStoreFence();
    m_previousOrRareData.set(vm, this, rareData);
    ASSERT(hasRareData());
}
831
// Returns the watchpoint set that tracks replacement of the property at the
// given offset, creating it (in IsWatched state) on first request. Returns
// null when the offset is invalid.
WatchpointSet* Structure::ensurePropertyReplacementWatchpointSet(VM& vm, PropertyOffset offset)
{
    ASSERT(!isUncacheableDictionary());

    // In some places it's convenient to call this with an invalid offset. So, we do the check here.
    if (!isValidOffset(offset))
        return nullptr;
    
    if (!hasRareData())
        allocateRareData(vm);
    ConcurrentJITLocker locker(m_lock);
    StructureRareData* rareData = this->rareData();
    if (!rareData->m_replacementWatchpointSets) {
        rareData->m_replacementWatchpointSets =
            std::make_unique<StructureRareData::PropertyWatchpointMap>();
        // Publish the fully constructed map before concurrent readers see it.
        WTF::storeStoreFence();
    }
    auto result = rareData->m_replacementWatchpointSets->add(offset, nullptr);
    if (result.isNewEntry)
        result.iterator->value = adoptRef(new WatchpointSet(IsWatched));
    return result.iterator->value.get();
}
854
855 void Structure::startWatchingPropertyForReplacements(VM& vm, PropertyName propertyName)
856 {
857     ASSERT(!isUncacheableDictionary());
858     
859     startWatchingPropertyForReplacements(vm, get(vm, propertyName));
860 }
861
862 void Structure::didCachePropertyReplacement(VM& vm, PropertyOffset offset)
863 {
864     ensurePropertyReplacementWatchpointSet(vm, offset)->fireAll(vm, "Did cache property replacement");
865 }
866
867 void Structure::startWatchingInternalProperties(VM& vm)
868 {
869     if (!isUncacheableDictionary()) {
870         startWatchingPropertyForReplacements(vm, vm.propertyNames->toString);
871         startWatchingPropertyForReplacements(vm, vm.propertyNames->valueOf);
872     }
873     setDidWatchInternalProperties(true);
874 }
875
// Slow path invoked when a value is stored to a named property: records the
// stored value's type in the structure's inferred type table and updates the
// property entry's hasInferredType bit.
void Structure::willStoreValueSlow(
    VM& vm, PropertyName propertyName, JSValue value, bool shouldOptimize,
    InferredTypeTable::StoredPropertyAge age)
{
    ASSERT(!isCompilationThread());
    ASSERT(structure()->classInfo() == info());
    ASSERT(!hasBeenDictionary());

    // Create the inferred type table before doing anything else, so that we don't GC after we have already
    // grabbed a pointer into the property map.
    InferredTypeTable* table = m_inferredTypeTable.get();
    if (!table) {
        table = InferredTypeTable::create(vm);
        // Publish the fully constructed table before installing the pointer.
        WTF::storeStoreFence();
        m_inferredTypeTable.set(vm, this, table);
    }

    // This only works if we've got a property table.
    PropertyTable* propertyTable;
    materializePropertyMapIfNecessary(vm, propertyTable);

    // We must be calling this after having created the given property or confirmed that it was present
    // already, so we must have a property table now.
    ASSERT(propertyTable);

    // ... and the property must be present.
    PropertyMapEntry* entry = propertyTable->get(propertyName.uid());
    ASSERT(entry);

    if (shouldOptimize)
        entry->hasInferredType = table->willStoreValue(vm, propertyName, value, age);
    else {
        // Not optimizing: force the property's inferred type to Top and clear
        // the entry's inferred-type bit.
        table->makeTop(vm, propertyName, age);
        entry->hasInferredType = false;
    }
}
912
#if DUMP_PROPERTYMAP_STATS

PropertyMapHashTableStats* propertyMapHashTableStats = nullptr;

// RAII logger: its global instance allocates the stats object at startup and
// dumps the accumulated counters at process exit.
struct PropertyMapStatisticsExitLogger {
    PropertyMapStatisticsExitLogger();
    ~PropertyMapStatisticsExitLogger();
};

DEFINE_GLOBAL_FOR_LOGGING(PropertyMapStatisticsExitLogger, logger, );

PropertyMapStatisticsExitLogger::PropertyMapStatisticsExitLogger()
{
    // Intentionally leaked: the stats object must stay alive for the whole
    // process so counters remain valid while the exit logger runs.
    // adoptPtr()/leakPtr() belong to the removed WTF OwnPtr API; use
    // std::make_unique().release() instead.
    propertyMapHashTableStats = std::make_unique<PropertyMapHashTableStats>().release();
}

PropertyMapStatisticsExitLogger::~PropertyMapStatisticsExitLogger()
{
    // Dump aggregate property map statistics for this process.
    unsigned finds = propertyMapHashTableStats->numFinds;
    unsigned collisions = propertyMapHashTableStats->numCollisions;
    dataLogF("\nJSC::PropertyMap statistics for process %d\n\n", getCurrentProcessID());
    dataLogF("%d finds\n", finds);
    dataLogF("%d collisions (%.1f%%)\n", collisions, 100.0 * collisions / finds);
    dataLogF("%d lookups\n", propertyMapHashTableStats->numLookups.load());
    dataLogF("%d lookup probings\n", propertyMapHashTableStats->numLookupProbing.load());
    dataLogF("%d adds\n", propertyMapHashTableStats->numAdds.load());
    dataLogF("%d removes\n", propertyMapHashTableStats->numRemoves.load());
    dataLogF("%d rehashes\n", propertyMapHashTableStats->numRehashes.load());
    dataLogF("%d reinserts\n", propertyMapHashTableStats->numReinserts.load());
}

#endif
945
946 PropertyTable* Structure::copyPropertyTable(VM& vm)
947 {
948     if (!propertyTable())
949         return 0;
950     return PropertyTable::clone(vm, *propertyTable().get());
951 }
952
953 PropertyTable* Structure::copyPropertyTableForPinning(VM& vm)
954 {
955     if (propertyTable())
956         return PropertyTable::clone(vm, *propertyTable().get());
957     return PropertyTable::create(vm, numberOfSlotsForLastOffset(m_offset, m_inlineCapacity));
958 }
959
960 PropertyOffset Structure::getConcurrently(UniquedStringImpl* uid, unsigned& attributes)
961 {
962     PropertyOffset result = invalidOffset;
963     
964     forEachPropertyConcurrently(
965         [&] (const PropertyMapEntry& candidate) -> bool {
966             if (candidate.key != uid)
967                 return true;
968             
969             result = candidate.offset;
970             attributes = candidate.attributes;
971             return false;
972         });
973     
974     return result;
975 }
976
977 Vector<PropertyMapEntry> Structure::getPropertiesConcurrently()
978 {
979     Vector<PropertyMapEntry> result;
980
981     forEachPropertyConcurrently(
982         [&] (const PropertyMapEntry& entry) -> bool {
983             result.append(entry);
984             return true;
985         });
986     
987     return result;
988 }
989
// Adds a new property entry to this structure's table under the concurrent JIT
// lock, returning the offset assigned to the property. The property must not
// already exist.
PropertyOffset Structure::add(VM& vm, PropertyName propertyName, unsigned attributes)
{
    GCSafeConcurrentJITLocker locker(m_lock, vm.heap);
    
    ASSERT(!JSC::isValidOffset(get(vm, propertyName)));

    checkConsistency();
    // Non-enumerable properties and symbols disqualify the fast enumeration path.
    if (attributes & DontEnum || propertyName.isSymbol())
        setIsQuickPropertyAccessAllowedForEnumeration(false);

    auto rep = propertyName.uid();

    if (!propertyTable())
        createPropertyMap(locker, vm);

    PropertyOffset newOffset = propertyTable()->nextOffset(m_inlineCapacity);

    // m_offset is updated by the table as the new maximum offset; deleted
    // offsets may be reused (PropertyOffsetMayChange).
    propertyTable()->add(PropertyMapEntry(rep, newOffset, attributes), m_offset, PropertyTable::PropertyOffsetMayChange);
    
    checkConsistency();
    return newOffset;
}
1012
// Removes a property entry from this structure's table under the concurrent
// JIT lock. Returns the removed property's offset, or invalidOffset when the
// property (or the table itself) does not exist. The offset is recorded as
// deleted so it can be reused by a later add.
PropertyOffset Structure::remove(PropertyName propertyName)
{
    ConcurrentJITLocker locker(m_lock);
    
    checkConsistency();

    auto rep = propertyName.uid();

    if (!propertyTable())
        return invalidOffset;

    PropertyTable::find_iterator position = propertyTable()->find(rep);
    if (!position.first)
        return invalidOffset;

    PropertyOffset offset = position.first->offset;

    propertyTable()->remove(position);
    propertyTable()->addDeletedOffset(offset);

    checkConsistency();
    return offset;
}
1036
// Creates a fresh, empty property table of the given capacity. Must only be
// called when no table exists; the GCSafeConcurrentJITLocker parameter
// witnesses that the caller holds the structure's lock.
void Structure::createPropertyMap(const GCSafeConcurrentJITLocker&, VM& vm, unsigned capacity)
{
    ASSERT(!propertyTable());

    checkConsistency();
    propertyTable().set(vm, this, PropertyTable::create(vm, capacity));
}
1044
1045 void Structure::getPropertyNamesFromStructure(VM& vm, PropertyNameArray& propertyNames, EnumerationMode mode)
1046 {
1047     DeferGC deferGC(vm.heap);
1048     materializePropertyMapIfNecessary(vm, deferGC);
1049     if (!propertyTable())
1050         return;
1051
1052     bool knownUnique = propertyNames.canAddKnownUniqueForStructure();
1053
1054     PropertyTable::iterator end = propertyTable()->end();
1055     for (PropertyTable::iterator iter = propertyTable()->begin(); iter != end; ++iter) {
1056         ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !(iter->attributes & DontEnum));
1057         ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !iter->key->isSymbol());
1058         if (!(iter->attributes & DontEnum) || mode.includeDontEnumProperties()) {
1059             if (iter->key->isSymbol() && !propertyNames.includeSymbolProperties())
1060                 continue;
1061             if (knownUnique)
1062                 propertyNames.addUnchecked(iter->key);
1063             else
1064                 propertyNames.add(iter->key);
1065         }
1066     }
1067 }
1068
// Describes (for logging) why a structure-transition watchpoint fired.
void StructureFireDetail::dump(PrintStream& out) const
{
    out.print("Structure transition from ", *m_structure);
}
1073
// Starts with no structure registered; one may be added later via add().
DeferredStructureTransitionWatchpointFire::DeferredStructureTransitionWatchpointFire()
    : m_structure(nullptr)
{
}
1078
// At the end of the deferral scope, fires the transition watchpoints of the
// registered structure (if any).
DeferredStructureTransitionWatchpointFire::~DeferredStructureTransitionWatchpointFire()
{
    if (m_structure)
        m_structure->transitionWatchpointSet().fireAll(*m_structure->vm(), StructureFireDetail(m_structure));
}
1084
// Registers the structure whose transition watchpoints should fire when this
// deferral scope ends. Only one structure may be registered per scope.
void DeferredStructureTransitionWatchpointFire::add(const Structure* structure)
{
    RELEASE_ASSERT(!m_structure);
    RELEASE_ASSERT(structure);
    m_structure = structure;
}
1091
// Notifies watchers that this structure transitioned. Firing may be deferred
// to the end of a DeferredStructureTransitionWatchpointFire scope.
void Structure::didTransitionFromThisStructure(DeferredStructureTransitionWatchpointFire* deferred) const
{
    // If the structure is being watched, and this is the kind of structure that the DFG would
    // like to watch, then make sure to note for all future versions of this structure that it's
    // unwise to watch it.
    if (m_transitionWatchpointSet.isBeingWatched())
        const_cast<Structure*>(this)->setTransitionWatchpointIsLikelyToBeFired(true);
    
    if (deferred)
        deferred->add(this);
    else
        m_transitionWatchpointSet.fireAll(*vm(), StructureFireDetail(this));
}
1105
// Convenience overload: resolves the prototype against the code block's
// global object.
JSValue Structure::prototypeForLookup(CodeBlock* codeBlock) const
{
    return prototypeForLookup(codeBlock->globalObject());
}
1110
// GC visitation for Structure cells: marks referenced children and drops
// caches that are not worth retaining.
void Structure::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    Structure* thisObject = jsCast<Structure*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());

    JSCell::visitChildren(thisObject, visitor);
    visitor.append(&thisObject->m_globalObject);
    // Only structures for objects keep their prototype and cached prototype
    // chain alive; otherwise the cached chain is dropped.
    if (!thisObject->isObject())
        thisObject->m_cachedPrototypeChain.clear();
    else {
        visitor.append(&thisObject->m_prototype);
        visitor.append(&thisObject->m_cachedPrototypeChain);
    }
    visitor.append(&thisObject->m_previousOrRareData);

    if (thisObject->isPinnedPropertyTable()) {
        // A pinned table must be kept alive across GC.
        ASSERT(thisObject->m_propertyTableUnsafe);
        visitor.append(&thisObject->m_propertyTableUnsafe);
    } else if (visitor.isBuildingHeapSnapshot())
        visitor.append(&thisObject->m_propertyTableUnsafe);
    else if (thisObject->m_propertyTableUnsafe)
        // Unpinned tables are discarded; they can be rematerialized on demand.
        thisObject->m_propertyTableUnsafe.clear();

    visitor.append(&thisObject->m_inferredTypeTable);
}
1136
// A structure is "cheap" during GC when marking it would not keep otherwise
// dead objects alive: its global object and stored prototype object must
// either be absent or already marked.
bool Structure::isCheapDuringGC()
{
    // FIXME: We could make this even safer by returning false if this structure's property table
    // has any large property names.
    // https://bugs.webkit.org/show_bug.cgi?id=157334
    
    return (!m_globalObject || Heap::isMarked(m_globalObject.get()))
        && (!storedPrototypeObject() || Heap::isMarked(storedPrototypeObject()));
}
1146
// Marks this structure if doing so is cheap (see isCheapDuringGC). Returns
// whether the structure is (now) marked.
bool Structure::markIfCheap(SlotVisitor& visitor)
{
    if (!isCheapDuringGC())
        return Heap::isMarked(this);
    
    visitor.appendUnbarrieredReadOnlyPointer(this);
    return true;
}
1155
1156 bool Structure::prototypeChainMayInterceptStoreTo(VM& vm, PropertyName propertyName)
1157 {
1158     if (parseIndex(propertyName))
1159         return anyObjectInChainMayInterceptIndexedAccesses();
1160     
1161     for (Structure* current = this; ;) {
1162         JSValue prototype = current->storedPrototype();
1163         if (prototype.isNull())
1164             return false;
1165         
1166         current = prototype.asCell()->structure(vm);
1167         
1168         unsigned attributes;
1169         PropertyOffset offset = current->get(vm, propertyName, attributes);
1170         if (!JSC::isValidOffset(offset))
1171             continue;
1172         
1173         if (attributes & (ReadOnly | Accessor))
1174             return true;
1175         
1176         return false;
1177     }
1178 }
1179
// Builds a linked chain of StructureShape descriptions for this structure and
// its prototype chain, used for type profiling. Each shape records the
// property names, a constructor name, and dictionary status.
PassRefPtr<StructureShape> Structure::toStructureShape(JSValue value)
{
    RefPtr<StructureShape> baseShape = StructureShape::create();
    RefPtr<StructureShape> curShape = baseShape;
    Structure* curStructure = this;
    JSValue curValue = value;
    while (curStructure) {
        // Record all property names of the current structure.
        curStructure->forEachPropertyConcurrently(
            [&] (const PropertyMapEntry& entry) -> bool {
                curShape->addProperty(*entry.key);
                return true;
            });

        // Prefer the richer name computed from the actual object when we have one.
        if (JSObject* curObject = curValue.getObject())
            curShape->setConstructorName(JSObject::calculatedClassName(curObject));
        else
            curShape->setConstructorName(curStructure->classInfo()->className);

        if (curStructure->isDictionary())
            curShape->enterDictionaryMode();

        curShape->markAsFinal();

        // Chain a new shape for the prototype, if there is one, and continue
        // the walk up the prototype chain.
        if (curStructure->storedPrototypeStructure()) {
            auto newShape = StructureShape::create();
            curShape->setProto(newShape.ptr());
            curShape = WTFMove(newShape);
            curValue = curStructure->storedPrototype();
        }

        curStructure = curStructure->storedPrototypeStructure();
    }
    
    return WTFMove(baseShape);
}
1215
1216 bool Structure::canUseForAllocationsOf(Structure* other)
1217 {
1218     return inlineCapacity() == other->inlineCapacity()
1219         && storedPrototype() == other->storedPrototype()
1220         && objectInitializationBlob() == other->objectInitializationBlob();
1221 }
1222
// Full debug dump: class name, property map (name:offset pairs), indexing
// type, prototype pointer, dictionary status, and leaf status.
void Structure::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":[", classInfo()->className, ", {");
    
    CommaPrinter comma;
    
    // Print each property as "name:offset".
    const_cast<Structure*>(this)->forEachPropertyConcurrently(
        [&] (const PropertyMapEntry& entry) -> bool {
            out.print(comma, entry.key, ":", static_cast<int>(entry.offset));
            return true;
        });
    
    out.print("}, ", IndexingTypeDump(indexingType()));
    
    if (m_prototype.get().isCell())
        out.print(", Proto:", RawPointer(m_prototype.get().asCell()));

    switch (dictionaryKind()) {
    case NoneDictionaryKind:
        if (hasBeenDictionary())
            out.print(", Has been dictionary");
        break;
    case CachedDictionaryKind:
        out.print(", Dictionary");
        break;
    case UncachedDictionaryKind:
        out.print(", UncacheableDictionary");
        break;
    }

    // "Leaf" means the transition watchpoint is still intact; "Shady leaf"
    // means it is likely to be fired.
    if (transitionWatchpointSetIsStillValid())
        out.print(", Leaf");
    else if (transitionWatchpointIsLikelyToBeFired())
        out.print(", Shady leaf");
    
    out.print("]");
}
1260
1261 void Structure::dumpInContext(PrintStream& out, DumpContext* context) const
1262 {
1263     if (context)
1264         context->structures.dumpBrief(this, out);
1265     else
1266         dump(out);
1267 }
1268
// Compact reference form used by dump contexts: "%<id>:<ClassName>".
void Structure::dumpBrief(PrintStream& out, const CString& string) const
{
    out.print("%", string, ":", classInfo()->className);
}
1273
// Section header printed before brief structure references in a dump context.
void Structure::dumpContextHeader(PrintStream& out)
{
    out.print("Structures:");
}
1278
1279 #if DO_PROPERTYMAP_CONSTENCY_CHECK
1280
// Exhaustive (debug-only) validation of the property table's hash index and
// entry storage invariants. O(n^2) in places — only compiled when
// DO_PROPERTYMAP_CONSTENCY_CHECK is enabled.
void PropertyTable::checkConsistency()
{
    // Index sizing invariants: power-of-two size, mask = size - 1, and load
    // factor bounds.
    ASSERT(m_indexSize >= PropertyTable::MinimumTableSize);
    ASSERT(m_indexMask);
    ASSERT(m_indexSize == m_indexMask + 1);
    ASSERT(!(m_indexSize & m_indexMask));

    ASSERT(m_keyCount <= m_indexSize / 2);
    ASSERT(m_keyCount + m_deletedCount <= m_indexSize / 2);
    ASSERT(m_deletedCount <= m_indexSize / 4);

    // Walk the hash index: count live and deleted slots, and verify that no
    // entry index appears twice.
    unsigned indexCount = 0;
    unsigned deletedIndexCount = 0;
    for (unsigned a = 0; a != m_indexSize; ++a) {
        unsigned entryIndex = m_index[a];
        if (entryIndex == PropertyTable::EmptyEntryIndex)
            continue;
        if (entryIndex == deletedEntryIndex()) {
            ++deletedIndexCount;
            continue;
        }
        ASSERT(entryIndex < deletedEntryIndex());
        ASSERT(entryIndex - 1 <= usedCount());
        ++indexCount;

        // Quadratic duplicate scan — acceptable in a debug-only check.
        for (unsigned b = a + 1; b != m_indexSize; ++b)
            ASSERT(m_index[b] != entryIndex);
    }
    ASSERT(indexCount == m_keyCount);
    ASSERT(deletedIndexCount == m_deletedCount);

    ASSERT(!table()[deletedEntryIndex() - 1].key);

    // Walk the entry storage: every live entry must be reachable by probing
    // the index with its key's hash (double hashing), at the expected slot.
    unsigned nonEmptyEntryCount = 0;
    for (unsigned c = 0; c < usedCount(); ++c) {
        StringImpl* rep = table()[c].key;
        if (rep == PROPERTY_MAP_DELETED_ENTRY_KEY)
            continue;
        ++nonEmptyEntryCount;
        unsigned i = IdentifierRepHash::hash(rep);
        unsigned k = 0;
        unsigned entryIndex;
        while (1) {
            entryIndex = m_index[i & m_indexMask];
            ASSERT(entryIndex != PropertyTable::EmptyEntryIndex);
            if (rep == table()[entryIndex - 1].key)
                break;
            if (k == 0)
                k = 1 | doubleHash(IdentifierRepHash::hash(rep));
            i += k;
        }
        ASSERT(entryIndex == c + 1);
    }

    ASSERT(nonEmptyEntryCount == m_keyCount);
}
1337
// Debug-only structure validation: offset consistency, the quick-enumeration
// invariant (no DontEnum or symbol properties), and the table's own checks.
void Structure::checkConsistency()
{
    checkOffsetConsistency();

    if (!propertyTable())
        return;

    if (isQuickPropertyAccessAllowedForEnumeration()) {
        PropertyTable::iterator end = propertyTable()->end();
        for (PropertyTable::iterator iter = propertyTable()->begin(); iter != end; ++iter) {
            ASSERT(!(iter->attributes & DontEnum));
            ASSERT(!iter->key->isSymbol());
        }
    }

    propertyTable()->checkConsistency();
}
1355
1356 #else
1357
// Cheap fallback used when DO_PROPERTYMAP_CONSTENCY_CHECK is disabled: only
// validates offset consistency.
inline void Structure::checkConsistency()
{
    checkOffsetConsistency();
}
1362
1363 #endif // DO_PROPERTYMAP_CONSTENCY_CHECK
1364
1365 bool ClassInfo::hasStaticSetterOrReadonlyProperties() const
1366 {
1367     for (const ClassInfo* ci = this; ci; ci = ci->parentClass) {
1368         if (const HashTable* table = ci->staticPropHashTable) {
1369             if (table->hasSetterOrReadonlyProperties)
1370                 return true;
1371         }
1372     }
1373     return false;
1374 }
1375
// Caches the enumerator in this structure's rare data, allocating the rare
// data on demand. Dictionaries must never cache enumerators.
void Structure::setCachedPropertyNameEnumerator(VM& vm, JSPropertyNameEnumerator* enumerator)
{
    ASSERT(!isDictionary());
    if (!hasRareData())
        allocateRareData(vm);
    rareData()->setCachedPropertyNameEnumerator(vm, enumerator);
}
1383
1384 JSPropertyNameEnumerator* Structure::cachedPropertyNameEnumerator() const
1385 {
1386     if (!hasRareData())
1387         return nullptr;
1388     return rareData()->cachedPropertyNameEnumerator();
1389 }
1390
// Determines whether a property name enumerator may be cached for this
// structure: not a dictionary, no indexed properties, and nothing in the
// (cached) prototype chain overrides getPropertyNames.
bool Structure::canCachePropertyNameEnumerator() const
{
    if (isDictionary())
        return false;

    if (hasIndexedProperties(indexingType()))
        return false;

    if (typeInfo().overridesGetPropertyNames())
        return false;

    // Walk the cached prototype chain (a null-terminated array of structures)
    // and bail if any member overrides getPropertyNames.
    StructureChain* structureChain = m_cachedPrototypeChain.get();
    ASSERT(structureChain);
    WriteBarrier<Structure>* structure = structureChain->head();
    while (true) {
        if (!structure->get())
            break;
        if (structure->get()->typeInfo().overridesGetPropertyNames())
            return false;
        structure++;
    }
    
    return true;
}
1415     
1416 bool Structure::canAccessPropertiesQuicklyForEnumeration() const
1417 {
1418     if (!isQuickPropertyAccessAllowedForEnumeration())
1419         return false;
1420     if (hasGetterSetterProperties())
1421         return false;
1422     if (isUncacheableDictionary())
1423         return false;
1424     return true;
1425 }
1426
1427 } // namespace JSC