All prototypes should call didBecomePrototype()
Source/JavaScriptCore/runtime/Structure.cpp
1 /*
2  * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "Structure.h"
28
29 #include "BuiltinNames.h"
30 #include "CodeBlock.h"
31 #include "DumpContext.h"
32 #include "JSCInlines.h"
33 #include "JSObject.h"
34 #include "JSPropertyNameEnumerator.h"
35 #include "Lookup.h"
36 #include "PropertyMapHashTable.h"
37 #include "PropertyNameArray.h"
38 #include "StructureChain.h"
39 #include "StructureRareDataInlines.h"
40 #include "WeakGCMapInlines.h"
41 #include <wtf/CommaPrinter.h>
42 #include <wtf/NeverDestroyed.h>
43 #include <wtf/ProcessID.h>
44 #include <wtf/RefPtr.h>
45 #include <wtf/Threading.h>
46
47 #define DUMP_STRUCTURE_ID_STATISTICS 0
48
49 namespace JSC {
50
51 #if DUMP_STRUCTURE_ID_STATISTICS
52 static HashSet<Structure*>& liveStructureSet = *(new HashSet<Structure*>);
53 #endif
54
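// A StructureTransitionTable starts out holding at most one transition inline:
// m_data stores a WeakImpl* tagged with UsingSingleSlotFlag in its low bit. Once a
// second transition is added, add() below switches to a full TransitionMap keyed by
// (property name, attributes). This owner is the finalizer for the single-slot case:
// when the weakly-held transition Structure dies, it frees the WeakImpl and resets
// m_data back to the empty single-slot state.
//
// Rough m_data layout:
//     [ WeakImpl*      | UsingSingleSlotFlag ]   single-slot mode
//     [ TransitionMap* | flag clear          ]   map mode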
55 class SingleSlotTransitionWeakOwner final : public WeakHandleOwner {
56     void finalize(Handle<Unknown>, void* context) override
57     {
58         StructureTransitionTable* table = reinterpret_cast<StructureTransitionTable*>(context);
59         ASSERT(table->isUsingSingleSlot());
60         WeakSet::deallocate(table->weakImpl());
61         table->m_data = StructureTransitionTable::UsingSingleSlotFlag;
62     }
63 };
64
65 static SingleSlotTransitionWeakOwner& singleSlotTransitionWeakOwner()
66 {
67     static NeverDestroyed<SingleSlotTransitionWeakOwner> owner;
68     return owner;
69 }
70
71 inline Structure* StructureTransitionTable::singleTransition() const
72 {
73     ASSERT(isUsingSingleSlot());
74     if (WeakImpl* impl = this->weakImpl()) {
75         if (impl->state() == WeakImpl::Live)
76             return jsCast<Structure*>(impl->jsValue().asCell());
77     }
78     return nullptr;
79 }
80
81 inline void StructureTransitionTable::setSingleTransition(Structure* structure)
82 {
83     ASSERT(isUsingSingleSlot());
84     if (WeakImpl* impl = this->weakImpl())
85         WeakSet::deallocate(impl);
86     WeakImpl* impl = WeakSet::allocate(structure, &singleSlotTransitionWeakOwner(), this);
87     m_data = bitwise_cast<intptr_t>(impl) | UsingSingleSlotFlag;
88 }
89
90 bool StructureTransitionTable::contains(UniquedStringImpl* rep, unsigned attributes) const
91 {
92     if (isUsingSingleSlot()) {
93         Structure* transition = singleTransition();
94         return transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes;
95     }
96     return map()->get(std::make_pair(rep, attributes));
97 }
98
99 inline Structure* StructureTransitionTable::get(UniquedStringImpl* rep, unsigned attributes) const
100 {
101     if (isUsingSingleSlot()) {
102         Structure* transition = singleTransition();
103         return (transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes) ? transition : 0;
104     }
105     return map()->get(std::make_pair(rep, attributes));
106 }
107
108 void StructureTransitionTable::add(VM& vm, Structure* structure)
109 {
110     if (isUsingSingleSlot()) {
111         Structure* existingTransition = singleTransition();
112
113         // This handles the first transition being added.
114         if (!existingTransition) {
115             setSingleTransition(structure);
116             return;
117         }
118
119         // This handles the second transition being added
120         // (or the first transition being despecified!)
121         setMap(new TransitionMap(vm));
122         add(vm, existingTransition);
123     }
124
125     // Add the structure to the map.
126
127     // Newer versions of the STL have an std::make_pair function that takes rvalue references.
128     // When either of the parameters is a bitfield, the C++ compiler will try to bind it as an lvalue, which is invalid. To work around this, use unary "+" to make the parameter an rvalue.
129     // See https://bugs.webkit.org/show_bug.cgi?id=59261 for more details
130     map()->set(std::make_pair(structure->m_nameInPrevious.get(), +structure->attributesInPrevious()), structure);
131 }
132
133 void Structure::dumpStatistics()
134 {
135 #if DUMP_STRUCTURE_ID_STATISTICS
136     unsigned numberLeaf = 0;
137     unsigned numberUsingSingleSlot = 0;
138     unsigned numberSingletons = 0;
139     unsigned numberWithPropertyMaps = 0;
140     unsigned totalPropertyMapsSize = 0;
141
142     HashSet<Structure*>::const_iterator end = liveStructureSet.end();
143     for (HashSet<Structure*>::const_iterator it = liveStructureSet.begin(); it != end; ++it) {
144         Structure* structure = *it;
145
146         switch (structure->m_transitionTable.size()) {
147             case 0:
148                 ++numberLeaf;
149                 if (!structure->previousID())
150                     ++numberSingletons;
151                 break;
152
153             case 1:
154                 ++numberUsingSingleSlot;
155                 break;
156         }
157
158         if (PropertyTable* table = structure->propertyTableOrNull()) {
159             ++numberWithPropertyMaps;
160             totalPropertyMapsSize += table->sizeInMemory();
161         }
162     }
163
164     dataLogF("Number of live Structures: %d\n", liveStructureSet.size());
165     dataLogF("Number of Structures using the single item optimization for transition map: %d\n", numberUsingSingleSlot);
166     dataLogF("Number of Structures that are leaf nodes: %d\n", numberLeaf);
167     dataLogF("Number of Structures that are singletons: %d\n", numberSingletons);
168     dataLogF("Number of Structures with PropertyMaps: %d\n", numberWithPropertyMaps);
169
170     dataLogF("Size of a single Structure: %d\n", static_cast<unsigned>(sizeof(Structure)));
171     dataLogF("Size of sum of all property maps: %d\n", totalPropertyMapsSize);
172     dataLogF("Size of average of all property maps: %f\n", static_cast<double>(totalPropertyMapsSize) / static_cast<double>(liveStructureSet.size()));
173 #else
174     dataLogF("Dumping Structure statistics is not enabled.\n");
175 #endif
176 }
177
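// Primary constructor, used for a fresh Structure that is not a transition from an
// existing one: it allocates a StructureID from the VM's StructureIDTable, packs the
// indexing type and TypeInfo into m_blob, and sets every bitfield flag to its default
// (not a dictionary, property table unpinned, no transition performed yet).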
178 Structure::Structure(VM& vm, JSGlobalObject* globalObject, JSValue prototype, const TypeInfo& typeInfo, const ClassInfo* classInfo, IndexingType indexingType, unsigned inlineCapacity)
179     : JSCell(vm, vm.structureStructure.get())
180     , m_blob(vm.heap.structureIDTable().allocateID(this), indexingType, typeInfo)
181     , m_outOfLineTypeFlags(typeInfo.outOfLineTypeFlags())
182     , m_inlineCapacity(inlineCapacity)
183     , m_bitField(0)
184     , m_globalObject(vm, this, globalObject, WriteBarrier<JSGlobalObject>::MayBeNull)
185     , m_prototype(vm, this, prototype)
186     , m_classInfo(classInfo)
187     , m_transitionWatchpointSet(IsWatched)
188     , m_offset(invalidOffset)
189     , m_propertyHash(0)
190 {
191     setDictionaryKind(NoneDictionaryKind);
192     setIsPinnedPropertyTable(false);
193     setHasGetterSetterProperties(classInfo->hasStaticSetterOrReadonlyProperties());
194     setHasCustomGetterSetterProperties(false);
195     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(classInfo->hasStaticSetterOrReadonlyProperties());
196     setHasUnderscoreProtoPropertyExcludingOriginalProto(false);
197     setIsQuickPropertyAccessAllowedForEnumeration(true);
198     setAttributesInPrevious(0);
199     setDidPreventExtensions(false);
200     setDidTransition(false);
201     setStaticPropertiesReified(false);
202     setTransitionWatchpointIsLikelyToBeFired(false);
203     setHasBeenDictionary(false);
204     setIsAddingPropertyForTransition(false);
205  
206     ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
207     ASSERT(static_cast<PropertyOffset>(inlineCapacity) < firstOutOfLineOffset);
208     ASSERT(!hasRareData());
209     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
210     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
211     ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData);
212 }
213
214 const ClassInfo Structure::s_info = { "Structure", nullptr, nullptr, nullptr, CREATE_METHOD_TABLE(Structure) };
215
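// Bootstrap constructor for the Structure that describes Structure cells themselves.
// The cell is created early (CreatingEarlyCell) during VM startup, uses Structure's
// own ClassInfo, and fixes its TypeInfo to CellType with StructureFlags.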
216 Structure::Structure(VM& vm)
217     : JSCell(CreatingEarlyCell)
218     , m_inlineCapacity(0)
219     , m_bitField(0)
220     , m_prototype(vm, this, jsNull())
221     , m_classInfo(info())
222     , m_transitionWatchpointSet(IsWatched)
223     , m_offset(invalidOffset)
224     , m_propertyHash(0)
225 {
226     setDictionaryKind(NoneDictionaryKind);
227     setIsPinnedPropertyTable(false);
228     setHasGetterSetterProperties(m_classInfo->hasStaticSetterOrReadonlyProperties());
229     setHasCustomGetterSetterProperties(false);
230     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(m_classInfo->hasStaticSetterOrReadonlyProperties());
231     setHasUnderscoreProtoPropertyExcludingOriginalProto(false);
232     setIsQuickPropertyAccessAllowedForEnumeration(true);
233     setAttributesInPrevious(0);
234     setDidPreventExtensions(false);
235     setDidTransition(false);
236     setStaticPropertiesReified(false);
237     setTransitionWatchpointIsLikelyToBeFired(false);
238     setHasBeenDictionary(false);
239     setIsAddingPropertyForTransition(false);
240  
241     TypeInfo typeInfo = TypeInfo(CellType, StructureFlags);
242     m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), 0, typeInfo);
243     m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags();
244
245     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
246     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
247     ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData);
248 }
249
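// Transition constructor: copies most state from `previous`, allocates a fresh
// StructureID, records `previous` as this structure's previousID, and notifies
// `previous` (firing its transition watchpoints) via didTransitionFromThisStructure().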
250 Structure::Structure(VM& vm, Structure* previous, DeferredStructureTransitionWatchpointFire* deferred)
251     : JSCell(vm, vm.structureStructure.get())
252     , m_inlineCapacity(previous->m_inlineCapacity)
253     , m_bitField(0)
254     , m_prototype(vm, this, previous->m_prototype.get())
255     , m_classInfo(previous->m_classInfo)
256     , m_transitionWatchpointSet(IsWatched)
257     , m_offset(invalidOffset)
258     , m_propertyHash(previous->m_propertyHash)
259 {
260     setDictionaryKind(previous->dictionaryKind());
261     setIsPinnedPropertyTable(false);
262     setHasBeenFlattenedBefore(previous->hasBeenFlattenedBefore());
263     setHasGetterSetterProperties(previous->hasGetterSetterProperties());
264     setHasCustomGetterSetterProperties(previous->hasCustomGetterSetterProperties());
265     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(previous->hasReadOnlyOrGetterSetterPropertiesExcludingProto());
266     setHasUnderscoreProtoPropertyExcludingOriginalProto(previous->hasUnderscoreProtoPropertyExcludingOriginalProto());
267     setIsQuickPropertyAccessAllowedForEnumeration(previous->isQuickPropertyAccessAllowedForEnumeration());
268     setAttributesInPrevious(0);
269     setDidPreventExtensions(previous->didPreventExtensions());
270     setDidTransition(true);
271     setStaticPropertiesReified(previous->staticPropertiesReified());
272     setHasBeenDictionary(previous->hasBeenDictionary());
273     setIsAddingPropertyForTransition(false);
274  
275     TypeInfo typeInfo = previous->typeInfo();
276     m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), previous->indexingModeIncludingHistory(), typeInfo);
277     m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags();
278
279     ASSERT(!previous->typeInfo().structureIsImmortal());
280     setPreviousID(vm, previous);
281
282     previous->didTransitionFromThisStructure(deferred);
283     
284     // Copy this bit now, in case previous was being watched.
285     setTransitionWatchpointIsLikelyToBeFired(previous->transitionWatchpointIsLikelyToBeFired());
286
287     if (previous->m_globalObject)
288         m_globalObject.set(vm, this, previous->m_globalObject.get());
289     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
290     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
291     ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData);
292 }
293
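// Returns this structure's StructureID to the StructureIDTable, unless the structure
// is marked immortal, in which case the ID is never handed back.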
294 Structure::~Structure()
295 {
296     if (typeInfo().structureIsImmortal())
297         return;
298     Heap::heap(this)->structureIDTable().deallocateID(this, m_blob.structureID());
299 }
300
301 void Structure::destroy(JSCell* cell)
302 {
303     static_cast<Structure*>(cell)->Structure::~Structure();
304 }
305
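// Poly-proto structures do not keep the prototype in m_prototype. Instead they reserve
// an invisible, DontEnum property at knownPolyProtoOffset (always an inline offset) so
// that each object stores its own prototype value, and m_prototype is cleared to the
// empty JSValue.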
306 Structure* Structure::create(PolyProtoTag, VM& vm, JSGlobalObject* globalObject, JSObject* prototype, const TypeInfo& typeInfo, const ClassInfo* classInfo, IndexingType indexingType, unsigned inlineCapacity)
307 {
308     Structure* result = create(vm, globalObject, prototype, typeInfo, classInfo, indexingType, inlineCapacity);
309
310     unsigned oldOutOfLineCapacity = result->outOfLineCapacity();
311     result->addPropertyWithoutTransition(
312         vm, vm.propertyNames->builtinNames().polyProtoName(), static_cast<unsigned>(PropertyAttribute::DontEnum),
313         [&] (const GCSafeConcurrentJSLocker&, PropertyOffset offset, PropertyOffset newLastOffset) {
314             RELEASE_ASSERT(Structure::outOfLineCapacity(newLastOffset) == oldOutOfLineCapacity);
315             RELEASE_ASSERT(offset == knownPolyProtoOffset);
316             RELEASE_ASSERT(isInlineOffset(knownPolyProtoOffset));
317             result->m_prototype.setWithoutWriteBarrier(JSValue());
318             result->setLastOffset(newLastOffset);
319         });
320
321     return result;
322 }
323
324 bool Structure::isValidPrototype(JSValue prototype)
325 {
326     return prototype.isNull() || (prototype.isObject() && prototype.getObject()->mayBePrototype());
327 }
328
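// Walks back along the previousID chain collecting every structure that lacks a
// property table, stopping at the first ancestor that still owns one. On return,
// `structures` holds the table-less structures (newest first), `structure` is the
// ancestor whose lock is intentionally left held (or null if the chain ran out), and
// `table` is that ancestor's property table (or null).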
329 void Structure::findStructuresAndMapForMaterialization(Vector<Structure*, 8>& structures, Structure*& structure, PropertyTable*& table)
330 {
331     ASSERT(structures.isEmpty());
332     table = 0;
333
334     for (structure = this; structure; structure = structure->previousID()) {
335         structure->m_lock.lock();
336         
337         table = structure->propertyTableOrNull();
338         if (table) {
339             // Leave the structure locked, so that the caller can do things to it atomically
340             // before it loses its property table.
341             return;
342         }
343         
344         structures.append(structure);
345         structure->m_lock.unlock();
346     }
347     
348     ASSERT(!structure);
349     ASSERT(!table);
350 }
351
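// Rebuilds a property table for this structure: copy the nearest surviving ancestor's
// table (or start from an empty one), then replay each intervening transition's
// m_nameInPrevious / attributesInPrevious / m_offset, oldest first. If setPropertyTable
// is true, the rebuilt table is also installed on this structure under its lock.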
352 PropertyTable* Structure::materializePropertyTable(VM& vm, bool setPropertyTable)
353 {
354     ASSERT(structure(vm)->classInfo() == info());
355     ASSERT(!isAddingPropertyForTransition());
356     
357     DeferGC deferGC(vm.heap);
358     
359     Vector<Structure*, 8> structures;
360     Structure* structure;
361     PropertyTable* table;
362     
363     findStructuresAndMapForMaterialization(structures, structure, table);
364     
365     unsigned capacity = numberOfSlotsForLastOffset(m_offset, m_inlineCapacity);
366     if (table) {
367         table = table->copy(vm, capacity);
368         structure->m_lock.unlock();
369     } else
370         table = PropertyTable::create(vm, capacity);
371     
372     // Must hold the lock on this structure, since we will be modifying this structure's
373     // property map. We don't want getConcurrently() to see the property map in a half-baked
374     // state.
375     GCSafeConcurrentJSLocker locker(m_lock, vm.heap);
376     if (setPropertyTable)
377         this->setPropertyTable(vm, table);
378
379     for (size_t i = structures.size(); i--;) {
380         structure = structures[i];
381         if (!structure->m_nameInPrevious)
382             continue;
383         PropertyMapEntry entry(structure->m_nameInPrevious.get(), structure->m_offset, structure->attributesInPrevious());
384         table->add(entry, m_offset, PropertyTable::PropertyOffsetMustNotChange);
385     }
386     
387     checkOffsetConsistency(
388         table,
389         [&] () {
390             dataLog("Detected in materializePropertyTable.\n");
391             dataLog("Found structure = ", RawPointer(structure), "\n");
392             dataLog("structures = ");
393             CommaPrinter comma;
394             for (Structure* structure : structures)
395                 dataLog(comma, RawPointer(structure));
396             dataLog("\n");
397         });
398     
399     return table;
400 }
401
402 Structure* Structure::addPropertyTransitionToExistingStructureImpl(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset)
403 {
404     ASSERT(!structure->isDictionary());
405     ASSERT(structure->isObject());
406
407     if (Structure* existingTransition = structure->m_transitionTable.get(uid, attributes)) {
408         validateOffset(existingTransition->m_offset, existingTransition->inlineCapacity());
409         offset = existingTransition->m_offset;
410         return existingTransition;
411     }
412
413     return 0;
414 }
415
416 Structure* Structure::addPropertyTransitionToExistingStructure(Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset)
417 {
418     ASSERT(!isCompilationThread());
419     return addPropertyTransitionToExistingStructureImpl(structure, propertyName.uid(), attributes, offset);
420 }
421
422 Structure* Structure::addPropertyTransitionToExistingStructureConcurrently(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset)
423 {
424     ConcurrentJSLocker locker(structure->m_lock);
425     return addPropertyTransitionToExistingStructureImpl(structure, uid, attributes, offset);
426 }
427
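// Answers whether a hole (missing index) in an object of this structure could be
// observable through the prototype chain: true if this structure intercepts indexed
// accesses, or if any object on the prototype chain has indexed properties or
// intercepts indexed accesses.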
428 bool Structure::holesMustForwardToPrototype(VM& vm, JSObject* base) const
429 {
430     ASSERT(base->structure(vm) == this);
431
432     if (this->mayInterceptIndexedAccesses())
433         return true;
434
435     JSValue prototype = this->storedPrototype(base);
436     if (!prototype.isObject())
437         return false;
438     JSObject* object = asObject(prototype);
439
440     while (true) {
441         Structure& structure = *object->structure(vm);
442         if (hasIndexedProperties(object->indexingType()) || structure.mayInterceptIndexedAccesses())
443             return true;
444         prototype = structure.storedPrototype(object);
445         if (!prototype.isObject())
446             return false;
447         object = asObject(prototype);
448     }
449
450     RELEASE_ASSERT_NOT_REACHED();
451     return false;
452 }
453
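// Illustrative call shape (hypothetical caller; the real callers are JSObject's
// property-put paths):
//
//     PropertyOffset offset;
//     Structure* newStructure = Structure::addPropertyTransition(vm, oldStructure, propertyName, attributes, offset);
//     // ... switch the object to newStructure and store the value at `offset`.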
454 Structure* Structure::addPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset)
455 {
456     Structure* newStructure = addPropertyTransitionToExistingStructure(
457         structure, propertyName, attributes, offset);
458     if (newStructure)
459         return newStructure;
460
461     return addNewPropertyTransition(
462         vm, structure, propertyName, attributes, offset, PutPropertySlot::UnknownContext);
463 }
464
465 Structure* Structure::addNewPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset, PutPropertySlot::Context context, DeferredStructureTransitionWatchpointFire* deferred)
466 {
467     ASSERT(!structure->isDictionary());
468     ASSERT(structure->isObject());
469     ASSERT(!Structure::addPropertyTransitionToExistingStructure(structure, propertyName, attributes, offset));
470     
471     int maxTransitionLength;
472     if (context == PutPropertySlot::PutById)
473         maxTransitionLength = s_maxTransitionLengthForNonEvalPutById;
474     else
475         maxTransitionLength = s_maxTransitionLength;
476     if (structure->transitionCount() > maxTransitionLength) {
477         ASSERT(!isCopyOnWrite(structure->indexingMode()));
478         Structure* transition = toCacheableDictionaryTransition(vm, structure, deferred);
479         ASSERT(structure != transition);
480         offset = transition->add(vm, propertyName, attributes);
481         return transition;
482     }
483     
484     Structure* transition = create(vm, structure, deferred);
485
486     transition->m_cachedPrototypeChain.setMayBeNull(vm, transition, structure->m_cachedPrototypeChain.get());
487     
488     // While we are adding the property, rematerializing the property table is super weird: we already
489     // have a m_nameInPrevious and attributesInPrevious but the m_offset is still wrong. If the
490     // materialization algorithm runs, it'll build a property table that already has the property but
491     // at a bogus offset. Rather than try to teach the materialization code how to create a table under
492     // those conditions, we just tell the GC not to blow the table away during this period of time.
493     // Holding the lock ensures that we either do this before the GC starts scanning the structure, in
494     // which case the GC will not blow the table away, or we do it after the GC already ran in which
495     // case all is well. If it wasn't for the lock, the GC would have a TOCTOU race: it could read
496     // isAddingPropertyForTransition before we set it to true, and then blow the table away after.
497     {
498         ConcurrentJSLocker locker(transition->m_lock);
499         transition->setIsAddingPropertyForTransition(true);
500     }
501
502     transition->m_blob.setIndexingModeIncludingHistory(structure->indexingModeIncludingHistory() & ~CopyOnWrite);
503     transition->m_nameInPrevious = propertyName.uid();
504     transition->setAttributesInPrevious(attributes);
505     transition->setPropertyTable(vm, structure->takePropertyTableOrCloneIfPinned(vm));
506     transition->m_offset = structure->m_offset;
507
508     offset = transition->add(vm, propertyName, attributes);
509
510     // Now that everything is fine with the new structure's bookkeeping, the GC is free to blow the
511     // table away if it wants. We can now rebuild it fine.
512     WTF::storeStoreFence();
513     transition->setIsAddingPropertyForTransition(false);
514
515     checkOffset(transition->m_offset, transition->inlineCapacity());
516     {
517         ConcurrentJSLocker locker(structure->m_lock);
518         DeferGC deferGC(vm.heap);
519         structure->m_transitionTable.add(vm, transition);
520     }
521     transition->checkOffsetConsistency();
522     structure->checkOffsetConsistency();
523     return transition;
524 }
525
526 Structure* Structure::removePropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, PropertyOffset& offset)
527 {
528     // NOTE: There are some good reasons why this goes directly to uncacheable dictionary rather than
529     // caching the removal. We can fix all of these things, but we must remember to do so, if we ever try
530     // to optimize this case.
531     //
532     // - Cached transitions usually steal the property table, and assume that this is possible because they
533     //   can just rebuild the table by looking at past transitions. That code assumes that the table only
534     //   grew and never shrank. To support removals, we'd have to change the property table materialization
535     //   code to handle deletions. Also, we have logic to get the list of properties on a structure that
536     //   lacks a property table by just looking back through the set of transitions since the last
537     //   structure that had a pinned table. That logic would also have to be changed to handle cached
538     //   removals.
539     //
540     ASSERT(!structure->isUncacheableDictionary());
541
542     Structure* transition = toUncacheableDictionaryTransition(vm, structure);
543
544     offset = transition->remove(propertyName);
545
546     transition->checkOffsetConsistency();
547     return transition;
548 }
549
550 Structure* Structure::changePrototypeTransition(VM& vm, Structure* structure, JSValue prototype, DeferredStructureTransitionWatchpointFire& deferred)
551 {
552     ASSERT(isValidPrototype(prototype));
553
554     DeferGC deferGC(vm.heap);
555     Structure* transition = create(vm, structure, &deferred);
556
557     transition->m_prototype.set(vm, transition, prototype);
558
559     PropertyTable* table = structure->copyPropertyTableForPinning(vm);
560     transition->pin(holdLock(transition->m_lock), vm, table);
561     transition->m_offset = structure->m_offset;
562     
563     transition->checkOffsetConsistency();
564     return transition;
565 }
566
567 Structure* Structure::attributeChangeTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes)
568 {
569     if (!structure->isUncacheableDictionary()) {
570         Structure* transition = create(vm, structure);
571
572         PropertyTable* table = structure->copyPropertyTableForPinning(vm);
573         transition->pin(holdLock(transition->m_lock), vm, table);
574         transition->m_offset = structure->m_offset;
575         
576         structure = transition;
577     }
578
579     PropertyMapEntry* entry = structure->ensurePropertyTable(vm)->get(propertyName.uid());
580     ASSERT(entry);
581     entry->attributes = attributes;
582
583     structure->checkOffsetConsistency();
584     return structure;
585 }
586
587 Structure* Structure::toDictionaryTransition(VM& vm, Structure* structure, DictionaryKind kind, DeferredStructureTransitionWatchpointFire* deferred)
588 {
589     ASSERT(!structure->isUncacheableDictionary());
590     DeferGC deferGC(vm.heap);
591     
592     Structure* transition = create(vm, structure, deferred);
593
594     PropertyTable* table = structure->copyPropertyTableForPinning(vm);
595     transition->pin(holdLock(transition->m_lock), vm, table);
596     transition->m_offset = structure->m_offset;
597     transition->setDictionaryKind(kind);
598     transition->setHasBeenDictionary(true);
599     
600     transition->checkOffsetConsistency();
601     return transition;
602 }
603
604 Structure* Structure::toCacheableDictionaryTransition(VM& vm, Structure* structure, DeferredStructureTransitionWatchpointFire* deferred)
605 {
606     return toDictionaryTransition(vm, structure, CachedDictionaryKind, deferred);
607 }
608
609 Structure* Structure::toUncacheableDictionaryTransition(VM& vm, Structure* structure)
610 {
611     return toDictionaryTransition(vm, structure, UncachedDictionaryKind);
612 }
613
614 Structure* Structure::sealTransition(VM& vm, Structure* structure)
615 {
616     return nonPropertyTransition(vm, structure, NonPropertyTransition::Seal);
617 }
618
619 Structure* Structure::freezeTransition(VM& vm, Structure* structure)
620 {
621     return nonPropertyTransition(vm, structure, NonPropertyTransition::Freeze);
622 }
623
624 Structure* Structure::preventExtensionsTransition(VM& vm, Structure* structure)
625 {
626     return nonPropertyTransition(vm, structure, NonPropertyTransition::PreventExtensions);
627 }
628
629 PropertyTable* Structure::takePropertyTableOrCloneIfPinned(VM& vm)
630 {
631     // This must always return a property table. It can't return null.
632     PropertyTable* result = propertyTableOrNull();
633     if (result) {
634         if (isPinnedPropertyTable())
635             return result->copy(vm, result->size() + 1);
636         ConcurrentJSLocker locker(m_lock);
637         setPropertyTable(vm, nullptr);
638         return result;
639     }
640     bool setPropertyTable = false;
641     return materializePropertyTable(vm, setPropertyTable);
642 }
643
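// Slow path for transitions that change no named property: Seal, Freeze,
// PreventExtensions, and indexing-mode changes. Such transitions are cached in the
// parent's transition table under a null property name plus the transition's attribute
// bits; dictionary structures are not cached and simply pin their own table.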
644 Structure* Structure::nonPropertyTransitionSlow(VM& vm, Structure* structure, NonPropertyTransition transitionKind)
645 {
646     unsigned attributes = toAttributes(transitionKind);
647     IndexingType indexingModeIncludingHistory = newIndexingType(structure->indexingModeIncludingHistory(), transitionKind);
648     
649     Structure* existingTransition;
650     if (!structure->isDictionary() && (existingTransition = structure->m_transitionTable.get(0, attributes))) {
651         ASSERT(existingTransition->attributesInPrevious() == attributes);
652         ASSERT(existingTransition->indexingModeIncludingHistory() == indexingModeIncludingHistory);
653         return existingTransition;
654     }
655     
656     DeferGC deferGC(vm.heap);
657     
658     Structure* transition = create(vm, structure);
659     transition->setAttributesInPrevious(attributes);
660     transition->m_blob.setIndexingModeIncludingHistory(indexingModeIncludingHistory);
661     
662     if (preventsExtensions(transitionKind))
663         transition->setDidPreventExtensions(true);
664     
665     if (setsDontDeleteOnAllProperties(transitionKind)
666         || setsReadOnlyOnNonAccessorProperties(transitionKind)) {
667         // We pin the property table on transitions that do wholesale editing of the property
668         // table, since our logic for walking the property transition chain to rematerialize the
669         // table doesn't know how to take into account such wholesale edits.
670
671         PropertyTable* table = structure->copyPropertyTableForPinning(vm);
672         transition->pinForCaching(holdLock(transition->m_lock), vm, table);
673         transition->m_offset = structure->m_offset;
674         
675         table = transition->propertyTableOrNull();
676         RELEASE_ASSERT(table);
677         for (auto& entry : *table) {
678             if (setsDontDeleteOnAllProperties(transitionKind))
679                 entry.attributes |= static_cast<unsigned>(PropertyAttribute::DontDelete);
680             if (setsReadOnlyOnNonAccessorProperties(transitionKind) && !(entry.attributes & PropertyAttribute::Accessor))
681                 entry.attributes |= static_cast<unsigned>(PropertyAttribute::ReadOnly);
682         }
683     } else {
684         transition->setPropertyTable(vm, structure->takePropertyTableOrCloneIfPinned(vm));
685         transition->m_offset = structure->m_offset;
686         checkOffset(transition->m_offset, transition->inlineCapacity());
687     }
688     
689     if (setsReadOnlyOnNonAccessorProperties(transitionKind)
690         && !transition->propertyTableOrNull()->isEmpty())
691         transition->setHasReadOnlyOrGetterSetterPropertiesExcludingProto(true);
692     
693     if (structure->isDictionary()) {
694         PropertyTable* table = transition->ensurePropertyTable(vm);
695         transition->pin(holdLock(transition->m_lock), vm, table);
696     } else {
697         auto locker = holdLock(structure->m_lock);
698         structure->m_transitionTable.add(vm, transition);
699     }
700
701     transition->checkOffsetConsistency();
702     return transition;
703 }
704
705 // In future we may want to cache this property.
706 bool Structure::isSealed(VM& vm)
707 {
708     if (isStructureExtensible())
709         return false;
710
711     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
712     if (!table)
713         return true;
714     
715     PropertyTable::iterator end = table->end();
716     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
717         if ((iter->attributes & PropertyAttribute::DontDelete) != static_cast<unsigned>(PropertyAttribute::DontDelete))
718             return false;
719     }
720     return true;
721 }
722
723 // In future we may want to cache this property.
724 bool Structure::isFrozen(VM& vm)
725 {
726     if (isStructureExtensible())
727         return false;
728
729     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
730     if (!table)
731         return true;
732     
733     PropertyTable::iterator end = table->end();
734     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
735         if (!(iter->attributes & PropertyAttribute::DontDelete))
736             return false;
737         if (!(iter->attributes & (PropertyAttribute::ReadOnly | PropertyAttribute::Accessor)))
738             return false;
739     }
740     return true;
741 }
742
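// Turns a dictionary structure back into a normal, flattened one. For an uncacheable
// dictionary this compacts the object's property storage: values are read out in
// property-table order, offsets are reassigned densely, values are written back, and
// the now-unused tails of the inline and out-of-line storage are zeroed. The object's
// structure ID is nuked for the duration of the shuffle so a concurrent collector
// never scans it in a half-moved state.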
743 Structure* Structure::flattenDictionaryStructure(VM& vm, JSObject* object)
744 {
745     checkOffsetConsistency();
746     ASSERT(isDictionary());
747     
748     GCSafeConcurrentJSLocker locker(m_lock, vm.heap);
749     
750     object->setStructureIDDirectly(nuke(id()));
751     WTF::storeStoreFence();
752
753     size_t beforeOutOfLineCapacity = this->outOfLineCapacity();
754     if (isUncacheableDictionary()) {
755         PropertyTable* table = propertyTableOrNull();
756         ASSERT(table);
757
758         size_t propertyCount = table->size();
759
760         // Holds our values compacted by insertion order.
761         Vector<JSValue> values(propertyCount);
762
763         // Copies out our values from their hashed locations, compacting property table offsets as we go.
764         unsigned i = 0;
765         PropertyTable::iterator end = table->end();
766         m_offset = invalidOffset;
767         for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter, ++i) {
768             values[i] = object->getDirect(iter->offset);
769             m_offset = iter->offset = offsetForPropertyNumber(i, m_inlineCapacity);
770         }
771         
772         // Copies in our values to their compacted locations.
773         for (unsigned i = 0; i < propertyCount; i++)
774             object->putDirect(vm, offsetForPropertyNumber(i, m_inlineCapacity), values[i]);
775
776         table->clearDeletedOffsets();
777
778         // We need to zero our unused property space; otherwise the GC might see a
779         // stale pointer when we add properties in the future.
780         memset(
781             object->inlineStorageUnsafe() + inlineSize(),
782             0,
783             (inlineCapacity() - inlineSize()) * sizeof(EncodedJSValue));
784
785         Butterfly* butterfly = object->butterfly();
786         size_t preCapacity = butterfly->indexingHeader()->preCapacity(this);
787         void* base = butterfly->base(preCapacity, beforeOutOfLineCapacity);
788         void* startOfPropertyStorageSlots = reinterpret_cast<EncodedJSValue*>(base) + preCapacity;
789         memset(startOfPropertyStorageSlots, 0, (beforeOutOfLineCapacity - outOfLineSize()) * sizeof(EncodedJSValue));
790         checkOffsetConsistency();
791     }
792
793     setDictionaryKind(NoneDictionaryKind);
794     setHasBeenFlattenedBefore(true);
795
796     size_t afterOutOfLineCapacity = this->outOfLineCapacity();
797
798     if (object->butterfly() && beforeOutOfLineCapacity != afterOutOfLineCapacity) {
799         ASSERT(beforeOutOfLineCapacity > afterOutOfLineCapacity);
800         // If the object had a Butterfly but after flattening/compacting we no longer have need of it,
801         // we need to zero it out because the collector depends on the Structure to know the size for copying.
802         if (!afterOutOfLineCapacity && !this->hasIndexingHeader(object))
803             object->setButterfly(vm, nullptr);
804         // If the object was down-sized to the point where the base of the Butterfly is no longer within the 
805         // first CopiedBlock::blockSize bytes, we'll get the wrong answer if we try to mask the base back to 
806         // the CopiedBlock header. To prevent this case we need to memmove the Butterfly down.
807         else
808             object->shiftButterflyAfterFlattening(locker, vm, this, afterOutOfLineCapacity);
809     }
810     
811     WTF::storeStoreFence();
812     object->setStructureIDDirectly(id());
813
814     // We need to do a writebarrier here because the GC thread might be scanning the butterfly while
815     // we are shuffling properties around. See: https://bugs.webkit.org/show_bug.cgi?id=166989
816     vm.heap.writeBarrier(object);
817
818     return this;
819 }
820
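// Pinning marks the property table as owned by this structure, so later transitions
// clone it instead of stealing it (see takePropertyTableOrCloneIfPinned()). pin()
// additionally severs the previousID link and clears m_nameInPrevious;
// pinForCaching() clears m_nameInPrevious but leaves previousID intact.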
821 void Structure::pin(const AbstractLocker&, VM& vm, PropertyTable* table)
822 {
823     setIsPinnedPropertyTable(true);
824     setPropertyTable(vm, table);
825     clearPreviousID();
826     m_nameInPrevious = nullptr;
827 }
828
829 void Structure::pinForCaching(const AbstractLocker&, VM& vm, PropertyTable* table)
830 {
831     setIsPinnedPropertyTable(true);
832     setPropertyTable(vm, table);
833     m_nameInPrevious = nullptr;
834 }
835
836 void Structure::allocateRareData(VM& vm)
837 {
838     ASSERT(!hasRareData());
839     StructureRareData* rareData = StructureRareData::create(vm, previousID());
840     WTF::storeStoreFence();
841     m_previousOrRareData.set(vm, this, rareData);
842     ASSERT(hasRareData());
843 }
844
845 WatchpointSet* Structure::ensurePropertyReplacementWatchpointSet(VM& vm, PropertyOffset offset)
846 {
847     ASSERT(!isUncacheableDictionary());
848
849     // In some places it's convenient to call this with an invalid offset. So, we do the check here.
850     if (!isValidOffset(offset))
851         return nullptr;
852     
853     if (!hasRareData())
854         allocateRareData(vm);
855     ConcurrentJSLocker locker(m_lock);
856     StructureRareData* rareData = this->rareData();
857     if (!rareData->m_replacementWatchpointSets) {
858         rareData->m_replacementWatchpointSets =
859             std::make_unique<StructureRareData::PropertyWatchpointMap>();
860         WTF::storeStoreFence();
861     }
862     auto result = rareData->m_replacementWatchpointSets->add(offset, nullptr);
863     if (result.isNewEntry)
864         result.iterator->value = adoptRef(new WatchpointSet(IsWatched));
865     return result.iterator->value.get();
866 }
867
868 void Structure::startWatchingPropertyForReplacements(VM& vm, PropertyName propertyName)
869 {
870     ASSERT(!isUncacheableDictionary());
871     
872     startWatchingPropertyForReplacements(vm, get(vm, propertyName));
873 }
874
875 void Structure::didCachePropertyReplacement(VM& vm, PropertyOffset offset)
876 {
877     RELEASE_ASSERT(isValidOffset(offset));
878     ensurePropertyReplacementWatchpointSet(vm, offset)->fireAll(vm, "Did cache property replacement");
879 }
880
881 void Structure::startWatchingInternalProperties(VM& vm)
882 {
883     if (!isUncacheableDictionary()) {
884         startWatchingPropertyForReplacements(vm, vm.propertyNames->toString);
885         startWatchingPropertyForReplacements(vm, vm.propertyNames->valueOf);
886     }
887     setDidWatchInternalProperties(true);
888 }
889
890 #if DUMP_PROPERTYMAP_STATS
891
892 PropertyMapHashTableStats* propertyMapHashTableStats = 0;
893
894 struct PropertyMapStatisticsExitLogger {
895     PropertyMapStatisticsExitLogger();
896     ~PropertyMapStatisticsExitLogger();
897 };
898
899 DEFINE_GLOBAL_FOR_LOGGING(PropertyMapStatisticsExitLogger, logger, );
900
901 PropertyMapStatisticsExitLogger::PropertyMapStatisticsExitLogger()
902 {
903     propertyMapHashTableStats = adoptPtr(new PropertyMapHashTableStats()).leakPtr();
904 }
905
906 PropertyMapStatisticsExitLogger::~PropertyMapStatisticsExitLogger()
907 {
908     unsigned finds = propertyMapHashTableStats->numFinds;
909     unsigned collisions = propertyMapHashTableStats->numCollisions;
910     dataLogF("\nJSC::PropertyMap statistics for process %d\n\n", getCurrentProcessID());
911     dataLogF("%d finds\n", finds);
912     dataLogF("%d collisions (%.1f%%)\n", collisions, 100.0 * collisions / finds);
913     dataLogF("%d lookups\n", propertyMapHashTableStats->numLookups.load());
914     dataLogF("%d lookup probings\n", propertyMapHashTableStats->numLookupProbing.load());
915     dataLogF("%d adds\n", propertyMapHashTableStats->numAdds.load());
916     dataLogF("%d removes\n", propertyMapHashTableStats->numRemoves.load());
917     dataLogF("%d rehashes\n", propertyMapHashTableStats->numRehashes.load());
918     dataLogF("%d reinserts\n", propertyMapHashTableStats->numReinserts.load());
919 }
920
921 #endif
922
923 PropertyTable* Structure::copyPropertyTableForPinning(VM& vm)
924 {
925     if (PropertyTable* table = propertyTableOrNull())
926         return PropertyTable::clone(vm, *table);
927     bool setPropertyTable = false;
928     return materializePropertyTable(vm, setPropertyTable);
929 }
930
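// Concurrency-safe lookup of a property's offset and attributes, usable from compiler
// threads; it goes through forEachPropertyConcurrently rather than touching the
// property table directly. A rough usage sketch:
//
//     unsigned attributes;
//     PropertyOffset offset = structure->getConcurrently(uid, attributes);
//     if (isValidOffset(offset)) { /* property exists at `offset` */ }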
931 PropertyOffset Structure::getConcurrently(UniquedStringImpl* uid, unsigned& attributes)
932 {
933     PropertyOffset result = invalidOffset;
934     
935     forEachPropertyConcurrently(
936         [&] (const PropertyMapEntry& candidate) -> bool {
937             if (candidate.key != uid)
938                 return true;
939             
940             result = candidate.offset;
941             attributes = candidate.attributes;
942             return false;
943         });
944     
945     return result;
946 }
947
948 Vector<PropertyMapEntry> Structure::getPropertiesConcurrently()
949 {
950     Vector<PropertyMapEntry> result;
951
952     forEachPropertyConcurrently(
953         [&] (const PropertyMapEntry& entry) -> bool {
954             result.append(entry);
955             return true;
956         });
957     
958     return result;
959 }
960
961 PropertyOffset Structure::add(VM& vm, PropertyName propertyName, unsigned attributes)
962 {
963     return add<ShouldPin::No>(
964         vm, propertyName, attributes,
965         [this] (const GCSafeConcurrentJSLocker&, PropertyOffset, PropertyOffset newLastOffset) {
966             setLastOffset(newLastOffset);
967         });
968 }
969
970 PropertyOffset Structure::remove(PropertyName propertyName)
971 {
972     return remove(propertyName, [] (const ConcurrentJSLocker&, PropertyOffset) { });
973 }
974
975 void Structure::getPropertyNamesFromStructure(VM& vm, PropertyNameArray& propertyNames, EnumerationMode mode)
976 {
977     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
978     if (!table)
979         return;
980     
981     bool knownUnique = propertyNames.canAddKnownUniqueForStructure();
982     
983     PropertyTable::iterator end = table->end();
984     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
985         ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !(iter->attributes & PropertyAttribute::DontEnum));
986         ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !iter->key->isSymbol());
987         if (!(iter->attributes & PropertyAttribute::DontEnum) || mode.includeDontEnumProperties()) {
988             if (iter->key->isSymbol() && !propertyNames.includeSymbolProperties())
989                 continue;
990             if (knownUnique)
991                 propertyNames.addUnchecked(iter->key);
992             else
993                 propertyNames.add(iter->key);
994         }
995     }
996 }
997
998 void StructureFireDetail::dump(PrintStream& out) const
999 {
1000     out.print("Structure transition from ", *m_structure);
1001 }
1002
1003 DeferredStructureTransitionWatchpointFire::DeferredStructureTransitionWatchpointFire(VM& vm, Structure* structure)
1004     : DeferredWatchpointFire(vm)
1005     , m_structure(structure)
1006 {
1007 }
1008
1009 DeferredStructureTransitionWatchpointFire::~DeferredStructureTransitionWatchpointFire()
1010 {
1011     fireAll();
1012 }
1013
1014 void DeferredStructureTransitionWatchpointFire::dump(PrintStream& out) const
1015 {
1016     out.print("Structure transition from ", *m_structure);
1017 }
1018
1019 void Structure::didTransitionFromThisStructure(DeferredStructureTransitionWatchpointFire* deferred) const
1020 {
1021     // If the structure is being watched, and this is the kind of structure that the DFG would
1022     // like to watch, then make sure to note for all future versions of this structure that it's
1023     // unwise to watch it.
1024     if (m_transitionWatchpointSet.isBeingWatched())
1025         const_cast<Structure*>(this)->setTransitionWatchpointIsLikelyToBeFired(true);
1026
1027     if (deferred) {
1028         ASSERT(deferred->structure() == this);
1029         m_transitionWatchpointSet.fireAll(*vm(), deferred);
1030     } else
1031         m_transitionWatchpointSet.fireAll(*vm(), StructureFireDetail(this));
1032 }
1033
1034 void Structure::visitChildren(JSCell* cell, SlotVisitor& visitor)
1035 {
1036     Structure* thisObject = jsCast<Structure*>(cell);
1037     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
1038
1039     Base::visitChildren(thisObject, visitor);
1040     
1041     ConcurrentJSLocker locker(thisObject->m_lock);
1042     
1043     visitor.append(thisObject->m_globalObject);
1044     if (!thisObject->isObject())
1045         thisObject->m_cachedPrototypeChain.clear();
1046     else {
1047         visitor.append(thisObject->m_prototype);
1048         visitor.append(thisObject->m_cachedPrototypeChain);
1049     }
1050     visitor.append(thisObject->m_previousOrRareData);
1051
1052     if (thisObject->isPinnedPropertyTable() || thisObject->isAddingPropertyForTransition()) {
1053         // NOTE: This can interleave in pin(), in which case it may see a null property table.
1054         // That's fine, because then the barrier will fire and we will scan this again.
1055         visitor.append(thisObject->m_propertyTableUnsafe);
1056     } else if (visitor.isBuildingHeapSnapshot())
1057         visitor.append(thisObject->m_propertyTableUnsafe);
1058     else if (thisObject->m_propertyTableUnsafe)
1059         thisObject->m_propertyTableUnsafe.clear();
1060 }
1061
1062 bool Structure::isCheapDuringGC(VM& vm)
1063 {
1064     // FIXME: We could make this even safer by returning false if this structure's property table
1065     // has any large property names.
1066     // https://bugs.webkit.org/show_bug.cgi?id=157334
1067     
1068     return (!m_globalObject || vm.heap.isMarked(m_globalObject.get()))
1069         && (hasPolyProto() || !storedPrototypeObject() || vm.heap.isMarked(storedPrototypeObject()));
1070 }
1071
1072 bool Structure::markIfCheap(SlotVisitor& visitor)
1073 {
1074     VM& vm = visitor.vm();
1075     if (!isCheapDuringGC(vm))
1076         return vm.heap.isMarked(this);
1077     
1078     visitor.appendUnbarriered(this);
1079     return true;
1080 }
1081
1082 Ref<StructureShape> Structure::toStructureShape(JSValue value, bool& sawPolyProtoStructure)
1083 {
1084     Ref<StructureShape> baseShape = StructureShape::create();
1085     RefPtr<StructureShape> curShape = baseShape.ptr();
1086     Structure* curStructure = this;
1087     JSValue curValue = value;
1088     sawPolyProtoStructure = false;
1089     while (curStructure) {
1090         sawPolyProtoStructure |= curStructure->hasPolyProto();
1091         curStructure->forEachPropertyConcurrently(
1092             [&] (const PropertyMapEntry& entry) -> bool {
1093                 if (!PropertyName(entry.key).isPrivateName())
1094                     curShape->addProperty(*entry.key);
1095                 return true;
1096             });
1097
1098         if (JSObject* curObject = curValue.getObject())
1099             curShape->setConstructorName(JSObject::calculatedClassName(curObject));
1100         else
1101             curShape->setConstructorName(curStructure->classInfo()->className);
1102
1103         if (curStructure->isDictionary())
1104             curShape->enterDictionaryMode();
1105
1106         curShape->markAsFinal();
1107
1108         if (!curValue.isObject())
1109             break;
1110
1111         JSObject* object = asObject(curValue);
1112         JSObject* prototypeObject = object->structure()->storedPrototypeObject(object);
1113         if (!prototypeObject)
1114             break;
1115
1116         auto newShape = StructureShape::create();
1117         curShape->setProto(newShape.copyRef());
1118         curShape = WTFMove(newShape);
1119         curValue = prototypeObject;
1120         curStructure = prototypeObject->structure();
1121     }
1122     
1123     return baseShape;
1124 }
1125
1126 void Structure::dump(PrintStream& out) const
1127 {
1128     out.print(RawPointer(this), ":[", classInfo()->className, ", {");
1129     
1130     CommaPrinter comma;
1131     
1132     const_cast<Structure*>(this)->forEachPropertyConcurrently(
1133         [&] (const PropertyMapEntry& entry) -> bool {
1134             out.print(comma, entry.key, ":", static_cast<int>(entry.offset));
1135             return true;
1136         });
1137     
1138     out.print("}, ", IndexingTypeDump(indexingMode()));
1139     
1140     if (hasPolyProto())
1141         out.print(", PolyProto offset:", knownPolyProtoOffset);
1142     else if (m_prototype.get().isCell())
1143         out.print(", Proto:", RawPointer(m_prototype.get().asCell()));
1144
1145     switch (dictionaryKind()) {
1146     case NoneDictionaryKind:
1147         if (hasBeenDictionary())
1148             out.print(", Has been dictionary");
1149         break;
1150     case CachedDictionaryKind:
1151         out.print(", Dictionary");
1152         break;
1153     case UncachedDictionaryKind:
1154         out.print(", UncacheableDictionary");
1155         break;
1156     }
1157
1158     if (transitionWatchpointSetIsStillValid())
1159         out.print(", Leaf");
1160     else if (transitionWatchpointIsLikelyToBeFired())
1161         out.print(", Shady leaf");
1162     
1163     out.print("]");
1164 }
1165
1166 void Structure::dumpInContext(PrintStream& out, DumpContext* context) const
1167 {
1168     if (context)
1169         context->structures.dumpBrief(this, out);
1170     else
1171         dump(out);
1172 }
1173
1174 void Structure::dumpBrief(PrintStream& out, const CString& string) const
1175 {
1176     out.print("%", string, ":", classInfo()->className);
1177 }
1178
1179 void Structure::dumpContextHeader(PrintStream& out)
1180 {
1181     out.print("Structures:");
1182 }
1183
1184 bool ClassInfo::hasStaticSetterOrReadonlyProperties() const
1185 {
1186     for (const ClassInfo* ci = this; ci; ci = ci->parentClass) {
1187         if (const HashTable* table = ci->staticPropHashTable) {
1188             if (table->hasSetterOrReadonlyProperties)
1189                 return true;
1190         }
1191     }
1192     return false;
1193 }
1194
1195 void Structure::setCachedPropertyNameEnumerator(VM& vm, JSPropertyNameEnumerator* enumerator)
1196 {
1197     ASSERT(!isDictionary());
1198     if (!hasRareData())
1199         allocateRareData(vm);
1200     rareData()->setCachedPropertyNameEnumerator(vm, enumerator);
1201 }
1202
1203 JSPropertyNameEnumerator* Structure::cachedPropertyNameEnumerator() const
1204 {
1205     if (!hasRareData())
1206         return nullptr;
1207     return rareData()->cachedPropertyNameEnumerator();
1208 }
1209
1210 bool Structure::canCachePropertyNameEnumerator() const
1211 {
1212     if (!this->canCacheOwnKeys())
1213         return false;
1214
1215     StructureChain* structureChain = m_cachedPrototypeChain.get();
1216     ASSERT(structureChain);
1217     WriteBarrier<Structure>* structure = structureChain->head();
1218     while (true) {
1219         if (!structure->get())
1220             return true;
1221         if (!structure->get()->canCacheOwnKeys())
1222             return false;
1223         structure++;
1224     }
1225
1226     ASSERT_NOT_REACHED();
1227     return true;
1228 }
1229     
1230 bool Structure::canAccessPropertiesQuicklyForEnumeration() const
1231 {
1232     if (!isQuickPropertyAccessAllowedForEnumeration())
1233         return false;
1234     if (hasGetterSetterProperties())
1235         return false;
1236     if (isUncacheableDictionary())
1237         return false;
1238     return true;
1239 }
1240
1241 } // namespace JSC