1 /*
2  * Copyright (C) 2008, 2009, 2013-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "Structure.h"
28
29 #include "CodeBlock.h"
30 #include "DumpContext.h"
31 #include "JSCInlines.h"
32 #include "JSObject.h"
33 #include "JSPropertyNameEnumerator.h"
34 #include "Lookup.h"
35 #include "PropertyMapHashTable.h"
36 #include "PropertyNameArray.h"
37 #include "StructureChain.h"
38 #include "StructureRareDataInlines.h"
39 #include "WeakGCMapInlines.h"
40 #include <wtf/CommaPrinter.h>
41 #include <wtf/NeverDestroyed.h>
42 #include <wtf/ProcessID.h>
43 #include <wtf/RefPtr.h>
44 #include <wtf/Threading.h>
45
46 #define DUMP_STRUCTURE_ID_STATISTICS 0
47
48 using namespace std;
49 using namespace WTF;
50
51 namespace JSC {
52
53 #if DUMP_STRUCTURE_ID_STATISTICS
54 static HashSet<Structure*>& liveStructureSet = *(new HashSet<Structure*>);
55 #endif
56
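// When the transition table is in "single slot" mode, m_data holds a pointer to
// a WeakImpl for the lone transition, tagged with UsingSingleSlotFlag; once a
// second transition is added, the table switches to a real TransitionMap (see
// StructureTransitionTable::add() below). This owner's finalize() runs when the
// weakly-held transition structure is collected and resets the slot to the
// empty single-slot state.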
57 class SingleSlotTransitionWeakOwner final : public WeakHandleOwner {
58     void finalize(Handle<Unknown>, void* context) override
59     {
60         StructureTransitionTable* table = reinterpret_cast<StructureTransitionTable*>(context);
61         ASSERT(table->isUsingSingleSlot());
62         WeakSet::deallocate(table->weakImpl());
63         table->m_data = StructureTransitionTable::UsingSingleSlotFlag;
64     }
65 };
66
67 static SingleSlotTransitionWeakOwner& singleSlotTransitionWeakOwner()
68 {
69     static NeverDestroyed<SingleSlotTransitionWeakOwner> owner;
70     return owner;
71 }
72
73 inline Structure* StructureTransitionTable::singleTransition() const
74 {
75     ASSERT(isUsingSingleSlot());
76     if (WeakImpl* impl = this->weakImpl()) {
77         if (impl->state() == WeakImpl::Live)
78             return jsCast<Structure*>(impl->jsValue().asCell());
79     }
80     return nullptr;
81 }
82
83 inline void StructureTransitionTable::setSingleTransition(Structure* structure)
84 {
85     ASSERT(isUsingSingleSlot());
86     if (WeakImpl* impl = this->weakImpl())
87         WeakSet::deallocate(impl);
88     WeakImpl* impl = WeakSet::allocate(structure, &singleSlotTransitionWeakOwner(), this);
89     m_data = reinterpret_cast<intptr_t>(impl) | UsingSingleSlotFlag;
90 }
91
92 bool StructureTransitionTable::contains(UniquedStringImpl* rep, unsigned attributes) const
93 {
94     if (isUsingSingleSlot()) {
95         Structure* transition = singleTransition();
96         return transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes;
97     }
98     return map()->get(std::make_pair(rep, attributes));
99 }
100
101 Structure* StructureTransitionTable::get(UniquedStringImpl* rep, unsigned attributes) const
102 {
103     if (isUsingSingleSlot()) {
104         Structure* transition = singleTransition();
105         return (transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes) ? transition : nullptr;
106     }
107     return map()->get(std::make_pair(rep, attributes));
108 }
109
110 void StructureTransitionTable::add(VM& vm, Structure* structure)
111 {
112     if (isUsingSingleSlot()) {
113         Structure* existingTransition = singleTransition();
114
115         // This handles the first transition being added.
116         if (!existingTransition) {
117             setSingleTransition(structure);
118             return;
119         }
120
121         // This handles the second transition being added
122         // (or the first transition being despecified!)
123         setMap(new TransitionMap(vm));
124         add(vm, existingTransition);
125     }
126
127     // Add the structure to the map.
128
129     // Newer versions of the STL have an std::make_pair function that takes rvalue references.
130     // When either of the parameters is a bitfield, the C++ compiler will try to bind it as an lvalue, which is invalid. To work around this, use unary "+" to make the parameter an rvalue.
131     // See https://bugs.webkit.org/show_bug.cgi?id=59261 for more details
132     map()->set(std::make_pair(structure->m_nameInPrevious.get(), +structure->attributesInPrevious()), structure);
133 }
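// A sketch of the typical call site for StructureTransitionTable::add(), as in
// addNewPropertyTransition() and nonPropertyTransition() below: the new
// structure is created first and then registered under the old structure's lock:
//
//     {
//         ConcurrentJSLocker locker(structure->m_lock);
//         structure->m_transitionTable.add(vm, transition);
//     }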
134
135 void Structure::dumpStatistics()
136 {
137 #if DUMP_STRUCTURE_ID_STATISTICS
138     unsigned numberLeaf = 0;
139     unsigned numberUsingSingleSlot = 0;
140     unsigned numberSingletons = 0;
141     unsigned numberWithPropertyMaps = 0;
142     unsigned totalPropertyMapsSize = 0;
143
144     HashSet<Structure*>::const_iterator end = liveStructureSet.end();
145     for (HashSet<Structure*>::const_iterator it = liveStructureSet.begin(); it != end; ++it) {
146         Structure* structure = *it;
147
148         switch (structure->m_transitionTable.size()) {
149             case 0:
150                 ++numberLeaf;
151                 if (!structure->previousID())
152                     ++numberSingletons;
153                 break;
154
155             case 1:
156                 ++numberUsingSingleSlot;
157                 break;
158         }
159
160         if (PropertyTable* table = structure->propertyTableOrNull()) {
161             ++numberWithPropertyMaps;
162             totalPropertyMapsSize += table->sizeInMemory();
163         }
164     }
165
166     dataLogF("Number of live Structures: %d\n", liveStructureSet.size());
167     dataLogF("Number of Structures using the single item optimization for transition map: %d\n", numberUsingSingleSlot);
168     dataLogF("Number of Structures that are leaf nodes: %d\n", numberLeaf);
169     dataLogF("Number of Structures that are singletons: %d\n", numberSingletons);
170     dataLogF("Number of Structures with PropertyMaps: %d\n", numberWithPropertyMaps);
171
172     dataLogF("Size of a single Structure: %d\n", static_cast<unsigned>(sizeof(Structure)));
173     dataLogF("Size of sum of all property maps: %d\n", totalPropertyMapsSize);
174     dataLogF("Average size of a property map: %f\n", static_cast<double>(totalPropertyMapsSize) / static_cast<double>(liveStructureSet.size()));
175 #else
176     dataLogF("Dumping Structure statistics is not enabled.\n");
177 #endif
178 }
179
180 Structure::Structure(VM& vm, JSGlobalObject* globalObject, JSValue prototype, const TypeInfo& typeInfo, const ClassInfo* classInfo, IndexingType indexingType, unsigned inlineCapacity)
181     : JSCell(vm, vm.structureStructure.get())
182     , m_blob(vm.heap.structureIDTable().allocateID(this), indexingType, typeInfo)
183     , m_outOfLineTypeFlags(typeInfo.outOfLineTypeFlags())
184     , m_globalObject(vm, this, globalObject, WriteBarrier<JSGlobalObject>::MayBeNull)
185     , m_prototype(vm, this, prototype)
186     , m_classInfo(classInfo)
187     , m_transitionWatchpointSet(IsWatched)
188     , m_offset(invalidOffset)
189     , m_inlineCapacity(inlineCapacity)
190     , m_bitField(0)
191 {
192     setDictionaryKind(NoneDictionaryKind);
193     setIsPinnedPropertyTable(false);
194     setHasGetterSetterProperties(classInfo->hasStaticSetterOrReadonlyProperties());
195     setHasCustomGetterSetterProperties(false);
196     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(classInfo->hasStaticSetterOrReadonlyProperties());
197     setIsQuickPropertyAccessAllowedForEnumeration(true);
198     setAttributesInPrevious(0);
199     setDidPreventExtensions(false);
200     setDidTransition(false);
201     setStaticPropertiesReified(false);
202     setTransitionWatchpointIsLikelyToBeFired(false);
203     setHasBeenDictionary(false);
204     setIsAddingPropertyForTransition(false);
205  
206     ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
207     ASSERT(static_cast<PropertyOffset>(inlineCapacity) < firstOutOfLineOffset);
208     ASSERT(!hasRareData());
209     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
210     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
211 }
212
213 const ClassInfo Structure::s_info = { "Structure", 0, 0, CREATE_METHOD_TABLE(Structure) };
214
215 Structure::Structure(VM& vm)
216     : JSCell(CreatingEarlyCell)
217     , m_prototype(vm, this, jsNull())
218     , m_classInfo(info())
219     , m_transitionWatchpointSet(IsWatched)
220     , m_offset(invalidOffset)
221     , m_inlineCapacity(0)
222     , m_bitField(0)
223 {
224     setDictionaryKind(NoneDictionaryKind);
225     setIsPinnedPropertyTable(false);
226     setHasGetterSetterProperties(m_classInfo->hasStaticSetterOrReadonlyProperties());
227     setHasCustomGetterSetterProperties(false);
228     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(m_classInfo->hasStaticSetterOrReadonlyProperties());
229     setIsQuickPropertyAccessAllowedForEnumeration(true);
230     setAttributesInPrevious(0);
231     setDidPreventExtensions(false);
232     setDidTransition(false);
233     setStaticPropertiesReified(false);
234     setTransitionWatchpointIsLikelyToBeFired(false);
235     setHasBeenDictionary(false);
236     setIsAddingPropertyForTransition(false);
237  
238     TypeInfo typeInfo = TypeInfo(CellType, StructureFlags);
239     m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), 0, typeInfo);
240     m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags();
241
242     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
243     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
244 }
245
246 Structure::Structure(VM& vm, Structure* previous, DeferredStructureTransitionWatchpointFire* deferred)
247     : JSCell(vm, vm.structureStructure.get())
248     , m_prototype(vm, this, previous->storedPrototype())
249     , m_classInfo(previous->m_classInfo)
250     , m_transitionWatchpointSet(IsWatched)
251     , m_offset(invalidOffset)
252     , m_inlineCapacity(previous->m_inlineCapacity)
253     , m_bitField(0)
254 {
255     setDictionaryKind(previous->dictionaryKind());
256     setIsPinnedPropertyTable(previous->hasBeenFlattenedBefore());
257     setHasGetterSetterProperties(previous->hasGetterSetterProperties());
258     setHasCustomGetterSetterProperties(previous->hasCustomGetterSetterProperties());
259     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(previous->hasReadOnlyOrGetterSetterPropertiesExcludingProto());
260     setIsQuickPropertyAccessAllowedForEnumeration(previous->isQuickPropertyAccessAllowedForEnumeration());
261     setAttributesInPrevious(0);
262     setDidPreventExtensions(previous->didPreventExtensions());
263     setDidTransition(true);
264     setStaticPropertiesReified(previous->staticPropertiesReified());
265     setHasBeenDictionary(previous->hasBeenDictionary());
266     setIsAddingPropertyForTransition(false);
267  
268     TypeInfo typeInfo = previous->typeInfo();
269     m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), previous->indexingTypeIncludingHistory(), typeInfo);
270     m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags();
271
272     ASSERT(!previous->typeInfo().structureIsImmortal());
273     setPreviousID(vm, previous);
274
275     previous->didTransitionFromThisStructure(deferred);
276     
277     // Copy this bit now, in case previous was being watched.
278     setTransitionWatchpointIsLikelyToBeFired(previous->transitionWatchpointIsLikelyToBeFired());
279
280     if (previous->m_globalObject)
281         m_globalObject.set(vm, this, previous->m_globalObject.get());
282     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
283     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
284 }
285
286 Structure::~Structure()
287 {
288     if (typeInfo().structureIsImmortal())
289         return;
290     Heap::heap(this)->structureIDTable().deallocateID(this, m_blob.structureID());
291 }
292
293 void Structure::destroy(JSCell* cell)
294 {
295     static_cast<Structure*>(cell)->Structure::~Structure();
296 }
297
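// Walks back through previousID() collecting every structure that has no
// property table, stopping at the first one that still has a table (if any).
// If a table is found, that structure's lock is left held so the caller can
// use the table atomically; otherwise both 'structure' and 'table' come back
// null.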
298 void Structure::findStructuresAndMapForMaterialization(Vector<Structure*, 8>& structures, Structure*& structure, PropertyTable*& table)
299 {
300     ASSERT(structures.isEmpty());
301     table = nullptr;
302
303     for (structure = this; structure; structure = structure->previousID()) {
304         structure->m_lock.lock();
305         
306         table = structure->propertyTableOrNull();
307         if (table) {
308             // Leave the structure locked, so that the caller can do things to it atomically
309             // before it loses its property table.
310             return;
311         }
312         
313         structures.append(structure);
314         structure->m_lock.unlock();
315     }
316     
317     ASSERT(!structure);
318     ASSERT(!table);
319 }
320
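// Rebuilds a property table by replaying the transition chain: gather the
// structures that lack a table, start from a copy of the nearest ancestor's
// table (or a fresh one), then re-add each transition's m_nameInPrevious at its
// recorded offset, oldest transition first.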
321 PropertyTable* Structure::materializePropertyTable(VM& vm, bool setPropertyTable)
322 {
323     ASSERT(structure()->classInfo() == info());
324     ASSERT(!isAddingPropertyForTransition());
325     
326     DeferGC deferGC(vm.heap);
327     
328     Vector<Structure*, 8> structures;
329     Structure* structure;
330     PropertyTable* table;
331     
332     findStructuresAndMapForMaterialization(structures, structure, table);
333     
334     unsigned capacity = numberOfSlotsForLastOffset(m_offset, m_inlineCapacity);
335     if (table) {
336         table = table->copy(vm, capacity);
337         structure->m_lock.unlock();
338     } else
339         table = PropertyTable::create(vm, capacity);
340     
341     // Must hold the lock on this structure, since we will be modifying this structure's
342     // property map. We don't want getConcurrently() to see the property map in a half-baked
343     // state.
344     GCSafeConcurrentJSLocker locker(m_lock, vm.heap);
345     if (setPropertyTable)
346         this->setPropertyTable(vm, table);
347
348     InferredTypeTable* typeTable = m_inferredTypeTable.get();
349
350     for (size_t i = structures.size(); i--;) {
351         structure = structures[i];
352         if (!structure->m_nameInPrevious)
353             continue;
354         PropertyMapEntry entry(structure->m_nameInPrevious.get(), structure->m_offset, structure->attributesInPrevious());
355         if (typeTable && typeTable->get(structure->m_nameInPrevious.get()))
356             entry.hasInferredType = true;
357         table->add(entry, m_offset, PropertyTable::PropertyOffsetMustNotChange);
358     }
359     
360     checkOffsetConsistency();
361     
362     return table;
363 }
364
365 Structure* Structure::addPropertyTransitionToExistingStructureImpl(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset)
366 {
367     ASSERT(!structure->isDictionary());
368     ASSERT(structure->isObject());
369
370     if (Structure* existingTransition = structure->m_transitionTable.get(uid, attributes)) {
371         validateOffset(existingTransition->m_offset, existingTransition->inlineCapacity());
372         offset = existingTransition->m_offset;
373         return existingTransition;
374     }
375
376     return nullptr;
377 }
378
379 Structure* Structure::addPropertyTransitionToExistingStructure(Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset)
380 {
381     ASSERT(!isCompilationThread());
382     return addPropertyTransitionToExistingStructureImpl(structure, propertyName.uid(), attributes, offset);
383 }
384
385 Structure* Structure::addPropertyTransitionToExistingStructureConcurrently(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset)
386 {
387     ConcurrentJSLocker locker(structure->m_lock);
388     return addPropertyTransitionToExistingStructureImpl(structure, uid, attributes, offset);
389 }
390
391 bool Structure::anyObjectInChainMayInterceptIndexedAccesses() const
392 {
393     for (const Structure* current = this; ;) {
394         if (current->mayInterceptIndexedAccesses())
395             return true;
396         
397         JSValue prototype = current->storedPrototype();
398         if (prototype.isNull())
399             return false;
400         
401         current = asObject(prototype)->structure();
402     }
403 }
404
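// Returns true if a hole (a missing indexed property) on an object with this
// structure could be satisfied further up the prototype chain: either this
// structure may intercept indexed accesses, or some prototype has indexed
// properties or may intercept indexed accesses.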
405 bool Structure::holesMustForwardToPrototype(VM& vm) const
406 {
407     if (this->mayInterceptIndexedAccesses())
408         return true;
409
410     JSValue prototype = this->storedPrototype();
411     if (!prototype.isObject())
412         return false;
413     JSObject* object = asObject(prototype);
414
415     while (true) {
416         Structure& structure = *object->structure(vm);
417         if (hasIndexedProperties(object->indexingType()) || structure.mayInterceptIndexedAccesses())
418             return true;
419         prototype = structure.storedPrototype();
420         if (!prototype.isObject())
421             return false;
422         object = asObject(prototype);
423     }
424
425     RELEASE_ASSERT_NOT_REACHED();
426     return false;
427 }
428
429 bool Structure::needsSlowPutIndexing() const
430 {
431     return anyObjectInChainMayInterceptIndexedAccesses()
432         || globalObject()->isHavingABadTime();
433 }
434
435 NonPropertyTransition Structure::suggestedArrayStorageTransition() const
436 {
437     if (needsSlowPutIndexing())
438         return NonPropertyTransition::AllocateSlowPutArrayStorage;
439     
440     return NonPropertyTransition::AllocateArrayStorage;
441 }
442
443 Structure* Structure::addPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset)
444 {
445     Structure* newStructure = addPropertyTransitionToExistingStructure(
446         structure, propertyName, attributes, offset);
447     if (newStructure)
448         return newStructure;
449
450     return addNewPropertyTransition(
451         vm, structure, propertyName, attributes, offset, PutPropertySlot::UnknownContext);
452 }
453
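// If the transition chain has grown past the allowed length (shorter for PutById
// call sites), the property is instead added to a cacheable dictionary rather
// than growing the chain. Otherwise a fresh transition structure is created, the
// property table is taken or cloned from the parent, and the new structure is
// registered in the parent's transition table.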
454 Structure* Structure::addNewPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset, PutPropertySlot::Context context, DeferredStructureTransitionWatchpointFire* deferred)
455 {
456     ASSERT(!structure->isDictionary());
457     ASSERT(structure->isObject());
458     ASSERT(!Structure::addPropertyTransitionToExistingStructure(structure, propertyName, attributes, offset));
459     
460     int maxTransitionLength;
461     if (context == PutPropertySlot::PutById)
462         maxTransitionLength = s_maxTransitionLengthForNonEvalPutById;
463     else
464         maxTransitionLength = s_maxTransitionLength;
465     if (structure->transitionCount() > maxTransitionLength) {
466         Structure* transition = toCacheableDictionaryTransition(vm, structure, deferred);
467         ASSERT(structure != transition);
468         offset = transition->add(vm, propertyName, attributes);
469         return transition;
470     }
471     
472     Structure* transition = create(vm, structure, deferred);
473
474     transition->m_cachedPrototypeChain.setMayBeNull(vm, transition, structure->m_cachedPrototypeChain.get());
475     
476     // While we are adding the property, rematerializing the property table is super weird: we already
477     // have a m_nameInPrevious and attributesInPrevious but the m_offset is still wrong. If the
478     // materialization algorithm runs, it'll build a property table that already has the property but
479     // at a bogus offset. Rather than try to teach the materialization code how to create a table under
480     // those conditions, we just tell the GC not to blow the table away during this period of time.
481     // Holding the lock ensures that we either do this before the GC starts scanning the structure, in
482     // which case the GC will not blow the table away, or we do it after the GC already ran in which
483     // case all is well.  If it wasn't for the lock, the GC would have a TOCTOU race: it could read
484     // isAddingPropertyForTransition before we set it to true, and then blow the table away after.
485     {
486         ConcurrentJSLocker locker(transition->m_lock);
487         transition->setIsAddingPropertyForTransition(true);
488     }
489     
490     transition->m_nameInPrevious = propertyName.uid();
491     transition->setAttributesInPrevious(attributes);
492     transition->setPropertyTable(vm, structure->takePropertyTableOrCloneIfPinned(vm));
493     transition->m_offset = structure->m_offset;
494     transition->m_inferredTypeTable.setMayBeNull(vm, transition, structure->m_inferredTypeTable.get());
495
496     offset = transition->add(vm, propertyName, attributes);
497
498     // Now that everything is fine with the new structure's bookkeeping, the GC is free to blow the
499     // table away if it wants. We can now rebuild it fine.
500     WTF::storeStoreFence();
501     transition->setIsAddingPropertyForTransition(false);
502
503     checkOffset(transition->m_offset, transition->inlineCapacity());
504     {
505         ConcurrentJSLocker locker(structure->m_lock);
506         structure->m_transitionTable.add(vm, transition);
507     }
508     transition->checkOffsetConsistency();
509     structure->checkOffsetConsistency();
510     return transition;
511 }
512
513 Structure* Structure::removePropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, PropertyOffset& offset)
514 {
515     // NOTE: There are some good reasons why this goes directly to uncacheable dictionary rather than
516     // caching the removal. We can fix all of these things, but we must remember to do so, if we ever try
517     // to optimize this case.
518     //
519     // - Cached transitions usually steal the property table, and assume that this is possible because they
520     //   can just rebuild the table by looking at past transitions. That code assumes that the table only
521     //   grew and never shrank. To support removals, we'd have to change the property table materialization
522     //   code to handle deletions. Also, we have logic to get the list of properties on a structure that
523     //   lacks a property table by just looking back through the set of transitions since the last
524     //   structure that had a pinned table. That logic would also have to be changed to handle cached
525     //   removals.
526     //
527     // - InferredTypeTable assumes that removal has never happened. This is important since if we could
528     //   remove a property and then re-add it later, then the "absence means top" optimization wouldn't
529     //   work anymore, unless removal also either poisoned type inference (by doing something equivalent to
530     //   hasBeenDictionary) or by strongly marking the entry as Top by ensuring that it is not absent, but
531     //   hasBeenDictionary) or strongly marked the entry as Top by ensuring that it is not absent, but
532     
533     ASSERT(!structure->isUncacheableDictionary());
534
535     Structure* transition = toUncacheableDictionaryTransition(vm, structure);
536
537     offset = transition->remove(propertyName);
538
539     transition->checkOffsetConsistency();
540     return transition;
541 }
542
543 Structure* Structure::changePrototypeTransition(VM& vm, Structure* structure, JSValue prototype)
544 {
545     DeferGC deferGC(vm.heap);
546     Structure* transition = create(vm, structure);
547
548     transition->m_prototype.set(vm, transition, prototype);
549     
550     transition->pin(vm, structure->copyPropertyTableForPinning(vm));
551     transition->m_offset = structure->m_offset;
552     
553     transition->checkOffsetConsistency();
554     return transition;
555 }
556
557 Structure* Structure::attributeChangeTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes)
558 {
559     if (!structure->isUncacheableDictionary()) {
560         Structure* transition = create(vm, structure);
561         
562         transition->pin(vm, structure->copyPropertyTableForPinning(vm));
563         transition->m_offset = structure->m_offset;
564         
565         structure = transition;
566     }
567
568     PropertyMapEntry* entry = structure->ensurePropertyTable(vm)->get(propertyName.uid());
569     ASSERT(entry);
570     entry->attributes = attributes;
571
572     structure->checkOffsetConsistency();
573     return structure;
574 }
575
576 Structure* Structure::toDictionaryTransition(VM& vm, Structure* structure, DictionaryKind kind, DeferredStructureTransitionWatchpointFire* deferred)
577 {
578     ASSERT(!structure->isUncacheableDictionary());
579     DeferGC deferGC(vm.heap);
580     
581     Structure* transition = create(vm, structure, deferred);
582
583     transition->pin(vm, structure->copyPropertyTableForPinning(vm));
584     transition->m_offset = structure->m_offset;
585     transition->setDictionaryKind(kind);
586     transition->setHasBeenDictionary(true);
587     
588     transition->checkOffsetConsistency();
589     return transition;
590 }
591
592 Structure* Structure::toCacheableDictionaryTransition(VM& vm, Structure* structure, DeferredStructureTransitionWatchpointFire* deferred)
593 {
594     return toDictionaryTransition(vm, structure, CachedDictionaryKind, deferred);
595 }
596
597 Structure* Structure::toUncacheableDictionaryTransition(VM& vm, Structure* structure)
598 {
599     return toDictionaryTransition(vm, structure, UncachedDictionaryKind);
600 }
601
602 Structure* Structure::sealTransition(VM& vm, Structure* structure)
603 {
604     return nonPropertyTransition(vm, structure, NonPropertyTransition::Seal);
605 }
606
607 Structure* Structure::freezeTransition(VM& vm, Structure* structure)
608 {
609     return nonPropertyTransition(vm, structure, NonPropertyTransition::Freeze);
610 }
611
612 Structure* Structure::preventExtensionsTransition(VM& vm, Structure* structure)
613 {
614     return nonPropertyTransition(vm, structure, NonPropertyTransition::PreventExtensions);
615 }
616
617 PropertyTable* Structure::takePropertyTableOrCloneIfPinned(VM& vm)
618 {
619     // This must always return a property table. It can't return null.
620     PropertyTable* result = propertyTableOrNull();
621     if (result) {
622         if (isPinnedPropertyTable())
623             return result->copy(vm, result->size() + 1);
624         ConcurrentJSLocker locker(m_lock);
625         setPropertyTable(vm, nullptr);
626         return result;
627     }
628     bool setPropertyTable = false;
629     return materializePropertyTable(vm, setPropertyTable);
630 }
631
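// Handles transitions that do not add a named property, such as Seal, Freeze,
// PreventExtensions, and indexing-type changes. Where possible it reuses the
// global object's original array structure for the new indexing type; otherwise
// it looks for a cached transition (keyed by a null name plus the attribute
// bits) before creating a new structure.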
632 Structure* Structure::nonPropertyTransition(VM& vm, Structure* structure, NonPropertyTransition transitionKind)
633 {
634     unsigned attributes = toAttributes(transitionKind);
635     IndexingType indexingTypeIncludingHistory = newIndexingType(structure->indexingTypeIncludingHistory(), transitionKind);
636     
637     if (changesIndexingType(transitionKind)) {
638         if (JSGlobalObject* globalObject = structure->m_globalObject.get()) {
639             if (globalObject->isOriginalArrayStructure(structure)) {
640                 Structure* result = globalObject->originalArrayStructureForIndexingType(indexingTypeIncludingHistory);
641                 if (result->indexingTypeIncludingHistory() == indexingTypeIncludingHistory) {
642                     structure->didTransitionFromThisStructure();
643                     return result;
644                 }
645             }
646         }
647     }
648     
649     Structure* existingTransition;
650     if (!structure->isDictionary() && (existingTransition = structure->m_transitionTable.get(0, attributes))) {
651         ASSERT(existingTransition->attributesInPrevious() == attributes);
652         ASSERT(existingTransition->indexingTypeIncludingHistory() == indexingTypeIncludingHistory);
653         return existingTransition;
654     }
655     
656     DeferGC deferGC(vm.heap);
657     
658     Structure* transition = create(vm, structure);
659     transition->setAttributesInPrevious(attributes);
660     transition->m_blob.setIndexingTypeIncludingHistory(indexingTypeIncludingHistory);
661     
662     if (preventsExtensions(transitionKind))
663         transition->setDidPreventExtensions(true);
664     
665     if (setsDontDeleteOnAllProperties(transitionKind)
666         || setsReadOnlyOnNonAccessorProperties(transitionKind)) {
667         // We pin the property table on transitions that do wholesale editing of the property
668         // table, since our logic for walking the property transition chain to rematerialize the
669         // table doesn't know how to take into account such wholesale edits.
670         
671         transition->pinForCaching(vm, structure->copyPropertyTableForPinning(vm));
672         transition->m_offset = structure->m_offset;
673         
674         PropertyTable* table = transition->propertyTableOrNull();
675         RELEASE_ASSERT(table);
676         for (auto& entry : *table) {
677             if (setsDontDeleteOnAllProperties(transitionKind))
678                 entry.attributes |= DontDelete;
679             if (setsReadOnlyOnNonAccessorProperties(transitionKind) && !(entry.attributes & Accessor))
680                 entry.attributes |= ReadOnly;
681         }
682     } else {
683         transition->setPropertyTable(vm, structure->takePropertyTableOrCloneIfPinned(vm));
684         transition->m_offset = structure->m_offset;
685         checkOffset(transition->m_offset, transition->inlineCapacity());
686     }
687     
688     if (setsReadOnlyOnNonAccessorProperties(transitionKind)
689         && !transition->propertyTableOrNull()->isEmpty())
690         transition->setHasReadOnlyOrGetterSetterPropertiesExcludingProto(true);
691     
692     if (structure->isDictionary())
693         transition->pin(vm, transition->ensurePropertyTable(vm));
694     else {
695         ConcurrentJSLocker locker(structure->m_lock);
696         structure->m_transitionTable.add(vm, transition);
697     }
698
699     transition->checkOffsetConsistency();
700     return transition;
701 }
702
703 // In future we may want to cache this property.
704 bool Structure::isSealed(VM& vm)
705 {
706     if (isStructureExtensible())
707         return false;
708
709     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
710     if (!table)
711         return true;
712     
713     PropertyTable::iterator end = table->end();
714     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
715         if ((iter->attributes & DontDelete) != DontDelete)
716             return false;
717     }
718     return true;
719 }
720
721 // In future we may want to cache this property.
722 bool Structure::isFrozen(VM& vm)
723 {
724     if (isStructureExtensible())
725         return false;
726
727     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
728     if (!table)
729         return true;
730     
731     PropertyTable::iterator end = table->end();
732     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
733         if (!(iter->attributes & DontDelete))
734             return false;
735         if (!(iter->attributes & (ReadOnly | Accessor)))
736             return false;
737     }
738     return true;
739 }
740
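// Turns a dictionary structure back into a normal (NoneDictionaryKind)
// structure. For uncacheable dictionaries this also compacts the object's
// properties: values are copied out in insertion order, property offsets are
// reassigned densely, and the values are written back, so the object may end up
// needing a smaller butterfly, or none at all.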
741 Structure* Structure::flattenDictionaryStructure(VM& vm, JSObject* object)
742 {
743     checkOffsetConsistency();
744     ASSERT(isDictionary());
745     
746     GCSafeConcurrentJSLocker locker(m_lock, vm.heap);
747     
748     object->setStructureIDDirectly(nuke(id()));
749     WTF::storeStoreFence();
750
751     size_t beforeOutOfLineCapacity = this->outOfLineCapacity();
752     if (isUncacheableDictionary()) {
753         PropertyTable* table = propertyTableOrNull();
754         ASSERT(table);
755
756         size_t propertyCount = table->size();
757
758         // Holds our values compacted by insertion order.
759         Vector<JSValue> values(propertyCount);
760
761         // Copies out our values from their hashed locations, compacting property table offsets as we go.
762         unsigned i = 0;
763         PropertyTable::iterator end = table->end();
764         m_offset = invalidOffset;
765         for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter, ++i) {
766             values[i] = object->getDirect(iter->offset);
767             m_offset = iter->offset = offsetForPropertyNumber(i, m_inlineCapacity);
768         }
769         
770         // Copies in our values to their compacted locations.
771         for (unsigned i = 0; i < propertyCount; i++)
772             object->putDirect(vm, offsetForPropertyNumber(i, m_inlineCapacity), values[i]);
773
774         table->clearDeletedOffsets();
775         checkOffsetConsistency();
776     }
777
778     setDictionaryKind(NoneDictionaryKind);
779     setHasBeenFlattenedBefore(true);
780
781     size_t afterOutOfLineCapacity = this->outOfLineCapacity();
782
783     if (object->butterfly() && beforeOutOfLineCapacity != afterOutOfLineCapacity) {
784         ASSERT(beforeOutOfLineCapacity > afterOutOfLineCapacity);
785         // If the object had a Butterfly but after flattening/compacting we no longer have need of it,
786         // we need to zero it out because the collector depends on the Structure to know the size for copying.
787         if (!afterOutOfLineCapacity && !this->hasIndexingHeader(object))
788             object->setButterfly(vm, nullptr);
789         // If the object was down-sized to the point where the base of the Butterfly is no longer within the 
790         // first CopiedBlock::blockSize bytes, we'll get the wrong answer if we try to mask the base back to 
791         // the CopiedBlock header. To prevent this case we need to memmove the Butterfly down.
792         else
793             object->shiftButterflyAfterFlattening(locker, vm, this, afterOutOfLineCapacity);
794     }
795     
796     vm.heap.writeBarrier(object);
797     WTF::storeStoreFence();
798     object->setStructureIDDirectly(id());
799     
800     return this;
801 }
802
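// Both pin() and pinForCaching() mark the property table as pinned so the GC
// will not drop it (see visitChildren() below), and both clear m_nameInPrevious.
// The difference is that pin() also clears previousID(), severing the link to
// the previous structure, while pinForCaching() leaves previousID() intact.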
803 void Structure::pin(VM& vm, PropertyTable* table)
804 {
805     setIsPinnedPropertyTable(true);
806     setPropertyTable(vm, table);
807     clearPreviousID();
808     m_nameInPrevious = nullptr;
809 }
810
811 void Structure::pinForCaching(VM& vm, PropertyTable* table)
812 {
813     setIsPinnedPropertyTable(true);
814     setPropertyTable(vm, table);
815     m_nameInPrevious = nullptr;
816 }
817
818 void Structure::allocateRareData(VM& vm)
819 {
820     ASSERT(!hasRareData());
821     StructureRareData* rareData = StructureRareData::create(vm, previousID());
822     WTF::storeStoreFence();
823     m_previousOrRareData.set(vm, this, rareData);
824     ASSERT(hasRareData());
825 }
826
827 WatchpointSet* Structure::ensurePropertyReplacementWatchpointSet(VM& vm, PropertyOffset offset)
828 {
829     ASSERT(!isUncacheableDictionary());
830
831     // In some places it's convenient to call this with an invalid offset. So, we do the check here.
832     if (!isValidOffset(offset))
833         return nullptr;
834     
835     if (!hasRareData())
836         allocateRareData(vm);
837     ConcurrentJSLocker locker(m_lock);
838     StructureRareData* rareData = this->rareData();
839     if (!rareData->m_replacementWatchpointSets) {
840         rareData->m_replacementWatchpointSets =
841             std::make_unique<StructureRareData::PropertyWatchpointMap>();
842         WTF::storeStoreFence();
843     }
844     auto result = rareData->m_replacementWatchpointSets->add(offset, nullptr);
845     if (result.isNewEntry)
846         result.iterator->value = adoptRef(new WatchpointSet(IsWatched));
847     return result.iterator->value.get();
848 }
849
850 void Structure::startWatchingPropertyForReplacements(VM& vm, PropertyName propertyName)
851 {
852     ASSERT(!isUncacheableDictionary());
853     
854     startWatchingPropertyForReplacements(vm, get(vm, propertyName));
855 }
856
857 void Structure::didCachePropertyReplacement(VM& vm, PropertyOffset offset)
858 {
859     ensurePropertyReplacementWatchpointSet(vm, offset)->fireAll(vm, "Did cache property replacement");
860 }
861
862 void Structure::startWatchingInternalProperties(VM& vm)
863 {
864     if (!isUncacheableDictionary()) {
865         startWatchingPropertyForReplacements(vm, vm.propertyNames->toString);
866         startWatchingPropertyForReplacements(vm, vm.propertyNames->valueOf);
867     }
868     setDidWatchInternalProperties(true);
869 }
870
871 void Structure::willStoreValueSlow(
872     VM& vm, PropertyName propertyName, JSValue value, bool shouldOptimize,
873     InferredTypeTable::StoredPropertyAge age)
874 {
875     ASSERT(!isCompilationThread());
876     ASSERT(structure()->classInfo() == info());
877     ASSERT(!hasBeenDictionary());
878
879     // Create the inferred type table before doing anything else, so that we don't GC after we have already
880     // grabbed a pointer into the property map.
881     InferredTypeTable* table = m_inferredTypeTable.get();
882     if (!table) {
883         table = InferredTypeTable::create(vm);
884         WTF::storeStoreFence();
885         m_inferredTypeTable.set(vm, this, table);
886     }
887
888     // This only works if we've got a property table.
889     PropertyTable* propertyTable = ensurePropertyTable(vm);
890     
891     // We must be calling this after having created the given property or confirmed that it was present
892     // already, so the property must be present.
893     PropertyMapEntry* entry = propertyTable->get(propertyName.uid());
894     ASSERT(entry);
895     
896     if (shouldOptimize)
897         entry->hasInferredType = table->willStoreValue(vm, propertyName, value, age);
898     else {
899         table->makeTop(vm, propertyName, age);
900         entry->hasInferredType = false;
901     }
902 }
903
904 #if DUMP_PROPERTYMAP_STATS
905
906 PropertyMapHashTableStats* propertyMapHashTableStats = 0;
907
908 struct PropertyMapStatisticsExitLogger {
909     PropertyMapStatisticsExitLogger();
910     ~PropertyMapStatisticsExitLogger();
911 };
912
913 DEFINE_GLOBAL_FOR_LOGGING(PropertyMapStatisticsExitLogger, logger, );
914
915 PropertyMapStatisticsExitLogger::PropertyMapStatisticsExitLogger()
916 {
917     propertyMapHashTableStats = adoptPtr(new PropertyMapHashTableStats()).leakPtr();
918 }
919
920 PropertyMapStatisticsExitLogger::~PropertyMapStatisticsExitLogger()
921 {
922     unsigned finds = propertyMapHashTableStats->numFinds;
923     unsigned collisions = propertyMapHashTableStats->numCollisions;
924     dataLogF("\nJSC::PropertyMap statistics for process %d\n\n", getCurrentProcessID());
925     dataLogF("%d finds\n", finds);
926     dataLogF("%d collisions (%.1f%%)\n", collisions, 100.0 * collisions / finds);
927     dataLogF("%d lookups\n", propertyMapHashTableStats->numLookups.load());
928     dataLogF("%d lookup probings\n", propertyMapHashTableStats->numLookupProbing.load());
929     dataLogF("%d adds\n", propertyMapHashTableStats->numAdds.load());
930     dataLogF("%d removes\n", propertyMapHashTableStats->numRemoves.load());
931     dataLogF("%d rehashes\n", propertyMapHashTableStats->numRehashes.load());
932     dataLogF("%d reinserts\n", propertyMapHashTableStats->numReinserts.load());
933 }
934
935 #endif
936
937 PropertyTable* Structure::copyPropertyTableForPinning(VM& vm)
938 {
939     if (PropertyTable* table = propertyTableOrNull())
940         return PropertyTable::clone(vm, *table);
941     bool setPropertyTable = false;
942     return materializePropertyTable(vm, setPropertyTable);
943 }
944
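// Looks up a property by iterating with forEachPropertyConcurrently(); the
// callback returns false to stop the walk as soon as the matching entry is
// found, recording its offset and attributes.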
945 PropertyOffset Structure::getConcurrently(UniquedStringImpl* uid, unsigned& attributes)
946 {
947     PropertyOffset result = invalidOffset;
948     
949     forEachPropertyConcurrently(
950         [&] (const PropertyMapEntry& candidate) -> bool {
951             if (candidate.key != uid)
952                 return true;
953             
954             result = candidate.offset;
955             attributes = candidate.attributes;
956             return false;
957         });
958     
959     return result;
960 }
961
962 Vector<PropertyMapEntry> Structure::getPropertiesConcurrently()
963 {
964     Vector<PropertyMapEntry> result;
965
966     forEachPropertyConcurrently(
967         [&] (const PropertyMapEntry& entry) -> bool {
968             result.append(entry);
969             return true;
970         });
971     
972     return result;
973 }
974
975 PropertyOffset Structure::add(VM& vm, PropertyName propertyName, unsigned attributes)
976 {
977     return add(
978         vm, propertyName, attributes,
979         [this] (const GCSafeConcurrentJSLocker&, PropertyOffset, PropertyOffset newLastOffset) {
980             setLastOffset(newLastOffset);
981         });
982 }
983
984 PropertyOffset Structure::remove(PropertyName propertyName)
985 {
986     return remove(propertyName, [] (const ConcurrentJSLocker&, PropertyOffset) { });
987 }
988
989 void Structure::getPropertyNamesFromStructure(VM& vm, PropertyNameArray& propertyNames, EnumerationMode mode)
990 {
991     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
992     if (!table)
993         return;
994     
995     bool knownUnique = propertyNames.canAddKnownUniqueForStructure();
996     
997     PropertyTable::iterator end = table->end();
998     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
999         ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !(iter->attributes & DontEnum));
1000         ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !iter->key->isSymbol());
1001         if (!(iter->attributes & DontEnum) || mode.includeDontEnumProperties()) {
1002             if (iter->key->isSymbol() && !propertyNames.includeSymbolProperties())
1003                 continue;
1004             if (knownUnique)
1005                 propertyNames.addUnchecked(iter->key);
1006             else
1007                 propertyNames.add(iter->key);
1008         }
1009     }
1010 }
1011
1012 void StructureFireDetail::dump(PrintStream& out) const
1013 {
1014     out.print("Structure transition from ", *m_structure);
1015 }
1016
1017 DeferredStructureTransitionWatchpointFire::DeferredStructureTransitionWatchpointFire()
1018     : m_structure(nullptr)
1019 {
1020 }
1021
1022 DeferredStructureTransitionWatchpointFire::~DeferredStructureTransitionWatchpointFire()
1023 {
1024     if (m_structure)
1025         m_structure->transitionWatchpointSet().fireAll(*m_structure->vm(), StructureFireDetail(m_structure));
1026 }
1027
1028 void DeferredStructureTransitionWatchpointFire::add(const Structure* structure)
1029 {
1030     RELEASE_ASSERT(!m_structure);
1031     RELEASE_ASSERT(structure);
1032     m_structure = structure;
1033 }
1034
1035 void Structure::didTransitionFromThisStructure(DeferredStructureTransitionWatchpointFire* deferred) const
1036 {
1037     // If the structure is being watched, and this is the kind of structure that the DFG would
1038     // like to watch, then make sure to note for all future versions of this structure that it's
1039     // unwise to watch it.
1040     if (m_transitionWatchpointSet.isBeingWatched())
1041         const_cast<Structure*>(this)->setTransitionWatchpointIsLikelyToBeFired(true);
1042     
1043     if (deferred)
1044         deferred->add(this);
1045     else
1046         m_transitionWatchpointSet.fireAll(*vm(), StructureFireDetail(this));
1047 }
1048
1049 JSValue Structure::prototypeForLookup(CodeBlock* codeBlock) const
1050 {
1051     return prototypeForLookup(codeBlock->globalObject());
1052 }
1053
1054 void Structure::visitChildren(JSCell* cell, SlotVisitor& visitor)
1055 {
1056     Structure* thisObject = jsCast<Structure*>(cell);
1057     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
1058
1059     JSCell::visitChildren(thisObject, visitor);
1060     
1061     ConcurrentJSLocker locker(thisObject->m_lock);
1062     
1063     visitor.append(thisObject->m_globalObject);
1064     if (!thisObject->isObject())
1065         thisObject->m_cachedPrototypeChain.clear();
1066     else {
1067         visitor.append(thisObject->m_prototype);
1068         visitor.append(thisObject->m_cachedPrototypeChain);
1069     }
1070     visitor.append(thisObject->m_previousOrRareData);
1071
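    // The property table is only kept alive here if it is pinned, if a property
    // is currently being added for a transition, or if a heap snapshot is being
    // built; otherwise it is dropped and can be rematerialized later from the
    // transition chain (see materializePropertyTable()).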
1072     if (thisObject->isPinnedPropertyTable() || thisObject->isAddingPropertyForTransition()) {
1073         // NOTE: This can interleave with pin(), in which case it may see a null property table.
1074         // That's fine, because then the barrier will fire and we will scan this again.
1075         visitor.append(thisObject->m_propertyTableUnsafe);
1076     } else if (visitor.isBuildingHeapSnapshot())
1077         visitor.append(thisObject->m_propertyTableUnsafe);
1078     else if (thisObject->m_propertyTableUnsafe)
1079         thisObject->m_propertyTableUnsafe.clear();
1080
1081     visitor.append(thisObject->m_inferredTypeTable);
1082 }
1083
1084 bool Structure::isCheapDuringGC()
1085 {
1086     // FIXME: We could make this even safer by returning false if this structure's property table
1087     // has any large property names.
1088     // https://bugs.webkit.org/show_bug.cgi?id=157334
1089     
1090     return (!m_globalObject || Heap::isMarkedConcurrently(m_globalObject.get()))
1091         && (!storedPrototypeObject() || Heap::isMarkedConcurrently(storedPrototypeObject()));
1092 }
1093
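// Marks this structure opportunistically during GC, but only when
// isCheapDuringGC() says its global object and stored prototype are already
// marked; otherwise it just reports whether this structure is already marked.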
1094 bool Structure::markIfCheap(SlotVisitor& visitor)
1095 {
1096     if (!isCheapDuringGC())
1097         return Heap::isMarkedConcurrently(this);
1098     
1099     visitor.appendUnbarriered(this);
1100     return true;
1101 }
1102
1103 bool Structure::prototypeChainMayInterceptStoreTo(VM& vm, PropertyName propertyName)
1104 {
1105     if (parseIndex(propertyName))
1106         return anyObjectInChainMayInterceptIndexedAccesses();
1107     
1108     for (Structure* current = this; ;) {
1109         JSValue prototype = current->storedPrototype();
1110         if (prototype.isNull())
1111             return false;
1112         
1113         current = prototype.asCell()->structure(vm);
1114         
1115         unsigned attributes;
1116         PropertyOffset offset = current->get(vm, propertyName, attributes);
1117         if (!JSC::isValidOffset(offset))
1118             continue;
1119         
1120         if (attributes & (ReadOnly | Accessor))
1121             return true;
1122         
1123         return false;
1124     }
1125 }
1126
1127 PassRefPtr<StructureShape> Structure::toStructureShape(JSValue value)
1128 {
1129     RefPtr<StructureShape> baseShape = StructureShape::create();
1130     RefPtr<StructureShape> curShape = baseShape;
1131     Structure* curStructure = this;
1132     JSValue curValue = value;
1133     while (curStructure) {
1134         curStructure->forEachPropertyConcurrently(
1135             [&] (const PropertyMapEntry& entry) -> bool {
1136                 curShape->addProperty(*entry.key);
1137                 return true;
1138             });
1139
1140         if (JSObject* curObject = curValue.getObject())
1141             curShape->setConstructorName(JSObject::calculatedClassName(curObject));
1142         else
1143             curShape->setConstructorName(curStructure->classInfo()->className);
1144
1145         if (curStructure->isDictionary())
1146             curShape->enterDictionaryMode();
1147
1148         curShape->markAsFinal();
1149
1150         if (curStructure->storedPrototypeStructure()) {
1151             auto newShape = StructureShape::create();
1152             curShape->setProto(newShape.ptr());
1153             curShape = WTFMove(newShape);
1154             curValue = curStructure->storedPrototype();
1155         }
1156
1157         curStructure = curStructure->storedPrototypeStructure();
1158     }
1159     
1160     return WTFMove(baseShape);
1161 }
1162
1163 bool Structure::canUseForAllocationsOf(Structure* other)
1164 {
1165     return inlineCapacity() == other->inlineCapacity()
1166         && storedPrototype() == other->storedPrototype()
1167         && objectInitializationBlob() == other->objectInitializationBlob();
1168 }
1169
1170 void Structure::dump(PrintStream& out) const
1171 {
1172     out.print(RawPointer(this), ":[", classInfo()->className, ", {");
1173     
1174     CommaPrinter comma;
1175     
1176     const_cast<Structure*>(this)->forEachPropertyConcurrently(
1177         [&] (const PropertyMapEntry& entry) -> bool {
1178             out.print(comma, entry.key, ":", static_cast<int>(entry.offset));
1179             return true;
1180         });
1181     
1182     out.print("}, ", IndexingTypeDump(indexingType()));
1183     
1184     if (m_prototype.get().isCell())
1185         out.print(", Proto:", RawPointer(m_prototype.get().asCell()));
1186
1187     switch (dictionaryKind()) {
1188     case NoneDictionaryKind:
1189         if (hasBeenDictionary())
1190             out.print(", Has been dictionary");
1191         break;
1192     case CachedDictionaryKind:
1193         out.print(", Dictionary");
1194         break;
1195     case UncachedDictionaryKind:
1196         out.print(", UncacheableDictionary");
1197         break;
1198     }
1199
1200     if (transitionWatchpointSetIsStillValid())
1201         out.print(", Leaf");
1202     else if (transitionWatchpointIsLikelyToBeFired())
1203         out.print(", Shady leaf");
1204     
1205     out.print("]");
1206 }
1207
1208 void Structure::dumpInContext(PrintStream& out, DumpContext* context) const
1209 {
1210     if (context)
1211         context->structures.dumpBrief(this, out);
1212     else
1213         dump(out);
1214 }
1215
1216 void Structure::dumpBrief(PrintStream& out, const CString& string) const
1217 {
1218     out.print("%", string, ":", classInfo()->className);
1219 }
1220
1221 void Structure::dumpContextHeader(PrintStream& out)
1222 {
1223     out.print("Structures:");
1224 }
1225
1226 bool ClassInfo::hasStaticSetterOrReadonlyProperties() const
1227 {
1228     for (const ClassInfo* ci = this; ci; ci = ci->parentClass) {
1229         if (const HashTable* table = ci->staticPropHashTable) {
1230             if (table->hasSetterOrReadonlyProperties)
1231                 return true;
1232         }
1233     }
1234     return false;
1235 }
1236
1237 void Structure::setCachedPropertyNameEnumerator(VM& vm, JSPropertyNameEnumerator* enumerator)
1238 {
1239     ASSERT(!isDictionary());
1240     if (!hasRareData())
1241         allocateRareData(vm);
1242     rareData()->setCachedPropertyNameEnumerator(vm, enumerator);
1243 }
1244
1245 JSPropertyNameEnumerator* Structure::cachedPropertyNameEnumerator() const
1246 {
1247     if (!hasRareData())
1248         return nullptr;
1249     return rareData()->cachedPropertyNameEnumerator();
1250 }
1251
1252 bool Structure::canCachePropertyNameEnumerator() const
1253 {
1254     if (isDictionary())
1255         return false;
1256
1257     if (hasIndexedProperties(indexingType()))
1258         return false;
1259
1260     if (typeInfo().overridesGetPropertyNames())
1261         return false;
1262
1263     StructureChain* structureChain = m_cachedPrototypeChain.get();
1264     ASSERT(structureChain);
1265     WriteBarrier<Structure>* structure = structureChain->head();
1266     while (true) {
1267         if (!structure->get())
1268             break;
1269         if (structure->get()->typeInfo().overridesGetPropertyNames())
1270             return false;
1271         structure++;
1272     }
1273     
1274     return true;
1275 }
1276     
1277 bool Structure::canAccessPropertiesQuicklyForEnumeration() const
1278 {
1279     if (!isQuickPropertyAccessAllowedForEnumeration())
1280         return false;
1281     if (hasGetterSetterProperties())
1282         return false;
1283     if (isUncacheableDictionary())
1284         return false;
1285     return true;
1286 }
1287
1288 } // namespace JSC