[JSC] Optimize Object.keys by caching own keys results in StructureRareData
[WebKit-https.git] / Source / JavaScriptCore / runtime / Structure.cpp
1 /*
2  * Copyright (C) 2008, 2009, 2013-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "Structure.h"
28
29 #include "BuiltinNames.h"
30 #include "CodeBlock.h"
31 #include "DumpContext.h"
32 #include "JSCInlines.h"
33 #include "JSObject.h"
34 #include "JSPropertyNameEnumerator.h"
35 #include "Lookup.h"
36 #include "PropertyMapHashTable.h"
37 #include "PropertyNameArray.h"
38 #include "StructureChain.h"
39 #include "StructureRareDataInlines.h"
40 #include "WeakGCMapInlines.h"
41 #include <wtf/CommaPrinter.h>
42 #include <wtf/NeverDestroyed.h>
43 #include <wtf/ProcessID.h>
44 #include <wtf/RefPtr.h>
45 #include <wtf/Threading.h>
46
47 #define DUMP_STRUCTURE_ID_STATISTICS 0
48
49 namespace JSC {
50
51 #if DUMP_STRUCTURE_ID_STATISTICS
52 static HashSet<Structure*>& liveStructureSet = *(new HashSet<Structure*>);
53 #endif
54
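// Most Structures have at most one outgoing transition, so StructureTransitionTable packs a
// single weak reference to that transition directly into m_data (tagged with
// UsingSingleSlotFlag) and only allocates a real TransitionMap once a second transition is
// added. The weak handle owner below resets m_data when that lone transition is collected.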
55 class SingleSlotTransitionWeakOwner final : public WeakHandleOwner {
56     void finalize(Handle<Unknown>, void* context) override
57     {
58         StructureTransitionTable* table = reinterpret_cast<StructureTransitionTable*>(context);
59         ASSERT(table->isUsingSingleSlot());
60         WeakSet::deallocate(table->weakImpl());
61         table->m_data = StructureTransitionTable::UsingSingleSlotFlag;
62     }
63 };
64
65 static SingleSlotTransitionWeakOwner& singleSlotTransitionWeakOwner()
66 {
67     static NeverDestroyed<SingleSlotTransitionWeakOwner> owner;
68     return owner;
69 }
70
71 inline Structure* StructureTransitionTable::singleTransition() const
72 {
73     ASSERT(isUsingSingleSlot());
74     if (WeakImpl* impl = this->weakImpl()) {
75         if (impl->state() == WeakImpl::Live)
76             return jsCast<Structure*>(impl->jsValue().asCell());
77     }
78     return nullptr;
79 }
80
81 inline void StructureTransitionTable::setSingleTransition(Structure* structure)
82 {
83     ASSERT(isUsingSingleSlot());
84     if (WeakImpl* impl = this->weakImpl())
85         WeakSet::deallocate(impl);
86     WeakImpl* impl = WeakSet::allocate(structure, &singleSlotTransitionWeakOwner(), this);
87     m_data = PoisonedWeakImplPtr(impl).bits() | UsingSingleSlotFlag;
88 }
89
90 bool StructureTransitionTable::contains(UniquedStringImpl* rep, unsigned attributes) const
91 {
92     if (isUsingSingleSlot()) {
93         Structure* transition = singleTransition();
94         return transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes;
95     }
96     return map()->get(std::make_pair(rep, attributes));
97 }
98
99 inline Structure* StructureTransitionTable::get(UniquedStringImpl* rep, unsigned attributes) const
100 {
101     if (isUsingSingleSlot()) {
102         Structure* transition = singleTransition();
103         return (transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes) ? transition : nullptr;
104     }
105     return map()->get(std::make_pair(rep, attributes));
106 }
107
108 void StructureTransitionTable::add(VM& vm, Structure* structure)
109 {
110     if (isUsingSingleSlot()) {
111         Structure* existingTransition = singleTransition();
112
113         // This handles the first transition being added.
114         if (!existingTransition) {
115             setSingleTransition(structure);
116             return;
117         }
118
119         // This handles the second transition being added
120         // (or the first transition being despecified!)
121         setMap(new TransitionMap(vm));
122         add(vm, existingTransition);
123     }
124
125     // Add the structure to the map.
126
127     // Newer versions of the STL have an std::make_pair function that takes rvalue references.
128     // When either of the parameters is a bitfield, the C++ compiler will try to bind it as an lvalue, which is invalid. To work around this, use unary "+" to make the parameter an rvalue.
129     // See https://bugs.webkit.org/show_bug.cgi?id=59261 for more details
130     map()->set(std::make_pair(structure->m_nameInPrevious.get(), +structure->attributesInPrevious()), structure);
131 }
132
133 void Structure::dumpStatistics()
134 {
135 #if DUMP_STRUCTURE_ID_STATISTICS
136     unsigned numberLeaf = 0;
137     unsigned numberUsingSingleSlot = 0;
138     unsigned numberSingletons = 0;
139     unsigned numberWithPropertyMaps = 0;
140     unsigned totalPropertyMapsSize = 0;
141
142     HashSet<Structure*>::const_iterator end = liveStructureSet.end();
143     for (HashSet<Structure*>::const_iterator it = liveStructureSet.begin(); it != end; ++it) {
144         Structure* structure = *it;
145
146         switch (structure->m_transitionTable.size()) {
147             case 0:
148                 ++numberLeaf;
149                 if (!structure->previousID())
150                     ++numberSingletons;
151                 break;
152
153             case 1:
154                 ++numberUsingSingleSlot;
155                 break;
156         }
157
158         if (PropertyTable* table = structure->propertyTableOrNull()) {
159             ++numberWithPropertyMaps;
160             totalPropertyMapsSize += table->sizeInMemory();
161         }
162     }
163
164     dataLogF("Number of live Structures: %d\n", liveStructureSet.size());
165     dataLogF("Number of Structures using the single item optimization for transition map: %d\n", numberUsingSingleSlot);
166     dataLogF("Number of Structures that are leaf nodes: %d\n", numberLeaf);
167     dataLogF("Number of Structures that are singletons: %d\n", numberSingletons);
168     dataLogF("Number of Structures with PropertyMaps: %d\n", numberWithPropertyMaps);
169
170     dataLogF("Size of a single Structure: %d\n", static_cast<unsigned>(sizeof(Structure)));
171     dataLogF("Size of sum of all property maps: %d\n", totalPropertyMapsSize);
172     dataLogF("Size of average of all property maps: %f\n", static_cast<double>(totalPropertyMapsSize) / static_cast<double>(liveStructureSet.size()));
173 #else
174     dataLogF("Dumping Structure statistics is not enabled.\n");
175 #endif
176 }
177
178 Structure::Structure(VM& vm, JSGlobalObject* globalObject, JSValue prototype, const TypeInfo& typeInfo, const ClassInfo* classInfo, IndexingType indexingType, unsigned inlineCapacity)
179     : JSCell(vm, vm.structureStructure.get())
180     , m_blob(vm.heap.structureIDTable().allocateID(this), indexingType, typeInfo)
181     , m_outOfLineTypeFlags(typeInfo.outOfLineTypeFlags())
182     , m_inlineCapacity(inlineCapacity)
183     , m_bitField(0)
184     , m_globalObject(vm, this, globalObject, WriteBarrier<JSGlobalObject>::MayBeNull)
185     , m_prototype(vm, this, prototype)
186     , m_classInfo(classInfo)
187     , m_transitionWatchpointSet(IsWatched)
188     , m_offset(invalidOffset)
189     , m_propertyHash(0)
190 {
191     setDictionaryKind(NoneDictionaryKind);
192     setIsPinnedPropertyTable(false);
193     setHasGetterSetterProperties(classInfo->hasStaticSetterOrReadonlyProperties());
194     setHasCustomGetterSetterProperties(false);
195     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(classInfo->hasStaticSetterOrReadonlyProperties());
196     setHasUnderscoreProtoPropertyExcludingOriginalProto(false);
197     setIsQuickPropertyAccessAllowedForEnumeration(true);
198     setAttributesInPrevious(0);
199     setDidPreventExtensions(false);
200     setDidTransition(false);
201     setStaticPropertiesReified(false);
202     setTransitionWatchpointIsLikelyToBeFired(false);
203     setHasBeenDictionary(false);
204     setIsAddingPropertyForTransition(false);
205  
206     ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
207     ASSERT(static_cast<PropertyOffset>(inlineCapacity) < firstOutOfLineOffset);
208     ASSERT(!hasRareData());
209     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
210     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
211     ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData);
212 }
213
214 const ClassInfo Structure::s_info = { "Structure", nullptr, nullptr, nullptr, CREATE_METHOD_TABLE(Structure) };
215
216 Structure::Structure(VM& vm)
217     : JSCell(CreatingEarlyCell)
218     , m_inlineCapacity(0)
219     , m_bitField(0)
220     , m_prototype(vm, this, jsNull())
221     , m_classInfo(info())
222     , m_transitionWatchpointSet(IsWatched)
223     , m_offset(invalidOffset)
224     , m_propertyHash(0)
225 {
226     setDictionaryKind(NoneDictionaryKind);
227     setIsPinnedPropertyTable(false);
228     setHasGetterSetterProperties(m_classInfo->hasStaticSetterOrReadonlyProperties());
229     setHasCustomGetterSetterProperties(false);
230     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(m_classInfo->hasStaticSetterOrReadonlyProperties());
231     setHasUnderscoreProtoPropertyExcludingOriginalProto(false);
232     setIsQuickPropertyAccessAllowedForEnumeration(true);
233     setAttributesInPrevious(0);
234     setDidPreventExtensions(false);
235     setDidTransition(false);
236     setStaticPropertiesReified(false);
237     setTransitionWatchpointIsLikelyToBeFired(false);
238     setHasBeenDictionary(false);
239     setIsAddingPropertyForTransition(false);
240  
241     TypeInfo typeInfo = TypeInfo(CellType, StructureFlags);
242     m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), 0, typeInfo);
243     m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags();
244
245     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
246     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
247     ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData);
248 }
249
250 Structure::Structure(VM& vm, Structure* previous, DeferredStructureTransitionWatchpointFire* deferred)
251     : JSCell(vm, vm.structureStructure.get())
252     , m_inlineCapacity(previous->m_inlineCapacity)
253     , m_bitField(0)
254     , m_prototype(vm, this, previous->m_prototype.get())
255     , m_classInfo(previous->m_classInfo)
256     , m_transitionWatchpointSet(IsWatched)
257     , m_offset(invalidOffset)
258     , m_propertyHash(previous->m_propertyHash)
259 {
260     setDictionaryKind(previous->dictionaryKind());
261     setIsPinnedPropertyTable(false);
262     setHasBeenFlattenedBefore(previous->hasBeenFlattenedBefore());
263     setHasGetterSetterProperties(previous->hasGetterSetterProperties());
264     setHasCustomGetterSetterProperties(previous->hasCustomGetterSetterProperties());
265     setHasReadOnlyOrGetterSetterPropertiesExcludingProto(previous->hasReadOnlyOrGetterSetterPropertiesExcludingProto());
266     setHasUnderscoreProtoPropertyExcludingOriginalProto(previous->hasUnderscoreProtoPropertyExcludingOriginalProto());
267     setIsQuickPropertyAccessAllowedForEnumeration(previous->isQuickPropertyAccessAllowedForEnumeration());
268     setAttributesInPrevious(0);
269     setDidPreventExtensions(previous->didPreventExtensions());
270     setDidTransition(true);
271     setStaticPropertiesReified(previous->staticPropertiesReified());
272     setHasBeenDictionary(previous->hasBeenDictionary());
273     setIsAddingPropertyForTransition(false);
274  
275     TypeInfo typeInfo = previous->typeInfo();
276     m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), previous->indexingModeIncludingHistory(), typeInfo);
277     m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags();
278
279     ASSERT(!previous->typeInfo().structureIsImmortal());
280     setPreviousID(vm, previous);
281
282     previous->didTransitionFromThisStructure(deferred);
283     
284     // Copy this bit now, in case previous was being watched.
285     setTransitionWatchpointIsLikelyToBeFired(previous->transitionWatchpointIsLikelyToBeFired());
286
287     if (previous->m_globalObject)
288         m_globalObject.set(vm, this, previous->m_globalObject.get());
289     ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
290     ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties());
291     ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData);
292 }
293
294 Structure::~Structure()
295 {
296     if (typeInfo().structureIsImmortal())
297         return;
298     Heap::heap(this)->structureIDTable().deallocateID(this, m_blob.structureID());
299 }
300
301 void Structure::destroy(JSCell* cell)
302 {
303     static_cast<Structure*>(cell)->Structure::~Structure();
304 }
305
306 Structure* Structure::create(PolyProtoTag, VM& vm, JSGlobalObject* globalObject, JSObject* prototype, const TypeInfo& typeInfo, const ClassInfo* classInfo, IndexingType indexingType, unsigned inlineCapacity)
307 {
308     Structure* result = create(vm, globalObject, prototype, typeInfo, classInfo, indexingType, inlineCapacity);
309
310     unsigned oldOutOfLineCapacity = result->outOfLineCapacity();
311     result->addPropertyWithoutTransition(
312         vm, vm.propertyNames->builtinNames().polyProtoName(), static_cast<unsigned>(PropertyAttribute::DontEnum),
313         [&] (const GCSafeConcurrentJSLocker&, PropertyOffset offset, PropertyOffset newLastOffset) {
314             RELEASE_ASSERT(Structure::outOfLineCapacity(newLastOffset) == oldOutOfLineCapacity);
315             RELEASE_ASSERT(offset == knownPolyProtoOffset);
316             RELEASE_ASSERT(isInlineOffset(knownPolyProtoOffset));
317             result->m_prototype.setWithoutWriteBarrier(JSValue());
318             result->setLastOffset(newLastOffset);
319         });
320
321     return result;
322 }
323
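// Property tables are materialized lazily: a Structure that does not currently own a table can
// rebuild one by walking back through previousID() and replaying each ancestor's
// m_nameInPrevious / attributesInPrevious. The helper below collects the structures that need
// replaying and, if some ancestor still owns a table, returns it with that ancestor left locked.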
324 void Structure::findStructuresAndMapForMaterialization(Vector<Structure*, 8>& structures, Structure*& structure, PropertyTable*& table)
325 {
326     ASSERT(structures.isEmpty());
327     table = nullptr;
328
329     for (structure = this; structure; structure = structure->previousID()) {
330         structure->m_lock.lock();
331         
332         table = structure->propertyTableOrNull();
333         if (table) {
334             // Leave the structure locked, so that the caller can do things to it atomically
335             // before it loses its property table.
336             return;
337         }
338         
339         structures.append(structure);
340         structure->m_lock.unlock();
341     }
342     
343     ASSERT(!structure);
344     ASSERT(!table);
345 }
346
347 PropertyTable* Structure::materializePropertyTable(VM& vm, bool setPropertyTable)
348 {
349     ASSERT(structure(vm)->classInfo() == info());
350     ASSERT(!isAddingPropertyForTransition());
351     
352     DeferGC deferGC(vm.heap);
353     
354     Vector<Structure*, 8> structures;
355     Structure* structure;
356     PropertyTable* table;
357     
358     findStructuresAndMapForMaterialization(structures, structure, table);
359     
360     unsigned capacity = numberOfSlotsForLastOffset(m_offset, m_inlineCapacity);
361     if (table) {
362         table = table->copy(vm, capacity);
363         structure->m_lock.unlock();
364     } else
365         table = PropertyTable::create(vm, capacity);
366     
367     // Must hold the lock on this structure, since we will be modifying this structure's
368     // property map. We don't want getConcurrently() to see the property map in a half-baked
369     // state.
370     GCSafeConcurrentJSLocker locker(m_lock, vm.heap);
371     if (setPropertyTable)
372         this->setPropertyTable(vm, table);
373
374     InferredTypeTable* typeTable = m_inferredTypeTable.get();
375
376     for (size_t i = structures.size(); i--;) {
377         structure = structures[i];
378         if (!structure->m_nameInPrevious)
379             continue;
380         PropertyMapEntry entry(structure->m_nameInPrevious.get(), structure->m_offset, structure->attributesInPrevious());
381         if (typeTable && typeTable->get(structure->m_nameInPrevious.get()))
382             entry.hasInferredType = true;
383         table->add(entry, m_offset, PropertyTable::PropertyOffsetMustNotChange);
384     }
385     
386     checkOffsetConsistency(
387         table,
388         [&] () {
389             dataLog("Detected in materializePropertyTable.\n");
390             dataLog("Found structure = ", RawPointer(structure), "\n");
391             dataLog("structures = ");
392             CommaPrinter comma;
393             for (Structure* structure : structures)
394                 dataLog(comma, RawPointer(structure));
395             dataLog("\n");
396         });
397     
398     return table;
399 }
400
401 Structure* Structure::addPropertyTransitionToExistingStructureImpl(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset)
402 {
403     ASSERT(!structure->isDictionary());
404     ASSERT(structure->isObject());
405
406     if (Structure* existingTransition = structure->m_transitionTable.get(uid, attributes)) {
407         validateOffset(existingTransition->m_offset, existingTransition->inlineCapacity());
408         offset = existingTransition->m_offset;
409         return existingTransition;
410     }
411
412     return nullptr;
413 }
414
415 Structure* Structure::addPropertyTransitionToExistingStructure(Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset)
416 {
417     ASSERT(!isCompilationThread());
418     return addPropertyTransitionToExistingStructureImpl(structure, propertyName.uid(), attributes, offset);
419 }
420
421 Structure* Structure::addPropertyTransitionToExistingStructureConcurrently(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset)
422 {
423     ConcurrentJSLocker locker(structure->m_lock);
424     return addPropertyTransitionToExistingStructureImpl(structure, uid, attributes, offset);
425 }
426
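// Returns true when a missing indexed property (a "hole") cannot simply be read as undefined,
// because this structure or something on its prototype chain either may intercept indexed
// accesses or has indexed properties of its own.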
427 bool Structure::holesMustForwardToPrototype(VM& vm, JSObject* base) const
428 {
429     ASSERT(base->structure(vm) == this);
430
431     if (this->mayInterceptIndexedAccesses())
432         return true;
433
434     JSValue prototype = this->storedPrototype(base);
435     if (!prototype.isObject())
436         return false;
437     JSObject* object = asObject(prototype);
438
439     while (true) {
440         Structure& structure = *object->structure(vm);
441         if (hasIndexedProperties(object->indexingType()) || structure.mayInterceptIndexedAccesses())
442             return true;
443         prototype = structure.storedPrototype(object);
444         if (!prototype.isObject())
445             return false;
446         object = asObject(prototype);
447     }
448
449     RELEASE_ASSERT_NOT_REACHED();
450     return false;
451 }
452
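// Illustrative example (not part of this file): evaluating `var o = {}; o.x = 1; o.y = 2` walks
// the empty object's structure through an "x" transition and then a "y" transition. Running the
// same code again finds those transitions in the transition table, so
// addPropertyTransitionToExistingStructure() returns the cached structures instead of creating
// new ones.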
453 Structure* Structure::addPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset)
454 {
455     Structure* newStructure = addPropertyTransitionToExistingStructure(
456         structure, propertyName, attributes, offset);
457     if (newStructure)
458         return newStructure;
459
460     return addNewPropertyTransition(
461         vm, structure, propertyName, attributes, offset, PutPropertySlot::UnknownContext);
462 }
463
464 Structure* Structure::addNewPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset, PutPropertySlot::Context context, DeferredStructureTransitionWatchpointFire* deferred)
465 {
466     ASSERT(!structure->isDictionary());
467     ASSERT(structure->isObject());
468     ASSERT(!Structure::addPropertyTransitionToExistingStructure(structure, propertyName, attributes, offset));
469     
470     int maxTransitionLength;
471     if (context == PutPropertySlot::PutById)
472         maxTransitionLength = s_maxTransitionLengthForNonEvalPutById;
473     else
474         maxTransitionLength = s_maxTransitionLength;
475     if (structure->transitionCount() > maxTransitionLength) {
476         ASSERT(!isCopyOnWrite(structure->indexingMode()));
477         Structure* transition = toCacheableDictionaryTransition(vm, structure, deferred);
478         ASSERT(structure != transition);
479         offset = transition->add(vm, propertyName, attributes);
480         return transition;
481     }
482     
483     Structure* transition = create(vm, structure, deferred);
484
485     transition->m_cachedPrototypeChain.setMayBeNull(vm, transition, structure->m_cachedPrototypeChain.get());
486     
487     // While we are adding the property, rematerializing the property table is super weird: we already
488     // have an m_nameInPrevious and attributesInPrevious but the m_offset is still wrong. If the
489     // materialization algorithm runs, it'll build a property table that already has the property but
490     // at a bogus offset. Rather than try to teach the materialization code how to create a table under
491     // those conditions, we just tell the GC not to blow the table away during this period of time.
492     // Holding the lock ensures that we either do this before the GC starts scanning the structure, in
493     // which case the GC will not blow the table away, or we do it after the GC already ran in which
494     // case all is well. If it weren't for the lock, the GC would have a TOCTOU race: it could read
495     // isAddingPropertyForTransition before we set it to true, and then blow the table away after.
496     {
497         ConcurrentJSLocker locker(transition->m_lock);
498         transition->setIsAddingPropertyForTransition(true);
499     }
500
501     transition->m_blob.setIndexingModeIncludingHistory(structure->indexingModeIncludingHistory() & ~CopyOnWrite);
502     transition->m_nameInPrevious = propertyName.uid();
503     transition->setAttributesInPrevious(attributes);
504     transition->setPropertyTable(vm, structure->takePropertyTableOrCloneIfPinned(vm));
505     transition->m_offset = structure->m_offset;
506     transition->m_inferredTypeTable.setMayBeNull(vm, transition, structure->m_inferredTypeTable.get());
507
508     offset = transition->add(vm, propertyName, attributes);
509
510     // Now that everything is fine with the new structure's bookkeeping, the GC is free to blow the
511     // table away if it wants. We can rebuild it just fine if that happens.
512     WTF::storeStoreFence();
513     transition->setIsAddingPropertyForTransition(false);
514
515     checkOffset(transition->m_offset, transition->inlineCapacity());
516     {
517         ConcurrentJSLocker locker(structure->m_lock);
518         structure->m_transitionTable.add(vm, transition);
519     }
520     transition->checkOffsetConsistency();
521     structure->checkOffsetConsistency();
522     return transition;
523 }
524
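// Illustrative example (not part of this file): `delete o.x` ends up here, and as the NOTE below
// explains, the object is simply moved to an uncacheable dictionary structure rather than
// caching a removal transition.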
525 Structure* Structure::removePropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, PropertyOffset& offset)
526 {
527     // NOTE: There are some good reasons why this goes directly to uncacheable dictionary rather than
528     // caching the removal. We can fix all of these things, but we must remember to do so if we ever try
529     // to optimize this case.
530     //
531     // - Cached transitions usually steal the property table, and assume that this is possible because they
532     //   can just rebuild the table by looking at past transitions. That code assumes that the table only
533     //   grew and never shrank. To support removals, we'd have to change the property table materialization
534     //   code to handle deletions. Also, we have logic to get the list of properties on a structure that
535     //   lacks a property table by just looking back through the set of transitions since the last
536     //   structure that had a pinned table. That logic would also have to be changed to handle cached
537     //   removals.
538     //
539     // - InferredTypeTable assumes that removal has never happened. This is important since if we could
540     //   remove a property and then re-add it later, then the "absence means top" optimization wouldn't
541     //   work anymore, unless removal also either poisoned type inference (by doing something equivalent to
542     //   hasBeenDictionary) or strongly marked the entry as Top by ensuring that it is not absent, but
543     //   instead, has a null entry.
544     
545     ASSERT(!structure->isUncacheableDictionary());
546
547     Structure* transition = toUncacheableDictionaryTransition(vm, structure);
548
549     offset = transition->remove(propertyName);
550
551     transition->checkOffsetConsistency();
552     return transition;
553 }
554
555 Structure* Structure::changePrototypeTransition(VM& vm, Structure* structure, JSValue prototype, DeferredStructureTransitionWatchpointFire& deferred)
556 {
557     ASSERT(prototype.isObject() || prototype.isNull());
558
559     DeferGC deferGC(vm.heap);
560     Structure* transition = create(vm, structure, &deferred);
561
562     transition->m_prototype.set(vm, transition, prototype);
563
564     PropertyTable* table = structure->copyPropertyTableForPinning(vm);
565     transition->pin(holdLock(transition->m_lock), vm, table);
566     transition->m_offset = structure->m_offset;
567     
568     transition->checkOffsetConsistency();
569     return transition;
570 }
571
572 Structure* Structure::attributeChangeTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes)
573 {
574     if (!structure->isUncacheableDictionary()) {
575         Structure* transition = create(vm, structure);
576
577         PropertyTable* table = structure->copyPropertyTableForPinning(vm);
578         transition->pin(holdLock(transition->m_lock), vm, table);
579         transition->m_offset = structure->m_offset;
580         
581         structure = transition;
582     }
583
584     PropertyMapEntry* entry = structure->ensurePropertyTable(vm)->get(propertyName.uid());
585     ASSERT(entry);
586     entry->attributes = attributes;
587
588     structure->checkOffsetConsistency();
589     return structure;
590 }
591
592 Structure* Structure::toDictionaryTransition(VM& vm, Structure* structure, DictionaryKind kind, DeferredStructureTransitionWatchpointFire* deferred)
593 {
594     ASSERT(!structure->isUncacheableDictionary());
595     DeferGC deferGC(vm.heap);
596     
597     Structure* transition = create(vm, structure, deferred);
598
599     PropertyTable* table = structure->copyPropertyTableForPinning(vm);
600     transition->pin(holdLock(transition->m_lock), vm, table);
601     transition->m_offset = structure->m_offset;
602     transition->setDictionaryKind(kind);
603     transition->setHasBeenDictionary(true);
604     
605     transition->checkOffsetConsistency();
606     return transition;
607 }
608
609 Structure* Structure::toCacheableDictionaryTransition(VM& vm, Structure* structure, DeferredStructureTransitionWatchpointFire* deferred)
610 {
611     return toDictionaryTransition(vm, structure, CachedDictionaryKind, deferred);
612 }
613
614 Structure* Structure::toUncacheableDictionaryTransition(VM& vm, Structure* structure)
615 {
616     return toDictionaryTransition(vm, structure, UncachedDictionaryKind);
617 }
618
619 Structure* Structure::sealTransition(VM& vm, Structure* structure)
620 {
621     return nonPropertyTransition(vm, structure, NonPropertyTransition::Seal);
622 }
623
624 Structure* Structure::freezeTransition(VM& vm, Structure* structure)
625 {
626     return nonPropertyTransition(vm, structure, NonPropertyTransition::Freeze);
627 }
628
629 Structure* Structure::preventExtensionsTransition(VM& vm, Structure* structure)
630 {
631     return nonPropertyTransition(vm, structure, NonPropertyTransition::PreventExtensions);
632 }
633
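// Used when a new transition wants to inherit this structure's property table. An unpinned table
// can simply be stolen (it can be rematerialized here later if needed); a pinned table must stay
// with its owner, so the caller gets a copy. If there is no table at all, one is materialized
// for the caller without being installed on this structure.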
634 PropertyTable* Structure::takePropertyTableOrCloneIfPinned(VM& vm)
635 {
636     // This must always return a property table. It can't return null.
637     PropertyTable* result = propertyTableOrNull();
638     if (result) {
639         if (isPinnedPropertyTable())
640             return result->copy(vm, result->size() + 1);
641         ConcurrentJSLocker locker(m_lock);
642         setPropertyTable(vm, nullptr);
643         return result;
644     }
645     bool setPropertyTable = false;
646     return materializePropertyTable(vm, setPropertyTable);
647 }
648
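// Non-property transitions (seal, freeze, preventExtensions, indexing-mode changes) reuse the
// transition table, keyed by a null property name plus the attribute bits implied by the
// transition kind, so repeating the same kind of transition finds the cached successor structure.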
649 Structure* Structure::nonPropertyTransitionSlow(VM& vm, Structure* structure, NonPropertyTransition transitionKind)
650 {
651     unsigned attributes = toAttributes(transitionKind);
652     IndexingType indexingModeIncludingHistory = newIndexingType(structure->indexingModeIncludingHistory(), transitionKind);
653     
654     Structure* existingTransition;
655     if (!structure->isDictionary() && (existingTransition = structure->m_transitionTable.get(0, attributes))) {
656         ASSERT(existingTransition->attributesInPrevious() == attributes);
657         ASSERT(existingTransition->indexingModeIncludingHistory() == indexingModeIncludingHistory);
658         return existingTransition;
659     }
660     
661     DeferGC deferGC(vm.heap);
662     
663     Structure* transition = create(vm, structure);
664     transition->setAttributesInPrevious(attributes);
665     transition->m_blob.setIndexingModeIncludingHistory(indexingModeIncludingHistory);
666     
667     if (preventsExtensions(transitionKind))
668         transition->setDidPreventExtensions(true);
669     
670     if (setsDontDeleteOnAllProperties(transitionKind)
671         || setsReadOnlyOnNonAccessorProperties(transitionKind)) {
672         // We pin the property table on transitions that do wholesale editing of the property
673         // table, since our logic for walking the property transition chain to rematerialize the
674         // table doesn't know how to take into account such wholesale edits.
675
676         PropertyTable* table = structure->copyPropertyTableForPinning(vm);
677         transition->pinForCaching(holdLock(transition->m_lock), vm, table);
678         transition->m_offset = structure->m_offset;
679         
680         table = transition->propertyTableOrNull();
681         RELEASE_ASSERT(table);
682         for (auto& entry : *table) {
683             if (setsDontDeleteOnAllProperties(transitionKind))
684                 entry.attributes |= static_cast<unsigned>(PropertyAttribute::DontDelete);
685             if (setsReadOnlyOnNonAccessorProperties(transitionKind) && !(entry.attributes & PropertyAttribute::Accessor))
686                 entry.attributes |= static_cast<unsigned>(PropertyAttribute::ReadOnly);
687         }
688     } else {
689         transition->setPropertyTable(vm, structure->takePropertyTableOrCloneIfPinned(vm));
690         transition->m_offset = structure->m_offset;
691         checkOffset(transition->m_offset, transition->inlineCapacity());
692     }
693     
694     if (setsReadOnlyOnNonAccessorProperties(transitionKind)
695         && !transition->propertyTableOrNull()->isEmpty())
696         transition->setHasReadOnlyOrGetterSetterPropertiesExcludingProto(true);
697     
698     if (structure->isDictionary()) {
699         PropertyTable* table = transition->ensurePropertyTable(vm);
700         transition->pin(holdLock(transition->m_lock), vm, table);
701     } else {
702         auto locker = holdLock(structure->m_lock);
703         structure->m_transitionTable.add(vm, transition);
704     }
705
706     transition->checkOffsetConsistency();
707     return transition;
708 }
709
710 // In future we may want to cache this property.
711 bool Structure::isSealed(VM& vm)
712 {
713     if (isStructureExtensible())
714         return false;
715
716     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
717     if (!table)
718         return true;
719     
720     PropertyTable::iterator end = table->end();
721     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
722         if ((iter->attributes & PropertyAttribute::DontDelete) != static_cast<unsigned>(PropertyAttribute::DontDelete))
723             return false;
724     }
725     return true;
726 }
727
728 // In future we may want to cache this property.
729 bool Structure::isFrozen(VM& vm)
730 {
731     if (isStructureExtensible())
732         return false;
733
734     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
735     if (!table)
736         return true;
737     
738     PropertyTable::iterator end = table->end();
739     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
740         if (!(iter->attributes & PropertyAttribute::DontDelete))
741             return false;
742         if (!(iter->attributes & (PropertyAttribute::ReadOnly | PropertyAttribute::Accessor)))
743             return false;
744     }
745     return true;
746 }
747
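// Flattening takes a dictionary object whose property offsets may have become sparse (e.g. after
// deletions) and compacts them back into insertion order, zeroing the now-unused inline and
// out-of-line slots, so that the structure can leave dictionary mode.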
748 Structure* Structure::flattenDictionaryStructure(VM& vm, JSObject* object)
749 {
750     checkOffsetConsistency();
751     ASSERT(isDictionary());
752     
753     GCSafeConcurrentJSLocker locker(m_lock, vm.heap);
754     
755     object->setStructureIDDirectly(nuke(id()));
756     WTF::storeStoreFence();
757
758     size_t beforeOutOfLineCapacity = this->outOfLineCapacity();
759     if (isUncacheableDictionary()) {
760         PropertyTable* table = propertyTableOrNull();
761         ASSERT(table);
762
763         size_t propertyCount = table->size();
764
765         // Holds our values compacted by insertion order.
766         Vector<JSValue> values(propertyCount);
767
768         // Copies out our values from their hashed locations, compacting property table offsets as we go.
769         unsigned i = 0;
770         PropertyTable::iterator end = table->end();
771         m_offset = invalidOffset;
772         for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter, ++i) {
773             values[i] = object->getDirect(iter->offset);
774             m_offset = iter->offset = offsetForPropertyNumber(i, m_inlineCapacity);
775         }
776         
777         // Copies in our values to their compacted locations.
778         for (unsigned i = 0; i < propertyCount; i++)
779             object->putDirect(vm, offsetForPropertyNumber(i, m_inlineCapacity), values[i]);
780
781         table->clearDeletedOffsets();
782
783         // We need to zero our unused property space; otherwise the GC might see a
784         // stale pointer when we add properties in the future.
785         memset(
786             object->inlineStorageUnsafe() + inlineSize(),
787             0,
788             (inlineCapacity() - inlineSize()) * sizeof(EncodedJSValue));
789
790         Butterfly* butterfly = object->butterfly();
791         memset(
792             butterfly->base(butterfly->indexingHeader()->preCapacity(this), beforeOutOfLineCapacity),
793             0,
794             (beforeOutOfLineCapacity - outOfLineSize()) * sizeof(EncodedJSValue));
795         checkOffsetConsistency();
796     }
797
798     setDictionaryKind(NoneDictionaryKind);
799     setHasBeenFlattenedBefore(true);
800
801     size_t afterOutOfLineCapacity = this->outOfLineCapacity();
802
803     if (object->butterfly() && beforeOutOfLineCapacity != afterOutOfLineCapacity) {
804         ASSERT(beforeOutOfLineCapacity > afterOutOfLineCapacity);
805         // If the object had a Butterfly but after flattening/compacting we no longer have need of it,
806         // we need to zero it out because the collector depends on the Structure to know the size for copying.
807         if (!afterOutOfLineCapacity && !this->hasIndexingHeader(object))
808             object->setButterfly(vm, nullptr);
809         // If the object was down-sized to the point where the base of the Butterfly is no longer within the 
810         // first CopiedBlock::blockSize bytes, we'll get the wrong answer if we try to mask the base back to 
811         // the CopiedBlock header. To prevent this case we need to memmove the Butterfly down.
812         else
813             object->shiftButterflyAfterFlattening(locker, vm, this, afterOutOfLineCapacity);
814     }
815     
816     WTF::storeStoreFence();
817     object->setStructureIDDirectly(id());
818
819     // We need to do a write barrier here because the GC thread might be scanning the butterfly while
820     // we are shuffling properties around. See: https://bugs.webkit.org/show_bug.cgi?id=166989
821     vm.heap.writeBarrier(object);
822
823     return this;
824 }
825
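// pin() forces this structure to keep its property table alive (the GC will no longer clear it)
// and severs previousID(), so the table can never again be rematerialized from the transition
// chain. pinForCaching() below does the same but leaves previousID() intact.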
826 void Structure::pin(const AbstractLocker&, VM& vm, PropertyTable* table)
827 {
828     setIsPinnedPropertyTable(true);
829     setPropertyTable(vm, table);
830     clearPreviousID();
831     m_nameInPrevious = nullptr;
832 }
833
834 void Structure::pinForCaching(const AbstractLocker&, VM& vm, PropertyTable* table)
835 {
836     setIsPinnedPropertyTable(true);
837     setPropertyTable(vm, table);
838     m_nameInPrevious = nullptr;
839 }
840
841 void Structure::allocateRareData(VM& vm)
842 {
843     ASSERT(!hasRareData());
844     StructureRareData* rareData = StructureRareData::create(vm, previousID());
845     WTF::storeStoreFence();
846     m_previousOrRareData.set(vm, this, rareData);
847     ASSERT(hasRareData());
848 }
849
850 WatchpointSet* Structure::ensurePropertyReplacementWatchpointSet(VM& vm, PropertyOffset offset)
851 {
852     ASSERT(!isUncacheableDictionary());
853
854     // In some places it's convenient to call this with an invalid offset. So, we do the check here.
855     if (!isValidOffset(offset))
856         return nullptr;
857     
858     if (!hasRareData())
859         allocateRareData(vm);
860     ConcurrentJSLocker locker(m_lock);
861     StructureRareData* rareData = this->rareData();
862     if (!rareData->m_replacementWatchpointSets) {
863         rareData->m_replacementWatchpointSets =
864             std::make_unique<StructureRareData::PropertyWatchpointMap>();
865         WTF::storeStoreFence();
866     }
867     auto result = rareData->m_replacementWatchpointSets->add(offset, nullptr);
868     if (result.isNewEntry)
869         result.iterator->value = adoptRef(new WatchpointSet(IsWatched));
870     return result.iterator->value.get();
871 }
872
873 void Structure::startWatchingPropertyForReplacements(VM& vm, PropertyName propertyName)
874 {
875     ASSERT(!isUncacheableDictionary());
876     
877     startWatchingPropertyForReplacements(vm, get(vm, propertyName));
878 }
879
880 void Structure::didCachePropertyReplacement(VM& vm, PropertyOffset offset)
881 {
882     RELEASE_ASSERT(isValidOffset(offset));
883     ensurePropertyReplacementWatchpointSet(vm, offset)->fireAll(vm, "Did cache property replacement");
884 }
885
886 void Structure::startWatchingInternalProperties(VM& vm)
887 {
888     if (!isUncacheableDictionary()) {
889         startWatchingPropertyForReplacements(vm, vm.propertyNames->toString);
890         startWatchingPropertyForReplacements(vm, vm.propertyNames->valueOf);
891     }
892     setDidWatchInternalProperties(true);
893 }
894
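// Slow path for recording inferred types: ensures the InferredTypeTable exists, then either
// records the type of the value being stored (when shouldOptimize is set) or marks the property
// as Top, and mirrors the result into the property table entry's hasInferredType bit.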
895 void Structure::willStoreValueSlow(
896     VM& vm, PropertyName propertyName, JSValue value, bool shouldOptimize,
897     InferredTypeTable::StoredPropertyAge age)
898 {
899     ASSERT(!isCompilationThread());
900     ASSERT(structure(vm)->classInfo() == info());
901     ASSERT(!hasBeenDictionary());
902
903     ASSERT_WITH_MESSAGE(VM::canUseJIT(), "We don't want to use memory for inferred types unless we're using the JIT.");
904
905     // Create the inferred type table before doing anything else, so that we don't GC after we have already
906     // grabbed a pointer into the property map.
907     InferredTypeTable* table = m_inferredTypeTable.get();
908     if (!table) {
909         table = InferredTypeTable::create(vm);
910         WTF::storeStoreFence();
911         m_inferredTypeTable.set(vm, this, table);
912     }
913
914     // This only works if we've got a property table.
915     PropertyTable* propertyTable = ensurePropertyTable(vm);
916     
917     // We must be calling this after having created the given property or confirmed that it was present
918     // already, so the property must be present.
919     PropertyMapEntry* entry = propertyTable->get(propertyName.uid());
920     ASSERT(entry);
921     
922     if (shouldOptimize)
923         entry->hasInferredType = table->willStoreValue(vm, propertyName, value, age);
924     else {
925         table->makeTop(vm, propertyName, age);
926         entry->hasInferredType = false;
927     }
928     
929     propertyTable->use(); // This makes it safe to use entry above.
930 }
931
932 #if DUMP_PROPERTYMAP_STATS
933
934 PropertyMapHashTableStats* propertyMapHashTableStats = nullptr;
935
936 struct PropertyMapStatisticsExitLogger {
937     PropertyMapStatisticsExitLogger();
938     ~PropertyMapStatisticsExitLogger();
939 };
940
941 DEFINE_GLOBAL_FOR_LOGGING(PropertyMapStatisticsExitLogger, logger, );
942
943 PropertyMapStatisticsExitLogger::PropertyMapStatisticsExitLogger()
944 {
945     propertyMapHashTableStats = new PropertyMapHashTableStats(); // Intentionally leaked; read by the exit logger below.
946 }
947
948 PropertyMapStatisticsExitLogger::~PropertyMapStatisticsExitLogger()
949 {
950     unsigned finds = propertyMapHashTableStats->numFinds;
951     unsigned collisions = propertyMapHashTableStats->numCollisions;
952     dataLogF("\nJSC::PropertyMap statistics for process %d\n\n", getCurrentProcessID());
953     dataLogF("%d finds\n", finds);
954     dataLogF("%d collisions (%.1f%%)\n", collisions, 100.0 * collisions / finds);
955     dataLogF("%d lookups\n", propertyMapHashTableStats->numLookups.load());
956     dataLogF("%d lookup probings\n", propertyMapHashTableStats->numLookupProbing.load());
957     dataLogF("%d adds\n", propertyMapHashTableStats->numAdds.load());
958     dataLogF("%d removes\n", propertyMapHashTableStats->numRemoves.load());
959     dataLogF("%d rehashes\n", propertyMapHashTableStats->numRehashes.load());
960     dataLogF("%d reinserts\n", propertyMapHashTableStats->numReinserts.load());
961 }
962
963 #endif
964
965 PropertyTable* Structure::copyPropertyTableForPinning(VM& vm)
966 {
967     if (PropertyTable* table = propertyTableOrNull())
968         return PropertyTable::clone(vm, *table);
969     bool setPropertyTable = false;
970     return materializePropertyTable(vm, setPropertyTable);
971 }
972
973 PropertyOffset Structure::getConcurrently(UniquedStringImpl* uid, unsigned& attributes)
974 {
975     PropertyOffset result = invalidOffset;
976     
977     forEachPropertyConcurrently(
978         [&] (const PropertyMapEntry& candidate) -> bool {
979             if (candidate.key != uid)
980                 return true;
981             
982             result = candidate.offset;
983             attributes = candidate.attributes;
984             return false;
985         });
986     
987     return result;
988 }
989
990 Vector<PropertyMapEntry> Structure::getPropertiesConcurrently()
991 {
992     Vector<PropertyMapEntry> result;
993
994     forEachPropertyConcurrently(
995         [&] (const PropertyMapEntry& entry) -> bool {
996             result.append(entry);
997             return true;
998         });
999     
1000     return result;
1001 }
1002
1003 PropertyOffset Structure::add(VM& vm, PropertyName propertyName, unsigned attributes)
1004 {
1005     return add<ShouldPin::No>(
1006         vm, propertyName, attributes,
1007         [this] (const GCSafeConcurrentJSLocker&, PropertyOffset, PropertyOffset newLastOffset) {
1008             setLastOffset(newLastOffset);
1009         });
1010 }
1011
1012 PropertyOffset Structure::remove(PropertyName propertyName)
1013 {
1014     return remove(propertyName, [] (const ConcurrentJSLocker&, PropertyOffset) { });
1015 }
1016
1017 void Structure::getPropertyNamesFromStructure(VM& vm, PropertyNameArray& propertyNames, EnumerationMode mode)
1018 {
1019     PropertyTable* table = ensurePropertyTableIfNotEmpty(vm);
1020     if (!table)
1021         return;
1022     
1023     bool knownUnique = propertyNames.canAddKnownUniqueForStructure();
1024     
1025     PropertyTable::iterator end = table->end();
1026     for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) {
1027         ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !(iter->attributes & PropertyAttribute::DontEnum));
1028         ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !iter->key->isSymbol());
1029         if (!(iter->attributes & PropertyAttribute::DontEnum) || mode.includeDontEnumProperties()) {
1030             if (iter->key->isSymbol() && !propertyNames.includeSymbolProperties())
1031                 continue;
1032             if (knownUnique)
1033                 propertyNames.addUnchecked(iter->key);
1034             else
1035                 propertyNames.add(iter->key);
1036         }
1037     }
1038 }
1039
1040 void StructureFireDetail::dump(PrintStream& out) const
1041 {
1042     out.print("Structure transition from ", *m_structure);
1043 }
1044
1045 DeferredStructureTransitionWatchpointFire::DeferredStructureTransitionWatchpointFire(VM& vm, Structure* structure)
1046     : DeferredWatchpointFire(vm)
1047     , m_structure(structure)
1048 {
1049 }
1050
1051 DeferredStructureTransitionWatchpointFire::~DeferredStructureTransitionWatchpointFire()
1052 {
1053     fireAll();
1054 }
1055
1056 void DeferredStructureTransitionWatchpointFire::dump(PrintStream& out) const
1057 {
1058     out.print("Structure transition from ", *m_structure);
1059 }
1060
1061 void Structure::didTransitionFromThisStructure(DeferredStructureTransitionWatchpointFire* deferred) const
1062 {
1063     // If the structure is being watched, and this is the kind of structure that the DFG would
1064     // like to watch, then make sure to note for all future versions of this structure that it's
1065     // unwise to watch it.
1066     if (m_transitionWatchpointSet.isBeingWatched())
1067         const_cast<Structure*>(this)->setTransitionWatchpointIsLikelyToBeFired(true);
1068
1069     if (deferred) {
1070         ASSERT(deferred->structure() == this);
1071         m_transitionWatchpointSet.fireAll(*vm(), deferred);
1072     } else
1073         m_transitionWatchpointSet.fireAll(*vm(), StructureFireDetail(this));
1074 }
1075
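// Note that an unpinned property table is deliberately dropped during marking (unless a
// property-add transition is in flight or a heap snapshot is being built): it is only a cache
// and can always be rematerialized from the transition chain.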
1076 void Structure::visitChildren(JSCell* cell, SlotVisitor& visitor)
1077 {
1078     Structure* thisObject = jsCast<Structure*>(cell);
1079     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
1080
1081     Base::visitChildren(thisObject, visitor);
1082     
1083     ConcurrentJSLocker locker(thisObject->m_lock);
1084     
1085     visitor.append(thisObject->m_globalObject);
1086     if (!thisObject->isObject())
1087         thisObject->m_cachedPrototypeChain.clear();
1088     else {
1089         visitor.append(thisObject->m_prototype);
1090         visitor.append(thisObject->m_cachedPrototypeChain);
1091     }
1092     visitor.append(thisObject->m_previousOrRareData);
1093
1094     if (thisObject->isPinnedPropertyTable() || thisObject->isAddingPropertyForTransition()) {
1095         // NOTE: This can interleave with pin(), in which case it may see a null property table.
1096         // That's fine, because then the barrier will fire and we will scan this again.
1097         visitor.append(thisObject->m_propertyTableUnsafe);
1098     } else if (visitor.isBuildingHeapSnapshot())
1099         visitor.append(thisObject->m_propertyTableUnsafe);
1100     else if (thisObject->m_propertyTableUnsafe)
1101         thisObject->m_propertyTableUnsafe.clear();
1102
1103     visitor.append(thisObject->m_inferredTypeTable);
1104 }
1105
1106 bool Structure::isCheapDuringGC()
1107 {
1108     // FIXME: We could make this even safer by returning false if this structure's property table
1109     // has any large property names.
1110     // https://bugs.webkit.org/show_bug.cgi?id=157334
1111     
1112     return (!m_globalObject || Heap::isMarked(m_globalObject.get()))
1113         && (hasPolyProto() || !storedPrototypeObject() || Heap::isMarked(storedPrototypeObject()));
1114 }
1115
1116 bool Structure::markIfCheap(SlotVisitor& visitor)
1117 {
1118     if (!isCheapDuringGC())
1119         return Heap::isMarked(this);
1120     
1121     visitor.appendUnbarriered(this);
1122     return true;
1123 }
1124
1125 Ref<StructureShape> Structure::toStructureShape(JSValue value, bool& sawPolyProtoStructure)
1126 {
1127     Ref<StructureShape> baseShape = StructureShape::create();
1128     RefPtr<StructureShape> curShape = baseShape.ptr();
1129     Structure* curStructure = this;
1130     JSValue curValue = value;
1131     sawPolyProtoStructure = false;
1132     while (curStructure) {
1133         sawPolyProtoStructure |= curStructure->hasPolyProto();
1134         curStructure->forEachPropertyConcurrently(
1135             [&] (const PropertyMapEntry& entry) -> bool {
1136                 if (!PropertyName(entry.key).isPrivateName())
1137                     curShape->addProperty(*entry.key);
1138                 return true;
1139             });
1140
1141         if (JSObject* curObject = curValue.getObject())
1142             curShape->setConstructorName(JSObject::calculatedClassName(curObject));
1143         else
1144             curShape->setConstructorName(curStructure->classInfo()->className);
1145
1146         if (curStructure->isDictionary())
1147             curShape->enterDictionaryMode();
1148
1149         curShape->markAsFinal();
1150
1151         if (!curValue.isObject())
1152             break;
1153
1154         JSObject* object = asObject(curValue);
1155         JSObject* prototypeObject = object->structure()->storedPrototypeObject(object);
1156         if (!prototypeObject)
1157             break;
1158
1159         auto newShape = StructureShape::create();
1160         curShape->setProto(newShape.copyRef());
1161         curShape = WTFMove(newShape);
1162         curValue = prototypeObject;
1163         curStructure = prototypeObject->structure();
1164     }
1165     
1166     return baseShape;
1167 }
1168
1169 void Structure::dump(PrintStream& out) const
1170 {
1171     out.print(RawPointer(this), ":[", classInfo()->className, ", {");
1172     
1173     CommaPrinter comma;
1174     
1175     const_cast<Structure*>(this)->forEachPropertyConcurrently(
1176         [&] (const PropertyMapEntry& entry) -> bool {
1177             out.print(comma, entry.key, ":", static_cast<int>(entry.offset));
1178             return true;
1179         });
1180     
1181     out.print("}, ", IndexingTypeDump(indexingMode()));
1182     
1183     if (hasPolyProto())
1184         out.print(", PolyProto offset:", knownPolyProtoOffset);
1185     else if (m_prototype.get().isCell())
1186         out.print(", Proto:", RawPointer(m_prototype.get().asCell()));
1187
1188     switch (dictionaryKind()) {
1189     case NoneDictionaryKind:
1190         if (hasBeenDictionary())
1191             out.print(", Has been dictionary");
1192         break;
1193     case CachedDictionaryKind:
1194         out.print(", Dictionary");
1195         break;
1196     case UncachedDictionaryKind:
1197         out.print(", UncacheableDictionary");
1198         break;
1199     }
1200
1201     if (transitionWatchpointSetIsStillValid())
1202         out.print(", Leaf");
1203     else if (transitionWatchpointIsLikelyToBeFired())
1204         out.print(", Shady leaf");
1205     
1206     out.print("]");
1207 }
1208
1209 void Structure::dumpInContext(PrintStream& out, DumpContext* context) const
1210 {
1211     if (context)
1212         context->structures.dumpBrief(this, out);
1213     else
1214         dump(out);
1215 }
1216
1217 void Structure::dumpBrief(PrintStream& out, const CString& string) const
1218 {
1219     out.print("%", string, ":", classInfo()->className);
1220 }
1221
1222 void Structure::dumpContextHeader(PrintStream& out)
1223 {
1224     out.print("Structures:");
1225 }
1226
1227 bool ClassInfo::hasStaticSetterOrReadonlyProperties() const
1228 {
1229     for (const ClassInfo* ci = this; ci; ci = ci->parentClass) {
1230         if (const HashTable* table = ci->staticPropHashTable) {
1231             if (table->hasSetterOrReadonlyProperties)
1232                 return true;
1233         }
1234     }
1235     return false;
1236 }
1237
1238 void Structure::setCachedPropertyNameEnumerator(VM& vm, JSPropertyNameEnumerator* enumerator)
1239 {
1240     ASSERT(!isDictionary());
1241     if (!hasRareData())
1242         allocateRareData(vm);
1243     rareData()->setCachedPropertyNameEnumerator(vm, enumerator);
1244 }
1245
1246 JSPropertyNameEnumerator* Structure::cachedPropertyNameEnumerator() const
1247 {
1248     if (!hasRareData())
1249         return nullptr;
1250     return rareData()->cachedPropertyNameEnumerator();
1251 }
1252
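// A cached JSPropertyNameEnumerator (stored in StructureRareData) is only reusable while this
// structure and every structure on its cached prototype chain can cache their own keys.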
1253 bool Structure::canCachePropertyNameEnumerator() const
1254 {
1255     if (!this->canCacheOwnKeys())
1256         return false;
1257
1258     StructureChain* structureChain = m_cachedPrototypeChain.get();
1259     ASSERT(structureChain);
1260     WriteBarrier<Structure>* structure = structureChain->head();
1261     while (true) {
1262         if (!structure->get())
1263             return true;
1264         if (!structure->get()->canCacheOwnKeys())
1265             return false;
1266         structure++;
1267     }
1268
1269     ASSERT_NOT_REACHED();
1270     return true;
1271 }
1272     
1273 bool Structure::canAccessPropertiesQuicklyForEnumeration() const
1274 {
1275     if (!isQuickPropertyAccessAllowedForEnumeration())
1276         return false;
1277     if (hasGetterSetterProperties())
1278         return false;
1279     if (isUncacheableDictionary())
1280         return false;
1281     return true;
1282 }
1283
1284 } // namespace JSC