We should support the ability to do a non-effectful getById
Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
1 /*
2  * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "PolymorphicAccess.h"
28
29 #if ENABLE(JIT)
30
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "GetterSetter.h"
35 #include "Heap.h"
36 #include "JITOperations.h"
37 #include "JSCInlines.h"
38 #include "LinkBuffer.h"
39 #include "ScratchRegisterAllocator.h"
40 #include "StructureStubClearingWatchpoint.h"
41 #include "StructureStubInfo.h"
42 #include <wtf/CommaPrinter.h>
43 #include <wtf/ListDump.h>
44
45 namespace JSC {
46
47 static const bool verbose = false;
48
49 void AccessGenerationResult::dump(PrintStream& out) const
50 {
51     out.print(m_kind);
52     if (m_code)
53         out.print(":", m_code);
54 }
55
56 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
57 {
58     return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
59         watchpoints, jit->codeBlock(), stubInfo, condition);
60 }
61
62 void AccessGenerationState::restoreScratch()
63 {
64     allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
65 }
66
67 void AccessGenerationState::succeed()
68 {
69     restoreScratch();
70     success.append(jit->jump());
71 }
72
73 void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
74 {
75     if (!m_calculatedRegistersForCallAndExceptionHandling) {
76         m_calculatedRegistersForCallAndExceptionHandling = true;
77
78         m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
79         m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
80         if (m_needsToRestoreRegistersIfException)
81             RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
82
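        // The registers to preserve around the call are everything the stub's allocator has
        // in use plus anything live at the exception handling call site, minus registers
        // that never need to be saved across a JS call.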
83         m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
84         m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
85     }
86 }
87
88 void AccessGenerationState::preserveLiveRegistersToStackForCall()
89 {
90     unsigned extraStackPadding = 0;
91     unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
92     if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
93         RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
94     m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
95 }
96
97 void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
98 {
99     RegisterSet dontRestore;
100     if (isGetter) {
101         // This is the result value. We don't want to overwrite the result with what we stored to the stack.
102         // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
103         dontRestore.set(valueRegs);
104     }
105     restoreLiveRegistersFromStackForCall(dontRestore);
106 }
107
108 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
109 {
110     // Even if we're a getter, we don't want to ignore the result value like we normally do
111     // because the getter threw, and therefore, didn't return a value that means anything.
112     // Instead, we want to restore that register to what it was upon entering the getter
113     // inline cache. The subtlety here is if the base and the result are the same register,
114     // and the getter threw, we want OSR exit to see the original base value, not the result
115     // of the getter call.
116     RegisterSet dontRestore = liveRegistersForCall();
117     // As an optimization here, we only need to restore what is live for exception handling.
118     // We can construct the dontRestore set to accomplish this goal by having it contain only
119     // what is live for call but not live for exception handling. By ignoring things that are
120     // only live at the call but not the exception handler, we will only restore things live
121     // at the exception handler.
122     dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
123     restoreLiveRegistersFromStackForCall(dontRestore);
124 }
125
126 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
127 {
128     unsigned extraStackPadding = 0;
129     ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
130 }
131
132 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
133 {
134     RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
135
136     if (!m_calculatedCallSiteIndex) {
137         m_calculatedCallSiteIndex = true;
138
139         if (m_needsToRestoreRegistersIfException)
140             m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
141         else
142             m_callSiteIndex = originalCallSiteIndex();
143     }
144
145     return m_callSiteIndex;
146 }
147
148 const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
149 {
150     RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
151     HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
152     RELEASE_ASSERT(exceptionHandler);
153     return *exceptionHandler;
154 }
155
156 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
157
158 AccessCase::AccessCase()
159 {
160 }
161
162 std::unique_ptr<AccessCase> AccessCase::tryGet(
163     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
164     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
165 {
166     std::unique_ptr<AccessCase> result(new AccessCase());
167
168     result->m_type = type;
169     result->m_offset = offset;
170     result->m_structure.set(vm, owner, structure);
171     result->m_conditionSet = conditionSet;
172
173     if (viaProxy || additionalSet) {
174         result->m_rareData = std::make_unique<RareData>();
175         result->m_rareData->viaProxy = viaProxy;
176         result->m_rareData->additionalSet = additionalSet;
177     }
178
179     return result;
180 }
181
182 std::unique_ptr<AccessCase> AccessCase::get(
183     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
184     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
185     PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
186 {
187     std::unique_ptr<AccessCase> result(new AccessCase());
188
189     result->m_type = type;
190     result->m_offset = offset;
191     result->m_structure.set(vm, owner, structure);
192     result->m_conditionSet = conditionSet;
193
194     if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
195         result->m_rareData = std::make_unique<RareData>();
196         result->m_rareData->viaProxy = viaProxy;
197         result->m_rareData->additionalSet = additionalSet;
198         result->m_rareData->customAccessor.getter = customGetter;
199         result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
200     }
201
202     return result;
203 }
204
205 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
206 {
207     UNUSED_PARAM(vm);
208     UNUSED_PARAM(owner);
209     
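    // The inline property-table probe emitted by generateWithGuard() needs four extra
    // scratch GPRs on top of the base, value, and scratch registers, so skip this
    // optimization on register-starved targets.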
210     if (GPRInfo::numberOfRegisters < 9)
211         return nullptr;
212     
213     std::unique_ptr<AccessCase> result(new AccessCase());
214     
215     result->m_type = MegamorphicLoad;
216     
217     return result;
218 }
219
220 std::unique_ptr<AccessCase> AccessCase::replace(
221     VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
222 {
223     std::unique_ptr<AccessCase> result(new AccessCase());
224
225     result->m_type = Replace;
226     result->m_offset = offset;
227     result->m_structure.set(vm, owner, structure);
228
229     return result;
230 }
231
232 std::unique_ptr<AccessCase> AccessCase::transition(
233     VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
234     const ObjectPropertyConditionSet& conditionSet)
235 {
236     RELEASE_ASSERT(oldStructure == newStructure->previousID());
237
238     // Skip optimizing the case where we need a realloc, if we don't have
239     // enough registers to make it happen.
240     if (GPRInfo::numberOfRegisters < 6
241         && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
242         && oldStructure->outOfLineCapacity()) {
243         return nullptr;
244     }
245
246     // Skip optimizing the case where we need realloc, and the structure has
247     // indexing storage.
248     // FIXME: We shouldn't skip this! Implement it!
249     // https://bugs.webkit.org/show_bug.cgi?id=130914
250     if (oldStructure->couldHaveIndexingHeader())
251         return nullptr;
252
253     std::unique_ptr<AccessCase> result(new AccessCase());
254
255     result->m_type = Transition;
256     result->m_offset = offset;
257     result->m_structure.set(vm, owner, newStructure);
258     result->m_conditionSet = conditionSet;
259
260     return result;
261 }
262
263 std::unique_ptr<AccessCase> AccessCase::setter(
264     VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
265     const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
266     JSObject* customSlotBase)
267 {
268     std::unique_ptr<AccessCase> result(new AccessCase());
269
270     result->m_type = type;
271     result->m_offset = offset;
272     result->m_structure.set(vm, owner, structure);
273     result->m_conditionSet = conditionSet;
274     result->m_rareData = std::make_unique<RareData>();
275     result->m_rareData->customAccessor.setter = customSetter;
276     result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
277
278     return result;
279 }
280
281 std::unique_ptr<AccessCase> AccessCase::in(
282     VM& vm, JSCell* owner, AccessType type, Structure* structure,
283     const ObjectPropertyConditionSet& conditionSet)
284 {
285     std::unique_ptr<AccessCase> result(new AccessCase());
286
287     result->m_type = type;
288     result->m_structure.set(vm, owner, structure);
289     result->m_conditionSet = conditionSet;
290
291     return result;
292 }
293
294 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
295 {
296     std::unique_ptr<AccessCase> result(new AccessCase());
297
298     result->m_type = type;
299
300     return result;
301 }
302
303 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
304     VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
305     Structure* structure, const ObjectPropertyConditionSet& conditionSet)
306 {
307     std::unique_ptr<AccessCase> result(new AccessCase());
308
309     result->m_type = IntrinsicGetter;
310     result->m_structure.set(vm, owner, structure);
311     result->m_conditionSet = conditionSet;
312     result->m_offset = offset;
313
314     result->m_rareData = std::make_unique<RareData>();
315     result->m_rareData->intrinsicFunction.set(vm, owner, getter);
316
317     return result;
318 }
319
320 AccessCase::~AccessCase()
321 {
322 }
323
324 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
325     VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
326 {
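    // Lift a simple self cache stored directly in the StructureStubInfo into an
    // equivalent AccessCase so it can be added to a polymorphic list.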
327     switch (stubInfo.cacheType) {
328     case CacheType::GetByIdSelf:
329         return get(
330             vm, owner, Load, stubInfo.u.byIdSelf.offset,
331             stubInfo.u.byIdSelf.baseObjectStructure.get());
332
333     case CacheType::PutByIdReplace:
334         return replace(
335             vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
336
337     default:
338         return nullptr;
339     }
340 }
341
342 std::unique_ptr<AccessCase> AccessCase::clone() const
343 {
344     std::unique_ptr<AccessCase> result(new AccessCase());
345     result->m_type = m_type;
346     result->m_offset = m_offset;
347     result->m_structure = m_structure;
348     result->m_conditionSet = m_conditionSet;
349     if (RareData* rareData = m_rareData.get()) {
350         result->m_rareData = std::make_unique<RareData>();
351         result->m_rareData->viaProxy = rareData->viaProxy;
352         result->m_rareData->additionalSet = rareData->additionalSet;
353         // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
354         result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
355         result->m_rareData->customSlotBase = rareData->customSlotBase;
356         result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
357     }
358     return result;
359 }
360
361 bool AccessCase::guardedByStructureCheck() const
362 {
363     if (viaProxy())
364         return false;
365
366     switch (m_type) {
367     case MegamorphicLoad:
368     case ArrayLength:
369     case StringLength:
370         return false;
371     default:
372         return true;
373     }
374 }
375
376 JSObject* AccessCase::alternateBase() const
377 {
378     if (customSlotBase())
379         return customSlotBase();
380     return conditionSet().slotBaseCondition().object();
381 }
382
383 bool AccessCase::couldStillSucceed() const
384 {
385     return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
386 }
387
388 bool AccessCase::canBeReplacedByMegamorphicLoad() const
389 {
390     return type() == Load
391         && !viaProxy()
392         && conditionSet().isEmpty()
393         && !additionalSet()
394         && !customSlotBase();
395 }
396
397 bool AccessCase::canReplace(const AccessCase& other) const
398 {
399     // We could do a lot better here, but for now we just do something obvious.
400     
401     if (type() == MegamorphicLoad && other.canBeReplacedByMegamorphicLoad())
402         return true;
403
404     if (!guardedByStructureCheck() || !other.guardedByStructureCheck()) {
405         // FIXME: Implement this!
406         return false;
407     }
408
409     return structure() == other.structure();
410 }
411
412 void AccessCase::dump(PrintStream& out) const
413 {
414     out.print(m_type, ":(");
415
416     CommaPrinter comma;
417
418     if (m_type == Transition)
419         out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
420     else if (m_structure)
421         out.print(comma, "structure = ", pointerDump(m_structure.get()));
422
423     if (isValidOffset(m_offset))
424         out.print(comma, "offset = ", m_offset);
425     if (!m_conditionSet.isEmpty())
426         out.print(comma, "conditions = ", m_conditionSet);
427
428     if (RareData* rareData = m_rareData.get()) {
429         if (rareData->viaProxy)
430             out.print(comma, "viaProxy = ", rareData->viaProxy);
431         if (rareData->additionalSet)
432             out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
433         if (rareData->callLinkInfo)
434             out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
435         if (rareData->customAccessor.opaque)
436             out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
437         if (rareData->customSlotBase)
438             out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
439     }
440
441     out.print(")");
442 }
443
444 bool AccessCase::visitWeak(VM& vm) const
445 {
446     if (m_structure && !Heap::isMarked(m_structure.get()))
447         return false;
448     if (!m_conditionSet.areStillLive())
449         return false;
450     if (m_rareData) {
451         if (m_rareData->callLinkInfo)
452             m_rareData->callLinkInfo->visitWeak(vm);
453         if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
454             return false;
455         if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
456             return false;
457     }
458     return true;
459 }
460
461 void AccessCase::generateWithGuard(
462     AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
463 {
464     CCallHelpers& jit = *state.jit;
465     VM& vm = *jit.vm();
466     const Identifier& ident = *state.ident;
467     StructureStubInfo& stubInfo = *state.stubInfo;
468     JSValueRegs valueRegs = state.valueRegs;
469     GPRReg baseGPR = state.baseGPR;
470     GPRReg scratchGPR = state.scratchGPR;
471     
472     UNUSED_PARAM(vm);
473
474     switch (m_type) {
475     case ArrayLength: {
476         ASSERT(!viaProxy());
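        // Only arrays with a non-empty indexing shape take this fast path; anything with
        // the IsArray bit clear or no indexing shape falls through.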
477         jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
478         fallThrough.append(
479             jit.branchTest32(
480                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
481         fallThrough.append(
482             jit.branchTest32(
483                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
484         break;
485     }
486
487     case StringLength: {
488         ASSERT(!viaProxy());
489         fallThrough.append(
490             jit.branch8(
491                 CCallHelpers::NotEqual,
492                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
493                 CCallHelpers::TrustedImm32(StringType)));
494         break;
495     }
496         
497     case MegamorphicLoad: {
498         UniquedStringImpl* key = ident.impl();
499         unsigned hash = IdentifierRepHash::hash(key);
500         
501         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
502         allocator.lock(baseGPR);
503 #if USE(JSVALUE32_64)
504         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
505 #endif
506         allocator.lock(valueRegs);
507         allocator.lock(scratchGPR);
508         
509         GPRReg intermediateGPR = scratchGPR;
510         GPRReg maskGPR = allocator.allocateScratchGPR();
511         GPRReg maskedHashGPR = allocator.allocateScratchGPR();
512         GPRReg indexGPR = allocator.allocateScratchGPR();
513         GPRReg offsetGPR = allocator.allocateScratchGPR();
514         
515         if (verbose) {
516             dataLog("baseGPR = ", baseGPR, "\n");
517             dataLog("valueRegs = ", valueRegs, "\n");
518             dataLog("scratchGPR = ", scratchGPR, "\n");
519             dataLog("intermediateGPR = ", intermediateGPR, "\n");
520             dataLog("maskGPR = ", maskGPR, "\n");
521             dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
522             dataLog("indexGPR = ", indexGPR, "\n");
523             dataLog("offsetGPR = ", offsetGPR, "\n");
524         }
525
526         ScratchRegisterAllocator::PreservedState preservedState =
527             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
528
529         CCallHelpers::JumpList myFailAndIgnore;
530         CCallHelpers::JumpList myFallThrough;
531         
532         jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
533         jit.loadPtr(
534             CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
535             intermediateGPR);
536         
537         myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
538         
539         jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
540         jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
541         jit.load32(
542             CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
543             intermediateGPR);
544
545         jit.move(maskGPR, maskedHashGPR);
546         jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
547         jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
548         jit.addPtr(indexGPR, intermediateGPR);
549         
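        // At this point maskedHashGPR holds the identifier's hash masked with the table's
        // index mask, indexGPR points at the PropertyTable's 32-bit index vector, and
        // intermediateGPR points just past it, at the start of the PropertyMapEntry array.
        // The loop below linearly probes the index: each slot holds a 1-based entry number,
        // with EmptyEntryIndex marking an empty slot (a miss for this stub).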
550         CCallHelpers::Label loop = jit.label();
551         
552         jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
553         
554         myFallThrough.append(
555             jit.branch32(
556                 CCallHelpers::Equal,
557                 offsetGPR,
558                 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
559         
560         jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
561         jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
562         jit.addPtr(intermediateGPR, offsetGPR);
563         
564         CCallHelpers::Jump collision = jit.branchPtr(
565             CCallHelpers::NotEqual,
566             CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
567             CCallHelpers::TrustedImmPtr(key));
568         
569         // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
570         // Check them and then attempt the load.
571         
572         myFallThrough.append(
573             jit.branchTest32(
574                 CCallHelpers::NonZero,
575                 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
576                 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
577         
578         jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
579         
580         jit.loadProperty(baseGPR, offsetGPR, valueRegs);
581         
582         allocator.restoreReusedRegistersByPopping(jit, preservedState);
583         state.succeed();
584         
585         collision.link(&jit);
586
587         jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
588         
589         // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
590         // around isn't super common so we could, for example, recompute the mask from the difference between
591         // the table and index. But before we do that we should probably make it easier to multiply and
592         // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
593         // to have a power-of-2 size.
594         jit.and32(maskGPR, maskedHashGPR);
595         jit.jump().linkTo(loop, &jit);
596         
597         if (allocator.didReuseRegisters()) {
598             myFailAndIgnore.link(&jit);
599             allocator.restoreReusedRegistersByPopping(jit, preservedState);
600             state.failAndIgnore.append(jit.jump());
601             
602             myFallThrough.link(&jit);
603             allocator.restoreReusedRegistersByPopping(jit, preservedState);
604             fallThrough.append(jit.jump());
605         } else {
606             state.failAndIgnore.append(myFailAndIgnore);
607             fallThrough.append(myFallThrough);
608         }
609         return;
610     }
611
612     default: {
613         if (viaProxy()) {
614             fallThrough.append(
615                 jit.branch8(
616                     CCallHelpers::NotEqual,
617                     CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
618                     CCallHelpers::TrustedImm32(PureForwardingProxyType)));
619
620             jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
621
622             fallThrough.append(
623                 jit.branchStructure(
624                     CCallHelpers::NotEqual,
625                     CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
626                     structure()));
627         } else {
628             fallThrough.append(
629                 jit.branchStructure(
630                     CCallHelpers::NotEqual,
631                     CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
632                     structure()));
633         }
634         break;
635     } };
636
637     generate(state);
638 }
639
640 // EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiling for ARM EABI, it must be aligned on an even-numbered register (r0, r2 or [sp]).
641 // To prevent the assembler from using the wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
642 #if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
643 #define EABI_32BIT_DUMMY_ARG      CCallHelpers::TrustedImm32(0),
644 #else
645 #define EABI_32BIT_DUMMY_ARG
646 #endif
647
648 void AccessCase::generate(AccessGenerationState& state)
649 {
650     if (verbose)
651         dataLog("Generating code for: ", *this, "\n");
652     
653     CCallHelpers& jit = *state.jit;
654     VM& vm = *jit.vm();
655     CodeBlock* codeBlock = jit.codeBlock();
656     StructureStubInfo& stubInfo = *state.stubInfo;
657     const Identifier& ident = *state.ident;
658     JSValueRegs valueRegs = state.valueRegs;
659     GPRReg baseGPR = state.baseGPR;
660     GPRReg scratchGPR = state.scratchGPR;
661
662     ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
663
664     if ((structure() && structure()->needImpurePropertyWatchpoint())
665         || m_conditionSet.needImpurePropertyWatchpoint())
666         vm.registerWatchpointForImpureProperty(ident, state.addWatchpoint());
667
668     if (additionalSet())
669         additionalSet()->add(state.addWatchpoint());
670
671     for (const ObjectPropertyCondition& condition : m_conditionSet) {
672         Structure* structure = condition.object()->structure();
673
674         if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
675             structure->addTransitionWatchpoint(state.addWatchpoint(condition));
676             continue;
677         }
678
679         if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
680             dataLog("This condition is no longer met: ", condition, "\n");
681             RELEASE_ASSERT_NOT_REACHED();
682         }
683
684         // We will emit code that has a weak reference that isn't otherwise listed anywhere.
685         state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
686         
687         jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
688         state.failAndRepatch.append(
689             jit.branchStructure(
690                 CCallHelpers::NotEqual,
691                 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
692                 structure));
693     }
694
695     switch (m_type) {
696     case InHit:
697     case InMiss:
698         jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
699         state.succeed();
700         return;
701
702     case Miss:
703         jit.moveTrustedValue(jsUndefined(), valueRegs);
704         state.succeed();
705         return;
706
707     case Load:
708     case GetGetter:
709     case Getter:
710     case Setter:
711     case CustomValueGetter:
712     case CustomAccessorGetter:
713     case CustomValueSetter:
714     case CustomAccessorSetter: {
715         if (isValidOffset(m_offset)) {
716             Structure* currStructure;
717             if (m_conditionSet.isEmpty())
718                 currStructure = structure();
719             else
720                 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
721             currStructure->startWatchingPropertyForReplacements(vm, offset());
722         }
723
724         GPRReg baseForGetGPR;
725         if (viaProxy()) {
726             baseForGetGPR = valueRegs.payloadGPR();
727             jit.loadPtr(
728                 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
729                 baseForGetGPR);
730         } else
731             baseForGetGPR = baseGPR;
732
733         GPRReg baseForAccessGPR;
734         if (!m_conditionSet.isEmpty()) {
735             jit.move(
736                 CCallHelpers::TrustedImmPtr(alternateBase()),
737                 scratchGPR);
738             baseForAccessGPR = scratchGPR;
739         } else
740             baseForAccessGPR = baseForGetGPR;
741
742         GPRReg loadedValueGPR = InvalidGPRReg;
743         if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
744             if (m_type == Load || m_type == GetGetter)
745                 loadedValueGPR = valueRegs.payloadGPR();
746             else
747                 loadedValueGPR = scratchGPR;
748
749             GPRReg storageGPR;
750             if (isInlineOffset(m_offset))
751                 storageGPR = baseForAccessGPR;
752             else {
753                 jit.loadPtr(
754                     CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
755                     loadedValueGPR);
756                 storageGPR = loadedValueGPR;
757             }
758
759 #if USE(JSVALUE64)
760             jit.load64(
761                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
762 #else
763             if (m_type == Load || m_type == GetGetter) {
764                 jit.load32(
765                     CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
766                     valueRegs.tagGPR());
767             }
768             jit.load32(
769                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
770                 loadedValueGPR);
771 #endif
772         }
773
774         if (m_type == Load || m_type == GetGetter) {
775             state.succeed();
776             return;
777         }
778
779         // Stuff for custom getters/setters.
780         CCallHelpers::Call operationCall;
781         CCallHelpers::Call lookupExceptionHandlerCall;
782
783         // Stuff for JS getters/setters.
784         CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
785         CCallHelpers::Call fastPathCall;
786         CCallHelpers::Call slowPathCall;
787
788         CCallHelpers::Jump success;
789         CCallHelpers::Jump fail;
790
791         // This also calculates whether or not this is an exception handling call
792         // site.
793         state.calculateLiveRegistersForCallAndExceptionHandling();
794         state.preserveLiveRegistersToStackForCall();
795
796         // Need to make sure that whenever this call is made in the future, we remember the
797         // place that we made it from.
798         jit.store32(
799             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
800             CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
801
802         if (m_type == Getter || m_type == Setter) {
803             // Create a JS call using a JS call inline cache. Assume that:
804             //
805             // - SP is aligned and represents the extent of the calling compiler's stack usage.
806             //
807             // - FP is set correctly (i.e. it points to the caller's call frame header).
808             //
809             // - SP - FP is an aligned difference.
810             //
811             // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
812             //   code.
813             //
814             // Therefore, we temporarily grow the stack for the purpose of the call and then
815             // shrink it after.
816
817             RELEASE_ASSERT(!m_rareData->callLinkInfo);
818             m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
819             
820             // FIXME: If we generated a polymorphic call stub that jumped back to the getter
821             // stub, which then jumped back to the main code, then we'd have a reachability
822             // situation that the GC doesn't know about. The GC would ensure that the polymorphic
823             // call stub stayed alive, and it would ensure that the main code stayed alive, but
824             // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
825             // be GC objects, and then we'd be able to say that the polymorphic call stub has a
826             // reference to the getter stub.
827             // https://bugs.webkit.org/show_bug.cgi?id=148914
828             m_rareData->callLinkInfo->disallowStubs();
829             
830             m_rareData->callLinkInfo->setUpCall(
831                 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
832
833             CCallHelpers::JumpList done;
834
835             // There is a "this" argument.
836             unsigned numberOfParameters = 1;
837             // ... and a value argument if we're calling a setter.
838             if (m_type == Setter)
839                 numberOfParameters++;
840
841             // Get the accessor; if there ain't one then the result is jsUndefined().
842             if (m_type == Setter) {
843                 jit.loadPtr(
844                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
845                     loadedValueGPR);
846             } else {
847                 jit.loadPtr(
848                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
849                     loadedValueGPR);
850             }
851
852             CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
853                 CCallHelpers::Zero, loadedValueGPR);
854
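            // Reserve space for the callee's frame (call frame header plus arguments), rounded
            // up to the platform's stack alignment, then populate the argument count, callee,
            // and arguments relative to the new stack pointer.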
855             unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
856
857             unsigned numberOfBytesForCall =
858                 numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);
859
860             unsigned alignedNumberOfBytesForCall =
861                 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
862
863             jit.subPtr(
864                 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
865                 CCallHelpers::stackPointerRegister);
866
867             CCallHelpers::Address calleeFrame = CCallHelpers::Address(
868                 CCallHelpers::stackPointerRegister,
869                 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
870
871             jit.store32(
872                 CCallHelpers::TrustedImm32(numberOfParameters),
873                 calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
874
875             jit.storeCell(
876                 loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
877
878             jit.storeCell(
879                 baseForGetGPR,
880                 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
881
882             if (m_type == Setter) {
883                 jit.storeValue(
884                     valueRegs,
885                     calleeFrame.withOffset(
886                         virtualRegisterForArgument(1).offset() * sizeof(Register)));
887             }
888
889             CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
890                 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
891                 CCallHelpers::TrustedImmPtr(0));
892
893             fastPathCall = jit.nearCall();
894             if (m_type == Getter)
895                 jit.setupResults(valueRegs);
896             done.append(jit.jump());
897
898             slowCase.link(&jit);
899             jit.move(loadedValueGPR, GPRInfo::regT0);
900 #if USE(JSVALUE32_64)
901             // We *always* know that the getter/setter, if non-null, is a cell.
902             jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
903 #endif
904             jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
905             slowPathCall = jit.nearCall();
906             if (m_type == Getter)
907                 jit.setupResults(valueRegs);
908             done.append(jit.jump());
909
910             returnUndefined.link(&jit);
911             if (m_type == Getter)
912                 jit.moveTrustedValue(jsUndefined(), valueRegs);
913
914             done.link(&jit);
915
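            // Reset SP relative to FP to this code block's canonical stack pointer, adjusted
            // for the bytes we still have pushed for reused registers and register
            // preservation (that space is reclaimed by the restores below).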
916             jit.addPtr(CCallHelpers::TrustedImm32((jit.codeBlock()->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
917                 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
918             state.restoreLiveRegistersFromStackForCall(isGetter());
919
920             state.callbacks.append(
921                 [=, &vm] (LinkBuffer& linkBuffer) {
922                     m_rareData->callLinkInfo->setCallLocations(
923                         linkBuffer.locationOfNearCall(slowPathCall),
924                         linkBuffer.locationOf(addressOfLinkFunctionCheck),
925                         linkBuffer.locationOfNearCall(fastPathCall));
926
927                     linkBuffer.link(
928                         slowPathCall,
929                         CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
930                 });
931         } else {
932             // Need to make room for the C call so any of our stack spillage isn't overwritten.
933             // We also need to make room because we may be an inline cache in the FTL and not
934             // have a JIT call frame.
935             bool needsToMakeRoomOnStackForCCall = state.numberOfStackBytesUsedForRegisterPreservation() || codeBlock->jitType() == JITCode::FTLJIT;
936             if (needsToMakeRoomOnStackForCCall)
937                 jit.makeSpaceOnStackForCCall();
938
939             // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
940             // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
941             // Custom values are passed the slotBase (the property holder), custom accessors are passed the thisValue (receiver).
942             GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
943 #if USE(JSVALUE64)
944             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
945                 jit.setupArgumentsWithExecState(
946                     baseForCustomValue,
947                     CCallHelpers::TrustedImmPtr(ident.impl()));
948             } else
949                 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
950 #else
951             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
952                 jit.setupArgumentsWithExecState(
953                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
954                     CCallHelpers::TrustedImm32(JSValue::CellTag),
955                     CCallHelpers::TrustedImmPtr(ident.impl()));
956             } else {
957                 jit.setupArgumentsWithExecState(
958                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
959                     CCallHelpers::TrustedImm32(JSValue::CellTag),
960                     valueRegs.payloadGPR(), valueRegs.tagGPR());
961             }
962 #endif
963             jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
964
965             operationCall = jit.call();
966             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
967                 jit.setupResults(valueRegs);
968             if (needsToMakeRoomOnStackForCCall)
969                 jit.reclaimSpaceOnStackForCCall();
970
971             CCallHelpers::Jump noException =
972                 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
973
974             bool didSetLookupExceptionHandler = false;
975             state.restoreLiveRegistersFromStackForCallWithThrownException();
976             state.restoreScratch();
977             jit.copyCalleeSavesToVMCalleeSavesBuffer();
978             if (state.needsToRestoreRegistersIfException()) {
979                 // The JIT that produced the original exception handling call site
980                 // expects the OSR exit to be reached via genericUnwind. Therefore we
981                 // must model what genericUnwind does here, i.e. set callFrameForCatch
982                 // and copy the callee saves.
983
984                 jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
985                 CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
986
987                 // We don't need to insert a new exception handler in the table
988                 // because we're doing a manual exception check here, i.e. we'll
989                 // never arrive here from genericUnwind().
990                 HandlerInfo originalHandler = state.originalExceptionHandler();
991                 state.callbacks.append(
992                     [=] (LinkBuffer& linkBuffer) {
993                         linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
994                     });
995             } else {
996                 jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::callFrameRegister);
997                 lookupExceptionHandlerCall = jit.call();
998                 didSetLookupExceptionHandler = true;
999                 jit.jumpToExceptionHandler();
1000             }
1001         
1002             noException.link(&jit);
1003             state.restoreLiveRegistersFromStackForCall(isGetter());
1004
1005             state.callbacks.append(
1006                 [=] (LinkBuffer& linkBuffer) {
1007                     linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
1008                     if (didSetLookupExceptionHandler)
1009                         linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
1010                 });
1011         }
1012         state.succeed();
1013         return;
1014     }
1015
1016     case Replace: {
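        // If the structure has an inferred type for this property, the stub only remains
        // valid for stores of values matching that type; anything else bails out to repatch.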
1017         if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
1018             if (verbose)
1019                 dataLog("Have type: ", type->descriptor(), "\n");
1020             state.failAndRepatch.append(
1021                 jit.branchIfNotType(
1022                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1023         } else if (verbose)
1024             dataLog("Don't have type.\n");
1025         
1026         if (isInlineOffset(m_offset)) {
1027             jit.storeValue(
1028                 valueRegs,
1029                 CCallHelpers::Address(
1030                     baseGPR,
1031                     JSObject::offsetOfInlineStorage() +
1032                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1033         } else {
1034             jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1035             jit.storeValue(
1036                 valueRegs,
1037                 CCallHelpers::Address(
1038                     scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1039         }
1040         state.succeed();
1041         return;
1042     }
1043
1044     case Transition: {
1045         // AccessCase::transition() should have returned null.
1046         RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1047         RELEASE_ASSERT(!structure()->couldHaveIndexingHeader());
1048
1049         if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1050             if (verbose)
1051                 dataLog("Have type: ", type->descriptor(), "\n");
1052             state.failAndRepatch.append(
1053                 jit.branchIfNotType(
1054                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1055         } else if (verbose)
1056             dataLog("Don't have type.\n");
1057         
1058         CCallHelpers::JumpList slowPath;
1059
1060         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1061         allocator.lock(baseGPR);
1062 #if USE(JSVALUE32_64)
1063         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1064 #endif
1065         allocator.lock(valueRegs);
1066         allocator.lock(scratchGPR);
1067
1068         GPRReg scratchGPR2 = allocator.allocateScratchGPR();
1069         GPRReg scratchGPR3;
1070         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
1071             && structure()->outOfLineCapacity())
1072             scratchGPR3 = allocator.allocateScratchGPR();
1073         else
1074             scratchGPR3 = InvalidGPRReg;
1075
1076         ScratchRegisterAllocator::PreservedState preservedState =
1077             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1078
1079         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1080
1081         bool scratchGPRHasStorage = false;
1082         bool needsToMakeRoomOnStackForCCall = !preservedState.numberOfBytesPreserved && codeBlock->jitType() == JITCode::FTLJIT;
1083
1084         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
1085             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1086             CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
1087
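            // Try to bump-allocate the new out-of-line storage straight out of copied space:
            // subtract newSize from m_currentRemaining and, if that does not go negative,
            // derive the payload pointer from m_currentPayloadEnd; otherwise take the slow
            // path below, which reallocates through a C call.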
1088             if (!structure()->outOfLineCapacity()) {
1089                 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1090                 slowPath.append(
1091                     jit.branchSubPtr(
1092                         CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1093                 jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1094                 jit.negPtr(scratchGPR);
1095                 jit.addPtr(
1096                     CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1097                 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1098             } else {
1099                 size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1100                 ASSERT(newSize > oldSize);
1101             
1102                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1103                 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1104                 slowPath.append(
1105                     jit.branchSubPtr(
1106                         CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1107                 jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1108                 jit.negPtr(scratchGPR);
1109                 jit.addPtr(
1110                     CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1111                 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1112                 // We have scratchGPR = new storage, scratchGPR3 = old storage,
1113                 // scratchGPR2 = available
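                // Out-of-line properties are stored at negative offsets from the butterfly
                // pointer, so copy the oldSize bytes that precede it, one machine word at a time.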
1114                 for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1115                     jit.loadPtr(
1116                         CCallHelpers::Address(
1117                             scratchGPR3,
1118                             -static_cast<ptrdiff_t>(
1119                                 offset + sizeof(JSValue) + sizeof(void*))),
1120                         scratchGPR2);
1121                     jit.storePtr(
1122                         scratchGPR2,
1123                         CCallHelpers::Address(
1124                             scratchGPR,
1125                             -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1126                 }
1127             }
1128
1129             jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1130             scratchGPRHasStorage = true;
1131         }
1132
1133         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1134         jit.store32(
1135             CCallHelpers::TrustedImm32(structureBits),
1136             CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1137
1138         if (isInlineOffset(m_offset)) {
1139             jit.storeValue(
1140                 valueRegs,
1141                 CCallHelpers::Address(
1142                     baseGPR,
1143                     JSObject::offsetOfInlineStorage() +
1144                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1145         } else {
1146             if (!scratchGPRHasStorage)
1147                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1148             jit.storeValue(
1149                 valueRegs,
1150                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1151         }
1152
1153         ScratchBuffer* scratchBuffer = nullptr;
1154         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity())
1155             scratchBuffer = vm.scratchBufferForSize(allocator.desiredScratchBufferSizeForCall());
1156
1157         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
1158             CCallHelpers::Call callFlushWriteBarrierBuffer;
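            // The newly allocated butterfly requires a write barrier on the base object. It is
            // skipped if the object is already remembered or in Eden; otherwise baseGPR is
            // appended to the write barrier buffer inline, or handed to a C call that flushes
            // the full buffer.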
1159             CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
1160             WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1161             jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1162             CCallHelpers::Jump needToFlush =
1163                 jit.branch32(
1164                     CCallHelpers::AboveOrEqual, scratchGPR2,
1165                     CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity()));
1166
1167             jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1168             jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1169
1170             jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR);
1171             // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
1172             jit.storePtr(
1173                 baseGPR,
1174                 CCallHelpers::BaseIndex(
1175                     scratchGPR, scratchGPR2, CCallHelpers::ScalePtr,
1176                     static_cast<int32_t>(-sizeof(void*))));
1177
1178             CCallHelpers::Jump doneWithBarrier = jit.jump();
1179             needToFlush.link(&jit);
1180
1181             // FIXME: We should restoreReusedRegistersByPopping() before this. Then, we wouldn't need
1182             // padding in preserveReusedRegistersByPushing(). Or, maybe it would be even better if the
1183             // barrier slow path was just the normal slow path, below.
1184             // https://bugs.webkit.org/show_bug.cgi?id=149030
1185             allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR2);
1186             if (needsToMakeRoomOnStackForCCall)
1187                 jit.makeSpaceOnStackForCCall();
1188             jit.setupArgumentsWithExecState(baseGPR);
1189             callFlushWriteBarrierBuffer = jit.call();
1190             if (needsToMakeRoomOnStackForCCall)
1191                 jit.reclaimSpaceOnStackForCCall();
1192             allocator.restoreUsedRegistersFromScratchBufferForCall(
1193                 jit, scratchBuffer, scratchGPR2);
1194
1195             doneWithBarrier.link(&jit);
1196             ownerIsRememberedOrInEden.link(&jit);
1197
1198             state.callbacks.append(
1199                 [=] (LinkBuffer& linkBuffer) {
1200                     linkBuffer.link(callFlushWriteBarrierBuffer, operationFlushWriteBarrierBuffer);
1201                 });
1202         }
1203         
1204         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1205         state.succeed();
1206
1207         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
1208             slowPath.link(&jit);
1209             allocator.restoreReusedRegistersByPopping(jit, preservedState);
1210             allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR);
1211             if (needsToMakeRoomOnStackForCCall)
1212                 jit.makeSpaceOnStackForCCall();
1213 #if USE(JSVALUE64)
1214             jit.setupArgumentsWithExecState(
1215                 baseGPR,
1216                 CCallHelpers::TrustedImmPtr(newStructure()),
1217                 CCallHelpers::TrustedImm32(m_offset),
1218                 valueRegs.gpr());
1219 #else
1220             jit.setupArgumentsWithExecState(
1221                 baseGPR,
1222                 CCallHelpers::TrustedImmPtr(newStructure()),
1223                 CCallHelpers::TrustedImm32(m_offset),
1224                 valueRegs.payloadGPR(), valueRegs.tagGPR());
1225 #endif
1226             CCallHelpers::Call operationCall = jit.call();
1227             if (needsToMakeRoomOnStackForCCall)
1228                 jit.reclaimSpaceOnStackForCCall();
1229             allocator.restoreUsedRegistersFromScratchBufferForCall(jit, scratchBuffer, scratchGPR);
1230             state.succeed();
1231
1232             state.callbacks.append(
1233                 [=] (LinkBuffer& linkBuffer) {
1234                     linkBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
1235                 });
1236         }
1237         return;
1238     }
1239
1240     case ArrayLength: {
1241         jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1242         jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
1243         state.failAndIgnore.append(
1244             jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1245         jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1246         state.succeed();
1247         return;
1248     }
1249
1250     case StringLength: {
1251         jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1252         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1253         state.succeed();
1254         return;
1255     }
1256
1257     case IntrinsicGetter: {
1258         RELEASE_ASSERT(isValidOffset(offset()));
1259
1260         // We need to ensure the getter value does not move from under us. Note that GetterSetters
1261         // are immutable, so we just need to watch the property, not any value inside it.
1262         Structure* currStructure;
1263         if (m_conditionSet.isEmpty())
1264             currStructure = structure();
1265         else
1266             currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1267         currStructure->startWatchingPropertyForReplacements(vm, offset());
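        // Watching the property for replacements means that a later overwrite of the getter fires the
        // replacement watchpoint, which should invalidate anything (this stub included) that baked in
        // the current getter; that is what lets emitIntrinsicGetter() below specialize on it.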
1268
1269         emitIntrinsicGetter(state);
1270         return;
1271     }
1272     
1273     case MegamorphicLoad:
1274         // These need to be handled by generateWithGuard(), since the guard is part of the megamorphic load
1275         // algorithm. We can be sure that nobody will call generate() directly for MegamorphicLoad since
1276         // MegamorphicLoad is not guarded by a structure check.
1277         RELEASE_ASSERT_NOT_REACHED();
1278     }
1279     
1280     RELEASE_ASSERT_NOT_REACHED();
1281 }
1282
1283 PolymorphicAccess::PolymorphicAccess() { }
1284 PolymorphicAccess::~PolymorphicAccess() { }
1285
1286 AccessGenerationResult PolymorphicAccess::regenerateWithCases(
1287     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1288     Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
1289 {
1290     // This method will add the originalCasesToAdd to the list one at a time while preserving the
1291     // invariants:
1292     // - If a newly added case canReplace() any existing case, then the existing case is removed before
1293     //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
1294     //   can be removed via the canReplace() rule.
1295     // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1296     //   cascade through the cases in reverse order, you will get the most recent cases first.
1297     // - If this method fails (returns GaveUp, doesn't add the cases), then both the previous case list
1298     //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
1299     //   add more things after failure.
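    // For illustration (a made-up sequence, not taken from the code below): if the current list is
    // [A, B] and a new case C is added with C.canReplace(B), the result is [A, C]: B is removed in
    // place, the order of the surviving cases is untouched, and C is appended at the end as the
    // most recently added case.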
1300     
1301     // First ensure that the originalCasesToAdd doesn't contain duplicates.
1302     Vector<std::unique_ptr<AccessCase>> casesToAdd;
1303     for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1304         std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1305
1306         // Add it only if it is not replaced by a subsequent case in the list.
1307         bool found = false;
1308         for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1309             if (originalCasesToAdd[j]->canReplace(*myCase)) {
1310                 found = true;
1311                 break;
1312             }
1313         }
1314
1315         if (found)
1316             continue;
1317         
1318         casesToAdd.append(WTFMove(myCase));
1319     }
1320
1321     if (verbose)
1322         dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1323
1324     // If there aren't any cases to add, then fail on the grounds that there's no point in generating a
1325     // new stub that will be identical to the old one. Returning MadeNoChanges should tell the caller to
1326     // just keep doing what they were doing before.
1327     if (casesToAdd.isEmpty())
1328         return AccessGenerationResult::MadeNoChanges;
1329
1330     // Now construct the list of cases as they should appear if we are successful. This means putting
1331     // all of the previous cases in this list in order but excluding those that can be replaced, and
1332     // then adding the new cases.
1333     ListType newCases;
1334     for (auto& oldCase : m_list) {
1335         // Ignore old cases that cannot possibly succeed anymore.
1336         if (!oldCase->couldStillSucceed())
1337             continue;
1338
1339         // Figure out if this is replaced by any of the new cases.
1340         bool found = false;
1341         for (auto& caseToAdd : casesToAdd) {
1342             if (caseToAdd->canReplace(*oldCase)) {
1343                 found = true;
1344                 break;
1345             }
1346         }
1347         if (found)
1348             continue;
1349         
1350         newCases.append(oldCase->clone());
1351     }
1352     for (auto& caseToAdd : casesToAdd)
1353         newCases.append(WTFMove(caseToAdd));
1354
1355     if (verbose)
1356         dataLog("newCases: ", listDump(newCases), "\n");
1357     
1358     // See if we are close to having too many cases and if some of those cases can be subsumed by a
1359     // megamorphic load.
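    // For illustration (thresholds invented for this example; the real values come from Options):
    // with maxAccessVariantListSize() == 8 and megamorphicLoadCost() == 2, a list that has grown to
    // 8 cases, at least 2 of which report canBeReplacedByMegamorphicLoad(), drops those cases and
    // appends a single MegamorphicLoad case in their place.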
1360     if (newCases.size() >= Options::maxAccessVariantListSize()) {
1361         unsigned numSelfLoads = 0;
1362         for (auto& newCase : newCases) {
1363             if (newCase->canBeReplacedByMegamorphicLoad())
1364                 numSelfLoads++;
1365         }
1366         
1367         if (numSelfLoads >= Options::megamorphicLoadCost()) {
1368             if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1369                 newCases.removeAllMatching(
1370                     [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1371                         return newCase->canBeReplacedByMegamorphicLoad();
1372                     });
1373                 
1374                 newCases.append(WTFMove(mega));
1375             }
1376         }
1377     }
1378
1379     if (newCases.size() > Options::maxAccessVariantListSize()) {
1380         if (verbose)
1381             dataLog("Too many cases.\n");
1382         return AccessGenerationResult::GaveUp;
1383     }
1384
1385     MacroAssemblerCodePtr result = regenerate(vm, codeBlock, stubInfo, ident, newCases);
1386     if (!result)
1387         return AccessGenerationResult::GaveUp;
1388
1389     m_list = WTFMove(newCases);
1390     return result;
1391 }
1392
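// Convenience wrapper around regenerateWithCases() for the common case of adding a single new
// access case.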
1393 AccessGenerationResult PolymorphicAccess::regenerateWithCase(
1394     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1395     std::unique_ptr<AccessCase> newAccess)
1396 {
1397     Vector<std::unique_ptr<AccessCase>> newAccesses;
1398     newAccesses.append(WTFMove(newAccess));
1399     return regenerateWithCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1400 }
1401
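// Returns false as soon as any case, or any of the stub's weak references, points at a cell that
// is no longer marked, signalling to the caller that the whole stub should be discarded.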
1402 bool PolymorphicAccess::visitWeak(VM& vm) const
1403 {
1404     for (unsigned i = 0; i < size(); ++i) {
1405         if (!at(i).visitWeak(vm))
1406             return false;
1407     }
1408     if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1409         for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1410             if (!Heap::isMarked(weakReference.get()))
1411                 return false;
1412         }
1413     }
1414     return true;
1415 }
1416
1417 void PolymorphicAccess::dump(PrintStream& out) const
1418 {
1419     out.print(RawPointer(this), ":[");
1420     CommaPrinter comma;
1421     for (auto& entry : m_list)
1422         out.print(comma, *entry);
1423     out.print("]");
1424 }
1425
1426 MacroAssemblerCodePtr PolymorphicAccess::regenerate(
1427     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1428     PolymorphicAccess::ListType& cases)
1429 {
1430     if (verbose)
1431         dataLog("Generating code for cases: ", listDump(cases), "\n");
1432     
1433     AccessGenerationState state;
1434
1435     state.access = this;
1436     state.stubInfo = &stubInfo;
1437     state.ident = &ident;
1438     
1439     state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1440     state.valueRegs = JSValueRegs(
1441 #if USE(JSVALUE32_64)
1442         static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
1443 #endif
1444         static_cast<GPRReg>(stubInfo.patch.valueGPR));
1445
1446     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1447     state.allocator = &allocator;
1448     allocator.lock(state.baseGPR);
1449     allocator.lock(state.valueRegs);
1450 #if USE(JSVALUE32_64)
1451     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1452 #endif
1453
1454     state.scratchGPR = allocator.allocateScratchGPR();
1455     
1456     CCallHelpers jit(&vm, codeBlock);
1457     state.jit = &jit;
1458
1459     state.preservedReusedRegisterState =
1460         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
1461
1462     bool allGuardedByStructureCheck = true;
1463     bool hasJSGetterSetterCall = false;
1464     for (auto& entry : cases) {
1465         allGuardedByStructureCheck &= entry->guardedByStructureCheck();
1466         if (entry->type() == AccessCase::Getter || entry->type() == AccessCase::Setter)
1467             hasJSGetterSetterCall = true;
1468     }
1469
1470     if (cases.isEmpty()) {
1471         // This is super unlikely, but we make it legal anyway.
1472         state.failAndRepatch.append(jit.jump());
1473     } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1474         // If there are any proxies in the list, we cannot just use a binary switch over the structure.
1475         // We need to resort to a cascade. A cascade also happens to be optimal if we have just one
1476         // case.
1477         CCallHelpers::JumpList fallThrough;
1478
1479         // Cascade through the list, preferring newer entries.
1480         for (unsigned i = cases.size(); i--;) {
1481             fallThrough.link(&jit);
1482             cases[i]->generateWithGuard(state, fallThrough);
1483         }
1484         state.failAndRepatch.append(fallThrough);
1485     } else {
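        // Every remaining case is guarded purely by a structure check, so dispatch on the structure
        // ID directly: load it once, then let BinarySwitch branch over the known IDs. Each selected
        // case is generated with generate() rather than generateWithGuard(), since reaching it
        // already proves that its structure matched.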
1486         jit.load32(
1487             CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1488             state.scratchGPR);
1489         
1490         Vector<int64_t> caseValues(cases.size());
1491         for (unsigned i = 0; i < cases.size(); ++i)
1492             caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1493         
1494         BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1495         while (binarySwitch.advance(jit))
1496             cases[binarySwitch.caseIndex()]->generate(state);
1497         state.failAndRepatch.append(binarySwitch.fallThrough());
1498     }
1499
1500     if (!state.failAndIgnore.empty()) {
1501         state.failAndIgnore.link(&jit);
1502         
1503         // Make sure that the inline cache optimization code knows that we are taking the slow path
1504         // because of something that isn't patchable. The slow path will decrement "countdown" and will
1505         // only patch things if the countdown reaches zero. We increment the countdown here to ensure
1506         // that the slow path does not try to patch.
1507         jit.load8(&stubInfo.countdown, state.scratchGPR);
1508         jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1509         jit.store8(state.scratchGPR, &stubInfo.countdown);
1510     }
1511
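    // If any registers were reused (and therefore pushed on entry), the repatch path has to pop
    // them back before leaving the stub, so route it through restoreScratch() and a fresh jump;
    // otherwise the failAndRepatch jumps can be linked straight to the slow-case label below.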
1512     CCallHelpers::JumpList failure;
1513     if (allocator.didReuseRegisters()) {
1514         state.failAndRepatch.link(&jit);
1515         state.restoreScratch();
1516     } else
1517         failure = state.failAndRepatch;
1518     failure.append(jit.jump());
1519
1520     CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1521     CallSiteIndex callSiteIndexForExceptionHandling;
1522     if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1523         // Emit the exception handler.
1524         // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1525         // Note also that this is not reachable from custom getters/setters. Custom getters/setters have
1526         // their own exception handling logic that doesn't go through genericUnwind.
1527         MacroAssembler::Label makeshiftCatchHandler = jit.label();
1528
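        // Rebuild the stack pointer this stub was using around the getter/setter call: start from
        // the CodeBlock's nominal stack pointer offset, then account for the registers pushed when
        // reusing scratch registers and for the spill area used to preserve live registers across
        // the call. The call frame for the catch is fetched from the VM below.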
1529         int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1530         stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1531         stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1532
1533         jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1534         jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1535
1536         state.restoreLiveRegistersFromStackForCallWithThrownException();
1537         state.restoreScratch();
1538         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1539
1540         HandlerInfo oldHandler = state.originalExceptionHandler();
1541         CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1542         state.callbacks.append(
1543             [=] (LinkBuffer& linkBuffer) {
1544                 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1545
1546                 HandlerInfo handlerToRegister = oldHandler;
1547                 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1548                 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1549                 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1550                 codeBlock->appendExceptionHandler(handlerToRegister);
1551             });
1552
1553         // We set these to indicate that the stub should remove itself from the CodeBlock's
1554         // exception handler table when it is deallocated.
1555         codeBlockThatOwnsExceptionHandlers = codeBlock;
1556         ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1557         callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1558     }
1559
1560     LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1561     if (linkBuffer.didFailToAllocate()) {
1562         if (verbose)
1563             dataLog("Did fail to allocate.\n");
1564         return MacroAssemblerCodePtr();
1565     }
1566
1567     CodeLocationLabel successLabel =
1568         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
1569         
1570     linkBuffer.link(state.success, successLabel);
1571
1572     linkBuffer.link(
1573         failure,
1574         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
1575     
1576     for (auto callback : state.callbacks)
1577         callback(linkBuffer);
1578
1579     if (verbose)
1580         dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1581
1582     MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1583         codeBlock, linkBuffer,
1584         ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1585
1586     bool doesCalls = false;
1587     for (auto& entry : cases)
1588         doesCalls |= entry->doesCalls();
1589     
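    // The doesCalls flag tells createJITStubRoutine() that this stub can be on the stack in the
    // middle of a call, so the routine must not be freed out from under that call; the
    // exception-handling arguments let a stub that installed a makeshift catch handler unregister
    // it from the CodeBlock when the routine dies.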
1590     m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, nullptr, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1591     m_watchpoints = WTFMove(state.watchpoints);
1592     if (!state.weakReferences.isEmpty())
1593         m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1594     if (verbose)
1595         dataLog("Returning: ", code.code(), "\n");
1596     return code.code();
1597 }
1598
1599 void PolymorphicAccess::aboutToDie()
1600 {
1601     m_stubRoutine->aboutToDie();
1602 }
1603
1604 } // namespace JSC
1605
1606 namespace WTF {
1607
1608 using namespace JSC;
1609
1610 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1611 {
1612     switch (kind) {
1613     case AccessGenerationResult::MadeNoChanges:
1614         out.print("MadeNoChanges");
1615         return;
1616     case AccessGenerationResult::GaveUp:
1617         out.print("GaveUp");
1618         return;
1619     case AccessGenerationResult::GeneratedNewCode:
1620         out.print("GeneratedNewCode");
1621         return;
1622     }
1623     
1624     RELEASE_ASSERT_NOT_REACHED();
1625 }
1626
1627 void printInternal(PrintStream& out, AccessCase::AccessType type)
1628 {
1629     switch (type) {
1630     case AccessCase::Load:
1631         out.print("Load");
1632         return;
1633     case AccessCase::MegamorphicLoad:
1634         out.print("MegamorphicLoad");
1635         return;
1636     case AccessCase::Transition:
1637         out.print("Transition");
1638         return;
1639     case AccessCase::Replace:
1640         out.print("Replace");
1641         return;
1642     case AccessCase::Miss:
1643         out.print("Miss");
1644         return;
1645     case AccessCase::GetGetter:
1646         out.print("GetGetter");
1647         return;
1648     case AccessCase::Getter:
1649         out.print("Getter");
1650         return;
1651     case AccessCase::Setter:
1652         out.print("Setter");
1653         return;
1654     case AccessCase::CustomValueGetter:
1655         out.print("CustomValueGetter");
1656         return;
1657     case AccessCase::CustomAccessorGetter:
1658         out.print("CustomAccessorGetter");
1659         return;
1660     case AccessCase::CustomValueSetter:
1661         out.print("CustomValueSetter");
1662         return;
1663     case AccessCase::CustomAccessorSetter:
1664         out.print("CustomAccessorSetter");
1665         return;
1666     case AccessCase::IntrinsicGetter:
1667         out.print("IntrinsicGetter");
1668         return;
1669     case AccessCase::InHit:
1670         out.print("InHit");
1671         return;
1672     case AccessCase::InMiss:
1673         out.print("InMiss");
1674         return;
1675     case AccessCase::ArrayLength:
1676         out.print("ArrayLength");
1677         return;
1678     case AccessCase::StringLength:
1679         out.print("StringLength");
1680         return;
1681     }
1682
1683     RELEASE_ASSERT_NOT_REACHED();
1684 }
1685
1686 } // namespace WTF
1687
1688 #endif // ENABLE(JIT)
1689
1690