1 /*
2  * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "PolymorphicAccess.h"
28
29 #if ENABLE(JIT)
30
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "GetterSetter.h"
35 #include "Heap.h"
36 #include "JITOperations.h"
37 #include "JSCInlines.h"
38 #include "LinkBuffer.h"
39 #include "ScratchRegisterAllocator.h"
40 #include "StructureStubClearingWatchpoint.h"
41 #include "StructureStubInfo.h"
42 #include <wtf/CommaPrinter.h>
43 #include <wtf/ListDump.h>
44
45 namespace JSC {
46
47 static const bool verbose = false;
48
49 void AccessGenerationResult::dump(PrintStream& out) const
50 {
51     out.print(m_kind);
52     if (m_code)
53         out.print(":", m_code);
54 }
55
56 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
57 {
58     return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
59         watchpoints, jit->codeBlock(), stubInfo, condition);
60 }
61
62 void AccessGenerationState::restoreScratch()
63 {
64     allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
65 }
66
67 void AccessGenerationState::succeed()
68 {
69     restoreScratch();
70     success.append(jit->jump());
71 }
72
73 void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
74 {
75     if (!m_calculatedRegistersForCallAndExceptionHandling) {
76         m_calculatedRegistersForCallAndExceptionHandling = true;
77
78         m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
79         m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
80         if (m_needsToRestoreRegistersIfException)
81             RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
82
83         m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
84         m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
85     }
86 }
87
88 void AccessGenerationState::preserveLiveRegistersToStackForCall()
89 {
90     unsigned extraStackPadding = 0;
91     unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
92     if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
93         RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
94     m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
95 }
96
97 void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
98 {
99     RegisterSet dontRestore;
100     if (isGetter) {
101         // This is the result value. We don't want to overwrite the result with what we stored to the stack.
102         // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
103         dontRestore.set(valueRegs);
104     }
105     restoreLiveRegistersFromStackForCall(dontRestore);
106 }
107
108 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
109 {
110     // Even if we're a getter, we don't want to ignore the result value like we normally do
111     // because the getter threw, and therefore, didn't return a value that means anything.
112     // Instead, we want to restore that register to what it was upon entering the getter
113     // inline cache. The subtlety here is if the base and the result are the same register,
114     // and the getter threw, we want OSR exit to see the original base value, not the result
115     // of the getter call.
116     RegisterSet dontRestore = liveRegistersForCall();
117     // As an optimization here, we only need to restore what is live for exception handling.
118     // We can construct the dontRestore set to accomplish this goal by having it contain only
119     // what is live for call but not live for exception handling. By ignoring things that are
120     // only live at the call but not the exception handler, we will only restore things live
121     // at the exception handler.
122     dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
123     restoreLiveRegistersFromStackForCall(dontRestore);
124 }
125
126 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
127 {
128     unsigned extraStackPadding = 0;
129     ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
130 }
131
132 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
133 {
134     RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
135
136     if (!m_calculatedCallSiteIndex) {
137         m_calculatedCallSiteIndex = true;
138
139         if (m_needsToRestoreRegistersIfException)
140             m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
141         else
142             m_callSiteIndex = originalCallSiteIndex();
143     }
144
145     return m_callSiteIndex;
146 }
147
148 const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
149 {
150     RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
151     HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
152     RELEASE_ASSERT(exceptionHandler);
153     return *exceptionHandler;
154 }
155
156 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
157
158 AccessCase::AccessCase()
159 {
160 }
161
162 std::unique_ptr<AccessCase> AccessCase::get(
163     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
164     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
165     PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
166 {
167     std::unique_ptr<AccessCase> result(new AccessCase());
168
169     result->m_type = type;
170     result->m_offset = offset;
171     result->m_structure.set(vm, owner, structure);
172     result->m_conditionSet = conditionSet;
173
174     if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
175         result->m_rareData = std::make_unique<RareData>();
176         result->m_rareData->viaProxy = viaProxy;
177         result->m_rareData->additionalSet = additionalSet;
178         result->m_rareData->customAccessor.getter = customGetter;
179         result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
180     }
181
182     return result;
183 }
184
185 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
186 {
187     UNUSED_PARAM(vm);
188     UNUSED_PARAM(owner);
189     
190     if (GPRInfo::numberOfRegisters < 9)
191         return nullptr;
192     
193     std::unique_ptr<AccessCase> result(new AccessCase());
194     
195     result->m_type = MegamorphicLoad;
196     
197     return result;
198 }
199
200 std::unique_ptr<AccessCase> AccessCase::replace(
201     VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
202 {
203     std::unique_ptr<AccessCase> result(new AccessCase());
204
205     result->m_type = Replace;
206     result->m_offset = offset;
207     result->m_structure.set(vm, owner, structure);
208
209     return result;
210 }
211
212 std::unique_ptr<AccessCase> AccessCase::transition(
213     VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
214     const ObjectPropertyConditionSet& conditionSet)
215 {
216     RELEASE_ASSERT(oldStructure == newStructure->previousID());
217
218     // Skip optimizing the case where we need a realloc, if we don't have
219     // enough registers to make it happen.
220     if (GPRInfo::numberOfRegisters < 6
221         && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
222         && oldStructure->outOfLineCapacity()) {
223         return nullptr;
224     }
225
226     // Skip optimizing the case where we need realloc, and the structure has
227     // indexing storage.
228     // FIXME: We shouldn't skip this! Implement it!
229     // https://bugs.webkit.org/show_bug.cgi?id=130914
230     if (oldStructure->couldHaveIndexingHeader())
231         return nullptr;
232
233     std::unique_ptr<AccessCase> result(new AccessCase());
234
235     result->m_type = Transition;
236     result->m_offset = offset;
237     result->m_structure.set(vm, owner, newStructure);
238     result->m_conditionSet = conditionSet;
239
240     return result;
241 }
242
243 std::unique_ptr<AccessCase> AccessCase::setter(
244     VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
245     const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
246     JSObject* customSlotBase)
247 {
248     std::unique_ptr<AccessCase> result(new AccessCase());
249
250     result->m_type = type;
251     result->m_offset = offset;
252     result->m_structure.set(vm, owner, structure);
253     result->m_conditionSet = conditionSet;
254     result->m_rareData = std::make_unique<RareData>();
255     result->m_rareData->customAccessor.setter = customSetter;
256     result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
257
258     return result;
259 }
260
261 std::unique_ptr<AccessCase> AccessCase::in(
262     VM& vm, JSCell* owner, AccessType type, Structure* structure,
263     const ObjectPropertyConditionSet& conditionSet)
264 {
265     std::unique_ptr<AccessCase> result(new AccessCase());
266
267     result->m_type = type;
268     result->m_structure.set(vm, owner, structure);
269     result->m_conditionSet = conditionSet;
270
271     return result;
272 }
273
274 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
275 {
276     std::unique_ptr<AccessCase> result(new AccessCase());
277
278     result->m_type = type;
279
280     return result;
281 }
282
283 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
284     VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
285     Structure* structure, const ObjectPropertyConditionSet& conditionSet)
286 {
287     std::unique_ptr<AccessCase> result(new AccessCase());
288
289     result->m_type = IntrinsicGetter;
290     result->m_structure.set(vm, owner, structure);
291     result->m_conditionSet = conditionSet;
292     result->m_offset = offset;
293
294     result->m_rareData = std::make_unique<RareData>();
295     result->m_rareData->intrinsicFunction.set(vm, owner, getter);
296
297     return result;
298 }
299
300 AccessCase::~AccessCase()
301 {
302 }
303
304 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
305     VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
306 {
307     switch (stubInfo.cacheType) {
308     case CacheType::GetByIdSelf:
309         return get(
310             vm, owner, Load, stubInfo.u.byIdSelf.offset,
311             stubInfo.u.byIdSelf.baseObjectStructure.get());
312
313     case CacheType::PutByIdReplace:
314         return replace(
315             vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
316
317     default:
318         return nullptr;
319     }
320 }
321
322 std::unique_ptr<AccessCase> AccessCase::clone() const
323 {
324     std::unique_ptr<AccessCase> result(new AccessCase());
325     result->m_type = m_type;
326     result->m_offset = m_offset;
327     result->m_structure = m_structure;
328     result->m_conditionSet = m_conditionSet;
329     if (RareData* rareData = m_rareData.get()) {
330         result->m_rareData = std::make_unique<RareData>();
331         result->m_rareData->viaProxy = rareData->viaProxy;
332         result->m_rareData->additionalSet = rareData->additionalSet;
333         // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
334         result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
335         result->m_rareData->customSlotBase = rareData->customSlotBase;
336         result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
337     }
338     return result;
339 }
340
341 bool AccessCase::guardedByStructureCheck() const
342 {
343     if (viaProxy())
344         return false;
345
346     switch (m_type) {
347     case MegamorphicLoad:
348     case ArrayLength:
349     case StringLength:
350         return false;
351     default:
352         return true;
353     }
354 }
355
356 JSObject* AccessCase::alternateBase() const
357 {
358     if (customSlotBase())
359         return customSlotBase();
360     return conditionSet().slotBaseCondition().object();
361 }
362
363 bool AccessCase::couldStillSucceed() const
364 {
365     return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
366 }
367
368 bool AccessCase::canBeReplacedByMegamorphicLoad() const
369 {
370     return type() == Load
371         && !viaProxy()
372         && conditionSet().isEmpty()
373         && !additionalSet()
374         && !customSlotBase();
375 }
376
377 bool AccessCase::canReplace(const AccessCase& other) const
378 {
379     // We could do a lot better here, but for now we just do something obvious.
380     
381     if (type() == MegamorphicLoad && other.canBeReplacedByMegamorphicLoad())
382         return true;
383
384     if (!guardedByStructureCheck() || !other.guardedByStructureCheck()) {
385         // FIXME: Implement this!
386         return false;
387     }
388
389     return structure() == other.structure();
390 }
391
392 void AccessCase::dump(PrintStream& out) const
393 {
394     out.print(m_type, ":(");
395
396     CommaPrinter comma;
397
398     if (m_type == Transition)
399         out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
400     else if (m_structure)
401         out.print(comma, "structure = ", pointerDump(m_structure.get()));
402
403     if (isValidOffset(m_offset))
404         out.print(comma, "offset = ", m_offset);
405     if (!m_conditionSet.isEmpty())
406         out.print(comma, "conditions = ", m_conditionSet);
407
408     if (RareData* rareData = m_rareData.get()) {
409         if (rareData->viaProxy)
410             out.print(comma, "viaProxy = ", rareData->viaProxy);
411         if (rareData->additionalSet)
412             out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
413         if (rareData->callLinkInfo)
414             out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
415         if (rareData->customAccessor.opaque)
416             out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
417         if (rareData->customSlotBase)
418             out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
419     }
420
421     out.print(")");
422 }
423
424 bool AccessCase::visitWeak(VM& vm) const
425 {
426     if (m_structure && !Heap::isMarked(m_structure.get()))
427         return false;
428     if (!m_conditionSet.areStillLive())
429         return false;
430     if (m_rareData) {
431         if (m_rareData->callLinkInfo)
432             m_rareData->callLinkInfo->visitWeak(vm);
433         if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
434             return false;
435         if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
436             return false;
437     }
438     return true;
439 }
440
441 void AccessCase::generateWithGuard(
442     AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
443 {
444     CCallHelpers& jit = *state.jit;
445     VM& vm = *jit.vm();
446     const Identifier& ident = *state.ident;
447     StructureStubInfo& stubInfo = *state.stubInfo;
448     JSValueRegs valueRegs = state.valueRegs;
449     GPRReg baseGPR = state.baseGPR;
450     GPRReg scratchGPR = state.scratchGPR;
451     
452     UNUSED_PARAM(vm);
453
454     switch (m_type) {
455     case ArrayLength: {
456         ASSERT(!viaProxy());
457         jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
458         fallThrough.append(
459             jit.branchTest32(
460                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
461         fallThrough.append(
462             jit.branchTest32(
463                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
464         break;
465     }
466
467     case StringLength: {
468         ASSERT(!viaProxy());
469         fallThrough.append(
470             jit.branch8(
471                 CCallHelpers::NotEqual,
472                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
473                 CCallHelpers::TrustedImm32(StringType)));
474         break;
475     }
476         
477     case MegamorphicLoad: {
478         UniquedStringImpl* key = ident.impl();
479         unsigned hash = IdentifierRepHash::hash(key);
480         
481         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
482         allocator.lock(baseGPR);
483 #if USE(JSVALUE32_64)
484         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
485 #endif
486         allocator.lock(valueRegs);
487         allocator.lock(scratchGPR);
488         
489         GPRReg intermediateGPR = scratchGPR;
490         GPRReg maskGPR = allocator.allocateScratchGPR();
491         GPRReg maskedHashGPR = allocator.allocateScratchGPR();
492         GPRReg indexGPR = allocator.allocateScratchGPR();
493         GPRReg offsetGPR = allocator.allocateScratchGPR();
494         
495         if (verbose) {
496             dataLog("baseGPR = ", baseGPR, "\n");
497             dataLog("valueRegs = ", valueRegs, "\n");
498             dataLog("scratchGPR = ", scratchGPR, "\n");
499             dataLog("intermediateGPR = ", intermediateGPR, "\n");
500             dataLog("maskGPR = ", maskGPR, "\n");
501             dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
502             dataLog("indexGPR = ", indexGPR, "\n");
503             dataLog("offsetGPR = ", offsetGPR, "\n");
504         }
505
506         ScratchRegisterAllocator::PreservedState preservedState =
507             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
508
509         CCallHelpers::JumpList myFailAndIgnore;
510         CCallHelpers::JumpList myFallThrough;
511         
512         jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
513         jit.loadPtr(
514             CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
515             intermediateGPR);
516         
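        // The structure's property table may be null (it is materialized lazily); we cannot
        // probe a null table, so bail out via failAndIgnore.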
517         myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
518         
519         jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
520         jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
521         jit.load32(
522             CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
523             intermediateGPR);
524
525         jit.move(maskGPR, maskedHashGPR);
526         jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
527         jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
528         jit.addPtr(indexGPR, intermediateGPR);
529         
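        // At this point indexGPR points at the uint32_t index vector and intermediateGPR points
        // just past it, at the start of the PropertyMapEntry array (the index occupies
        // indexSize * sizeof(uint32_t) bytes). The loop below linear-probes the index with
        // maskedHashGPR: a stored value of EmptyEntryIndex means a miss, and any other value is
        // the one-based index of the entry to check.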
530         CCallHelpers::Label loop = jit.label();
531         
532         jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
533         
534         myFallThrough.append(
535             jit.branch32(
536                 CCallHelpers::Equal,
537                 offsetGPR,
538                 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
539         
540         jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
541         jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
542         jit.addPtr(intermediateGPR, offsetGPR);
543         
544         CCallHelpers::Jump collision = jit.branchPtr(
545             CCallHelpers::NotEqual,
546             CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
547             CCallHelpers::TrustedImmPtr(key));
548         
549         // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
550         // Check them and then attempt the load.
551         
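        // The megamorphic fast path only handles plain value properties; if the entry is an
        // Accessor or CustomAccessor, fall through so another case (or the slow path) handles it.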
552         myFallThrough.append(
553             jit.branchTest32(
554                 CCallHelpers::NonZero,
555                 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
556                 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
557         
558         jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
559         
560         jit.loadProperty(baseGPR, offsetGPR, valueRegs);
561         
562         allocator.restoreReusedRegistersByPopping(jit, preservedState);
563         state.succeed();
564         
565         collision.link(&jit);
566
567         jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
568         
569         // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
570         // around isn't super common so we could, for example, recompute the mask from the difference between
571         // the table and index. But before we do that we should probably make it easier to multiply and
572         // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
573         // to have a power-of-2 size.
574         jit.and32(maskGPR, maskedHashGPR);
575         jit.jump().linkTo(loop, &jit);
576         
577         if (allocator.didReuseRegisters()) {
578             myFailAndIgnore.link(&jit);
579             allocator.restoreReusedRegistersByPopping(jit, preservedState);
580             state.failAndIgnore.append(jit.jump());
581             
582             myFallThrough.link(&jit);
583             allocator.restoreReusedRegistersByPopping(jit, preservedState);
584             fallThrough.append(jit.jump());
585         } else {
586             state.failAndIgnore.append(myFailAndIgnore);
587             fallThrough.append(myFallThrough);
588         }
589         return;
590     }
591
592     default: {
593         if (viaProxy()) {
594             fallThrough.append(
595                 jit.branch8(
596                     CCallHelpers::NotEqual,
597                     CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
598                     CCallHelpers::TrustedImm32(PureForwardingProxyType)));
599
600             jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
601
602             fallThrough.append(
603                 jit.branchStructure(
604                     CCallHelpers::NotEqual,
605                     CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
606                     structure()));
607         } else {
608             fallThrough.append(
609                 jit.branchStructure(
610                     CCallHelpers::NotEqual,
611                     CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
612                     structure()));
613         }
614         break;
615     } };
616
617     generate(state);
618 }
619
620 // EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned on an even-numbered register (r0, r2 or [sp]).
621 // To prevent the assembler from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
622 #if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
623 #define EABI_32BIT_DUMMY_ARG      CCallHelpers::TrustedImm32(0),
624 #else
625 #define EABI_32BIT_DUMMY_ARG
626 #endif
627
628 void AccessCase::generate(AccessGenerationState& state)
629 {
630     if (verbose)
631         dataLog("Generating code for: ", *this, "\n");
632     
633     CCallHelpers& jit = *state.jit;
634     VM& vm = *jit.vm();
635     CodeBlock* codeBlock = jit.codeBlock();
636     StructureStubInfo& stubInfo = *state.stubInfo;
637     const Identifier& ident = *state.ident;
638     JSValueRegs valueRegs = state.valueRegs;
639     GPRReg baseGPR = state.baseGPR;
640     GPRReg scratchGPR = state.scratchGPR;
641
642     ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
643
644     if ((structure() && structure()->needImpurePropertyWatchpoint())
645         || m_conditionSet.needImpurePropertyWatchpoint())
646         vm.registerWatchpointForImpureProperty(ident, state.addWatchpoint());
647
648     if (additionalSet())
649         additionalSet()->add(state.addWatchpoint());
650
651     for (const ObjectPropertyCondition& condition : m_conditionSet) {
652         Structure* structure = condition.object()->structure();
653
654         if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
655             structure->addTransitionWatchpoint(state.addWatchpoint(condition));
656             continue;
657         }
658
659         if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
660             dataLog("This condition is no longer met: ", condition, "\n");
661             RELEASE_ASSERT_NOT_REACHED();
662         }
663
664         // We will emit code that has a weak reference that isn't otherwise listed anywhere.
665         state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
666         
667         jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
668         state.failAndRepatch.append(
669             jit.branchStructure(
670                 CCallHelpers::NotEqual,
671                 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
672                 structure));
673     }
674
675     switch (m_type) {
676     case InHit:
677     case InMiss:
678         jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
679         state.succeed();
680         return;
681
682     case Miss:
683         jit.moveTrustedValue(jsUndefined(), valueRegs);
684         state.succeed();
685         return;
686
687     case Load:
688     case Getter:
689     case Setter:
690     case CustomValueGetter:
691     case CustomAccessorGetter:
692     case CustomValueSetter:
693     case CustomAccessorSetter: {
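        // As in the IntrinsicGetter case below, watch the property for replacements so that a
        // later overwrite of the slot is observable; GetterSetters are immutable, so watching
        // the property slot itself is sufficient.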
694         if (isValidOffset(m_offset)) {
695             Structure* currStructure;
696             if (m_conditionSet.isEmpty())
697                 currStructure = structure();
698             else
699                 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
700             currStructure->startWatchingPropertyForReplacements(vm, offset());
701         }
702
703         GPRReg baseForGetGPR;
704         if (viaProxy()) {
705             baseForGetGPR = valueRegs.payloadGPR();
706             jit.loadPtr(
707                 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
708                 baseForGetGPR);
709         } else
710             baseForGetGPR = baseGPR;
711
712         GPRReg baseForAccessGPR;
713         if (!m_conditionSet.isEmpty()) {
714             jit.move(
715                 CCallHelpers::TrustedImmPtr(alternateBase()),
716                 scratchGPR);
717             baseForAccessGPR = scratchGPR;
718         } else
719             baseForAccessGPR = baseForGetGPR;
720
721         GPRReg loadedValueGPR = InvalidGPRReg;
722         if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
723             if (m_type == Load)
724                 loadedValueGPR = valueRegs.payloadGPR();
725             else
726                 loadedValueGPR = scratchGPR;
727
728             GPRReg storageGPR;
729             if (isInlineOffset(m_offset))
730                 storageGPR = baseForAccessGPR;
731             else {
732                 jit.loadPtr(
733                     CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
734                     loadedValueGPR);
735                 storageGPR = loadedValueGPR;
736             }
737
738 #if USE(JSVALUE64)
739             jit.load64(
740                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
741 #else
742             if (m_type == Load) {
743                 jit.load32(
744                     CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
745                     valueRegs.tagGPR());
746             }
747             jit.load32(
748                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
749                 loadedValueGPR);
750 #endif
751         }
752
753         if (m_type == Load) {
754             state.succeed();
755             return;
756         }
757
758         // Stuff for custom getters/setters.
759         CCallHelpers::Call operationCall;
760         CCallHelpers::Call lookupExceptionHandlerCall;
761
762         // Stuff for JS getters/setters.
763         CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
764         CCallHelpers::Call fastPathCall;
765         CCallHelpers::Call slowPathCall;
766
767         CCallHelpers::Jump success;
768         CCallHelpers::Jump fail;
769
770         // This also does the necessary calculations of whether or not we're an
771         // exception handling call site.
772         state.calculateLiveRegistersForCallAndExceptionHandling();
773         state.preserveLiveRegistersToStackForCall();
774
775         // Need to make sure that whenever this call is made in the future, we remember the
776         // place that we made it from.
777         jit.store32(
778             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
779             CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
780
781         if (m_type == Getter || m_type == Setter) {
782             // Create a JS call using a JS call inline cache. Assume that:
783             //
784             // - SP is aligned and represents the extent of the calling compiler's stack usage.
785             //
786             // - FP is set correctly (i.e. it points to the caller's call frame header).
787             //
788             // - SP - FP is an aligned difference.
789             //
790             // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
791             //   code.
792             //
793             // Therefore, we temporarily grow the stack for the purpose of the call and then
794             // shrink it after.
795
796             RELEASE_ASSERT(!m_rareData->callLinkInfo);
797             m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
798             
799             // FIXME: If we generated a polymorphic call stub that jumped back to the getter
800             // stub, which then jumped back to the main code, then we'd have a reachability
801             // situation that the GC doesn't know about. The GC would ensure that the polymorphic
802             // call stub stayed alive, and it would ensure that the main code stayed alive, but
803             // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
804             // be GC objects, and then we'd be able to say that the polymorphic call stub has a
805             // reference to the getter stub.
806             // https://bugs.webkit.org/show_bug.cgi?id=148914
807             m_rareData->callLinkInfo->disallowStubs();
808             
809             m_rareData->callLinkInfo->setUpCall(
810                 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
811
812             CCallHelpers::JumpList done;
813
814             // There is a "this" argument.
815             unsigned numberOfParameters = 1;
816             // ... and a value argument if we're calling a setter.
817             if (m_type == Setter)
818                 numberOfParameters++;
819
820             // Get the accessor; if there ain't one then the result is jsUndefined().
821             if (m_type == Setter) {
822                 jit.loadPtr(
823                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
824                     loadedValueGPR);
825             } else {
826                 jit.loadPtr(
827                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
828                     loadedValueGPR);
829             }
830
831             CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
832                 CCallHelpers::Zero, loadedValueGPR);
833
834             unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
835
836             unsigned numberOfBytesForCall =
837                 numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);
838
839             unsigned alignedNumberOfBytesForCall =
840                 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
841
842             jit.subPtr(
843                 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
844                 CCallHelpers::stackPointerRegister);
845
846             CCallHelpers::Address calleeFrame = CCallHelpers::Address(
847                 CCallHelpers::stackPointerRegister,
848                 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
849
850             jit.store32(
851                 CCallHelpers::TrustedImm32(numberOfParameters),
852                 calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
853
854             jit.storeCell(
855                 loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
856
857             jit.storeCell(
858                 baseForGetGPR,
859                 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
860
861             if (m_type == Setter) {
862                 jit.storeValue(
863                     valueRegs,
864                     calleeFrame.withOffset(
865                         virtualRegisterForArgument(1).offset() * sizeof(Register)));
866             }
867
868             CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
869                 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
870                 CCallHelpers::TrustedImmPtr(0));
871
872             fastPathCall = jit.nearCall();
873             if (m_type == Getter)
874                 jit.setupResults(valueRegs);
875             done.append(jit.jump());
876
877             slowCase.link(&jit);
878             jit.move(loadedValueGPR, GPRInfo::regT0);
879 #if USE(JSVALUE32_64)
880             // We *always* know that the getter/setter, if non-null, is a cell.
881             jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
882 #endif
883             jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
884             slowPathCall = jit.nearCall();
885             if (m_type == Getter)
886                 jit.setupResults(valueRegs);
887             done.append(jit.jump());
888
889             returnUndefined.link(&jit);
890             if (m_type == Getter)
891                 jit.moveTrustedValue(jsUndefined(), valueRegs);
892
893             done.link(&jit);
894
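            // Shrink the stack back to this stub's frame: recompute SP from FP using the code
            // block's stack pointer offset, then subtract the bytes this stub itself pushed (the
            // reused scratch registers and the spilled live registers).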
895             jit.addPtr(CCallHelpers::TrustedImm32((jit.codeBlock()->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
896                 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
897             state.restoreLiveRegistersFromStackForCall(isGetter());
898
899             state.callbacks.append(
900                 [=, &vm] (LinkBuffer& linkBuffer) {
901                     m_rareData->callLinkInfo->setCallLocations(
902                         linkBuffer.locationOfNearCall(slowPathCall),
903                         linkBuffer.locationOf(addressOfLinkFunctionCheck),
904                         linkBuffer.locationOfNearCall(fastPathCall));
905
906                     linkBuffer.link(
907                         slowPathCall,
908                         CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
909                 });
910         } else {
911             // Need to make room for the C call so any of our stack spillage isn't overwritten.
912             // We also need to make room because we may be an inline cache in the FTL and not
913             // have a JIT call frame.
914             bool needsToMakeRoomOnStackForCCall = state.numberOfStackBytesUsedForRegisterPreservation() || codeBlock->jitType() == JITCode::FTLJIT;
915             if (needsToMakeRoomOnStackForCCall)
916                 jit.makeSpaceOnStackForCCall();
917
918             // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
919             // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
920             // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (the receiver).
921             GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
922 #if USE(JSVALUE64)
923             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
924                 jit.setupArgumentsWithExecState(
925                     baseForCustomValue,
926                     CCallHelpers::TrustedImmPtr(ident.impl()));
927             } else
928                 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
929 #else
930             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
931                 jit.setupArgumentsWithExecState(
932                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
933                     CCallHelpers::TrustedImm32(JSValue::CellTag),
934                     CCallHelpers::TrustedImmPtr(ident.impl()));
935             } else {
936                 jit.setupArgumentsWithExecState(
937                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
938                     CCallHelpers::TrustedImm32(JSValue::CellTag),
939                     valueRegs.payloadGPR(), valueRegs.tagGPR());
940             }
941 #endif
942             jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
943
944             operationCall = jit.call();
945             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
946                 jit.setupResults(valueRegs);
947             if (needsToMakeRoomOnStackForCCall)
948                 jit.reclaimSpaceOnStackForCCall();
949
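            // With InvertedExceptionCheck the branch is taken when no exception is pending, so
            // the straight-line code below is the exception path.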
950             CCallHelpers::Jump noException =
951                 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
952
953             bool didSetLookupExceptionHandler = false;
954             state.restoreLiveRegistersFromStackForCallWithThrownException();
955             state.restoreScratch();
956             jit.copyCalleeSavesToVMCalleeSavesBuffer();
957             if (state.needsToRestoreRegistersIfException()) {
958                 // The JIT that produced the original exception handling call site
959                 // expects the OSR exit to be arrived at from genericUnwind. Therefore
960                 // we must model what genericUnwind does here, i.e., set
961                 // callFrameForCatch and copy the callee saves.
962
963                 jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
964                 CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
965
966                 // We don't need to insert a new exception handler in the table
967                 // because we're doing a manual exception check here, i.e., we'll
968                 // never arrive here from genericUnwind().
969                 HandlerInfo originalHandler = state.originalExceptionHandler();
970                 state.callbacks.append(
971                     [=] (LinkBuffer& linkBuffer) {
972                         linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
973                     });
974             } else {
975                 jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::callFrameRegister);
976                 lookupExceptionHandlerCall = jit.call();
977                 didSetLookupExceptionHandler = true;
978                 jit.jumpToExceptionHandler();
979             }
980         
981             noException.link(&jit);
982             state.restoreLiveRegistersFromStackForCall(isGetter());
983
984             state.callbacks.append(
985                 [=] (LinkBuffer& linkBuffer) {
986                     linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
987                     if (didSetLookupExceptionHandler)
988                         linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
989                 });
990         }
991         state.succeed();
992         return;
993     }
994
995     case Replace: {
996         if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
997             if (verbose)
998                 dataLog("Have type: ", type->descriptor(), "\n");
999             state.failAndRepatch.append(
1000                 jit.branchIfNotType(
1001                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1002         } else if (verbose)
1003             dataLog("Don't have type.\n");
1004         
1005         if (isInlineOffset(m_offset)) {
1006             jit.storeValue(
1007                 valueRegs,
1008                 CCallHelpers::Address(
1009                     baseGPR,
1010                     JSObject::offsetOfInlineStorage() +
1011                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1012         } else {
1013             jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1014             jit.storeValue(
1015                 valueRegs,
1016                 CCallHelpers::Address(
1017                     scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1018         }
1019         state.succeed();
1020         return;
1021     }
1022
1023     case Transition: {
1024         // AccessCase::transition() should have returned null.
1025         RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1026         RELEASE_ASSERT(!structure()->couldHaveIndexingHeader());
1027
1028         if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1029             if (verbose)
1030                 dataLog("Have type: ", type->descriptor(), "\n");
1031             state.failAndRepatch.append(
1032                 jit.branchIfNotType(
1033                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1034         } else if (verbose)
1035             dataLog("Don't have type.\n");
1036         
1037         CCallHelpers::JumpList slowPath;
1038
1039         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1040         allocator.lock(baseGPR);
1041 #if USE(JSVALUE32_64)
1042         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1043 #endif
1044         allocator.lock(valueRegs);
1045         allocator.lock(scratchGPR);
1046
1047         GPRReg scratchGPR2 = allocator.allocateScratchGPR();
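        // scratchGPR3 is only needed when we have to copy an existing out-of-line backing store
        // into a newly allocated, larger one; it will hold the old storage pointer during the copy.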
1048         GPRReg scratchGPR3;
1049         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
1050             && structure()->outOfLineCapacity())
1051             scratchGPR3 = allocator.allocateScratchGPR();
1052         else
1053             scratchGPR3 = InvalidGPRReg;
1054
1055         ScratchRegisterAllocator::PreservedState preservedState =
1056             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1057
1058         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1059
1060         bool scratchGPRHasStorage = false;
1061         bool needsToMakeRoomOnStackForCCall = !preservedState.numberOfBytesPreserved && codeBlock->jitType() == JITCode::FTLJIT;
1062
1063         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
1064             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1065             CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
1066
1067             if (!structure()->outOfLineCapacity()) {
1068                 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1069                 slowPath.append(
1070                     jit.branchSubPtr(
1071                         CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1072                 jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1073                 jit.negPtr(scratchGPR);
1074                 jit.addPtr(
1075                     CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1076                 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1077             } else {
1078                 size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1079                 ASSERT(newSize > oldSize);
1080             
1081                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1082                 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1083                 slowPath.append(
1084                     jit.branchSubPtr(
1085                         CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1086                 jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1087                 jit.negPtr(scratchGPR);
1088                 jit.addPtr(
1089                     CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1090                 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1091                 // We have scratchGPR = new storage, scratchGPR3 = old storage,
1092                 // scratchGPR2 = available
1093                 for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1094                     jit.loadPtr(
1095                         CCallHelpers::Address(
1096                             scratchGPR3,
1097                             -static_cast<ptrdiff_t>(
1098                                 offset + sizeof(JSValue) + sizeof(void*))),
1099                         scratchGPR2);
1100                     jit.storePtr(
1101                         scratchGPR2,
1102                         CCallHelpers::Address(
1103                             scratchGPR,
1104                             -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1105                 }
1106             }
1107
1108             jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1109             scratchGPRHasStorage = true;
1110         }
1111
1112         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1113         jit.store32(
1114             CCallHelpers::TrustedImm32(structureBits),
1115             CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1116
1117         if (isInlineOffset(m_offset)) {
1118             jit.storeValue(
1119                 valueRegs,
1120                 CCallHelpers::Address(
1121                     baseGPR,
1122                     JSObject::offsetOfInlineStorage() +
1123                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1124         } else {
1125             if (!scratchGPRHasStorage)
1126                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1127             jit.storeValue(
1128                 valueRegs,
1129                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1130         }
1131
1132         ScratchBuffer* scratchBuffer = nullptr;
1133         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity())
1134             scratchBuffer = vm.scratchBufferForSize(allocator.desiredScratchBufferSizeForCall());
1135
1136         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
1137             CCallHelpers::Call callFlushWriteBarrierBuffer;
1138             CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
1139             WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1140             jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1141             CCallHelpers::Jump needToFlush =
1142                 jit.branch32(
1143                     CCallHelpers::AboveOrEqual, scratchGPR2,
1144                     CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity()));
1145
1146             jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1147             jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1148
1149             jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR);
1150             // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
1151             jit.storePtr(
1152                 baseGPR,
1153                 CCallHelpers::BaseIndex(
1154                     scratchGPR, scratchGPR2, CCallHelpers::ScalePtr,
1155                     static_cast<int32_t>(-sizeof(void*))));
1156
1157             CCallHelpers::Jump doneWithBarrier = jit.jump();
1158             needToFlush.link(&jit);
1159
1160             // FIXME: We should restoreReusedRegistersByPopping() before this. Then, we wouldn't need
1161             // padding in preserveReusedRegistersByPushing(). Or, maybe it would be even better if the
1162             // barrier slow path was just the normal slow path, below.
1163             // https://bugs.webkit.org/show_bug.cgi?id=149030
1164             allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR2);
1165             if (needsToMakeRoomOnStackForCCall)
1166                 jit.makeSpaceOnStackForCCall();
1167             jit.setupArgumentsWithExecState(baseGPR);
1168             callFlushWriteBarrierBuffer = jit.call();
1169             if (needsToMakeRoomOnStackForCCall)
1170                 jit.reclaimSpaceOnStackForCCall();
1171             allocator.restoreUsedRegistersFromScratchBufferForCall(
1172                 jit, scratchBuffer, scratchGPR2);
1173
1174             doneWithBarrier.link(&jit);
1175             ownerIsRememberedOrInEden.link(&jit);
1176
1177             state.callbacks.append(
1178                 [=] (LinkBuffer& linkBuffer) {
1179                     linkBuffer.link(callFlushWriteBarrierBuffer, operationFlushWriteBarrierBuffer);
1180                 });
1181         }
1182         
1183         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1184         state.succeed();
1185
1186         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
1187             slowPath.link(&jit);
1188             allocator.restoreReusedRegistersByPopping(jit, preservedState);
1189             allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR);
1190             if (needsToMakeRoomOnStackForCCall)
1191                 jit.makeSpaceOnStackForCCall();
1192 #if USE(JSVALUE64)
1193             jit.setupArgumentsWithExecState(
1194                 baseGPR,
1195                 CCallHelpers::TrustedImmPtr(newStructure()),
1196                 CCallHelpers::TrustedImm32(m_offset),
1197                 valueRegs.gpr());
1198 #else
1199             jit.setupArgumentsWithExecState(
1200                 baseGPR,
1201                 CCallHelpers::TrustedImmPtr(newStructure()),
1202                 CCallHelpers::TrustedImm32(m_offset),
1203                 valueRegs.payloadGPR(), valueRegs.tagGPR());
1204 #endif
1205             CCallHelpers::Call operationCall = jit.call();
1206             if (needsToMakeRoomOnStackForCCall)
1207                 jit.reclaimSpaceOnStackForCCall();
1208             allocator.restoreUsedRegistersFromScratchBufferForCall(jit, scratchBuffer, scratchGPR);
1209             state.succeed();
1210
1211             state.callbacks.append(
1212                 [=] (LinkBuffer& linkBuffer) {
1213                     linkBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
1214                 });
1215         }
1216         return;
1217     }
1218
1219     case ArrayLength: {
1220         jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1221         jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
1222         state.failAndIgnore.append(
1223             jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1224         jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1225         state.succeed();
1226         return;
1227     }
1228
1229     case StringLength: {
1230         jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1231         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1232         state.succeed();
1233         return;
1234     }
1235
1236     case IntrinsicGetter: {
1237         RELEASE_ASSERT(isValidOffset(offset()));
1238
1239         // We need to ensure the getter value does not move from under us. Note that GetterSetters
1240         // are immutable so we just need to watch the property not any value inside it.
1241         Structure* currStructure;
1242         if (m_conditionSet.isEmpty())
1243             currStructure = structure();
1244         else
1245             currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1246         currStructure->startWatchingPropertyForReplacements(vm, offset());
1247
1248         emitIntrinsicGetter(state);
1249         return;
1250     }
1251     
1252     case MegamorphicLoad:
1253         // These need to be handled by generateWithGuard(), since the guard is part of the megamorphic load
1254         // algorithm. We can be sure that nobody will call generate() directly for MegamorphicLoad since
1255         // MegamorphicLoad is not guarded by a structure check.
1256         RELEASE_ASSERT_NOT_REACHED();
1257     }
1258     
1259     RELEASE_ASSERT_NOT_REACHED();
1260 }
1261
1262 PolymorphicAccess::PolymorphicAccess() { }
1263 PolymorphicAccess::~PolymorphicAccess() { }
1264
1265 AccessGenerationResult PolymorphicAccess::regenerateWithCases(
1266     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1267     Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
1268 {
1269     // This method will add the originalCasesToAdd to the list one at a time while preserving the
1270     // invariants:
1271     // - If a newly added case canReplace() any existing case, then the existing case is removed before
1272     //   the new case is added. Removal doesn't change the order of the list. Any number of existing cases
1273     //   can be removed via the canReplace() rule.
1274     // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1275     //   cascade through the cases in reverse order, you will get the most recent cases first.
1276     // - If this method fails (returns GaveUp or MadeNoChanges without adding the cases), then both the previous case list
1277     //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
1278     //   add more things after failure.
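    // For example, if originalCasesToAdd contains an older case and a newer case where the newer one
    // canReplace() the older, only the newer case survives the dedup loop below; the same rule later
    // filters superseded cases out of the existing list.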
1279     
1280     // First ensure that the originalCasesToAdd doesn't contain duplicates.
1281     Vector<std::unique_ptr<AccessCase>> casesToAdd;
1282     for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1283         std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1284
1285         // Add it only if it is not replaced by a subsequent case in the list.
1286         bool found = false;
1287         for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1288             if (originalCasesToAdd[j]->canReplace(*myCase)) {
1289                 found = true;
1290                 break;
1291             }
1292         }
1293
1294         if (found)
1295             continue;
1296         
1297         casesToAdd.append(WTFMove(myCase));
1298     }
1299
1300     if (verbose)
1301         dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1302
1303     // If there aren't any cases to add, then fail on the grounds that there's no point in generating a
1304     // new stub that would be identical to the old one. Returning MadeNoChanges tells the caller to just
1305     // keep doing what they were doing before.
1306     if (casesToAdd.isEmpty())
1307         return AccessGenerationResult::MadeNoChanges;
1308
1309     // Now construct the list of cases as they should appear if we are successful. This means putting
1310     // all of the previous cases in this list in order but excluding those that can be replaced, and
1311     // then adding the new cases.
1312     ListType newCases;
1313     for (auto& oldCase : m_list) {
1314         // Ignore old cases that cannot possibly succeed anymore.
1315         if (!oldCase->couldStillSucceed())
1316             continue;
1317
1318         // Figure out if this is replaced by any new cases.
1319         bool found = false;
1320         for (auto& caseToAdd : casesToAdd) {
1321             if (caseToAdd->canReplace(*oldCase)) {
1322                 found = true;
1323                 break;
1324             }
1325         }
1326         if (found)
1327             continue;
1328         
1329         newCases.append(oldCase->clone());
1330     }
1331     for (auto& caseToAdd : casesToAdd)
1332         newCases.append(WTFMove(caseToAdd));
1333
1334     if (verbose)
1335         dataLog("newCases: ", listDump(newCases), "\n");
1336     
1337     // See if we are close to having too many cases and if some of those cases can be subsumed by a
1338     // megamorphic load.
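    // Concretely: once the list is at Options::maxAccessVariantListSize(), if at least
    // Options::megamorphicLoadCost() of the cases could be served by a megamorphic load, drop those
    // cases and append a single MegamorphicLoad case in their place.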
1339     if (newCases.size() >= Options::maxAccessVariantListSize()) {
1340         unsigned numSelfLoads = 0;
1341         for (auto& newCase : newCases) {
1342             if (newCase->canBeReplacedByMegamorphicLoad())
1343                 numSelfLoads++;
1344         }
1345         
1346         if (numSelfLoads >= Options::megamorphicLoadCost()) {
1347             if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1348                 newCases.removeAllMatching(
1349                     [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1350                         return newCase->canBeReplacedByMegamorphicLoad();
1351                     });
1352                 
1353                 newCases.append(WTFMove(mega));
1354             }
1355         }
1356     }
1357
1358     if (newCases.size() > Options::maxAccessVariantListSize()) {
1359         if (verbose)
1360             dataLog("Too many cases.\n");
1361         return AccessGenerationResult::GaveUp;
1362     }
1363
1364     MacroAssemblerCodePtr result = regenerate(vm, codeBlock, stubInfo, ident, newCases);
1365     if (!result)
1366         return AccessGenerationResult::GaveUp;
1367
1368     m_list = WTFMove(newCases);
1369     return result;
1370 }
1371
1372 AccessGenerationResult PolymorphicAccess::regenerateWithCase(
1373     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1374     std::unique_ptr<AccessCase> newAccess)
1375 {
1376     Vector<std::unique_ptr<AccessCase>> newAccesses;
1377     newAccesses.append(WTFMove(newAccess));
1378     return regenerateWithCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1379 }
1380
1381 bool PolymorphicAccess::visitWeak(VM& vm) const
1382 {
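    // Returns false as soon as any case, or any of the stub's weak references, points at a dead cell;
    // the caller treats that as a signal to throw this stub away.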
1383     for (unsigned i = 0; i < size(); ++i) {
1384         if (!at(i).visitWeak(vm))
1385             return false;
1386     }
1387     if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1388         for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1389             if (!Heap::isMarked(weakReference.get()))
1390                 return false;
1391         }
1392     }
1393     return true;
1394 }
1395
1396 void PolymorphicAccess::dump(PrintStream& out) const
1397 {
1398     out.print(RawPointer(this), ":[");
1399     CommaPrinter comma;
1400     for (auto& entry : m_list)
1401         out.print(comma, *entry);
1402     out.print("]");
1403 }
1404
1405 MacroAssemblerCodePtr PolymorphicAccess::regenerate(
1406     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1407     PolymorphicAccess::ListType& cases)
1408 {
1409     if (verbose)
1410         dataLog("Generating code for cases: ", listDump(cases), "\n");
1411     
1412     AccessGenerationState state;
1413
1414     state.access = this;
1415     state.stubInfo = &stubInfo;
1416     state.ident = &ident;
1417     
1418     state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1419     state.valueRegs = JSValueRegs(
1420 #if USE(JSVALUE32_64)
1421         static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
1422 #endif
1423         static_cast<GPRReg>(stubInfo.patch.valueGPR));
1424
1425     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1426     state.allocator = &allocator;
1427     allocator.lock(state.baseGPR);
1428     allocator.lock(state.valueRegs);
1429 #if USE(JSVALUE32_64)
1430     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1431 #endif
1432
1433     state.scratchGPR = allocator.allocateScratchGPR();
1434     
1435     CCallHelpers jit(&vm, codeBlock);
1436     state.jit = &jit;
1437
1438     state.preservedReusedRegisterState =
1439         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
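    // Any registers the allocator reuses beyond the IC's free set were just pushed; every exit below
    // (success, fail-and-repatch, or the exception handler) has to pop them back, which is what
    // restoreScratch() / restoreReusedRegistersByPopping() do.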
1440
1441     bool allGuardedByStructureCheck = true;
1442     bool hasJSGetterSetterCall = false;
1443     for (auto& entry : cases) {
1444         allGuardedByStructureCheck &= entry->guardedByStructureCheck();
1445         if (entry->type() == AccessCase::Getter || entry->type() == AccessCase::Setter)
1446             hasJSGetterSetterCall = true;
1447     }
1448
1449     if (cases.isEmpty()) {
1450         // This is super unlikely, but we make it legal anyway.
1451         state.failAndRepatch.append(jit.jump());
1452     } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1453         // If any case is not guarded by a structure check (for example a proxy or a megamorphic load),
1454         // we cannot just use a binary switch over the structure and must resort to a cascade. A cascade
1455         // also happens to be optimal if we have just one case.
1456         CCallHelpers::JumpList fallThrough;
1457
1458         // Cascade through the list, preferring newer entries.
1459         for (unsigned i = cases.size(); i--;) {
1460             fallThrough.link(&jit);
1461             cases[i]->generateWithGuard(state, fallThrough);
1462         }
1463         state.failAndRepatch.append(fallThrough);
1464     } else {
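        // Every case is guarded by a structure check, so load the structure ID once and dispatch with
        // a binary switch keyed on it; generate() then emits each case body without its own guard.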
1465         jit.load32(
1466             CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1467             state.scratchGPR);
1468         
1469         Vector<int64_t> caseValues(cases.size());
1470         for (unsigned i = 0; i < cases.size(); ++i)
1471             caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1472         
1473         BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1474         while (binarySwitch.advance(jit))
1475             cases[binarySwitch.caseIndex()]->generate(state);
1476         state.failAndRepatch.append(binarySwitch.fallThrough());
1477     }
1478
1479     if (!state.failAndIgnore.empty()) {
1480         state.failAndIgnore.link(&jit);
1481         
1482         // Make sure that the inline cache optimization code knows that we are taking the slow path because
1483         // of something that isn't patchable. The slow path will decrement "countdown" and will only
1484         // patch things if the countdown reaches zero. We increment the slow path count here to ensure
1485         // that the slow path does not try to patch.
1486         jit.load8(&stubInfo.countdown, state.scratchGPR);
1487         jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1488         jit.store8(state.scratchGPR, &stubInfo.countdown);
1489     }
1490
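    // The fail-and-repatch path has to undo any register reuse first; if nothing was reused, the
    // failure jumps can be linked straight to the IC's slow-path call below.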
1491     CCallHelpers::JumpList failure;
1492     if (allocator.didReuseRegisters()) {
1493         state.failAndRepatch.link(&jit);
1494         state.restoreScratch();
1495     } else
1496         failure = state.failAndRepatch;
1497     failure.append(jit.jump());
1498
1499     CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1500     CallSiteIndex callSiteIndexForExceptionHandling;
1501     if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1502         // Emit the exception handler.
1503         // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1504         // Note also that this is not reachable from a custom getter/setter. Custom getters/setters have
1505         // their own exception handling logic that doesn't go through genericUnwind.
1506         MacroAssembler::Label makeshiftCatchHandler = jit.label();
1507
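        // The makeshift handler has to rebuild the frame by hand: recover the call frame that
        // genericUnwind stashed for us, recompute the stack pointer (accounting for the bytes this stub
        // pushed for reused registers and register preservation), restore the registers spilled around
        // the getter/setter call, and then jump on to the original OSR exit handler. The link-time
        // callback below registers this label in the CodeBlock's handler table for the stub's call site.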
1508         int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1509         stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1510         stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1511
1512         jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1513         jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1514
1515         state.restoreLiveRegistersFromStackForCallWithThrownException();
1516         state.restoreScratch();
1517         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1518
1519         HandlerInfo oldHandler = state.originalExceptionHandler();
1520         CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1521         state.callbacks.append(
1522             [=] (LinkBuffer& linkBuffer) {
1523                 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1524
1525                 HandlerInfo handlerToRegister = oldHandler;
1526                 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1527                 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1528                 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1529                 codeBlock->appendExceptionHandler(handlerToRegister);
1530             });
1531
1532         // We set these so that the stub will remove itself from the CodeBlock's
1533         // exception handler table when it is deallocated.
1534         codeBlockThatOwnsExceptionHandlers = codeBlock;
1535         ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1536         callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1537     }
1538
1539     LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1540     if (linkBuffer.didFailToAllocate()) {
1541         if (verbose)
1542             dataLog("Did fail to allocate.\n");
1543         return MacroAssemblerCodePtr();
1544     }
1545
1546     CodeLocationLabel successLabel =
1547         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
1548         
1549     linkBuffer.link(state.success, successLabel);
1550
1551     linkBuffer.link(
1552         failure,
1553         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
1554     
1555     for (auto& callback : state.callbacks)
1556         callback(linkBuffer);
1557
1558     if (verbose)
1559         dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1560
1561     MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1562         codeBlock, linkBuffer,
1563         ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1564
1565     bool doesCalls = false;
1566     for (auto& entry : cases)
1567         doesCalls |= entry->doesCalls();
1568     
1569     m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, nullptr, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1570     m_watchpoints = WTFMove(state.watchpoints);
1571     if (!state.weakReferences.isEmpty())
1572         m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1573     if (verbose)
1574         dataLog("Returning: ", code.code(), "\n");
1575     return code.code();
1576 }
1577
1578 void PolymorphicAccess::aboutToDie()
1579 {
1580     m_stubRoutine->aboutToDie();
1581 }
1582
1583 } // namespace JSC
1584
1585 namespace WTF {
1586
1587 using namespace JSC;
1588
1589 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1590 {
1591     switch (kind) {
1592     case AccessGenerationResult::MadeNoChanges:
1593         out.print("MadeNoChanges");
1594         return;
1595     case AccessGenerationResult::GaveUp:
1596         out.print("GaveUp");
1597         return;
1598     case AccessGenerationResult::GeneratedNewCode:
1599         out.print("GeneratedNewCode");
1600         return;
1601     }
1602     
1603     RELEASE_ASSERT_NOT_REACHED();
1604 }
1605
1606 void printInternal(PrintStream& out, AccessCase::AccessType type)
1607 {
1608     switch (type) {
1609     case AccessCase::Load:
1610         out.print("Load");
1611         return;
1612     case AccessCase::MegamorphicLoad:
1613         out.print("MegamorphicLoad");
1614         return;
1615     case AccessCase::Transition:
1616         out.print("Transition");
1617         return;
1618     case AccessCase::Replace:
1619         out.print("Replace");
1620         return;
1621     case AccessCase::Miss:
1622         out.print("Miss");
1623         return;
1624     case AccessCase::Getter:
1625         out.print("Getter");
1626         return;
1627     case AccessCase::Setter:
1628         out.print("Setter");
1629         return;
1630     case AccessCase::CustomValueGetter:
1631         out.print("CustomValueGetter");
1632         return;
1633     case AccessCase::CustomAccessorGetter:
1634         out.print("CustomAccessorGetter");
1635         return;
1636     case AccessCase::CustomValueSetter:
1637         out.print("CustomValueSetter");
1638         return;
1639     case AccessCase::CustomAccessorSetter:
1640         out.print("CustomAccessorSetter");
1641         return;
1642     case AccessCase::IntrinsicGetter:
1643         out.print("IntrinsicGetter");
1644         return;
1645     case AccessCase::InHit:
1646         out.print("InHit");
1647         return;
1648     case AccessCase::InMiss:
1649         out.print("InMiss");
1650         return;
1651     case AccessCase::ArrayLength:
1652         out.print("ArrayLength");
1653         return;
1654     case AccessCase::StringLength:
1655         out.print("StringLength");
1656         return;
1657     }
1658
1659     RELEASE_ASSERT_NOT_REACHED();
1660 }
1661
1662 } // namespace WTF
1663
1664 #endif // ENABLE(JIT)
1665
1666