1 /*
2  * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "PolymorphicAccess.h"
28
29 #if ENABLE(JIT)
30
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "DirectArguments.h"
35 #include "GetterSetter.h"
36 #include "Heap.h"
37 #include "JITOperations.h"
38 #include "JSCInlines.h"
39 #include "LinkBuffer.h"
40 #include "ScopedArguments.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "StructureStubClearingWatchpoint.h"
43 #include "StructureStubInfo.h"
44 #include <wtf/CommaPrinter.h>
45 #include <wtf/ListDump.h>
46
47 namespace JSC {
48
49 static const bool verbose = false;
50
51 // EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiling for ARM EABI, it must start in an even-numbered register (r0 or r2) or on the stack at [sp].
52 // To keep the assembler from using the wrong registers, occupy r1 or r3 with a dummy argument when necessary.
53 #if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
54 #define EABI_32BIT_DUMMY_ARG      CCallHelpers::TrustedImm32(0),
55 #else
56 #define EABI_32BIT_DUMMY_ARG
57 #endif
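// For illustration: the 32-bit custom accessor calls in generateImpl() below pass
// EABI_32BIT_DUMMY_ARG ahead of the base cell's payload/tag pair, so that the 64-bit
// EncodedJSValue argument starts in an even-numbered register after the ExecState*.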
58
59 void AccessGenerationResult::dump(PrintStream& out) const
60 {
61     out.print(m_kind);
62     if (m_code)
63         out.print(":", m_code);
64 }
65
66 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
67 {
68     return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
69         watchpoints, jit->codeBlock(), stubInfo, condition);
70 }
71
72 void AccessGenerationState::restoreScratch()
73 {
74     allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
75 }
76
77 void AccessGenerationState::succeed()
78 {
79     restoreScratch();
80     success.append(jit->jump());
81 }
82
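// Computed once per stub: the registers live across a call made from this inline cache.
// This is the set live at the exception handling call site, plus the registers the scratch
// allocator reused, plus any 'extra' registers the caller names, minus the registers that
// never need saving across a JS call ('extra' is merged again so it is always kept). The
// preserve/restore helpers below spill and reload this set, modulo a dontRestore filter.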
83 void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling(const RegisterSet& extra)
84 {
85     if (!m_calculatedRegistersForCallAndExceptionHandling) {
86         m_calculatedRegistersForCallAndExceptionHandling = true;
87
88         m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
89         m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
90         if (m_needsToRestoreRegistersIfException)
91             RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
92
93         m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
94         m_liveRegistersForCall.merge(extra);
95         m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
96         m_liveRegistersForCall.merge(extra);
97     }
98 }
99
100 void AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra)
101 {
102     calculateLiveRegistersForCallAndExceptionHandling(extra);
103     
104     unsigned extraStackPadding = 0;
105     unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
106     if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
107         RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
108     m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
109 }
110
111 void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
112 {
113     RegisterSet dontRestore;
114     if (isGetter) {
115         // This is the result value. We don't want to overwrite the result with what we stored to the stack.
116         // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
117         dontRestore.set(valueRegs);
118     }
119     restoreLiveRegistersFromStackForCall(dontRestore);
120 }
121
122 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
123 {
124     // Even if we're a getter, we don't want to ignore the result value like we normally do
125     // because the getter threw, and therefore, didn't return a value that means anything.
126     // Instead, we want to restore that register to what it was upon entering the getter
127     // inline cache. The subtlety here is if the base and the result are the same register,
128     // and the getter threw, we want OSR exit to see the original base value, not the result
129     // of the getter call.
130     RegisterSet dontRestore = liveRegistersForCall();
131     // As an optimization here, we only need to restore what is live for exception handling.
132     // We can construct the dontRestore set to accomplish this goal by having it contain only
133     // what is live for call but not live for exception handling. By ignoring things that are
134     // only live at the call but not the exception handler, we will only restore things live
135     // at the exception handler.
136     dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
137     restoreLiveRegistersFromStackForCall(dontRestore);
138 }
139
140 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
141 {
142     unsigned extraStackPadding = 0;
143     ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
144 }
145
146 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
147 {
148     RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
149
150     if (!m_calculatedCallSiteIndex) {
151         m_calculatedCallSiteIndex = true;
152
153         if (m_needsToRestoreRegistersIfException)
154             m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
155         else
156             m_callSiteIndex = originalCallSiteIndex();
157     }
158
159     return m_callSiteIndex;
160 }
161
162 const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
163 {
164     RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
165     HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
166     RELEASE_ASSERT(exceptionHandler);
167     return *exceptionHandler;
168 }
169
170 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
171
172 void AccessGenerationState::emitExplicitExceptionHandler()
173 {
174     restoreScratch();
175     jit->copyCalleeSavesToVMCalleeSavesBuffer();
176     if (needsToRestoreRegistersIfException()) {
177         // The JIT that produced the original exception handling call site
178         // expects the OSR exit to be arrived at from genericUnwind.
179         // Therefore we must model what genericUnwind does here,
180         // i.e., set callFrameForCatch and copy the callee saves.
181
182         jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
183         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
184
185         // We don't need to insert a new exception handler in the table
186         // because we're doing a manual exception check here, i.e., we'll
187         // never arrive here from genericUnwind().
188         HandlerInfo originalHandler = originalExceptionHandler();
189         jit->addLinkTask(
190             [=] (LinkBuffer& linkBuffer) {
191                 linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
192             });
193     } else {
194         jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
195         CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
196         jit->addLinkTask(
197             [=] (LinkBuffer& linkBuffer) {
198                 linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
199             });
200         jit->jumpToExceptionHandler();
201     }
202 }
203
204 AccessCase::AccessCase()
205 {
206 }
207
208 std::unique_ptr<AccessCase> AccessCase::tryGet(
209     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
210     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
211 {
212     std::unique_ptr<AccessCase> result(new AccessCase());
213
214     result->m_type = type;
215     result->m_offset = offset;
216     result->m_structure.set(vm, owner, structure);
217     result->m_conditionSet = conditionSet;
218
219     if (viaProxy || additionalSet) {
220         result->m_rareData = std::make_unique<RareData>();
221         result->m_rareData->viaProxy = viaProxy;
222         result->m_rareData->additionalSet = additionalSet;
223     }
224
225     return result;
226 }
227
228 std::unique_ptr<AccessCase> AccessCase::get(
229     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
230     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
231     PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
232 {
233     std::unique_ptr<AccessCase> result(new AccessCase());
234
235     result->m_type = type;
236     result->m_offset = offset;
237     result->m_structure.set(vm, owner, structure);
238     result->m_conditionSet = conditionSet;
239
240     if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
241         result->m_rareData = std::make_unique<RareData>();
242         result->m_rareData->viaProxy = viaProxy;
243         result->m_rareData->additionalSet = additionalSet;
244         result->m_rareData->customAccessor.getter = customGetter;
245         result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
246     }
247
248     return result;
249 }
250
251 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
252 {
253     UNUSED_PARAM(vm);
254     UNUSED_PARAM(owner);
255     
256     if (GPRInfo::numberOfRegisters < 9)
257         return nullptr;
258     
259     std::unique_ptr<AccessCase> result(new AccessCase());
260     
261     result->m_type = MegamorphicLoad;
262     
263     return result;
264 }
265
266 std::unique_ptr<AccessCase> AccessCase::replace(
267     VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
268 {
269     std::unique_ptr<AccessCase> result(new AccessCase());
270
271     result->m_type = Replace;
272     result->m_offset = offset;
273     result->m_structure.set(vm, owner, structure);
274
275     return result;
276 }
277
278 std::unique_ptr<AccessCase> AccessCase::transition(
279     VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
280     const ObjectPropertyConditionSet& conditionSet)
281 {
282     RELEASE_ASSERT(oldStructure == newStructure->previousID());
283
284     // Skip optimizing the case where we need a realloc, if we don't have
285     // enough registers to make it happen.
286     if (GPRInfo::numberOfRegisters < 6
287         && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
288         && oldStructure->outOfLineCapacity()) {
289         return nullptr;
290     }
291
292     std::unique_ptr<AccessCase> result(new AccessCase());
293
294     result->m_type = Transition;
295     result->m_offset = offset;
296     result->m_structure.set(vm, owner, newStructure);
297     result->m_conditionSet = conditionSet;
298
299     return result;
300 }
301
302 std::unique_ptr<AccessCase> AccessCase::setter(
303     VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
304     const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
305     JSObject* customSlotBase)
306 {
307     std::unique_ptr<AccessCase> result(new AccessCase());
308
309     result->m_type = type;
310     result->m_offset = offset;
311     result->m_structure.set(vm, owner, structure);
312     result->m_conditionSet = conditionSet;
313     result->m_rareData = std::make_unique<RareData>();
314     result->m_rareData->customAccessor.setter = customSetter;
315     result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
316
317     return result;
318 }
319
320 std::unique_ptr<AccessCase> AccessCase::in(
321     VM& vm, JSCell* owner, AccessType type, Structure* structure,
322     const ObjectPropertyConditionSet& conditionSet)
323 {
324     std::unique_ptr<AccessCase> result(new AccessCase());
325
326     result->m_type = type;
327     result->m_structure.set(vm, owner, structure);
328     result->m_conditionSet = conditionSet;
329
330     return result;
331 }
332
333 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
334 {
335     std::unique_ptr<AccessCase> result(new AccessCase());
336
337     result->m_type = type;
338
339     return result;
340 }
341
342 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
343     VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
344     Structure* structure, const ObjectPropertyConditionSet& conditionSet)
345 {
346     std::unique_ptr<AccessCase> result(new AccessCase());
347
348     result->m_type = IntrinsicGetter;
349     result->m_structure.set(vm, owner, structure);
350     result->m_conditionSet = conditionSet;
351     result->m_offset = offset;
352
353     result->m_rareData = std::make_unique<RareData>();
354     result->m_rareData->intrinsicFunction.set(vm, owner, getter);
355
356     return result;
357 }
358
359 AccessCase::~AccessCase()
360 {
361 }
362
363 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
364     VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
365 {
366     switch (stubInfo.cacheType) {
367     case CacheType::GetByIdSelf:
368         return get(
369             vm, owner, Load, stubInfo.u.byIdSelf.offset,
370             stubInfo.u.byIdSelf.baseObjectStructure.get());
371
372     case CacheType::PutByIdReplace:
373         return replace(
374             vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
375
376     default:
377         return nullptr;
378     }
379 }
380
381 std::unique_ptr<AccessCase> AccessCase::clone() const
382 {
383     std::unique_ptr<AccessCase> result(new AccessCase());
384     result->m_type = m_type;
385     result->m_offset = m_offset;
386     result->m_structure = m_structure;
387     result->m_conditionSet = m_conditionSet;
388     if (RareData* rareData = m_rareData.get()) {
389         result->m_rareData = std::make_unique<RareData>();
390         result->m_rareData->viaProxy = rareData->viaProxy;
391         result->m_rareData->additionalSet = rareData->additionalSet;
392         // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
393         result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
394         result->m_rareData->customSlotBase = rareData->customSlotBase;
395         result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
396     }
397     return result;
398 }
399
400 Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
401 {
402     // It's fine to commit something that is already committed. That arises when we switch to using
403     // newly allocated watchpoints. When it happens, it's not efficient - but we think that's OK
404     // because most AccessCases have no extra watchpoints anyway.
405     RELEASE_ASSERT(m_state == Primordial || m_state == Committed);
406     
407     Vector<WatchpointSet*, 2> result;
408     
409     if ((structure() && structure()->needImpurePropertyWatchpoint())
410         || m_conditionSet.needImpurePropertyWatchpoint())
411         result.append(vm.ensureWatchpointSetForImpureProperty(ident));
412
413     if (additionalSet())
414         result.append(additionalSet());
415     
416     m_state = Committed;
417     
418     return result;
419 }
420
421 bool AccessCase::guardedByStructureCheck() const
422 {
423     if (viaProxy())
424         return false;
425
426     switch (m_type) {
427     case MegamorphicLoad:
428     case ArrayLength:
429     case StringLength:
430     case DirectArgumentsLength:
431     case ScopedArgumentsLength:
432         return false;
433     default:
434         return true;
435     }
436 }
437
438 JSObject* AccessCase::alternateBase() const
439 {
440     if (customSlotBase())
441         return customSlotBase();
442     return conditionSet().slotBaseCondition().object();
443 }
444
445 bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
446 {
447     switch (type()) {
448     case Getter:
449     case Setter:
450     case CustomValueGetter:
451     case CustomAccessorGetter:
452     case CustomValueSetter:
453     case CustomAccessorSetter:
454         return true;
455     case Transition:
456         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
457             && structure()->couldHaveIndexingHeader()) {
458             if (cellsToMark)
459                 cellsToMark->append(newStructure());
460             return true;
461         }
462         return false;
463     default:
464         return false;
465     }
466 }
467
468 bool AccessCase::couldStillSucceed() const
469 {
470     return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
471 }
472
473 bool AccessCase::canBeReplacedByMegamorphicLoad() const
474 {
475     if (type() == MegamorphicLoad)
476         return true;
477     
478     return type() == Load
479         && !viaProxy()
480         && conditionSet().isEmpty()
481         && !additionalSet()
482         && !customSlotBase();
483 }
484
485 bool AccessCase::canReplace(const AccessCase& other) const
486 {
487     // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
488     // It's fine for this to return false if it's in doubt.
489
490     switch (type()) {
491     case MegamorphicLoad:
492         return other.canBeReplacedByMegamorphicLoad();
493     case ArrayLength:
494     case StringLength:
495     case DirectArgumentsLength:
496     case ScopedArgumentsLength:
497         return other.type() == type();
498     default:
499         if (!guardedByStructureCheck() || !other.guardedByStructureCheck())
500             return false;
501         
502         return structure() == other.structure();
503     }
504 }
505
506 void AccessCase::dump(PrintStream& out) const
507 {
508     out.print(m_type, ":(");
509
510     CommaPrinter comma;
511     
512     out.print(comma, m_state);
513
514     if (m_type == Transition)
515         out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
516     else if (m_structure)
517         out.print(comma, "structure = ", pointerDump(m_structure.get()));
518
519     if (isValidOffset(m_offset))
520         out.print(comma, "offset = ", m_offset);
521     if (!m_conditionSet.isEmpty())
522         out.print(comma, "conditions = ", m_conditionSet);
523
524     if (RareData* rareData = m_rareData.get()) {
525         if (rareData->viaProxy)
526             out.print(comma, "viaProxy = ", rareData->viaProxy);
527         if (rareData->additionalSet)
528             out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
529         if (rareData->callLinkInfo)
530             out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
531         if (rareData->customAccessor.opaque)
532             out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
533         if (rareData->customSlotBase)
534             out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
535     }
536
537     out.print(")");
538 }
539
540 bool AccessCase::visitWeak(VM& vm) const
541 {
542     if (m_structure && !Heap::isMarked(m_structure.get()))
543         return false;
544     if (!m_conditionSet.areStillLive())
545         return false;
546     if (m_rareData) {
547         if (m_rareData->callLinkInfo)
548             m_rareData->callLinkInfo->visitWeak(vm);
549         if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
550             return false;
551         if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
552             return false;
553     }
554     return true;
555 }
556
557 bool AccessCase::propagateTransitions(SlotVisitor& visitor) const
558 {
559     bool result = true;
560     
561     if (m_structure)
562         result &= m_structure->markIfCheap(visitor);
563     
564     switch (m_type) {
565     case Transition:
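        // For a Transition case, m_structure is the new structure. Mark it (keeping the
        // transition alive) only while the old structure, previousID(), is still marked;
        // otherwise report that this case's transition could not be propagated.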
566         if (Heap::isMarked(m_structure->previousID()))
567             visitor.appendUnbarrieredReadOnlyPointer(m_structure.get());
568         else
569             result = false;
570         break;
571     default:
572         break;
573     }
574     
575     return result;
576 }
577
578 void AccessCase::generateWithGuard(
579     AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
580 {
581     SuperSamplerScope superSamplerScope(false);
582
583     RELEASE_ASSERT(m_state == Committed);
584     m_state = Generated;
585     
586     CCallHelpers& jit = *state.jit;
587     VM& vm = *jit.vm();
588     const Identifier& ident = *state.ident;
589     StructureStubInfo& stubInfo = *state.stubInfo;
590     JSValueRegs valueRegs = state.valueRegs;
591     GPRReg baseGPR = state.baseGPR;
592     GPRReg scratchGPR = state.scratchGPR;
593     
594     UNUSED_PARAM(vm);
595
596     switch (m_type) {
597     case ArrayLength: {
598         ASSERT(!viaProxy());
599         jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
600         fallThrough.append(
601             jit.branchTest32(
602                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
603         fallThrough.append(
604             jit.branchTest32(
605                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
606         break;
607     }
608
609     case StringLength: {
610         ASSERT(!viaProxy());
611         fallThrough.append(
612             jit.branch8(
613                 CCallHelpers::NotEqual,
614                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
615                 CCallHelpers::TrustedImm32(StringType)));
616         break;
617     }
618         
619     case DirectArgumentsLength: {
620         ASSERT(!viaProxy());
621         fallThrough.append(
622             jit.branch8(
623                 CCallHelpers::NotEqual,
624                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
625                 CCallHelpers::TrustedImm32(DirectArgumentsType)));
626
627         fallThrough.append(
628             jit.branchTestPtr(
629                 CCallHelpers::NonZero,
630                 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
631         jit.load32(
632             CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
633             valueRegs.payloadGPR());
634         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
635         state.succeed();
636         return;
637     }
638         
639     case ScopedArgumentsLength: {
640         ASSERT(!viaProxy());
641         fallThrough.append(
642             jit.branch8(
643                 CCallHelpers::NotEqual,
644                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
645                 CCallHelpers::TrustedImm32(ScopedArgumentsType)));
646
647         fallThrough.append(
648             jit.branchTest8(
649                 CCallHelpers::NonZero,
650                 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
651         jit.load32(
652             CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
653             valueRegs.payloadGPR());
654         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
655         state.succeed();
656         return;
657     }
658         
659     case MegamorphicLoad: {
660         UniquedStringImpl* key = ident.impl();
661         unsigned hash = IdentifierRepHash::hash(key);
662         
663         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
664         allocator.lock(baseGPR);
665 #if USE(JSVALUE32_64)
666         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
667 #endif
668         allocator.lock(valueRegs);
669         allocator.lock(scratchGPR);
670         
671         GPRReg intermediateGPR = scratchGPR;
672         GPRReg maskGPR = allocator.allocateScratchGPR();
673         GPRReg maskedHashGPR = allocator.allocateScratchGPR();
674         GPRReg indexGPR = allocator.allocateScratchGPR();
675         GPRReg offsetGPR = allocator.allocateScratchGPR();
676         
677         if (verbose) {
678             dataLog("baseGPR = ", baseGPR, "\n");
679             dataLog("valueRegs = ", valueRegs, "\n");
680             dataLog("scratchGPR = ", scratchGPR, "\n");
681             dataLog("intermediateGPR = ", intermediateGPR, "\n");
682             dataLog("maskGPR = ", maskGPR, "\n");
683             dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
684             dataLog("indexGPR = ", indexGPR, "\n");
685             dataLog("offsetGPR = ", offsetGPR, "\n");
686         }
687
688         ScratchRegisterAllocator::PreservedState preservedState =
689             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
690
691         CCallHelpers::JumpList myFailAndIgnore;
692         CCallHelpers::JumpList myFallThrough;
693         
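        // Load the Structure (decoding the structure ID where necessary) and then its
        // property table. A null table means we cannot probe, so we take the failAndIgnore
        // path, which effectively falls back to the slow path for this access without
        // giving up on the cache.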
694         jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
695         jit.loadPtr(
696             CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
697             intermediateGPR);
698         
699         myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
700         
701         jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
702         jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
703         jit.load32(
704             CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
705             intermediateGPR);
706
707         jit.move(maskGPR, maskedHashGPR);
708         jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
709         jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
710         jit.addPtr(indexGPR, intermediateGPR);
711         
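        // Probe loop: start at (hash & mask) in the index vector. EmptyEntryIndex means the
        // property is absent, so fall through. Otherwise the stored value is entryIndex + 1:
        // subtract 1, scale by sizeof(PropertyMapEntry), and add the entry table base
        // (computed above as index + indexSize * 4). On a key mismatch, advance one slot
        // (linear probing) and re-mask.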
712         CCallHelpers::Label loop = jit.label();
713         
714         jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
715         
716         myFallThrough.append(
717             jit.branch32(
718                 CCallHelpers::Equal,
719                 offsetGPR,
720                 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
721         
722         jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
723         jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
724         jit.addPtr(intermediateGPR, offsetGPR);
725         
726         CCallHelpers::Jump collision = jit.branchPtr(
727             CCallHelpers::NotEqual,
728             CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
729             CCallHelpers::TrustedImmPtr(key));
730         
731         // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
732         // Check them and then attempt the load.
733         
734         myFallThrough.append(
735             jit.branchTest32(
736                 CCallHelpers::NonZero,
737                 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
738                 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
739         
740         jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
741         
742         jit.loadProperty(baseGPR, offsetGPR, valueRegs);
743         
744         allocator.restoreReusedRegistersByPopping(jit, preservedState);
745         state.succeed();
746         
747         collision.link(&jit);
748
749         jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
750         
751         // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
752         // around isn't super common so we could, for example, recompute the mask from the difference between
753         // the table and index. But before we do that we should probably make it easier to multiply and
754         // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
755         // to have a power-of-2 size.
756         jit.and32(maskGPR, maskedHashGPR);
757         jit.jump().linkTo(loop, &jit);
758         
759         if (allocator.didReuseRegisters()) {
760             myFailAndIgnore.link(&jit);
761             allocator.restoreReusedRegistersByPopping(jit, preservedState);
762             state.failAndIgnore.append(jit.jump());
763             
764             myFallThrough.link(&jit);
765             allocator.restoreReusedRegistersByPopping(jit, preservedState);
766             fallThrough.append(jit.jump());
767         } else {
768             state.failAndIgnore.append(myFailAndIgnore);
769             fallThrough.append(myFallThrough);
770         }
771         return;
772     }
773
774     default: {
775         if (viaProxy()) {
776             fallThrough.append(
777                 jit.branch8(
778                     CCallHelpers::NotEqual,
779                     CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
780                     CCallHelpers::TrustedImm32(PureForwardingProxyType)));
781
782             jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
783
784             fallThrough.append(
785                 jit.branchStructure(
786                     CCallHelpers::NotEqual,
787                     CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
788                     structure()));
789         } else {
790             fallThrough.append(
791                 jit.branchStructure(
792                     CCallHelpers::NotEqual,
793                     CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
794                     structure()));
795         }
796         break;
797     } };
798
799     generateImpl(state);
800 }
801
802 void AccessCase::generate(AccessGenerationState& state)
803 {
804     RELEASE_ASSERT(m_state == Committed);
805     m_state = Generated;
806     
807     generateImpl(state);
808 }
809
810 void AccessCase::generateImpl(AccessGenerationState& state)
811 {
812     SuperSamplerScope superSamplerScope(false);
813     if (verbose)
814         dataLog("Generating code for: ", *this, "\n");
815     
816     ASSERT(m_state == Generated); // We rely on the callers setting this for us.
817     
818     CCallHelpers& jit = *state.jit;
819     VM& vm = *jit.vm();
820     CodeBlock* codeBlock = jit.codeBlock();
821     StructureStubInfo& stubInfo = *state.stubInfo;
822     const Identifier& ident = *state.ident;
823     JSValueRegs valueRegs = state.valueRegs;
824     GPRReg baseGPR = state.baseGPR;
825     GPRReg scratchGPR = state.scratchGPR;
826
827     ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
828
829     for (const ObjectPropertyCondition& condition : m_conditionSet) {
830         Structure* structure = condition.object()->structure();
831
832         if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
833             structure->addTransitionWatchpoint(state.addWatchpoint(condition));
834             continue;
835         }
836
837         if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
838             // The reason why this cannot happen is that we require that PolymorphicAccess calls
839             // AccessCase::generate() only after it has verified that
840             // AccessCase::couldStillSucceed() returned true.
841             
842             dataLog("This condition is no longer met: ", condition, "\n");
843             RELEASE_ASSERT_NOT_REACHED();
844         }
845
846         // We will emit code that has a weak reference that isn't otherwise listed anywhere.
847         state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
848         
849         jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
850         state.failAndRepatch.append(
851             jit.branchStructure(
852                 CCallHelpers::NotEqual,
853                 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
854                 structure));
855     }
856
857     switch (m_type) {
858     case InHit:
859     case InMiss:
860         jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
861         state.succeed();
862         return;
863
864     case Miss:
865         jit.moveTrustedValue(jsUndefined(), valueRegs);
866         state.succeed();
867         return;
868
869     case Load:
870     case GetGetter:
871     case Getter:
872     case Setter:
873     case CustomValueGetter:
874     case CustomAccessorGetter:
875     case CustomValueSetter:
876     case CustomAccessorSetter: {
877         if (isValidOffset(m_offset)) {
878             Structure* currStructure;
879             if (m_conditionSet.isEmpty())
880                 currStructure = structure();
881             else
882                 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
883             currStructure->startWatchingPropertyForReplacements(vm, offset());
884         }
885
886         GPRReg baseForGetGPR;
887         if (viaProxy()) {
888             baseForGetGPR = valueRegs.payloadGPR();
889             jit.loadPtr(
890                 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
891                 baseForGetGPR);
892         } else
893             baseForGetGPR = baseGPR;
894
895         GPRReg baseForAccessGPR;
896         if (!m_conditionSet.isEmpty()) {
897             jit.move(
898                 CCallHelpers::TrustedImmPtr(alternateBase()),
899                 scratchGPR);
900             baseForAccessGPR = scratchGPR;
901         } else
902             baseForAccessGPR = baseForGetGPR;
903
904         GPRReg loadedValueGPR = InvalidGPRReg;
905         if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
906             if (m_type == Load || m_type == GetGetter)
907                 loadedValueGPR = valueRegs.payloadGPR();
908             else
909                 loadedValueGPR = scratchGPR;
910
911             GPRReg storageGPR;
912             if (isInlineOffset(m_offset))
913                 storageGPR = baseForAccessGPR;
914             else {
915                 jit.loadPtr(
916                     CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
917                     loadedValueGPR);
918                 storageGPR = loadedValueGPR;
919             }
920
921 #if USE(JSVALUE64)
922             jit.load64(
923                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
924 #else
925             if (m_type == Load || m_type == GetGetter) {
926                 jit.load32(
927                     CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
928                     valueRegs.tagGPR());
929             }
930             jit.load32(
931                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
932                 loadedValueGPR);
933 #endif
934         }
935
936         if (m_type == Load || m_type == GetGetter) {
937             state.succeed();
938             return;
939         }
940
941         // Stuff for custom getters/setters.
942         CCallHelpers::Call operationCall;
943
944         // Stuff for JS getters/setters.
945         CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
946         CCallHelpers::Call fastPathCall;
947         CCallHelpers::Call slowPathCall;
948
949         CCallHelpers::Jump success;
950         CCallHelpers::Jump fail;
951
952         // This also does the necessary calculations of whether or not we're an
953         // exception handling call site.
954         state.preserveLiveRegistersToStackForCall();
955
956         jit.store32(
957             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
958             CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
959
960         if (m_type == Getter || m_type == Setter) {
961             // Create a JS call using a JS call inline cache. Assume that:
962             //
963             // - SP is aligned and represents the extent of the calling compiler's stack usage.
964             //
965             // - FP is set correctly (i.e. it points to the caller's call frame header).
966             //
967             // - SP - FP is an aligned difference.
968             //
969             // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
970             //   code.
971             //
972             // Therefore, we temporarily grow the stack for the purpose of the call and then
973             // shrink it after.
974
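            // A sketch of what the code below sets up (a summary, not extra behavior): the
            // callee frame needs CallFrameHeaderSize + numberOfParameters slots; its size,
            // minus the CallerFrameAndPC slots that the call machinery itself accounts for,
            // is rounded up to the stack alignment and subtracted from SP. ArgumentCount,
            // the callee (the getter or setter loaded from the GetterSetter), 'this', and,
            // for a setter, the value being stored are then written into the nascent frame
            // relative to the new SP.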
975             RELEASE_ASSERT(!m_rareData->callLinkInfo);
976             m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
977             
978             // FIXME: If we generated a polymorphic call stub that jumped back to the getter
979             // stub, which then jumped back to the main code, then we'd have a reachability
980             // situation that the GC doesn't know about. The GC would ensure that the polymorphic
981             // call stub stayed alive, and it would ensure that the main code stayed alive, but
982             // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
983             // be GC objects, and then we'd be able to say that the polymorphic call stub has a
984             // reference to the getter stub.
985             // https://bugs.webkit.org/show_bug.cgi?id=148914
986             m_rareData->callLinkInfo->disallowStubs();
987             
988             m_rareData->callLinkInfo->setUpCall(
989                 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
990
991             CCallHelpers::JumpList done;
992
993             // There is a "this" argument.
994             unsigned numberOfParameters = 1;
995             // ... and a value argument if we're calling a setter.
996             if (m_type == Setter)
997                 numberOfParameters++;
998
999             // Get the accessor; if there ain't one then the result is jsUndefined().
1000             if (m_type == Setter) {
1001                 jit.loadPtr(
1002                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
1003                     loadedValueGPR);
1004             } else {
1005                 jit.loadPtr(
1006                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
1007                     loadedValueGPR);
1008             }
1009
1010             CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
1011                 CCallHelpers::Zero, loadedValueGPR);
1012
1013             unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
1014
1015             unsigned numberOfBytesForCall =
1016                 numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
1017
1018             unsigned alignedNumberOfBytesForCall =
1019                 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
1020
1021             jit.subPtr(
1022                 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
1023                 CCallHelpers::stackPointerRegister);
1024
1025             CCallHelpers::Address calleeFrame = CCallHelpers::Address(
1026                 CCallHelpers::stackPointerRegister,
1027                 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
1028
1029             jit.store32(
1030                 CCallHelpers::TrustedImm32(numberOfParameters),
1031                 calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
1032
1033             jit.storeCell(
1034                 loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
1035
1036             jit.storeCell(
1037                 baseForGetGPR,
1038                 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
1039
1040             if (m_type == Setter) {
1041                 jit.storeValue(
1042                     valueRegs,
1043                     calleeFrame.withOffset(
1044                         virtualRegisterForArgument(1).offset() * sizeof(Register)));
1045             }
1046
1047             CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
1048                 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
1049                 CCallHelpers::TrustedImmPtr(0));
1050
1051             fastPathCall = jit.nearCall();
1052             if (m_type == Getter)
1053                 jit.setupResults(valueRegs);
1054             done.append(jit.jump());
1055
1056             slowCase.link(&jit);
1057             jit.move(loadedValueGPR, GPRInfo::regT0);
1058 #if USE(JSVALUE32_64)
1059             // We *always* know that the getter/setter, if non-null, is a cell.
1060             jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
1061 #endif
1062             jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
1063             slowPathCall = jit.nearCall();
1064             if (m_type == Getter)
1065                 jit.setupResults(valueRegs);
1066             done.append(jit.jump());
1067
1068             returnUndefined.link(&jit);
1069             if (m_type == Getter)
1070                 jit.moveTrustedValue(jsUndefined(), valueRegs);
1071
1072             done.link(&jit);
1073
1074             jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
1075                 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1076             state.restoreLiveRegistersFromStackForCall(isGetter());
1077
1078             jit.addLinkTask(
1079                 [=, &vm] (LinkBuffer& linkBuffer) {
1080                     m_rareData->callLinkInfo->setCallLocations(
1081                         linkBuffer.locationOfNearCall(slowPathCall),
1082                         linkBuffer.locationOf(addressOfLinkFunctionCheck),
1083                         linkBuffer.locationOfNearCall(fastPathCall));
1084
1085                     linkBuffer.link(
1086                         slowPathCall,
1087                         CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
1088                 });
1089         } else {
1090             // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
1091             // hard to track if someone did spillage or not, so we just assume that we always need
1092             // to make some space here.
1093             jit.makeSpaceOnStackForCCall();
1094
1095             // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
1096             // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
1097             // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (receiver).
1098             GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
1099 #if USE(JSVALUE64)
1100             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1101                 jit.setupArgumentsWithExecState(
1102                     baseForCustomValue,
1103                     CCallHelpers::TrustedImmPtr(ident.impl()));
1104             } else
1105                 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
1106 #else
1107             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1108                 jit.setupArgumentsWithExecState(
1109                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1110                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1111                     CCallHelpers::TrustedImmPtr(ident.impl()));
1112             } else {
1113                 jit.setupArgumentsWithExecState(
1114                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1115                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1116                     valueRegs.payloadGPR(), valueRegs.tagGPR());
1117             }
1118 #endif
1119             jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
1120
1121             operationCall = jit.call();
1122             jit.addLinkTask(
1123                 [=] (LinkBuffer& linkBuffer) {
1124                     linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
1125                 });
1126
1127             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
1128                 jit.setupResults(valueRegs);
1129             jit.reclaimSpaceOnStackForCCall();
1130
1131             CCallHelpers::Jump noException =
1132                 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1133
1134             state.restoreLiveRegistersFromStackForCallWithThrownException();
1135             state.emitExplicitExceptionHandler();
1136         
1137             noException.link(&jit);
1138             state.restoreLiveRegistersFromStackForCall(isGetter());
1139         }
1140         state.succeed();
1141         return;
1142     }
1143
1144     case Replace: {
1145         if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
1146             if (verbose)
1147                 dataLog("Have type: ", type->descriptor(), "\n");
1148             state.failAndRepatch.append(
1149                 jit.branchIfNotType(
1150                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::HaveTagRegisters));
1151         } else if (verbose)
1152             dataLog("Don't have type.\n");
1153         
1154         if (isInlineOffset(m_offset)) {
1155             jit.storeValue(
1156                 valueRegs,
1157                 CCallHelpers::Address(
1158                     baseGPR,
1159                     JSObject::offsetOfInlineStorage() +
1160                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1161         } else {
1162             jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1163             jit.storeValue(
1164                 valueRegs,
1165                 CCallHelpers::Address(
1166                     scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1167         }
1168         state.succeed();
1169         return;
1170     }
1171
1172     case Transition: {
1173         // AccessCase::transition() should have returned null if this wasn't true.
1174         RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1175
1176         if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1177             if (verbose)
1178                 dataLog("Have type: ", type->descriptor(), "\n");
1179             state.failAndRepatch.append(
1180                 jit.branchIfNotType(
1181                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::HaveTagRegisters));
1182         } else if (verbose)
1183             dataLog("Don't have type.\n");
1184         
1185         // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
1186         // exactly when this would make calls.
1187         bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
1188         bool reallocating = allocating && structure()->outOfLineCapacity();
1189         bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
1190
1191         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1192         allocator.lock(baseGPR);
1193 #if USE(JSVALUE32_64)
1194         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1195 #endif
1196         allocator.lock(valueRegs);
1197         allocator.lock(scratchGPR);
1198
1199         GPRReg scratchGPR2 = InvalidGPRReg;
1200         GPRReg scratchGPR3 = InvalidGPRReg;
1201         if (allocatingInline) {
1202             scratchGPR2 = allocator.allocateScratchGPR();
1203             scratchGPR3 = allocator.allocateScratchGPR();
1204         }
1205
1206         ScratchRegisterAllocator::PreservedState preservedState =
1207             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1208         
1209         CCallHelpers::JumpList slowPath;
1210
1211         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1212
1213         if (allocating) {
1214             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1215             
1216             if (allocatingInline) {
1217                 CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
1218
1219                 if (!reallocating) {
1220                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1221                     slowPath.append(
1222                         jit.branchSubPtr(
1223                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1224                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1225                     jit.negPtr(scratchGPR);
1226                     jit.addPtr(
1227                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1228                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1229                 } else {
1230                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1231                     // already had out-of-line property storage).
1232                     size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1233                     ASSERT(newSize > oldSize);
1234             
1235                     jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1236                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1237                     slowPath.append(
1238                         jit.branchSubPtr(
1239                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1240                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1241                     jit.negPtr(scratchGPR);
1242                     jit.addPtr(
1243                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1244                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1245                     // We have scratchGPR = new storage, scratchGPR3 = old storage,
1246                     // scratchGPR2 = available
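                    // Copy the existing out-of-line slots into the new allocation. Out-of-line
                    // properties live at negative offsets from the butterfly pointer, which is
                    // why both the loads and the stores index backwards from the storage pointers.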
1247                     for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1248                         jit.loadPtr(
1249                             CCallHelpers::Address(
1250                                 scratchGPR3,
1251                                 -static_cast<ptrdiff_t>(
1252                                     offset + sizeof(JSValue) + sizeof(void*))),
1253                             scratchGPR2);
1254                         jit.storePtr(
1255                             scratchGPR2,
1256                             CCallHelpers::Address(
1257                                 scratchGPR,
1258                                 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1259                     }
1260                 }
1261             } else {
1262                 // Handle the case where we are allocating out-of-line using an operation.
1263                 RegisterSet extraRegistersToPreserve;
1264                 extraRegistersToPreserve.set(baseGPR);
1265                 extraRegistersToPreserve.set(valueRegs);
1266                 state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1267                 
1268                 jit.store32(
1269                     CCallHelpers::TrustedImm32(
1270                         state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1271                     CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
1272                 
1273                 jit.makeSpaceOnStackForCCall();
1274                 
1275                 if (!reallocating) {
1276                     jit.setupArgumentsWithExecState(baseGPR);
1277                     
1278                     CCallHelpers::Call operationCall = jit.call();
1279                     jit.addLinkTask(
1280                         [=] (LinkBuffer& linkBuffer) {
1281                             linkBuffer.link(
1282                                 operationCall,
1283                                 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1284                         });
1285                 } else {
1286                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1287                     // already had out-of-line property storage).
1288                     jit.setupArgumentsWithExecState(
1289                         baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1290                     
1291                     CCallHelpers::Call operationCall = jit.call();
1292                     jit.addLinkTask(
1293                         [=] (LinkBuffer& linkBuffer) {
1294                             linkBuffer.link(
1295                                 operationCall,
1296                                 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1297                         });
1298                 }
1299                 
1300                 jit.reclaimSpaceOnStackForCCall();
1301                 jit.move(GPRInfo::returnValueGPR, scratchGPR);
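                // The operation returns the (re)allocated butterfly; stash it in scratchGPR so the
                // property store below can use it.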
1302                 
1303                 CCallHelpers::Jump noException =
1304                     jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1305                 
1306                 state.restoreLiveRegistersFromStackForCallWithThrownException();
1307                 state.emitExplicitExceptionHandler();
1308                 
1309                 noException.link(&jit);
1310                 state.restoreLiveRegistersFromStackForCall();
1311             }
1312         }
1313
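        // If we allocated, scratchGPR now holds the new butterfly regardless of which path we took.
        // If we did not allocate and the property is out-of-line, we still need to load the
        // existing butterfly below.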
1314         if (isInlineOffset(m_offset)) {
1315             jit.storeValue(
1316                 valueRegs,
1317                 CCallHelpers::Address(
1318                     baseGPR,
1319                     JSObject::offsetOfInlineStorage() +
1320                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1321         } else {
1322             if (!allocating)
1323                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1324             jit.storeValue(
1325                 valueRegs,
1326                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1327         }
1328         
1329         // If we had allocated using an operation then we would have already executed the store
1330         // barrier and we would have already stored the butterfly into the object.
1331         if (allocatingInline) {
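            // Inline write barrier: if the object is already remembered or is in eden, there is
            // nothing to do. Otherwise append it to the VM's write barrier buffer, bumping the
            // index; if the buffer is full, take the slow path instead.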
1332             CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
1333             WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1334             jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1335             slowPath.append(
1336                 jit.branch32(
1337                     CCallHelpers::AboveOrEqual, scratchGPR2,
1338                     CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
1339             
1340             jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1341             jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1342             
1343             jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
1344             // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
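            // In other words, if the buffer index was i before the bump, scratchGPR2 now holds
            // i + 1, and buffer + (i + 1) * sizeof(void*) - sizeof(void*) is exactly &buffer[i].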
1345             jit.storePtr(
1346                 baseGPR,
1347                 CCallHelpers::BaseIndex(
1348                     scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
1349                     static_cast<int32_t>(-sizeof(void*))));
1350             ownerIsRememberedOrInEden.link(&jit);
1351             
1352             // We set the new butterfly and the structure last. Doing it this way ensures that
1353             // whatever we have done up to this point is forgotten if we end up branching to the
1354             // slow path.
1355             
1356             jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1357         }
1358         
1359         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1360         jit.store32(
1361             CCallHelpers::TrustedImm32(structureBits),
1362             CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1363
1364         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1365         state.succeed();
1366         
1367         // We will have a slow path if we were allocating without the help of an operation.
1368         if (allocatingInline) {
1369             if (allocator.didReuseRegisters()) {
1370                 slowPath.link(&jit);
1371                 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1372                 state.failAndIgnore.append(jit.jump());
1373             } else
1374                 state.failAndIgnore.append(slowPath);
1375         } else
1376             RELEASE_ASSERT(slowPath.empty());
1377         return;
1378     }
1379
1380     case ArrayLength: {
1381         jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1382         jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
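        // The length is a uint32_t; if its sign bit is set it cannot be boxed as an int32, so give
        // up on this access (failAndIgnore) rather than produce a bogus value.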
1383         state.failAndIgnore.append(
1384             jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1385         jit.boxInt32(scratchGPR, valueRegs);
1386         state.succeed();
1387         return;
1388     }
1389
1390     case StringLength: {
1391         jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1392         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
1393         state.succeed();
1394         return;
1395     }
1396         
1397     case IntrinsicGetter: {
1398         RELEASE_ASSERT(isValidOffset(offset()));
1399
1400         // We need to ensure the getter value does not move from under us. Note that GetterSetters
1401         // are immutable, so we just need to watch the property, not any value inside it.
1402         Structure* currStructure;
1403         if (m_conditionSet.isEmpty())
1404             currStructure = structure();
1405         else
1406             currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1407         currStructure->startWatchingPropertyForReplacements(vm, offset());
1408
1409         emitIntrinsicGetter(state);
1410         return;
1411     }
1412
1413     case DirectArgumentsLength:
1414     case ScopedArgumentsLength:
1415     case MegamorphicLoad:
1416         // These need to be handled by generateWithGuard(), since the guard is part of the
1417         // algorithm. We can be sure that nobody will call generate() directly for these since they
1418         // are not guarded by structure checks.
1419         RELEASE_ASSERT_NOT_REACHED();
1420     }
1421     
1422     RELEASE_ASSERT_NOT_REACHED();
1423 }
1424
1425 PolymorphicAccess::PolymorphicAccess() { }
1426 PolymorphicAccess::~PolymorphicAccess() { }
1427
1428 AccessGenerationResult PolymorphicAccess::addCases(
1429     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1430     Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
1431 {
1432     SuperSamplerScope superSamplerScope(false);
1433     
1434     // This method will add the originalCasesToAdd to the list one at a time while preserving the
1435     // invariants:
1436     // - If a newly added case canReplace() any existing case, then the existing case is removed before
1437     //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
1438     //   can be removed via the canReplace() rule.
1439     // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1440     //   cascade through the cases in reverse order, you will get the most recent cases first.
1441     // - If this method fails (i.e. returns without adding the cases), then both the previous case
1442     //   list and the previous stub are kept intact and the new cases are destroyed. It's OK to
1443     //   attempt to add more things after failure.
1444     
1445     // First ensure that the originalCasesToAdd doesn't contain duplicates.
1446     Vector<std::unique_ptr<AccessCase>> casesToAdd;
1447     for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1448         std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1449
1450         // Add it only if it is not replaced by a subsequent case in the list.
1451         bool found = false;
1452         for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1453             if (originalCasesToAdd[j]->canReplace(*myCase)) {
1454                 found = true;
1455                 break;
1456             }
1457         }
1458
1459         if (found)
1460             continue;
1461         
1462         casesToAdd.append(WTFMove(myCase));
1463     }
1464
1465     if (verbose)
1466         dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1467
1468     // If there aren't any cases to add, then fail on the grounds that there's no point in generating
1469     // a new stub that will be identical to the old one. Returning MadeNoChanges tells the caller to
1470     // just keep doing what they were doing before.
1471     if (casesToAdd.isEmpty())
1472         return AccessGenerationResult::MadeNoChanges;
1473
1474     // Now add things to the new list. Note that at this point, we will still have old cases that
1475     // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
1476     for (auto& caseToAdd : casesToAdd) {
1477         commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
1478         m_list.append(WTFMove(caseToAdd));
1479     }
1480     
1481     if (verbose)
1482         dataLog("After addCases: m_list: ", listDump(m_list), "\n");
1483
1484     return AccessGenerationResult::Buffered;
1485 }
1486
1487 AccessGenerationResult PolymorphicAccess::addCase(
1488     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1489     std::unique_ptr<AccessCase> newAccess)
1490 {
1491     Vector<std::unique_ptr<AccessCase>> newAccesses;
1492     newAccesses.append(WTFMove(newAccess));
1493     return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1494 }
1495
1496 bool PolymorphicAccess::visitWeak(VM& vm) const
1497 {
1498     for (unsigned i = 0; i < size(); ++i) {
1499         if (!at(i).visitWeak(vm))
1500             return false;
1501     }
1502     if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1503         for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1504             if (!Heap::isMarked(weakReference.get()))
1505                 return false;
1506         }
1507     }
1508     return true;
1509 }
1510
1511 bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
1512 {
1513     bool result = true;
1514     for (unsigned i = 0; i < size(); ++i)
1515         result &= at(i).propagateTransitions(visitor);
1516     return result;
1517 }
1518
1519 void PolymorphicAccess::dump(PrintStream& out) const
1520 {
1521     out.print(RawPointer(this), ":[");
1522     CommaPrinter comma;
1523     for (auto& entry : m_list)
1524         out.print(comma, *entry);
1525     out.print("]");
1526 }
1527
1528 void PolymorphicAccess::commit(
1529     VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
1530     StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
1531 {
1532     // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
1533     // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
1534     // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
1535     // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
1536     // Those common kinds of JSC object accesses don't hit this case.
1537     
1538     for (WatchpointSet* set : accessCase.commit(vm, ident)) {
1539         Watchpoint* watchpoint =
1540             WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
1541                 watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
1542         
1543         set->add(watchpoint);
1544     }
1545 }
1546
1547 AccessGenerationResult PolymorphicAccess::regenerate(
1548     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
1549 {
1550     SuperSamplerScope superSamplerScope(false);
1551     
1552     if (verbose)
1553         dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
1554     
1555     AccessGenerationState state;
1556
1557     state.access = this;
1558     state.stubInfo = &stubInfo;
1559     state.ident = &ident;
1560     
1561     state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1562     state.valueRegs = JSValueRegs(
1563 #if USE(JSVALUE32_64)
1564         static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
1565 #endif
1566         static_cast<GPRReg>(stubInfo.patch.valueGPR));
1567
1568     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1569     state.allocator = &allocator;
1570     allocator.lock(state.baseGPR);
1571     allocator.lock(state.valueRegs);
1572 #if USE(JSVALUE32_64)
1573     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1574 #endif
1575
1576     state.scratchGPR = allocator.allocateScratchGPR();
1577     
1578     CCallHelpers jit(&vm, codeBlock);
1579     state.jit = &jit;
1580
1581     state.preservedReusedRegisterState =
1582         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
1583
1584     // Regenerating is our opportunity to figure out what our list of cases should look like. We
1585     // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
1586     // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
1587     // to be left unchanged. In particular, we want it to hang onto any data structures that may be
1588     // referenced from the code of the current (i.e. previous) stub.
1589     ListType cases;
1590     unsigned srcIndex = 0;
1591     unsigned dstIndex = 0;
1592     while (srcIndex < m_list.size()) {
1593         std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);
1594         
1595         // If the case had been generated, then we have to keep the original in m_list in case we
1596         // fail to regenerate. That case may have data structures that are used by the code that it
1597         // had generated. If the case had not been generated, then we want to remove it from m_list.
1598         bool isGenerated = someCase->state() == AccessCase::Generated;
1599         
1600         [&] () {
1601             if (!someCase->couldStillSucceed())
1602                 return;
1603
1604             // Figure out if this is replaced by any later case.
1605             for (unsigned j = srcIndex; j < m_list.size(); ++j) {
1606                 if (m_list[j]->canReplace(*someCase))
1607                     return;
1608             }
1609             
1610             if (isGenerated)
1611                 cases.append(someCase->clone());
1612             else
1613                 cases.append(WTFMove(someCase));
1614         }();
1615         
1616         if (isGenerated)
1617             m_list[dstIndex++] = WTFMove(someCase);
1618     }
1619     m_list.resize(dstIndex);
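    // At this point m_list retains only the cases that had already been generated (in their
    // original order), since the existing stub's code may still reference their data structures,
    // while 'cases' holds everything we intend to compile into the new stub.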
1620     
1621     if (verbose)
1622         dataLog("In regenerate: cases: ", listDump(cases), "\n");
1623     
1624     // Now that we've removed obviously unnecessary cases, we can check if the megamorphic load
1625     // optimization is applicable. Note that we basically tune megamorphicLoadCost according to code
1626     // size. It would be faster to just allow more repatching with many load cases, and avoid the
1627     // megamorphicLoad optimization, if we had infinite executable memory.
1628     if (cases.size() >= Options::maxAccessVariantListSize()) {
1629         unsigned numSelfLoads = 0;
1630         for (auto& newCase : cases) {
1631             if (newCase->canBeReplacedByMegamorphicLoad())
1632                 numSelfLoads++;
1633         }
1634         
1635         if (numSelfLoads >= Options::megamorphicLoadCost()) {
1636             if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1637                 cases.removeAllMatching(
1638                     [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1639                         return newCase->canBeReplacedByMegamorphicLoad();
1640                     });
1641                 
1642                 cases.append(WTFMove(mega));
1643             }
1644         }
1645     }
1646     
1647     if (verbose)
1648         dataLog("Optimized cases: ", listDump(cases), "\n");
1649     
1650     // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
1651     // won't change that set anymore.
1652     
1653     bool allGuardedByStructureCheck = true;
1654     bool hasJSGetterSetterCall = false;
1655     for (auto& newCase : cases) {
1656         commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
1657         allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
1658         if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
1659             hasJSGetterSetterCall = true;
1660     }
1661
1662     if (cases.isEmpty()) {
1663         // This is super unlikely, but we make it legal anyway.
1664         state.failAndRepatch.append(jit.jump());
1665     } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1666         // If there are any proxies in the list, we cannot just use a binary switch over the structure.
1667         // We need to resort to a cascade. A cascade also happens to be optimal if we have just one
1668         // case.
1669         CCallHelpers::JumpList fallThrough;
1670
1671         // Cascade through the list, preferring newer entries.
1672         for (unsigned i = cases.size(); i--;) {
1673             fallThrough.link(&jit);
1674             cases[i]->generateWithGuard(state, fallThrough);
1675         }
1676         state.failAndRepatch.append(fallThrough);
1677     } else {
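        // Every case is guarded purely by a structure check, so load the structure ID once and
        // dispatch with a binary switch. Each case's generate() then emits just its payload; the
        // switch itself serves as the guard.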
1678         jit.load32(
1679             CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1680             state.scratchGPR);
1681         
1682         Vector<int64_t> caseValues(cases.size());
1683         for (unsigned i = 0; i < cases.size(); ++i)
1684             caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1685         
1686         BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1687         while (binarySwitch.advance(jit))
1688             cases[binarySwitch.caseIndex()]->generate(state);
1689         state.failAndRepatch.append(binarySwitch.fallThrough());
1690     }
1691
1692     if (!state.failAndIgnore.empty()) {
1693         state.failAndIgnore.link(&jit);
1694         
1695         // Make sure that the inline cache optimization code knows that we are taking the slow path
1696         // because of something that isn't patchable. The slow path will decrement "countdown" and will
1697         // only patch things if the countdown reaches zero. We increment the countdown here to ensure
1698         // that the slow path does not try to patch.
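        // x86 can add to the byte in memory through an address in scratchGPR; other targets have
        // to load the byte, add to it, and store it back.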
1699 #if CPU(X86) || CPU(X86_64)
1700         jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
1701         jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
1702 #else
1703         jit.load8(&stubInfo.countdown, state.scratchGPR);
1704         jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1705         jit.store8(state.scratchGPR, &stubInfo.countdown);
1706 #endif
1707     }
1708
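    // If any reused registers were pushed, the repatch path has to pop them before jumping to the
    // slow case, so give it its own landing pad here; otherwise we can hand failAndRepatch straight
    // to the link step below.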
1709     CCallHelpers::JumpList failure;
1710     if (allocator.didReuseRegisters()) {
1711         state.failAndRepatch.link(&jit);
1712         state.restoreScratch();
1713     } else
1714         failure = state.failAndRepatch;
1715     failure.append(jit.jump());
1716
1717     CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1718     CallSiteIndex callSiteIndexForExceptionHandling;
1719     if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1720         // Emit the exception handler.
1721         // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1722         // Note also that this is not reachable from a custom getter/setter. Custom getters/setters have
1723         // their own exception handling logic that doesn't go through genericUnwind.
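        // The makeshift handler below restores the frame and stack pointers, restores the registers
        // we spilled for the getter/setter call, and then jumps to the original handler. The link
        // task registers it to cover exactly the call site index used for that call.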
1724         MacroAssembler::Label makeshiftCatchHandler = jit.label();
1725
1726         int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1727         stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1728         stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1729
1730         jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1731         jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1732
1733         state.restoreLiveRegistersFromStackForCallWithThrownException();
1734         state.restoreScratch();
1735         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1736
1737         HandlerInfo oldHandler = state.originalExceptionHandler();
1738         CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1739         jit.addLinkTask(
1740             [=] (LinkBuffer& linkBuffer) {
1741                 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1742
1743                 HandlerInfo handlerToRegister = oldHandler;
1744                 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1745                 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1746                 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1747                 codeBlock->appendExceptionHandler(handlerToRegister);
1748             });
1749
1750         // We set these so that the stub knows to remove itself from the CodeBlock's
1751         // exception handler table when it is deallocated.
1752         codeBlockThatOwnsExceptionHandlers = codeBlock;
1753         ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1754         callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1755     }
1756
1757     LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1758     if (linkBuffer.didFailToAllocate()) {
1759         if (verbose)
1760             dataLog("Did fail to allocate.\n");
1761         return AccessGenerationResult::GaveUp;
1762     }
1763
1764     CodeLocationLabel successLabel =
1765         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
1766         
1767     linkBuffer.link(state.success, successLabel);
1768
1769     linkBuffer.link(
1770         failure,
1771         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
1772     
1773     if (verbose)
1774         dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1775
1776     MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1777         codeBlock, linkBuffer,
1778         ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1779
1780     bool doesCalls = false;
1781     Vector<JSCell*> cellsToMark;
1782     for (auto& entry : cases)
1783         doesCalls |= entry->doesCalls(&cellsToMark);
1784     
1785     m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1786     m_watchpoints = WTFMove(state.watchpoints);
1787     if (!state.weakReferences.isEmpty())
1788         m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1789     if (verbose)
1790         dataLog("Returning: ", code.code(), "\n");
1791     
1792     m_list = WTFMove(cases);
1793     
1794     AccessGenerationResult::Kind resultKind;
1795     if (m_list.size() >= Options::maxAccessVariantListSize())
1796         resultKind = AccessGenerationResult::GeneratedFinalCode;
1797     else
1798         resultKind = AccessGenerationResult::GeneratedNewCode;
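    // Reporting GeneratedFinalCode indicates that the case list is full, so the caller should not
    // expect this stub to be regenerated with additional cases; GeneratedNewCode leaves that
    // possibility open.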
1799     
1800     return AccessGenerationResult(resultKind, code.code());
1801 }
1802
1803 void PolymorphicAccess::aboutToDie()
1804 {
1805     if (m_stubRoutine)
1806         m_stubRoutine->aboutToDie();
1807 }
1808
1809 } // namespace JSC
1810
1811 namespace WTF {
1812
1813 using namespace JSC;
1814
1815 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1816 {
1817     switch (kind) {
1818     case AccessGenerationResult::MadeNoChanges:
1819         out.print("MadeNoChanges");
1820         return;
1821     case AccessGenerationResult::GaveUp:
1822         out.print("GaveUp");
1823         return;
1824     case AccessGenerationResult::Buffered:
1825         out.print("Buffered");
1826         return;
1827     case AccessGenerationResult::GeneratedNewCode:
1828         out.print("GeneratedNewCode");
1829         return;
1830     case AccessGenerationResult::GeneratedFinalCode:
1831         out.print("GeneratedFinalCode");
1832         return;
1833     }
1834     
1835     RELEASE_ASSERT_NOT_REACHED();
1836 }
1837
1838 void printInternal(PrintStream& out, AccessCase::AccessType type)
1839 {
1840     switch (type) {
1841     case AccessCase::Load:
1842         out.print("Load");
1843         return;
1844     case AccessCase::MegamorphicLoad:
1845         out.print("MegamorphicLoad");
1846         return;
1847     case AccessCase::Transition:
1848         out.print("Transition");
1849         return;
1850     case AccessCase::Replace:
1851         out.print("Replace");
1852         return;
1853     case AccessCase::Miss:
1854         out.print("Miss");
1855         return;
1856     case AccessCase::GetGetter:
1857         out.print("GetGetter");
1858         return;
1859     case AccessCase::Getter:
1860         out.print("Getter");
1861         return;
1862     case AccessCase::Setter:
1863         out.print("Setter");
1864         return;
1865     case AccessCase::CustomValueGetter:
1866         out.print("CustomValueGetter");
1867         return;
1868     case AccessCase::CustomAccessorGetter:
1869         out.print("CustomAccessorGetter");
1870         return;
1871     case AccessCase::CustomValueSetter:
1872         out.print("CustomValueSetter");
1873         return;
1874     case AccessCase::CustomAccessorSetter:
1875         out.print("CustomAccessorSetter");
1876         return;
1877     case AccessCase::IntrinsicGetter:
1878         out.print("IntrinsicGetter");
1879         return;
1880     case AccessCase::InHit:
1881         out.print("InHit");
1882         return;
1883     case AccessCase::InMiss:
1884         out.print("InMiss");
1885         return;
1886     case AccessCase::ArrayLength:
1887         out.print("ArrayLength");
1888         return;
1889     case AccessCase::StringLength:
1890         out.print("StringLength");
1891         return;
1892     case AccessCase::DirectArgumentsLength:
1893         out.print("DirectArgumentsLength");
1894         return;
1895     case AccessCase::ScopedArgumentsLength:
1896         out.print("ScopedArgumentsLength");
1897         return;
1898     }
1899
1900     RELEASE_ASSERT_NOT_REACHED();
1901 }
1902
1903 void printInternal(PrintStream& out, AccessCase::State state)
1904 {
1905     switch (state) {
1906     case AccessCase::Primordial:
1907         out.print("Primordial");
1908         return;
1909     case AccessCase::Committed:
1910         out.print("Committed");
1911         return;
1912     case AccessCase::Generated:
1913         out.print("Generated");
1914         return;
1915     }
1916
1917     RELEASE_ASSERT_NOT_REACHED();
1918 }
1919
1920 } // namespace WTF
1921
1922 #endif // ENABLE(JIT)
1923
1924