PolymorphicAccess should buffer AccessCases before regenerating
Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
1 /*
2  * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "PolymorphicAccess.h"
28
29 #if ENABLE(JIT)
30
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "DirectArguments.h"
35 #include "GetterSetter.h"
36 #include "Heap.h"
37 #include "JITOperations.h"
38 #include "JSCInlines.h"
39 #include "LinkBuffer.h"
40 #include "ScopedArguments.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "StructureStubClearingWatchpoint.h"
43 #include "StructureStubInfo.h"
44 #include <wtf/CommaPrinter.h>
45 #include <wtf/ListDump.h>
46
47 namespace JSC {
48
49 static const bool verbose = false;
50
51 // EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiling for ARM EABI, it must be aligned to an even-numbered register (r0, r2) or to [sp].
52 // To prevent the assembler from using the wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
53 #if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
54 #define EABI_32BIT_DUMMY_ARG      CCallHelpers::TrustedImm32(0),
55 #else
56 #define EABI_32BIT_DUMMY_ARG
57 #endif
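// Illustrative use (see the custom accessor calls in AccessCase::generateImpl() below):
//     jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG payloadGPR, tagGPR, ...);
// The dummy TrustedImm32(0) fills r1/r3 so that the 64-bit EncodedJSValue that follows
// starts in an even-numbered register pair.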
58
59 void AccessGenerationResult::dump(PrintStream& out) const
60 {
61     out.print(m_kind);
62     if (m_code)
63         out.print(":", m_code);
64 }
65
66 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
67 {
68     return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
69         watchpoints, jit->codeBlock(), stubInfo, condition);
70 }
71
72 void AccessGenerationState::restoreScratch()
73 {
74     allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
75 }
76
77 void AccessGenerationState::succeed()
78 {
79     restoreScratch();
80     success.append(jit->jump());
81 }
82
83 void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling(const RegisterSet& extra)
84 {
85     if (!m_calculatedRegistersForCallAndExceptionHandling) {
86         m_calculatedRegistersForCallAndExceptionHandling = true;
87
88         m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
89         m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
90         if (m_needsToRestoreRegistersIfException)
91             RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
92
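        // The registers to preserve around the call are: everything live at the
        // exception handling call site, everything the scratch allocator handed out,
        // and any caller-supplied extras, minus whatever registersToNotSaveForJSCall()
        // says never needs saving across a JS call.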
93         m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
94         m_liveRegistersForCall.merge(extra);
95         m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
96         m_liveRegistersForCall.merge(extra);
97     }
98 }
99
100 void AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra)
101 {
102     calculateLiveRegistersForCallAndExceptionHandling(extra);
103     
104     unsigned extraStackPadding = 0;
105     unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
106     if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
107         RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
108     m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
109 }
110
111 void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
112 {
113     RegisterSet dontRestore;
114     if (isGetter) {
115         // This is the result value. We don't want to overwrite the result with what we stored to the stack.
116         // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
117         dontRestore.set(valueRegs);
118     }
119     restoreLiveRegistersFromStackForCall(dontRestore);
120 }
121
122 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
123 {
124     // Even if we're a getter, we don't want to ignore the result value like we normally do
125     // because the getter threw, and therefore, didn't return a value that means anything.
126     // Instead, we want to restore that register to what it was upon entering the getter
127     // inline cache. The subtlety here is that if the base and the result are the same register,
128     // and the getter threw, we want OSR exit to see the original base value, not the result
129     // of the getter call.
130     RegisterSet dontRestore = liveRegistersForCall();
131     // As an optimization here, we only need to restore what is live for exception handling.
132     // We can construct the dontRestore set to accomplish this goal by having it contain only
133     // what is live for call but not live for exception handling. By ignoring things that are
134     // only live at the call but not the exception handler, we will only restore things live
135     // at the exception handler.
136     dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
137     restoreLiveRegistersFromStackForCall(dontRestore);
138 }
139
140 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
141 {
142     unsigned extraStackPadding = 0;
143     ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
144 }
145
146 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
147 {
148     RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
149
150     if (!m_calculatedCallSiteIndex) {
151         m_calculatedCallSiteIndex = true;
152
153         if (m_needsToRestoreRegistersIfException)
154             m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
155         else
156             m_callSiteIndex = originalCallSiteIndex();
157     }
158
159     return m_callSiteIndex;
160 }
161
162 const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
163 {
164     RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
165     HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
166     RELEASE_ASSERT(exceptionHandler);
167     return *exceptionHandler;
168 }
169
170 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
171
172 void AccessGenerationState::emitExplicitExceptionHandler()
173 {
174     restoreScratch();
175     jit->copyCalleeSavesToVMCalleeSavesBuffer();
176     if (needsToRestoreRegistersIfException()) {
177         // The JIT that produced the original exception handling call site
178         // expects the OSR exit to be arrived at from genericUnwind.
179         // Therefore we must model what genericUnwind does here, i.e.,
180         // set callFrameForCatch and copy callee saves.
181
182         jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
183         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
184
185         // We don't need to insert a new exception handler in the table
186         // because we're doing a manual exception check here, i.e., we'll
187         // never arrive here from genericUnwind().
188         HandlerInfo originalHandler = originalExceptionHandler();
189         jit->addLinkTask(
190             [=] (LinkBuffer& linkBuffer) {
191                 linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
192             });
193     } else {
194         jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
195         CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
196         jit->addLinkTask(
197             [=] (LinkBuffer& linkBuffer) {
198                 linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
199             });
200         jit->jumpToExceptionHandler();
201     }
202 }
203
204 AccessCase::AccessCase()
205 {
206 }
207
208 std::unique_ptr<AccessCase> AccessCase::tryGet(
209     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
210     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
211 {
212     std::unique_ptr<AccessCase> result(new AccessCase());
213
214     result->m_type = type;
215     result->m_offset = offset;
216     result->m_structure.set(vm, owner, structure);
217     result->m_conditionSet = conditionSet;
218
219     if (viaProxy || additionalSet) {
220         result->m_rareData = std::make_unique<RareData>();
221         result->m_rareData->viaProxy = viaProxy;
222         result->m_rareData->additionalSet = additionalSet;
223     }
224
225     return result;
226 }
227
228 std::unique_ptr<AccessCase> AccessCase::get(
229     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
230     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
231     PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
232 {
233     std::unique_ptr<AccessCase> result(new AccessCase());
234
235     result->m_type = type;
236     result->m_offset = offset;
237     result->m_structure.set(vm, owner, structure);
238     result->m_conditionSet = conditionSet;
239
240     if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
241         result->m_rareData = std::make_unique<RareData>();
242         result->m_rareData->viaProxy = viaProxy;
243         result->m_rareData->additionalSet = additionalSet;
244         result->m_rareData->customAccessor.getter = customGetter;
245         result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
246     }
247
248     return result;
249 }
250
251 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
252 {
253     UNUSED_PARAM(vm);
254     UNUSED_PARAM(owner);
255     
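    // The inline property-table probe emitted in generateWithGuard() needs several
    // extra scratch GPRs beyond the base, value, and stub scratch registers, so skip
    // this optimization entirely on register-starved targets.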
256     if (GPRInfo::numberOfRegisters < 9)
257         return nullptr;
258     
259     std::unique_ptr<AccessCase> result(new AccessCase());
260     
261     result->m_type = MegamorphicLoad;
262     
263     return result;
264 }
265
266 std::unique_ptr<AccessCase> AccessCase::replace(
267     VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
268 {
269     std::unique_ptr<AccessCase> result(new AccessCase());
270
271     result->m_type = Replace;
272     result->m_offset = offset;
273     result->m_structure.set(vm, owner, structure);
274
275     return result;
276 }
277
278 std::unique_ptr<AccessCase> AccessCase::transition(
279     VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
280     const ObjectPropertyConditionSet& conditionSet)
281 {
282     RELEASE_ASSERT(oldStructure == newStructure->previousID());
283
284     // Skip optimizing the case where we need a realloc, if we don't have
285     // enough registers to make it happen.
286     if (GPRInfo::numberOfRegisters < 6
287         && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
288         && oldStructure->outOfLineCapacity()) {
289         return nullptr;
290     }
291
292     std::unique_ptr<AccessCase> result(new AccessCase());
293
294     result->m_type = Transition;
295     result->m_offset = offset;
296     result->m_structure.set(vm, owner, newStructure);
297     result->m_conditionSet = conditionSet;
298
299     return result;
300 }
301
302 std::unique_ptr<AccessCase> AccessCase::setter(
303     VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
304     const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
305     JSObject* customSlotBase)
306 {
307     std::unique_ptr<AccessCase> result(new AccessCase());
308
309     result->m_type = type;
310     result->m_offset = offset;
311     result->m_structure.set(vm, owner, structure);
312     result->m_conditionSet = conditionSet;
313     result->m_rareData = std::make_unique<RareData>();
314     result->m_rareData->customAccessor.setter = customSetter;
315     result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
316
317     return result;
318 }
319
320 std::unique_ptr<AccessCase> AccessCase::in(
321     VM& vm, JSCell* owner, AccessType type, Structure* structure,
322     const ObjectPropertyConditionSet& conditionSet)
323 {
324     std::unique_ptr<AccessCase> result(new AccessCase());
325
326     result->m_type = type;
327     result->m_structure.set(vm, owner, structure);
328     result->m_conditionSet = conditionSet;
329
330     return result;
331 }
332
333 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
334 {
335     std::unique_ptr<AccessCase> result(new AccessCase());
336
337     result->m_type = type;
338
339     return result;
340 }
341
342 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
343     VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
344     Structure* structure, const ObjectPropertyConditionSet& conditionSet)
345 {
346     std::unique_ptr<AccessCase> result(new AccessCase());
347
348     result->m_type = IntrinsicGetter;
349     result->m_structure.set(vm, owner, structure);
350     result->m_conditionSet = conditionSet;
351     result->m_offset = offset;
352
353     result->m_rareData = std::make_unique<RareData>();
354     result->m_rareData->intrinsicFunction.set(vm, owner, getter);
355
356     return result;
357 }
358
359 AccessCase::~AccessCase()
360 {
361 }
362
363 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
364     VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
365 {
366     switch (stubInfo.cacheType) {
367     case CacheType::GetByIdSelf:
368         return get(
369             vm, owner, Load, stubInfo.u.byIdSelf.offset,
370             stubInfo.u.byIdSelf.baseObjectStructure.get());
371
372     case CacheType::PutByIdReplace:
373         return replace(
374             vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
375
376     default:
377         return nullptr;
378     }
379 }
380
381 std::unique_ptr<AccessCase> AccessCase::clone() const
382 {
383     std::unique_ptr<AccessCase> result(new AccessCase());
384     result->m_type = m_type;
385     result->m_offset = m_offset;
386     result->m_structure = m_structure;
387     result->m_conditionSet = m_conditionSet;
388     if (RareData* rareData = m_rareData.get()) {
389         result->m_rareData = std::make_unique<RareData>();
390         result->m_rareData->viaProxy = rareData->viaProxy;
391         result->m_rareData->additionalSet = rareData->additionalSet;
392         // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
393         result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
394         result->m_rareData->customSlotBase = rareData->customSlotBase;
395         result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
396     }
397     return result;
398 }
399
400 Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
401 {
402     RELEASE_ASSERT(m_state == Primordial);
403     
404     Vector<WatchpointSet*, 2> result;
405     
406     if ((structure() && structure()->needImpurePropertyWatchpoint())
407         || m_conditionSet.needImpurePropertyWatchpoint())
408         result.append(vm.ensureWatchpointSetForImpureProperty(ident));
409
410     if (additionalSet())
411         result.append(additionalSet());
412     
413     m_state = Committed;
414     
415     return result;
416 }
417
418 bool AccessCase::guardedByStructureCheck() const
419 {
420     if (viaProxy())
421         return false;
422
423     switch (m_type) {
424     case MegamorphicLoad:
425     case ArrayLength:
426     case StringLength:
427     case DirectArgumentsLength:
428     case ScopedArgumentsLength:
429         return false;
430     default:
431         return true;
432     }
433 }
434
435 JSObject* AccessCase::alternateBase() const
436 {
437     if (customSlotBase())
438         return customSlotBase();
439     return conditionSet().slotBaseCondition().object();
440 }
441
442 bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
443 {
444     switch (type()) {
445     case Getter:
446     case Setter:
447     case CustomValueGetter:
448     case CustomAccessorGetter:
449     case CustomValueSetter:
450     case CustomAccessorSetter:
451         return true;
452     case Transition:
453         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
454             && structure()->couldHaveIndexingHeader()) {
455             if (cellsToMark)
456                 cellsToMark->append(newStructure());
457             return true;
458         }
459         return false;
460     default:
461         return false;
462     }
463 }
464
465 bool AccessCase::couldStillSucceed() const
466 {
467     return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
468 }
469
470 bool AccessCase::canBeReplacedByMegamorphicLoad() const
471 {
472     return type() == Load
473         && !viaProxy()
474         && conditionSet().isEmpty()
475         && !additionalSet()
476         && !customSlotBase();
477 }
478
479 bool AccessCase::canReplace(const AccessCase& other) const
480 {
481     // We could do a lot better here, but for now we just do something obvious.
482     
483     if (type() == MegamorphicLoad && other.canBeReplacedByMegamorphicLoad())
484         return true;
485
486     if (!guardedByStructureCheck() || !other.guardedByStructureCheck()) {
487         // FIXME: Implement this!
488         return false;
489     }
490
491     return structure() == other.structure();
492 }
493
494 void AccessCase::dump(PrintStream& out) const
495 {
496     out.print(m_type, ":(");
497
498     CommaPrinter comma;
499     
500     out.print(comma, m_state);
501
502     if (m_type == Transition)
503         out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
504     else if (m_structure)
505         out.print(comma, "structure = ", pointerDump(m_structure.get()));
506
507     if (isValidOffset(m_offset))
508         out.print(comma, "offset = ", m_offset);
509     if (!m_conditionSet.isEmpty())
510         out.print(comma, "conditions = ", m_conditionSet);
511
512     if (RareData* rareData = m_rareData.get()) {
513         if (rareData->viaProxy)
514             out.print(comma, "viaProxy = ", rareData->viaProxy);
515         if (rareData->additionalSet)
516             out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
517         if (rareData->callLinkInfo)
518             out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
519         if (rareData->customAccessor.opaque)
520             out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
521         if (rareData->customSlotBase)
522             out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
523     }
524
525     out.print(")");
526 }
527
528 bool AccessCase::visitWeak(VM& vm) const
529 {
530     if (m_structure && !Heap::isMarked(m_structure.get()))
531         return false;
532     if (!m_conditionSet.areStillLive())
533         return false;
534     if (m_rareData) {
535         if (m_rareData->callLinkInfo)
536             m_rareData->callLinkInfo->visitWeak(vm);
537         if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
538             return false;
539         if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
540             return false;
541     }
542     return true;
543 }
544
545 void AccessCase::generateWithGuard(
546     AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
547 {
548     SuperSamplerScope superSamplerScope(false);
549
550     RELEASE_ASSERT(m_state == Committed);
551     m_state = Generated;
552     
553     CCallHelpers& jit = *state.jit;
554     VM& vm = *jit.vm();
555     const Identifier& ident = *state.ident;
556     StructureStubInfo& stubInfo = *state.stubInfo;
557     JSValueRegs valueRegs = state.valueRegs;
558     GPRReg baseGPR = state.baseGPR;
559     GPRReg scratchGPR = state.scratchGPR;
560     
561     UNUSED_PARAM(vm);
562
563     switch (m_type) {
564     case ArrayLength: {
565         ASSERT(!viaProxy());
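        // Only a real JSArray with a non-empty indexing shape keeps its public length
        // in the butterfly, so guard on the IsArray bit and the indexing shape bits
        // before falling through to the length load in generateImpl().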
566         jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
567         fallThrough.append(
568             jit.branchTest32(
569                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
570         fallThrough.append(
571             jit.branchTest32(
572                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
573         break;
574     }
575
576     case StringLength: {
577         ASSERT(!viaProxy());
578         fallThrough.append(
579             jit.branch8(
580                 CCallHelpers::NotEqual,
581                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
582                 CCallHelpers::TrustedImm32(StringType)));
583         break;
584     }
585         
586     case DirectArgumentsLength: {
587         ASSERT(!viaProxy());
588         fallThrough.append(
589             jit.branch8(
590                 CCallHelpers::NotEqual,
591                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
592                 CCallHelpers::TrustedImm32(DirectArgumentsType)));
593
594         fallThrough.append(
595             jit.branchTestPtr(
596                 CCallHelpers::NonZero,
597                 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
598         jit.load32(
599             CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
600             valueRegs.payloadGPR());
601         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
602         state.succeed();
603         return;
604     }
605         
606     case ScopedArgumentsLength: {
607         ASSERT(!viaProxy());
608         fallThrough.append(
609             jit.branch8(
610                 CCallHelpers::NotEqual,
611                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
612                 CCallHelpers::TrustedImm32(ScopedArgumentsType)));
613
614         fallThrough.append(
615             jit.branchTest8(
616                 CCallHelpers::NonZero,
617                 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
618         jit.load32(
619             CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
620             valueRegs.payloadGPR());
621         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
622         state.succeed();
623         return;
624     }
625         
626     case MegamorphicLoad: {
627         UniquedStringImpl* key = ident.impl();
628         unsigned hash = IdentifierRepHash::hash(key);
629         
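        // What follows is an inline probe of the structure's property table: start at
        // (hash & mask) and, on a key mismatch, advance by one (wrapping via the mask)
        // until we find the key or an empty slot. A missing property table takes the
        // failAndIgnore path; an empty slot or a property with Accessor/CustomAccessor
        // attributes falls through to the generic handling.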
630         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
631         allocator.lock(baseGPR);
632 #if USE(JSVALUE32_64)
633         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
634 #endif
635         allocator.lock(valueRegs);
636         allocator.lock(scratchGPR);
637         
638         GPRReg intermediateGPR = scratchGPR;
639         GPRReg maskGPR = allocator.allocateScratchGPR();
640         GPRReg maskedHashGPR = allocator.allocateScratchGPR();
641         GPRReg indexGPR = allocator.allocateScratchGPR();
642         GPRReg offsetGPR = allocator.allocateScratchGPR();
643         
644         if (verbose) {
645             dataLog("baseGPR = ", baseGPR, "\n");
646             dataLog("valueRegs = ", valueRegs, "\n");
647             dataLog("scratchGPR = ", scratchGPR, "\n");
648             dataLog("intermediateGPR = ", intermediateGPR, "\n");
649             dataLog("maskGPR = ", maskGPR, "\n");
650             dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
651             dataLog("indexGPR = ", indexGPR, "\n");
652             dataLog("offsetGPR = ", offsetGPR, "\n");
653         }
654
655         ScratchRegisterAllocator::PreservedState preservedState =
656             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
657
658         CCallHelpers::JumpList myFailAndIgnore;
659         CCallHelpers::JumpList myFallThrough;
660         
661         jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
662         jit.loadPtr(
663             CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
664             intermediateGPR);
665         
666         myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
667         
668         jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
669         jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
670         jit.load32(
671             CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
672             intermediateGPR);
673
674         jit.move(maskGPR, maskedHashGPR);
675         jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
676         jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
677         jit.addPtr(indexGPR, intermediateGPR);
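        // The PropertyMapEntry array is stored contiguously after the 32-bit index
        // vector, so entries = index + indexSize * sizeof(uint32_t); intermediateGPR
        // now points at the first entry.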
678         
679         CCallHelpers::Label loop = jit.label();
680         
681         jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
682         
683         myFallThrough.append(
684             jit.branch32(
685                 CCallHelpers::Equal,
686                 offsetGPR,
687                 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
688         
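        // The index vector stores entry numbers biased by one so that 0 can serve as
        // EmptyEntryIndex; undo the bias and scale to a byte offset into the
        // PropertyMapEntry array.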
689         jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
690         jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
691         jit.addPtr(intermediateGPR, offsetGPR);
692         
693         CCallHelpers::Jump collision =  jit.branchPtr(
694             CCallHelpers::NotEqual,
695             CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
696             CCallHelpers::TrustedImmPtr(key));
697         
698         // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
699         // Check them and then attempt the load.
700         
701         myFallThrough.append(
702             jit.branchTest32(
703                 CCallHelpers::NonZero,
704                 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
705                 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
706         
707         jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
708         
709         jit.loadProperty(baseGPR, offsetGPR, valueRegs);
710         
711         allocator.restoreReusedRegistersByPopping(jit, preservedState);
712         state.succeed();
713         
714         collision.link(&jit);
715
716         jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
717         
718         // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
719         // around isn't super common so we could, for example, recompute the mask from the difference between
720         // the table and index. But before we do that we should probably make it easier to multiply and
721         // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
722         // to have a power-of-2 size.
723         jit.and32(maskGPR, maskedHashGPR);
724         jit.jump().linkTo(loop, &jit);
725         
726         if (allocator.didReuseRegisters()) {
727             myFailAndIgnore.link(&jit);
728             allocator.restoreReusedRegistersByPopping(jit, preservedState);
729             state.failAndIgnore.append(jit.jump());
730             
731             myFallThrough.link(&jit);
732             allocator.restoreReusedRegistersByPopping(jit, preservedState);
733             fallThrough.append(jit.jump());
734         } else {
735             state.failAndIgnore.append(myFailAndIgnore);
736             fallThrough.append(myFallThrough);
737         }
738         return;
739     }
740
741     default: {
742         if (viaProxy()) {
743             fallThrough.append(
744                 jit.branch8(
745                     CCallHelpers::NotEqual,
746                     CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
747                     CCallHelpers::TrustedImm32(PureForwardingProxyType)));
748
749             jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
750
751             fallThrough.append(
752                 jit.branchStructure(
753                     CCallHelpers::NotEqual,
754                     CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
755                     structure()));
756         } else {
757             fallThrough.append(
758                 jit.branchStructure(
759                     CCallHelpers::NotEqual,
760                     CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
761                     structure()));
762         }
763         break;
764     } };
765
766     generateImpl(state);
767 }
768
769 void AccessCase::generate(AccessGenerationState& state)
770 {
771     RELEASE_ASSERT(m_state == Committed);
772     m_state = Generated;
773     
774     generateImpl(state);
775 }
776
777 void AccessCase::generateImpl(AccessGenerationState& state)
778 {
779     SuperSamplerScope superSamplerScope(false);
780     if (verbose)
781         dataLog("Generating code for: ", *this, "\n");
782     
783     ASSERT(m_state == Generated); // We rely on the callers setting this for us.
784     
785     CCallHelpers& jit = *state.jit;
786     VM& vm = *jit.vm();
787     CodeBlock* codeBlock = jit.codeBlock();
788     StructureStubInfo& stubInfo = *state.stubInfo;
789     const Identifier& ident = *state.ident;
790     JSValueRegs valueRegs = state.valueRegs;
791     GPRReg baseGPR = state.baseGPR;
792     GPRReg scratchGPR = state.scratchGPR;
793
794     ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
795
796     for (const ObjectPropertyCondition& condition : m_conditionSet) {
797         Structure* structure = condition.object()->structure();
798
799         if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
800             structure->addTransitionWatchpoint(state.addWatchpoint(condition));
801             continue;
802         }
803
804         if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
805             // The reason why this cannot happen is that we require that PolymorphicAccess calls
806             // AccessCase::generate() only after it has verified that
807             // AccessCase::couldStillSucceed() returned true.
808             
809             dataLog("This condition is no longer met: ", condition, "\n");
810             RELEASE_ASSERT_NOT_REACHED();
811         }
812
813         // We will emit code that has a weak reference that isn't otherwise listed anywhere.
814         state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
815         
816         jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
817         state.failAndRepatch.append(
818             jit.branchStructure(
819                 CCallHelpers::NotEqual,
820                 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
821                 structure));
822     }
823
824     switch (m_type) {
825     case InHit:
826     case InMiss:
827         jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
828         state.succeed();
829         return;
830
831     case Miss:
832         jit.moveTrustedValue(jsUndefined(), valueRegs);
833         state.succeed();
834         return;
835
836     case Load:
837     case GetGetter:
838     case Getter:
839     case Setter:
840     case CustomValueGetter:
841     case CustomAccessorGetter:
842     case CustomValueSetter:
843     case CustomAccessorSetter: {
844         if (isValidOffset(m_offset)) {
845             Structure* currStructure;
846             if (m_conditionSet.isEmpty())
847                 currStructure = structure();
848             else
849                 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
850             currStructure->startWatchingPropertyForReplacements(vm, offset());
851         }
852
853         GPRReg baseForGetGPR;
854         if (viaProxy()) {
855             baseForGetGPR = valueRegs.payloadGPR();
856             jit.loadPtr(
857                 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
858                 baseForGetGPR);
859         } else
860             baseForGetGPR = baseGPR;
861
862         GPRReg baseForAccessGPR;
863         if (!m_conditionSet.isEmpty()) {
864             jit.move(
865                 CCallHelpers::TrustedImmPtr(alternateBase()),
866                 scratchGPR);
867             baseForAccessGPR = scratchGPR;
868         } else
869             baseForAccessGPR = baseForGetGPR;
870
871         GPRReg loadedValueGPR = InvalidGPRReg;
872         if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
873             if (m_type == Load || m_type == GetGetter)
874                 loadedValueGPR = valueRegs.payloadGPR();
875             else
876                 loadedValueGPR = scratchGPR;
877
878             GPRReg storageGPR;
879             if (isInlineOffset(m_offset))
880                 storageGPR = baseForAccessGPR;
881             else {
882                 jit.loadPtr(
883                     CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
884                     loadedValueGPR);
885                 storageGPR = loadedValueGPR;
886             }
887
888 #if USE(JSVALUE64)
889             jit.load64(
890                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
891 #else
892             if (m_type == Load || m_type == GetGetter) {
893                 jit.load32(
894                     CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
895                     valueRegs.tagGPR());
896             }
897             jit.load32(
898                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
899                 loadedValueGPR);
900 #endif
901         }
902
903         if (m_type == Load || m_type == GetGetter) {
904             state.succeed();
905             return;
906         }
907
908         // Stuff for custom getters/setters.
909         CCallHelpers::Call operationCall;
910
911         // Stuff for JS getters/setters.
912         CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
913         CCallHelpers::Call fastPathCall;
914         CCallHelpers::Call slowPathCall;
915
916         CCallHelpers::Jump success;
917         CCallHelpers::Jump fail;
918
919         // This also does the necessary calculations of whether or not we're an
920         // exception handling call site.
921         state.preserveLiveRegistersToStackForCall();
922
923         jit.store32(
924             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
925             CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
926
927         if (m_type == Getter || m_type == Setter) {
928             // Create a JS call using a JS call inline cache. Assume that:
929             //
930             // - SP is aligned and represents the extent of the calling compiler's stack usage.
931             //
932             // - FP is set correctly (i.e. it points to the caller's call frame header).
933             //
934             // - SP - FP is an aligned difference.
935             //
936             // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
937             //   code.
938             //
939             // Therefore, we temporarily grow the stack for the purpose of the call and then
940             // shrink it after.
941
942             RELEASE_ASSERT(!m_rareData->callLinkInfo);
943             m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
944             
945             // FIXME: If we generated a polymorphic call stub that jumped back to the getter
946             // stub, which then jumped back to the main code, then we'd have a reachability
947             // situation that the GC doesn't know about. The GC would ensure that the polymorphic
948             // call stub stayed alive, and it would ensure that the main code stayed alive, but
949             // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
950             // be GC objects, and then we'd be able to say that the polymorphic call stub has a
951             // reference to the getter stub.
952             // https://bugs.webkit.org/show_bug.cgi?id=148914
953             m_rareData->callLinkInfo->disallowStubs();
954             
955             m_rareData->callLinkInfo->setUpCall(
956                 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
957
958             CCallHelpers::JumpList done;
959
960             // There is a "this" argument.
961             unsigned numberOfParameters = 1;
962             // ... and a value argument if we're calling a setter.
963             if (m_type == Setter)
964                 numberOfParameters++;
965
966             // Get the accessor; if there ain't one then the result is jsUndefined().
967             if (m_type == Setter) {
968                 jit.loadPtr(
969                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
970                     loadedValueGPR);
971             } else {
972                 jit.loadPtr(
973                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
974                     loadedValueGPR);
975             }
976
977             CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
978                 CCallHelpers::Zero, loadedValueGPR);
979
980             unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
981
982             unsigned numberOfBytesForCall =
983                 numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);
984
985             unsigned alignedNumberOfBytesForCall =
986                 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
987
988             jit.subPtr(
989                 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
990                 CCallHelpers::stackPointerRegister);
991
992             CCallHelpers::Address calleeFrame = CCallHelpers::Address(
993                 CCallHelpers::stackPointerRegister,
994                 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
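            // calleeFrame is based sizeof(CallerFrameAndPC) below the current SP, i.e.
            // at what will become the callee's frame once the call and prologue fill in
            // the caller-frame and return-PC slots, so ArgumentCount, Callee, |this|,
            // and (for a setter) the value can be stored at their ordinary offsets.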
995
996             jit.store32(
997                 CCallHelpers::TrustedImm32(numberOfParameters),
998                 calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
999
1000             jit.storeCell(
1001                 loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
1002
1003             jit.storeCell(
1004                 baseForGetGPR,
1005                 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
1006
1007             if (m_type == Setter) {
1008                 jit.storeValue(
1009                     valueRegs,
1010                     calleeFrame.withOffset(
1011                         virtualRegisterForArgument(1).offset() * sizeof(Register)));
1012             }
1013
1014             CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
1015                 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
1016                 CCallHelpers::TrustedImmPtr(0));
1017
1018             fastPathCall = jit.nearCall();
1019             if (m_type == Getter)
1020                 jit.setupResults(valueRegs);
1021             done.append(jit.jump());
1022
1023             slowCase.link(&jit);
1024             jit.move(loadedValueGPR, GPRInfo::regT0);
1025 #if USE(JSVALUE32_64)
1026             // We *always* know that the getter/setter, if non-null, is a cell.
1027             jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
1028 #endif
1029             jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
1030             slowPathCall = jit.nearCall();
1031             if (m_type == Getter)
1032                 jit.setupResults(valueRegs);
1033             done.append(jit.jump());
1034
1035             returnUndefined.link(&jit);
1036             if (m_type == Getter)
1037                 jit.moveTrustedValue(jsUndefined(), valueRegs);
1038
1039             done.link(&jit);
1040
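            // Recompute SP from FP: the code block's usual stack pointer offset, minus
            // the bytes this stub pushed for reused scratch registers and for the live
            // registers it spilled around the call.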
1041             jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
1042                 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1043             state.restoreLiveRegistersFromStackForCall(isGetter());
1044
1045             jit.addLinkTask(
1046                 [=, &vm] (LinkBuffer& linkBuffer) {
1047                     m_rareData->callLinkInfo->setCallLocations(
1048                         linkBuffer.locationOfNearCall(slowPathCall),
1049                         linkBuffer.locationOf(addressOfLinkFunctionCheck),
1050                         linkBuffer.locationOfNearCall(fastPathCall));
1051
1052                     linkBuffer.link(
1053                         slowPathCall,
1054                         CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
1055                 });
1056         } else {
1057             // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
1058             // hard to track if someone did spillage or not, so we just assume that we always need
1059             // to make some space here.
1060             jit.makeSpaceOnStackForCCall();
1061
1062             // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
1063             // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
1064             // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (receiver).
1065             GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
1066 #if USE(JSVALUE64)
1067             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1068                 jit.setupArgumentsWithExecState(
1069                     baseForCustomValue,
1070                     CCallHelpers::TrustedImmPtr(ident.impl()));
1071             } else
1072                 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
1073 #else
1074             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1075                 jit.setupArgumentsWithExecState(
1076                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1077                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1078                     CCallHelpers::TrustedImmPtr(ident.impl()));
1079             } else {
1080                 jit.setupArgumentsWithExecState(
1081                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1082                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1083                     valueRegs.payloadGPR(), valueRegs.tagGPR());
1084             }
1085 #endif
1086             jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
1087
1088             operationCall = jit.call();
1089             jit.addLinkTask(
1090                 [=] (LinkBuffer& linkBuffer) {
1091                     linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
1092                 });
1093
1094             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
1095                 jit.setupResults(valueRegs);
1096             jit.reclaimSpaceOnStackForCCall();
1097
1098             CCallHelpers::Jump noException =
1099                 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1100
1101             state.restoreLiveRegistersFromStackForCallWithThrownException();
1102             state.emitExplicitExceptionHandler();
1103         
1104             noException.link(&jit);
1105             state.restoreLiveRegistersFromStackForCall(isGetter());
1106         }
1107         state.succeed();
1108         return;
1109     }
1110
1111     case Replace: {
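        // If the structure has an inferred type for this property, the replace is only
        // valid for values of that type; anything else takes the failAndRepatch path.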
1112         if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
1113             if (verbose)
1114                 dataLog("Have type: ", type->descriptor(), "\n");
1115             state.failAndRepatch.append(
1116                 jit.branchIfNotType(
1117                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1118         } else if (verbose)
1119             dataLog("Don't have type.\n");
1120         
1121         if (isInlineOffset(m_offset)) {
1122             jit.storeValue(
1123                 valueRegs,
1124                 CCallHelpers::Address(
1125                     baseGPR,
1126                     JSObject::offsetOfInlineStorage() +
1127                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1128         } else {
1129             jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1130             jit.storeValue(
1131                 valueRegs,
1132                 CCallHelpers::Address(
1133                     scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1134         }
1135         state.succeed();
1136         return;
1137     }
1138
1139     case Transition: {
1140         // AccessCase::transition() should have returned null if this wasn't true.
1141         RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1142
1143         if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1144             if (verbose)
1145                 dataLog("Have type: ", type->descriptor(), "\n");
1146             state.failAndRepatch.append(
1147                 jit.branchIfNotType(
1148                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1149         } else if (verbose)
1150             dataLog("Don't have type.\n");
1151         
1152         // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
1153         // exactly when this would make calls.
1154         bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
1155         bool reallocating = allocating && structure()->outOfLineCapacity();
1156         bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
1157
1158         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1159         allocator.lock(baseGPR);
1160 #if USE(JSVALUE32_64)
1161         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1162 #endif
1163         allocator.lock(valueRegs);
1164         allocator.lock(scratchGPR);
1165
1166         GPRReg scratchGPR2 = InvalidGPRReg;
1167         GPRReg scratchGPR3 = InvalidGPRReg;
1168         if (allocatingInline) {
1169             scratchGPR2 = allocator.allocateScratchGPR();
1170             scratchGPR3 = allocator.allocateScratchGPR();
1171         }
1172
1173         ScratchRegisterAllocator::PreservedState preservedState =
1174             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1175         
1176         CCallHelpers::JumpList slowPath;
1177
1178         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1179
1180         if (allocating) {
1181             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1182             
1183             if (allocatingInline) {
1184                 CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
1185
1186                 if (!reallocating) {
1187                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1188                     slowPath.append(
1189                         jit.branchSubPtr(
1190                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1191                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1192                     jit.negPtr(scratchGPR);
1193                     jit.addPtr(
1194                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1195                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1196                 } else {
1197                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1198                     // already had out-of-line property storage).
1199                     size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1200                     ASSERT(newSize > oldSize);
1201             
1202                     jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1203                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1204                     slowPath.append(
1205                         jit.branchSubPtr(
1206                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1207                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1208                     jit.negPtr(scratchGPR);
1209                     jit.addPtr(
1210                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1211                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1212                     // We have scratchGPR = new storage, scratchGPR3 = old storage,
1213                     // scratchGPR2 = available
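                    // Copy the existing out-of-line properties from the old butterfly
                    // (scratchGPR3) into the new storage (scratchGPR), one pointer-sized
                    // word at a time. Out-of-line slots sit at negative offsets from the
                    // butterfly pointer, hence the downward-indexed addressing.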
1214                     for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1215                         jit.loadPtr(
1216                             CCallHelpers::Address(
1217                                 scratchGPR3,
1218                                 -static_cast<ptrdiff_t>(
1219                                     offset + sizeof(JSValue) + sizeof(void*))),
1220                             scratchGPR2);
1221                         jit.storePtr(
1222                             scratchGPR2,
1223                             CCallHelpers::Address(
1224                                 scratchGPR,
1225                                 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1226                     }
1227                 }
1228             } else {
1229                 // Handle the case where we are allocating out-of-line using an operation.
1230                 RegisterSet extraRegistersToPreserve;
1231                 extraRegistersToPreserve.set(baseGPR);
1232                 extraRegistersToPreserve.set(valueRegs);
1233                 state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1234                 
1235                 jit.store32(
1236                     CCallHelpers::TrustedImm32(
1237                         state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1238                     CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
1239                 
1240                 jit.makeSpaceOnStackForCCall();
1241                 
1242                 if (!reallocating) {
1243                     jit.setupArgumentsWithExecState(baseGPR);
1244                     
1245                     CCallHelpers::Call operationCall = jit.call();
1246                     jit.addLinkTask(
1247                         [=] (LinkBuffer& linkBuffer) {
1248                             linkBuffer.link(
1249                                 operationCall,
1250                                 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1251                         });
1252                 } else {
1253                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1254                     // already had out-of-line property storage).
1255                     jit.setupArgumentsWithExecState(
1256                         baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1257                     
1258                     CCallHelpers::Call operationCall = jit.call();
1259                     jit.addLinkTask(
1260                         [=] (LinkBuffer& linkBuffer) {
1261                             linkBuffer.link(
1262                                 operationCall,
1263                                 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1264                         });
1265                 }
1266                 
1267                 jit.reclaimSpaceOnStackForCCall();
1268                 jit.move(GPRInfo::returnValueGPR, scratchGPR);
1269                 
1270                 CCallHelpers::Jump noException =
1271                     jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1272                 
1273                 state.restoreLiveRegistersFromStackForCallWithThrownException();
1274                 state.emitExplicitExceptionHandler();
1275                 
1276                 noException.link(&jit);
1277                 state.restoreLiveRegistersFromStackForCall();
1278             }
1279         }
1280
1281         if (isInlineOffset(m_offset)) {
1282             jit.storeValue(
1283                 valueRegs,
1284                 CCallHelpers::Address(
1285                     baseGPR,
1286                     JSObject::offsetOfInlineStorage() +
1287                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1288         } else {
1289             if (!allocating)
1290                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1291             jit.storeValue(
1292                 valueRegs,
1293                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1294         }
1295         
1296         // If we allocated using an operation, then the operation already executed the store barrier
1297         // and already stored the butterfly into the object.
1298         if (allocatingInline) {
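            // Inlined write barrier: skip it if the base object is already remembered or in eden;
            // otherwise append the object to the VM's write barrier buffer, bailing to the slow path
            // if the buffer is full.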
1299             CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
1300             WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1301             jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1302             slowPath.append(
1303                 jit.branch32(
1304                     CCallHelpers::AboveOrEqual, scratchGPR2,
1305                     CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
1306             
1307             jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1308             jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1309             
1310             jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
1311             // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
1312             jit.storePtr(
1313                 baseGPR,
1314                 CCallHelpers::BaseIndex(
1315                     scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
1316                     static_cast<int32_t>(-sizeof(void*))));
1317             ownerIsRememberedOrInEden.link(&jit);
1318             
1319             // We set the new butterfly and the structure last. Doing it this way ensures that
1320             // whatever we had done up to this point is forgotten if we end up branching to the
1321             // slow path.
1322             
1323             jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1324         }
1325         
1326         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1327         jit.store32(
1328             CCallHelpers::TrustedImm32(structureBits),
1329             CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1330
1331         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1332         state.succeed();
1333         
1334         // We will have a slow path if we were allocating without the help of an operation.
1335         if (allocatingInline) {
1336             if (allocator.didReuseRegisters()) {
1337                 slowPath.link(&jit);
1338                 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1339                 state.failAndIgnore.append(jit.jump());
1340             } else
1341                 state.failAndIgnore.append(slowPath);
1342         } else
1343             RELEASE_ASSERT(slowPath.empty());
1344         return;
1345     }
1346
1347     case ArrayLength: {
1348         jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1349         jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
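        // Array lengths are unsigned; a length that does not fit in an int32 appears negative here and
        // cannot be boxed as an Int32, so we let the slow path handle it.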
1350         state.failAndIgnore.append(
1351             jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1352         jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1353         state.succeed();
1354         return;
1355     }
1356
1357     case StringLength: {
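        // String lengths are limited to fit in an int32, so unlike ArrayLength no range check is needed
        // before boxing.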
1358         jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1359         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1360         state.succeed();
1361         return;
1362     }
1363         
1364     case IntrinsicGetter: {
1365         RELEASE_ASSERT(isValidOffset(offset()));
1366
1367         // We need to ensure the getter value does not move out from under us. Note that GetterSetters
1368         // are immutable, so we just need to watch the property, not any value inside it.
1369         Structure* currStructure;
1370         if (m_conditionSet.isEmpty())
1371             currStructure = structure();
1372         else
1373             currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1374         currStructure->startWatchingPropertyForReplacements(vm, offset());
1375
1376         emitIntrinsicGetter(state);
1377         return;
1378     }
1379
1380     case DirectArgumentsLength:
1381     case ScopedArgumentsLength:
1382     case MegamorphicLoad:
1383         // These need to be handled by generateWithGuard(), since the guard is part of the
1384         // algorithm. We can be sure that nobody will call generate() directly for these since they
1385         // are not guarded by structure checks.
1386         RELEASE_ASSERT_NOT_REACHED();
1387     }
1388     
1389     RELEASE_ASSERT_NOT_REACHED();
1390 }
1391
1392 PolymorphicAccess::PolymorphicAccess() { }
1393 PolymorphicAccess::~PolymorphicAccess() { }
1394
1395 AccessGenerationResult PolymorphicAccess::addCases(
1396     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1397     Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
1398 {
1399     SuperSamplerScope superSamplerScope(false);
1400     
1401     // This method will add the originalCasesToAdd to the list one at a time while preserving the
1402     // invariants:
1403     // - If a newly added case canReplace() any existing case, then the existing case is removed before
1404     //   the new case is added. Removal doesn't change the order of the list. Any number of existing cases
1405     //   can be removed via the canReplace() rule.
1406     // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1407     //   cascade through the cases in reverse order, you will get the most recent cases first.
1408     // - If this method fails (i.e. doesn't add the cases), then both the previous case list
1409     //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
1410     //   add more things after failure.
1411     
1412     // First ensure that the originalCasesToAdd doesn't contain duplicates.
1413     Vector<std::unique_ptr<AccessCase>> casesToAdd;
1414     for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1415         std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1416
1417         // Add it only if it is not replaced by a subsequent case in the list.
1418         bool found = false;
1419         for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1420             if (originalCasesToAdd[j]->canReplace(*myCase)) {
1421                 found = true;
1422                 break;
1423             }
1424         }
1425
1426         if (found)
1427             continue;
1428         
1429         casesToAdd.append(WTFMove(myCase));
1430     }
1431
1432     if (verbose)
1433         dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1434
1435     // If there aren't any cases to add, then fail on the grounds that there's no point in generating a
1436     // new stub that would be identical to the old one. Returning MadeNoChanges tells the caller to just
1437     // keep doing what it was doing before.
1438     if (casesToAdd.isEmpty())
1439         return AccessGenerationResult::MadeNoChanges;
1440
1441     // Now add things to the new list. Note that at this point, we will still have old cases that
1442     // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
1443     for (auto& caseToAdd : casesToAdd) {
1444         commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
1445         m_list.append(WTFMove(caseToAdd));
1446     }
1447     
1448     if (verbose)
1449         dataLog("After addCases: m_list: ", listDump(m_list), "\n");
1450
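    // We only buffered the new cases; the stub itself was not regenerated. The caller decides when the
    // buffered list is worth compiling into a new stub via regenerate().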
1451     return AccessGenerationResult::Buffered;
1452 }
1453
1454 AccessGenerationResult PolymorphicAccess::addCase(
1455     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1456     std::unique_ptr<AccessCase> newAccess)
1457 {
1458     Vector<std::unique_ptr<AccessCase>> newAccesses;
1459     newAccesses.append(WTFMove(newAccess));
1460     return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1461 }
1462
1463 bool PolymorphicAccess::visitWeak(VM& vm) const
1464 {
1465     for (unsigned i = 0; i < size(); ++i) {
1466         if (!at(i).visitWeak(vm))
1467             return false;
1468     }
1469     if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1470         for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1471             if (!Heap::isMarked(weakReference.get()))
1472                 return false;
1473         }
1474     }
1475     return true;
1476 }
1477
1478 void PolymorphicAccess::dump(PrintStream& out) const
1479 {
1480     out.print(RawPointer(this), ":[");
1481     CommaPrinter comma;
1482     for (auto& entry : m_list)
1483         out.print(comma, *entry);
1484     out.print("]");
1485 }
1486
1487 void PolymorphicAccess::commit(
1488     VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
1489     StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
1490 {
1491     // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
1492     // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
1493     // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
1494     // vanilla objects or exotic objects from within JSC (like Arguments, which are super popular).
1495     // Those common kinds of JSC object accesses don't hit this case.
1496     
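    // Each watchpoint set that this case depends on gets a watchpoint that clears this stub when it
    // fires. The empty ObjectPropertyCondition means there is no condition to re-validate: firing the
    // watchpoint simply invalidates the stub.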
1497     for (WatchpointSet* set : accessCase.commit(vm, ident)) {
1498         Watchpoint* watchpoint =
1499             WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
1500                 watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
1501         
1502         set->add(watchpoint);
1503     }
1504 }
1505
1506 AccessGenerationResult PolymorphicAccess::regenerate(
1507     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
1508 {
1509     SuperSamplerScope superSamplerScope(false);
1510     
1511     if (verbose)
1512         dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
1513     
1514     AccessGenerationState state;
1515
1516     state.access = this;
1517     state.stubInfo = &stubInfo;
1518     state.ident = &ident;
1519     
1520     state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1521     state.valueRegs = JSValueRegs(
1522 #if USE(JSVALUE32_64)
1523         static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
1524 #endif
1525         static_cast<GPRReg>(stubInfo.patch.valueGPR));
1526
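    // Build a scratch register allocator over the registers this IC is allowed to use, but lock the
    // registers holding the base and the value (and the base tag on 32-bit) so they are never handed
    // out as scratch.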
1527     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1528     state.allocator = &allocator;
1529     allocator.lock(state.baseGPR);
1530     allocator.lock(state.valueRegs);
1531 #if USE(JSVALUE32_64)
1532     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1533 #endif
1534
1535     state.scratchGPR = allocator.allocateScratchGPR();
1536     
1537     CCallHelpers jit(&vm, codeBlock);
1538     state.jit = &jit;
1539
1540     state.preservedReusedRegisterState =
1541         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
1542
1543     // Regenerating is our opportunity to figure out what our list of cases should look like. We
1544     // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
1545     // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
1546     // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
1547     // from the code of the currently installed (soon to be previous) stub.
1548     ListType cases;
1549     for (unsigned i = 0; i < m_list.size(); ++i) {
1550         AccessCase& someCase = *m_list[i];
1551         // Ignore cases that cannot possibly succeed anymore.
1552         if (!someCase.couldStillSucceed())
1553             continue;
1554         
1555         // Figure out if this is replaced by any later case.
1556         bool found = false;
1557         for (unsigned j = i + 1; j < m_list.size(); ++j) {
1558             if (m_list[j]->canReplace(someCase)) {
1559                 found = true;
1560                 break;
1561             }
1562         }
1563         if (found)
1564             continue;
1565         
1566         // FIXME: Do we have to clone cases that aren't generated? Maybe we can just take those
1567         // from m_list, since we don't have to keep those alive if we fail.
1568         // https://bugs.webkit.org/show_bug.cgi?id=156493
1569         cases.append(someCase.clone());
1570     }
1571     
1572     if (verbose)
1573         dataLog("In regenerate: cases: ", listDump(cases), "\n");
1574     
1575     // Now that we've removed obviously unnecessary cases, we can check if the megamorphic load
1576     // optimization is applicable. Note that we basically tune megamorphicLoadCost according to code
1577     // size. It would be faster to just allow more repatching with many load cases, and avoid the
1578     // megamorphicLoad optimization, if we had infinite executable memory.
1579     if (cases.size() >= Options::megamorphicLoadCost()) {
1580         unsigned numSelfLoads = 0;
1581         for (auto& newCase : cases) {
1582             if (newCase->canBeReplacedByMegamorphicLoad())
1583                 numSelfLoads++;
1584         }
1585         
1586         if (numSelfLoads >= Options::megamorphicLoadCost()) {
1587             if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1588                 cases.removeAllMatching(
1589                     [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1590                         return newCase->canBeReplacedByMegamorphicLoad();
1591                     });
1592                 
1593                 cases.append(WTFMove(mega));
1594             }
1595         }
1596     }
1597     
1598     if (verbose)
1599         dataLog("Optimized cases: ", listDump(cases), "\n");
1600     
1601     // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
1602     // won't change that set anymore.
1603     
1604     bool allGuardedByStructureCheck = true;
1605     bool hasJSGetterSetterCall = false;
1606     for (auto& newCase : cases) {
1607         commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
1608         allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
1609         if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
1610             hasJSGetterSetterCall = true;
1611     }
1612
1613     if (cases.isEmpty()) {
1614         // This is super unlikely, but we make it legal anyway.
1615         state.failAndRepatch.append(jit.jump());
1616     } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1617         // If there are any proxies in the list, we cannot just use a binary switch over the structure.
1618         // We need to resort to a cascade. A cascade also happens to be optimal if we have just one
1619         // case.
1620         CCallHelpers::JumpList fallThrough;
1621
1622         // Cascade through the list, preferring newer entries.
1623         for (unsigned i = cases.size(); i--;) {
1624             fallThrough.link(&jit);
1625             cases[i]->generateWithGuard(state, fallThrough);
1626         }
1627         state.failAndRepatch.append(fallThrough);
1628     } else {
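        // Every remaining case is guarded purely by a structure check, so load the structure ID once
        // and dispatch over the known structure IDs with a binary switch.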
1629         jit.load32(
1630             CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1631             state.scratchGPR);
1632         
1633         Vector<int64_t> caseValues(cases.size());
1634         for (unsigned i = 0; i < cases.size(); ++i)
1635             caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1636         
1637         BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1638         while (binarySwitch.advance(jit))
1639             cases[binarySwitch.caseIndex()]->generate(state);
1640         state.failAndRepatch.append(binarySwitch.fallThrough());
1641     }
1642
1643     if (!state.failAndIgnore.empty()) {
1644         state.failAndIgnore.link(&jit);
1645         
1646         // Make sure that the inline cache optimization code knows that we are taking the slow path
1647         // because of something that isn't patchable. The slow path will decrement "countdown" and will
1648         // only patch things if the countdown reaches zero. We increment the countdown here to ensure
1649         // that the slow path does not try to patch.
1650         jit.load8(&stubInfo.countdown, state.scratchGPR);
1651         jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1652         jit.store8(state.scratchGPR, &stubInfo.countdown);
1653     }
1654
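    // Emit the shared repatch-failure exit. If we reused registers, the failAndRepatch path has to pop
    // them back before leaving; otherwise failAndRepatch can be linked directly to the slow path.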
1655     CCallHelpers::JumpList failure;
1656     if (allocator.didReuseRegisters()) {
1657         state.failAndRepatch.link(&jit);
1658         state.restoreScratch();
1659     } else
1660         failure = state.failAndRepatch;
1661     failure.append(jit.jump());
1662
1663     CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1664     CallSiteIndex callSiteIndexForExceptionHandling;
1665     if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1666         // Emit the exception handler.
1667         // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1668         // Note also that this is not reachable from custom getters/setters, which have their own
1669         // exception handling logic that doesn't go through genericUnwind.
1670         MacroAssembler::Label makeshiftCatchHandler = jit.label();
1671
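        // Reconstruct this stub's stack pointer from the frame being caught: start from the code block's
        // stack pointer offset and subtract the extra stack this stub used to preserve reused registers
        // and live registers across the getter/setter call.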
1672         int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1673         stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1674         stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1675
1676         jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1677         jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1678
1679         state.restoreLiveRegistersFromStackForCallWithThrownException();
1680         state.restoreScratch();
1681         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1682
1683         HandlerInfo oldHandler = state.originalExceptionHandler();
1684         CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1685         jit.addLinkTask(
1686             [=] (LinkBuffer& linkBuffer) {
1687                 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1688
1689                 HandlerInfo handlerToRegister = oldHandler;
1690                 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1691                 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1692                 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1693                 codeBlock->appendExceptionHandler(handlerToRegister);
1694             });
1695
1696         // We set these so that the stub will remove itself from the CodeBlock's
1697         // exception handler table when it is deallocated.
1698         codeBlockThatOwnsExceptionHandlers = codeBlock;
1699         ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1700         callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1701     }
1702
1703     LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1704     if (linkBuffer.didFailToAllocate()) {
1705         if (verbose)
1706             dataLog("Did fail to allocate.\n");
1707         return AccessGenerationResult::GaveUp;
1708     }
1709
1710     CodeLocationLabel successLabel =
1711         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
1712         
1713     linkBuffer.link(state.success, successLabel);
1714
1715     linkBuffer.link(
1716         failure,
1717         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
1718     
1719     if (verbose)
1720         dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1721
1722     MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1723         codeBlock, linkBuffer,
1724         ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1725
1726     bool doesCalls = false;
1727     Vector<JSCell*> cellsToMark;
1728     for (auto& entry : cases)
1729         doesCalls |= entry->doesCalls(&cellsToMark);
1730     
1731     m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1732     m_watchpoints = WTFMove(state.watchpoints);
1733     if (!state.weakReferences.isEmpty())
1734         m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1735     if (verbose)
1736         dataLog("Returning: ", code.code(), "\n");
1737     
1738     m_list = WTFMove(cases);
1739     
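    // Once the case list reaches its maximum size we report GeneratedFinalCode, signalling that this
    // access should not expect any further repatching; otherwise we report GeneratedNewCode.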
1740     AccessGenerationResult::Kind resultKind;
1741     if (m_list.size() >= Options::maxAccessVariantListSize())
1742         resultKind = AccessGenerationResult::GeneratedFinalCode;
1743     else
1744         resultKind = AccessGenerationResult::GeneratedNewCode;
1745     
1746     return AccessGenerationResult(resultKind, code.code());
1747 }
1748
1749 void PolymorphicAccess::aboutToDie()
1750 {
1751     if (m_stubRoutine)
1752         m_stubRoutine->aboutToDie();
1753 }
1754
1755 } // namespace JSC
1756
1757 namespace WTF {
1758
1759 using namespace JSC;
1760
1761 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1762 {
1763     switch (kind) {
1764     case AccessGenerationResult::MadeNoChanges:
1765         out.print("MadeNoChanges");
1766         return;
1767     case AccessGenerationResult::GaveUp:
1768         out.print("GaveUp");
1769         return;
1770     case AccessGenerationResult::Buffered:
1771         out.print("Buffered");
1772         return;
1773     case AccessGenerationResult::GeneratedNewCode:
1774         out.print("GeneratedNewCode");
1775         return;
1776     case AccessGenerationResult::GeneratedFinalCode:
1777         out.print("GeneratedFinalCode");
1778         return;
1779     }
1780     
1781     RELEASE_ASSERT_NOT_REACHED();
1782 }
1783
1784 void printInternal(PrintStream& out, AccessCase::AccessType type)
1785 {
1786     switch (type) {
1787     case AccessCase::Load:
1788         out.print("Load");
1789         return;
1790     case AccessCase::MegamorphicLoad:
1791         out.print("MegamorphicLoad");
1792         return;
1793     case AccessCase::Transition:
1794         out.print("Transition");
1795         return;
1796     case AccessCase::Replace:
1797         out.print("Replace");
1798         return;
1799     case AccessCase::Miss:
1800         out.print("Miss");
1801         return;
1802     case AccessCase::GetGetter:
1803         out.print("GetGetter");
1804         return;
1805     case AccessCase::Getter:
1806         out.print("Getter");
1807         return;
1808     case AccessCase::Setter:
1809         out.print("Setter");
1810         return;
1811     case AccessCase::CustomValueGetter:
1812         out.print("CustomValueGetter");
1813         return;
1814     case AccessCase::CustomAccessorGetter:
1815         out.print("CustomAccessorGetter");
1816         return;
1817     case AccessCase::CustomValueSetter:
1818         out.print("CustomValueSetter");
1819         return;
1820     case AccessCase::CustomAccessorSetter:
1821         out.print("CustomAccessorSetter");
1822         return;
1823     case AccessCase::IntrinsicGetter:
1824         out.print("IntrinsicGetter");
1825         return;
1826     case AccessCase::InHit:
1827         out.print("InHit");
1828         return;
1829     case AccessCase::InMiss:
1830         out.print("InMiss");
1831         return;
1832     case AccessCase::ArrayLength:
1833         out.print("ArrayLength");
1834         return;
1835     case AccessCase::StringLength:
1836         out.print("StringLength");
1837         return;
1838     case AccessCase::DirectArgumentsLength:
1839         out.print("DirectArgumentsLength");
1840         return;
1841     case AccessCase::ScopedArgumentsLength:
1842         out.print("ScopedArgumentsLength");
1843         return;
1844     }
1845
1846     RELEASE_ASSERT_NOT_REACHED();
1847 }
1848
1849 void printInternal(PrintStream& out, AccessCase::State state)
1850 {
1851     switch (state) {
1852     case AccessCase::Primordial:
1853         out.print("Primordial");
1854         return;
1855     case AccessCase::Committed:
1856         out.print("Committed");
1857         return;
1858     case AccessCase::Generated:
1859         out.print("Generated");
1860         return;
1861     }
1862
1863     RELEASE_ASSERT_NOT_REACHED();
1864 }
1865
1866 } // namespace WTF
1867
1868 #endif // ENABLE(JIT)
1869
1870