Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
1 /*
2  * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "PolymorphicAccess.h"
28
29 #if ENABLE(JIT)
30
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "DirectArguments.h"
35 #include "GetterSetter.h"
36 #include "Heap.h"
37 #include "JITOperations.h"
38 #include "JSCInlines.h"
39 #include "LinkBuffer.h"
40 #include "ScopedArguments.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "StructureStubClearingWatchpoint.h"
43 #include "StructureStubInfo.h"
44 #include <wtf/CommaPrinter.h>
45 #include <wtf/ListDump.h>
46
47 namespace JSC {
48
49 static const bool verbose = false;
50
51 void AccessGenerationResult::dump(PrintStream& out) const
52 {
53     out.print(m_kind);
54     if (m_code)
55         out.print(":", m_code);
56 }
57
58 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
59 {
60     return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
61         watchpoints, jit->codeBlock(), stubInfo, condition);
62 }
63
64 void AccessGenerationState::restoreScratch()
65 {
66     allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
67 }
68
69 void AccessGenerationState::succeed()
70 {
71     restoreScratch();
72     success.append(jit->jump());
73 }
74
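// Compute the register sets this stub needs if it makes a call: the registers that are live at the
// exception-handling call site (so they can be restored if the callee throws and we unwind back into
// optimized code), plus whatever the scratch allocator has in use and any 'extra' registers the caller
// names, minus registers that never need saving across a JS call.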
75 void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling(const RegisterSet& extra)
76 {
77     if (!m_calculatedRegistersForCallAndExceptionHandling) {
78         m_calculatedRegistersForCallAndExceptionHandling = true;
79
80         m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
81         m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
82         if (m_needsToRestoreRegistersIfException)
83             RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
84
85         m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
86         // Registers that never need saving across a JS call are dropped; the caller's 'extra' registers are always kept.
87         m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
88         m_liveRegistersForCall.merge(extra);
89     }
90 }
91
92 void AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra)
93 {
94     calculateLiveRegistersForCallAndExceptionHandling(extra);
95     
96     unsigned extraStackPadding = 0;
97     unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
98     if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
99         RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
100     m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
101 }
102
103 void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
104 {
105     RegisterSet dontRestore;
106     if (isGetter) {
107         // This is the result value. We don't want to overwrite the result with what we stored to the stack.
108         // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
109         dontRestore.set(valueRegs);
110     }
111     restoreLiveRegistersFromStackForCall(dontRestore);
112 }
113
114 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
115 {
116     // Even if we're a getter, we don't want to ignore the result value like we normally do
117     // because the getter threw, and therefore, didn't return a value that means anything.
118     // Instead, we want to restore that register to what it was upon entering the getter
119     // inline cache. The subtlety here is if the base and the result are the same register,
120     // and the getter threw, we want OSR exit to see the original base value, not the result
121     // of the getter call.
122     RegisterSet dontRestore = liveRegistersForCall();
123     // As an optimization here, we only need to restore what is live for exception handling.
124     // We can construct the dontRestore set to accomplish this goal by having it contain only
125     // what is live for call but not live for exception handling. By ignoring things that are
126     // only live at the call but not the exception handler, we will only restore things live
127     // at the exception handler.
128     dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
129     restoreLiveRegistersFromStackForCall(dontRestore);
130 }
131
132 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
133 {
134     unsigned extraStackPadding = 0;
135     ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
136 }
137
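// If this stub needs to restore registers when its callee throws, the call must be tagged with a fresh
// CallSiteIndex that the unwinder can map back to the original one; otherwise the original CallSiteIndex
// from the stub info can be reused directly.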
138 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
139 {
140     RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
141
142     if (!m_calculatedCallSiteIndex) {
143         m_calculatedCallSiteIndex = true;
144
145         if (m_needsToRestoreRegistersIfException)
146             m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
147         else
148             m_callSiteIndex = originalCallSiteIndex();
149     }
150
151     return m_callSiteIndex;
152 }
153
154 const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
155 {
156     RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
157     HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
158     RELEASE_ASSERT(exceptionHandler);
159     return *exceptionHandler;
160 }
161
162 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
163
164 void AccessGenerationState::emitExplicitExceptionHandler()
165 {
166     restoreScratch();
167     jit->copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
168     if (needsToRestoreRegistersIfException()) {
169         // The JIT that produced the original exception handling call site
170         // expects the OSR exit to be arrived at from genericUnwind. Therefore
171         // we must model what genericUnwind does here, i.e. set callFrameForCatch
172         // and copy the callee saves.
173
174         jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
175         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
176
177         // We don't need to insert a new exception handler in the table
178         // because we're doing a manual exception check here, i.e. we'll
179         // never arrive here from genericUnwind().
180         HandlerInfo originalHandler = originalExceptionHandler();
181         jit->addLinkTask(
182             [=] (LinkBuffer& linkBuffer) {
183                 linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
184             });
185     } else {
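        // Nothing needs to be restored for exception handling here, so take the generic path:
        // call lookupExceptionHandler() to find the handler for this frame and then jump to it.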
186         jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
187         CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
188         jit->addLinkTask(
189             [=] (LinkBuffer& linkBuffer) {
190                 linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
191             });
192         jit->jumpToExceptionHandler();
193     }
194 }
195
196 AccessCase::AccessCase()
197 {
198 }
199
200 std::unique_ptr<AccessCase> AccessCase::tryGet(
201     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
202     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
203 {
204     std::unique_ptr<AccessCase> result(new AccessCase());
205
206     result->m_type = type;
207     result->m_offset = offset;
208     result->m_structure.set(vm, owner, structure);
209     result->m_conditionSet = conditionSet;
210
211     if (viaProxy || additionalSet) {
212         result->m_rareData = std::make_unique<RareData>();
213         result->m_rareData->viaProxy = viaProxy;
214         result->m_rareData->additionalSet = additionalSet;
215     }
216
217     return result;
218 }
219
220 std::unique_ptr<AccessCase> AccessCase::get(
221     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
222     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
223     PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
224 {
225     std::unique_ptr<AccessCase> result(new AccessCase());
226
227     result->m_type = type;
228     result->m_offset = offset;
229     result->m_structure.set(vm, owner, structure);
230     result->m_conditionSet = conditionSet;
231
232     if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
233         result->m_rareData = std::make_unique<RareData>();
234         result->m_rareData->viaProxy = viaProxy;
235         result->m_rareData->additionalSet = additionalSet;
236         result->m_rareData->customAccessor.getter = customGetter;
237         result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
238     }
239
240     return result;
241 }
242
243 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
244 {
245     UNUSED_PARAM(vm);
246     UNUSED_PARAM(owner);
247     
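    // The megamorphic probe loop (see generateWithGuard) needs several scratch registers on top of the
    // base, value, and stub scratch registers, so don't even try on register-starved targets like
    // 32-bit x86.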
248     if (GPRInfo::numberOfRegisters < 9)
249         return nullptr;
250     
251     std::unique_ptr<AccessCase> result(new AccessCase());
252     
253     result->m_type = MegamorphicLoad;
254     
255     return result;
256 }
257
258 std::unique_ptr<AccessCase> AccessCase::replace(
259     VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
260 {
261     std::unique_ptr<AccessCase> result(new AccessCase());
262
263     result->m_type = Replace;
264     result->m_offset = offset;
265     result->m_structure.set(vm, owner, structure);
266
267     return result;
268 }
269
270 std::unique_ptr<AccessCase> AccessCase::transition(
271     VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
272     const ObjectPropertyConditionSet& conditionSet)
273 {
274     RELEASE_ASSERT(oldStructure == newStructure->previousID());
275
276     // Skip optimizing the case where we need a realloc, if we don't have
277     // enough registers to make it happen.
278     if (GPRInfo::numberOfRegisters < 6
279         && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
280         && oldStructure->outOfLineCapacity()) {
281         return nullptr;
282     }
283
284     std::unique_ptr<AccessCase> result(new AccessCase());
285
286     result->m_type = Transition;
287     result->m_offset = offset;
288     result->m_structure.set(vm, owner, newStructure);
289     result->m_conditionSet = conditionSet;
290
291     return result;
292 }
293
294 std::unique_ptr<AccessCase> AccessCase::setter(
295     VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
296     const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
297     JSObject* customSlotBase)
298 {
299     std::unique_ptr<AccessCase> result(new AccessCase());
300
301     result->m_type = type;
302     result->m_offset = offset;
303     result->m_structure.set(vm, owner, structure);
304     result->m_conditionSet = conditionSet;
305     result->m_rareData = std::make_unique<RareData>();
306     result->m_rareData->customAccessor.setter = customSetter;
307     result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
308
309     return result;
310 }
311
312 std::unique_ptr<AccessCase> AccessCase::in(
313     VM& vm, JSCell* owner, AccessType type, Structure* structure,
314     const ObjectPropertyConditionSet& conditionSet)
315 {
316     std::unique_ptr<AccessCase> result(new AccessCase());
317
318     result->m_type = type;
319     result->m_structure.set(vm, owner, structure);
320     result->m_conditionSet = conditionSet;
321
322     return result;
323 }
324
325 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
326 {
327     std::unique_ptr<AccessCase> result(new AccessCase());
328
329     result->m_type = type;
330
331     return result;
332 }
333
334 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
335     VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
336     Structure* structure, const ObjectPropertyConditionSet& conditionSet)
337 {
338     std::unique_ptr<AccessCase> result(new AccessCase());
339
340     result->m_type = IntrinsicGetter;
341     result->m_structure.set(vm, owner, structure);
342     result->m_conditionSet = conditionSet;
343     result->m_offset = offset;
344
345     result->m_rareData = std::make_unique<RareData>();
346     result->m_rareData->intrinsicFunction.set(vm, owner, getter);
347
348     return result;
349 }
350
351 AccessCase::~AccessCase()
352 {
353 }
354
355 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
356     VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
357 {
358     switch (stubInfo.cacheType) {
359     case CacheType::GetByIdSelf:
360         return get(
361             vm, owner, Load, stubInfo.u.byIdSelf.offset,
362             stubInfo.u.byIdSelf.baseObjectStructure.get());
363
364     case CacheType::PutByIdReplace:
365         return replace(
366             vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
367
368     default:
369         return nullptr;
370     }
371 }
372
373 std::unique_ptr<AccessCase> AccessCase::clone() const
374 {
375     std::unique_ptr<AccessCase> result(new AccessCase());
376     result->m_type = m_type;
377     result->m_offset = m_offset;
378     result->m_structure = m_structure;
379     result->m_conditionSet = m_conditionSet;
380     if (RareData* rareData = m_rareData.get()) {
381         result->m_rareData = std::make_unique<RareData>();
382         result->m_rareData->viaProxy = rareData->viaProxy;
383         result->m_rareData->additionalSet = rareData->additionalSet;
384         // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
385         result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
386         result->m_rareData->customSlotBase = rareData->customSlotBase;
387         result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
388     }
389     return result;
390 }
391
392 Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
393 {
394     // It's fine to commit something that is already committed. That arises when we switch to using
395     // newly allocated watchpoints. When it happens, it's not efficient - but we think that's OK
396     // because most AccessCases have no extra watchpoints anyway.
397     RELEASE_ASSERT(m_state == Primordial || m_state == Committed);
398     
399     Vector<WatchpointSet*, 2> result;
400     
401     if ((structure() && structure()->needImpurePropertyWatchpoint())
402         || m_conditionSet.needImpurePropertyWatchpoint())
403         result.append(vm.ensureWatchpointSetForImpureProperty(ident));
404
405     if (additionalSet())
406         result.append(additionalSet());
407     
408     m_state = Committed;
409     
410     return result;
411 }
412
413 bool AccessCase::guardedByStructureCheck() const
414 {
415     if (viaProxy())
416         return false;
417
418     switch (m_type) {
419     case MegamorphicLoad:
420     case ArrayLength:
421     case StringLength:
422     case DirectArgumentsLength:
423     case ScopedArgumentsLength:
424         return false;
425     default:
426         return true;
427     }
428 }
429
430 JSObject* AccessCase::alternateBase() const
431 {
432     if (customSlotBase())
433         return customSlotBase();
434     return conditionSet().slotBaseCondition().object();
435 }
436
437 bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
438 {
439     switch (type()) {
440     case Getter:
441     case Setter:
442     case CustomValueGetter:
443     case CustomAccessorGetter:
444     case CustomValueSetter:
445     case CustomAccessorSetter:
446         return true;
447     case Transition:
448         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
449             && structure()->couldHaveIndexingHeader()) {
450             if (cellsToMark)
451                 cellsToMark->append(newStructure());
452             return true;
453         }
454         return false;
455     default:
456         return false;
457     }
458 }
459
460 bool AccessCase::couldStillSucceed() const
461 {
462     return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
463 }
464
465 bool AccessCase::canBeReplacedByMegamorphicLoad() const
466 {
467     if (type() == MegamorphicLoad)
468         return true;
469     
470     return type() == Load
471         && !viaProxy()
472         && conditionSet().isEmpty()
473         && !additionalSet()
474         && !customSlotBase();
475 }
476
477 bool AccessCase::canReplace(const AccessCase& other) const
478 {
479     // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
480     // It's fine for this to return false if it's in doubt.
481
482     switch (type()) {
483     case MegamorphicLoad:
484         return other.canBeReplacedByMegamorphicLoad();
485     case ArrayLength:
486     case StringLength:
487     case DirectArgumentsLength:
488     case ScopedArgumentsLength:
489         return other.type() == type();
490     default:
491         if (!guardedByStructureCheck() || !other.guardedByStructureCheck())
492             return false;
493         
494         return structure() == other.structure();
495     }
496 }
497
498 void AccessCase::dump(PrintStream& out) const
499 {
500     out.print(m_type, ":(");
501
502     CommaPrinter comma;
503     
504     out.print(comma, m_state);
505
506     if (m_type == Transition)
507         out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
508     else if (m_structure)
509         out.print(comma, "structure = ", pointerDump(m_structure.get()));
510
511     if (isValidOffset(m_offset))
512         out.print(comma, "offset = ", m_offset);
513     if (!m_conditionSet.isEmpty())
514         out.print(comma, "conditions = ", m_conditionSet);
515
516     if (RareData* rareData = m_rareData.get()) {
517         if (rareData->viaProxy)
518             out.print(comma, "viaProxy = ", rareData->viaProxy);
519         if (rareData->additionalSet)
520             out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
521         if (rareData->callLinkInfo)
522             out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
523         if (rareData->customAccessor.opaque)
524             out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
525         if (rareData->customSlotBase)
526             out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
527     }
528
529     out.print(")");
530 }
531
532 bool AccessCase::visitWeak(VM& vm) const
533 {
534     if (m_structure && !Heap::isMarked(m_structure.get()))
535         return false;
536     if (!m_conditionSet.areStillLive())
537         return false;
538     if (m_rareData) {
539         if (m_rareData->callLinkInfo)
540             m_rareData->callLinkInfo->visitWeak(vm);
541         if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
542             return false;
543         if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
544             return false;
545     }
546     return true;
547 }
548
549 bool AccessCase::propagateTransitions(SlotVisitor& visitor) const
550 {
551     bool result = true;
552     
553     if (m_structure)
554         result &= m_structure->markIfCheap(visitor);
555     
556     switch (m_type) {
557     case Transition:
558         if (Heap::isMarked(m_structure->previousID()))
559             visitor.appendUnbarrieredReadOnlyPointer(m_structure.get());
560         else
561             result = false;
562         break;
563     default:
564         break;
565     }
566     
567     return result;
568 }
569
570 void AccessCase::generateWithGuard(
571     AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
572 {
573     SuperSamplerScope superSamplerScope(false);
574
575     RELEASE_ASSERT(m_state == Committed);
576     m_state = Generated;
577     
578     CCallHelpers& jit = *state.jit;
579     VM& vm = *jit.vm();
580     const Identifier& ident = *state.ident;
581     StructureStubInfo& stubInfo = *state.stubInfo;
582     JSValueRegs valueRegs = state.valueRegs;
583     GPRReg baseGPR = state.baseGPR;
584     GPRReg scratchGPR = state.scratchGPR;
585     
586     UNUSED_PARAM(vm);
587
588     switch (m_type) {
589     case ArrayLength: {
590         ASSERT(!viaProxy());
591         jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
592         fallThrough.append(
593             jit.branchTest32(
594                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
595         fallThrough.append(
596             jit.branchTest32(
597                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
598         break;
599     }
600
601     case StringLength: {
602         ASSERT(!viaProxy());
603         fallThrough.append(
604             jit.branch8(
605                 CCallHelpers::NotEqual,
606                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
607                 CCallHelpers::TrustedImm32(StringType)));
608         break;
609     }
610         
611     case DirectArgumentsLength: {
612         ASSERT(!viaProxy());
613         fallThrough.append(
614             jit.branch8(
615                 CCallHelpers::NotEqual,
616                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
617                 CCallHelpers::TrustedImm32(DirectArgumentsType)));
618
619         fallThrough.append(
620             jit.branchTestPtr(
621                 CCallHelpers::NonZero,
622                 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
623         jit.load32(
624             CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
625             valueRegs.payloadGPR());
626         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
627         state.succeed();
628         return;
629     }
630         
631     case ScopedArgumentsLength: {
632         ASSERT(!viaProxy());
633         fallThrough.append(
634             jit.branch8(
635                 CCallHelpers::NotEqual,
636                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
637                 CCallHelpers::TrustedImm32(ScopedArgumentsType)));
638
639         fallThrough.append(
640             jit.branchTest8(
641                 CCallHelpers::NonZero,
642                 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
643         jit.load32(
644             CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
645             valueRegs.payloadGPR());
646         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
647         state.succeed();
648         return;
649     }
650         
651     case MegamorphicLoad: {
652         UniquedStringImpl* key = ident.impl();
653         unsigned hash = IdentifierRepHash::hash(key);
654         
655         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
656         allocator.lock(baseGPR);
657 #if USE(JSVALUE32_64)
658         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
659 #endif
660         allocator.lock(valueRegs);
661         allocator.lock(scratchGPR);
662         
663         GPRReg intermediateGPR = scratchGPR;
664         GPRReg maskGPR = allocator.allocateScratchGPR();
665         GPRReg maskedHashGPR = allocator.allocateScratchGPR();
666         GPRReg indexGPR = allocator.allocateScratchGPR();
667         GPRReg offsetGPR = allocator.allocateScratchGPR();
668         
669         if (verbose) {
670             dataLog("baseGPR = ", baseGPR, "\n");
671             dataLog("valueRegs = ", valueRegs, "\n");
672             dataLog("scratchGPR = ", scratchGPR, "\n");
673             dataLog("intermediateGPR = ", intermediateGPR, "\n");
674             dataLog("maskGPR = ", maskGPR, "\n");
675             dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
676             dataLog("indexGPR = ", indexGPR, "\n");
677             dataLog("offsetGPR = ", offsetGPR, "\n");
678         }
679
680         ScratchRegisterAllocator::PreservedState preservedState =
681             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
682
683         CCallHelpers::JumpList myFailAndIgnore;
684         CCallHelpers::JumpList myFallThrough;
685         
686         jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
687         jit.loadPtr(
688             CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
689             intermediateGPR);
690         
691         myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
692         
693         jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
694         jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
695         jit.load32(
696             CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
697             intermediateGPR);
698
699         jit.move(maskGPR, maskedHashGPR);
700         jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
701         jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
702         jit.addPtr(indexGPR, intermediateGPR);
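        // At this point indexGPR points at the index vector (indexSize 32-bit slots) and
        // intermediateGPR = indexGPR + 4 * indexSize, i.e. the PropertyMapEntry array stored
        // immediately after the index vector. maskedHashGPR is our probe position.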
703         
704         CCallHelpers::Label loop = jit.label();
705         
706         jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
707         
708         myFallThrough.append(
709             jit.branch32(
710                 CCallHelpers::Equal,
711                 offsetGPR,
712                 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
713         
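        // The index vector stores 1-based entry indices (0 is EmptyEntryIndex), so convert to a
        // 0-based index before scaling by sizeof(PropertyMapEntry) to get the entry's address.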
714         jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
715         jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
716         jit.addPtr(intermediateGPR, offsetGPR);
717         
718         CCallHelpers::Jump collision =  jit.branchPtr(
719             CCallHelpers::NotEqual,
720             CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
721             CCallHelpers::TrustedImmPtr(key));
722         
723         // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
724         // Check them and then attempt the load.
725         
726         myFallThrough.append(
727             jit.branchTest32(
728                 CCallHelpers::NonZero,
729                 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
730                 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
731         
732         jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
733         
734         jit.loadProperty(baseGPR, offsetGPR, valueRegs);
735         
736         allocator.restoreReusedRegistersByPopping(jit, preservedState);
737         state.succeed();
738         
739         collision.link(&jit);
740
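        // Hash collision: linearly probe to the next slot; the and32 below wraps the probe index
        // back into the table.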
741         jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
742         
743         // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
744         // around isn't super common so we could, for example, recompute the mask from the difference between
745         // the table and index. But before we do that we should probably make it easier to multiply and
746         // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
747         // to have a power-of-2 size.
748         jit.and32(maskGPR, maskedHashGPR);
749         jit.jump().linkTo(loop, &jit);
750         
751         if (allocator.didReuseRegisters()) {
752             myFailAndIgnore.link(&jit);
753             allocator.restoreReusedRegistersByPopping(jit, preservedState);
754             state.failAndIgnore.append(jit.jump());
755             
756             myFallThrough.link(&jit);
757             allocator.restoreReusedRegistersByPopping(jit, preservedState);
758             fallThrough.append(jit.jump());
759         } else {
760             state.failAndIgnore.append(myFailAndIgnore);
761             fallThrough.append(myFallThrough);
762         }
763         return;
764     }
765
766     default: {
767         if (viaProxy()) {
768             fallThrough.append(
769                 jit.branch8(
770                     CCallHelpers::NotEqual,
771                     CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
772                     CCallHelpers::TrustedImm32(PureForwardingProxyType)));
773
774             jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
775
776             fallThrough.append(
777                 jit.branchStructure(
778                     CCallHelpers::NotEqual,
779                     CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
780                     structure()));
781         } else {
782             fallThrough.append(
783                 jit.branchStructure(
784                     CCallHelpers::NotEqual,
785                     CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
786                     structure()));
787         }
788         break;
789     } };
790
791     generateImpl(state);
792 }
793
794 void AccessCase::generate(AccessGenerationState& state)
795 {
796     RELEASE_ASSERT(m_state == Committed);
797     m_state = Generated;
798     
799     generateImpl(state);
800 }
801
802 void AccessCase::generateImpl(AccessGenerationState& state)
803 {
804     SuperSamplerScope superSamplerScope(false);
805     if (verbose)
806         dataLog("Generating code for: ", *this, "\n");
807     
808     ASSERT(m_state == Generated); // We rely on the callers setting this for us.
809     
810     CCallHelpers& jit = *state.jit;
811     VM& vm = *jit.vm();
812     CodeBlock* codeBlock = jit.codeBlock();
813     StructureStubInfo& stubInfo = *state.stubInfo;
814     const Identifier& ident = *state.ident;
815     JSValueRegs valueRegs = state.valueRegs;
816     GPRReg baseGPR = state.baseGPR;
817     GPRReg scratchGPR = state.scratchGPR;
818
819     ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
820
821     for (const ObjectPropertyCondition& condition : m_conditionSet) {
822         Structure* structure = condition.object()->structure();
823
824         if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
825             structure->addTransitionWatchpoint(state.addWatchpoint(condition));
826             continue;
827         }
828
829         if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
830             // The reason why this cannot happen is that we require that PolymorphicAccess calls
831             // AccessCase::generate() only after it has verified that
832             // AccessCase::couldStillSucceed() returned true.
833             
834             dataLog("This condition is no longer met: ", condition, "\n");
835             RELEASE_ASSERT_NOT_REACHED();
836         }
837
838         // We will emit code that has a weak reference that isn't otherwise listed anywhere.
839         state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
840         
841         jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
842         state.failAndRepatch.append(
843             jit.branchStructure(
844                 CCallHelpers::NotEqual,
845                 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
846                 structure));
847     }
848
849     switch (m_type) {
850     case InHit:
851     case InMiss:
852         jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
853         state.succeed();
854         return;
855
856     case Miss:
857         jit.moveTrustedValue(jsUndefined(), valueRegs);
858         state.succeed();
859         return;
860
861     case Load:
862     case GetGetter:
863     case Getter:
864     case Setter:
865     case CustomValueGetter:
866     case CustomAccessorGetter:
867     case CustomValueSetter:
868     case CustomAccessorSetter: {
869         if (isValidOffset(m_offset)) {
870             Structure* currStructure;
871             if (m_conditionSet.isEmpty())
872                 currStructure = structure();
873             else
874                 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
875             currStructure->startWatchingPropertyForReplacements(vm, offset());
876         }
877
878         GPRReg baseForGetGPR;
879         if (viaProxy()) {
880             baseForGetGPR = valueRegs.payloadGPR();
881             jit.loadPtr(
882                 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
883                 baseForGetGPR);
884         } else
885             baseForGetGPR = baseGPR;
886
887         GPRReg baseForAccessGPR;
888         if (!m_conditionSet.isEmpty()) {
889             jit.move(
890                 CCallHelpers::TrustedImmPtr(alternateBase()),
891                 scratchGPR);
892             baseForAccessGPR = scratchGPR;
893         } else
894             baseForAccessGPR = baseForGetGPR;
895
896         GPRReg loadedValueGPR = InvalidGPRReg;
897         if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
898             if (m_type == Load || m_type == GetGetter)
899                 loadedValueGPR = valueRegs.payloadGPR();
900             else
901                 loadedValueGPR = scratchGPR;
902
903             GPRReg storageGPR;
904             if (isInlineOffset(m_offset))
905                 storageGPR = baseForAccessGPR;
906             else {
907                 jit.loadPtr(
908                     CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
909                     loadedValueGPR);
910                 storageGPR = loadedValueGPR;
911             }
912
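            // Load the property itself: inline properties live directly in the object, out-of-line
            // properties live in the butterfly we just loaded; offsetRelativeToBase() yields the right
            // displacement for either case.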
913 #if USE(JSVALUE64)
914             jit.load64(
915                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
916 #else
917             if (m_type == Load || m_type == GetGetter) {
918                 jit.load32(
919                     CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
920                     valueRegs.tagGPR());
921             }
922             jit.load32(
923                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
924                 loadedValueGPR);
925 #endif
926         }
927
928         if (m_type == Load || m_type == GetGetter) {
929             state.succeed();
930             return;
931         }
932
933         // Stuff for custom getters/setters.
934         CCallHelpers::Call operationCall;
935
936         // Stuff for JS getters/setters.
937         CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
938         CCallHelpers::Call fastPathCall;
939         CCallHelpers::Call slowPathCall;
940
941         CCallHelpers::Jump success;
942         CCallHelpers::Jump fail;
943
944         // This also does the necessary calculations of whether or not we're an
945         // exception handling call site.
946         state.preserveLiveRegistersToStackForCall();
947
948         jit.store32(
949             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
950             CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
951
952         if (m_type == Getter || m_type == Setter) {
953             // Create a JS call using a JS call inline cache. Assume that:
954             //
955             // - SP is aligned and represents the extent of the calling compiler's stack usage.
956             //
957             // - FP is set correctly (i.e. it points to the caller's call frame header).
958             //
959             // - SP - FP is an aligned difference.
960             //
961             // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
962             //   code.
963             //
964             // Therefore, we temporarily grow the stack for the purpose of the call and then
965             // shrink it after.
966
967             RELEASE_ASSERT(!m_rareData->callLinkInfo);
968             m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
969             
970             // FIXME: If we generated a polymorphic call stub that jumped back to the getter
971             // stub, which then jumped back to the main code, then we'd have a reachability
972             // situation that the GC doesn't know about. The GC would ensure that the polymorphic
973             // call stub stayed alive, and it would ensure that the main code stayed alive, but
974             // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
975             // be GC objects, and then we'd be able to say that the polymorphic call stub has a
976             // reference to the getter stub.
977             // https://bugs.webkit.org/show_bug.cgi?id=148914
978             m_rareData->callLinkInfo->disallowStubs();
979             
980             m_rareData->callLinkInfo->setUpCall(
981                 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
982
983             CCallHelpers::JumpList done;
984
985             // There is a "this" argument.
986             unsigned numberOfParameters = 1;
987             // ... and a value argument if we're calling a setter.
988             if (m_type == Setter)
989                 numberOfParameters++;
990
991             // Get the accessor; if there ain't one then the result is jsUndefined().
992             if (m_type == Setter) {
993                 jit.loadPtr(
994                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
995                     loadedValueGPR);
996             } else {
997                 jit.loadPtr(
998                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
999                     loadedValueGPR);
1000             }
1001
1002             CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
1003                 CCallHelpers::Zero, loadedValueGPR);
1004
1005             unsigned numberOfRegsForCall = CallFrame::headerSizeInRegisters + numberOfParameters;
1006
1007             unsigned numberOfBytesForCall =
1008                 numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
1009
1010             unsigned alignedNumberOfBytesForCall =
1011                 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
1012
1013             jit.subPtr(
1014                 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
1015                 CCallHelpers::stackPointerRegister);
1016
1017             CCallHelpers::Address calleeFrame = CCallHelpers::Address(
1018                 CCallHelpers::stackPointerRegister,
1019                 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
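            // The callee frame's slots are addressed relative to the new stack pointer; the
            // -sizeof(CallerFrameAndPC) bias accounts for the caller-frame and return-PC slots
            // that get filled in when the call is made.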
1020
1021             jit.store32(
1022                 CCallHelpers::TrustedImm32(numberOfParameters),
1023                 calleeFrame.withOffset(CallFrameSlot::argumentCount * sizeof(Register) + PayloadOffset));
1024
1025             jit.storeCell(
1026                 loadedValueGPR, calleeFrame.withOffset(CallFrameSlot::callee * sizeof(Register)));
1027
1028             jit.storeCell(
1029                 baseForGetGPR,
1030                 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
1031
1032             if (m_type == Setter) {
1033                 jit.storeValue(
1034                     valueRegs,
1035                     calleeFrame.withOffset(
1036                         virtualRegisterForArgument(1).offset() * sizeof(Register)));
1037             }
1038
1039             CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
1040                 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
1041                 CCallHelpers::TrustedImmPtr(0));
1042
1043             fastPathCall = jit.nearCall();
1044             if (m_type == Getter)
1045                 jit.setupResults(valueRegs);
1046             done.append(jit.jump());
1047
1048             slowCase.link(&jit);
1049             jit.move(loadedValueGPR, GPRInfo::regT0);
1050 #if USE(JSVALUE32_64)
1051             // We *always* know that the getter/setter, if non-null, is a cell.
1052             jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
1053 #endif
1054             jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
1055             slowPathCall = jit.nearCall();
1056             if (m_type == Getter)
1057                 jit.setupResults(valueRegs);
1058             done.append(jit.jump());
1059
1060             returnUndefined.link(&jit);
1061             if (m_type == Getter)
1062                 jit.moveTrustedValue(jsUndefined(), valueRegs);
1063
1064             done.link(&jit);
1065
1066             jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
1067                 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1068             state.restoreLiveRegistersFromStackForCall(isGetter());
1069
1070             jit.addLinkTask(
1071                 [=, &vm] (LinkBuffer& linkBuffer) {
1072                     m_rareData->callLinkInfo->setCallLocations(
1073                         linkBuffer.locationOfNearCall(slowPathCall),
1074                         linkBuffer.locationOf(addressOfLinkFunctionCheck),
1075                         linkBuffer.locationOfNearCall(fastPathCall));
1076
1077                     linkBuffer.link(
1078                         slowPathCall,
1079                         CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
1080                 });
1081         } else {
1082             // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
1083             // hard to track if someone did spillage or not, so we just assume that we always need
1084             // to make some space here.
1085             jit.makeSpaceOnStackForCCall();
1086
1087             // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
1088             // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
1089             // Custom values are passed the slotBase (the property holder), custom accessors are passed the thisValue (receiver).
1090             // FIXME: Remove this difference between custom values and custom accessors.
1091             // https://bugs.webkit.org/show_bug.cgi?id=158014
1092             GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
1093 #if USE(JSVALUE64)
1094             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1095                 jit.setupArgumentsWithExecState(
1096                     baseForCustomValue,
1097                     CCallHelpers::TrustedImmPtr(ident.impl()));
1098             } else
1099                 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
1100 #else
1101             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1102                 jit.setupArgumentsWithExecState(
1103                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1104                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1105                     CCallHelpers::TrustedImmPtr(ident.impl()));
1106             } else {
1107                 jit.setupArgumentsWithExecState(
1108                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1109                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1110                     valueRegs.payloadGPR(), valueRegs.tagGPR());
1111             }
1112 #endif
1113             jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
1114
1115             operationCall = jit.call();
1116             jit.addLinkTask(
1117                 [=] (LinkBuffer& linkBuffer) {
1118                     linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
1119                 });
1120
1121             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
1122                 jit.setupResults(valueRegs);
1123             jit.reclaimSpaceOnStackForCCall();
1124
1125             CCallHelpers::Jump noException =
1126                 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1127
1128             state.restoreLiveRegistersFromStackForCallWithThrownException();
1129             state.emitExplicitExceptionHandler();
1130         
1131             noException.link(&jit);
1132             state.restoreLiveRegistersFromStackForCall(isGetter());
1133         }
1134         state.succeed();
1135         return;
1136     }
1137
1138     case Replace: {
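        // If the property has an inferred type, only values matching that type may be stored without
        // first invalidating the inference, so bail to the repatch path on a mismatch.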
1139         if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
1140             if (verbose)
1141                 dataLog("Have type: ", type->descriptor(), "\n");
1142             state.failAndRepatch.append(
1143                 jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
1144         } else if (verbose)
1145             dataLog("Don't have type.\n");
1146         
1147         if (isInlineOffset(m_offset)) {
1148             jit.storeValue(
1149                 valueRegs,
1150                 CCallHelpers::Address(
1151                     baseGPR,
1152                     JSObject::offsetOfInlineStorage() +
1153                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1154         } else {
1155             jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1156             jit.storeValue(
1157                 valueRegs,
1158                 CCallHelpers::Address(
1159                     scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1160         }
1161         state.succeed();
1162         return;
1163     }
1164
1165     case Transition: {
1166         // AccessCase::transition() should have returned null if this wasn't true.
1167         RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1168
1169         if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1170             if (verbose)
1171                 dataLog("Have type: ", type->descriptor(), "\n");
1172             state.failAndRepatch.append(
1173                 jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
1174         } else if (verbose)
1175             dataLog("Don't have type.\n");
1176         
1177         // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
1178         // exactly when this would make calls.
1179         bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
1180         bool reallocating = allocating && structure()->outOfLineCapacity();
1181         bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
1182
1183         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1184         allocator.lock(baseGPR);
1185 #if USE(JSVALUE32_64)
1186         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1187 #endif
1188         allocator.lock(valueRegs);
1189         allocator.lock(scratchGPR);
1190
1191         GPRReg scratchGPR2 = InvalidGPRReg;
1192         GPRReg scratchGPR3 = InvalidGPRReg;
1193         if (allocatingInline) {
1194             scratchGPR2 = allocator.allocateScratchGPR();
1195             scratchGPR3 = allocator.allocateScratchGPR();
1196         }
1197
1198         ScratchRegisterAllocator::PreservedState preservedState =
1199             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1200         
1201         CCallHelpers::JumpList slowPath;
1202
1203         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1204
1205         if (allocating) {
1206             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1207             
1208             if (allocatingInline) {
1209                 MarkedAllocator* allocator = vm.heap.allocatorForAuxiliaryData(newSize);
1210                 
1211                 if (!allocator) {
1212                     // Yuck, this case would suck!
1213                     slowPath.append(jit.jump());
1214                 }
1215                 
1216                 jit.move(CCallHelpers::TrustedImmPtr(allocator), scratchGPR2);
1217                 jit.emitAllocate(scratchGPR, allocator, scratchGPR2, scratchGPR3, slowPath);
1218                 jit.addPtr(CCallHelpers::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR);
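                // The butterfly pointer points just past the out-of-line storage and the (possibly
                // unused) indexing header, so out-of-line properties end up at negative offsets
                // from scratchGPR.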
1219                 
1220                 if (reallocating) {
1221                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1222                     // already had out-of-line property storage).
1223                     size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1224                     ASSERT(newSize > oldSize);
1225             
1226                     jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1227                     
1228                     // We have scratchGPR = new storage, scratchGPR3 = old storage,
1229                     // scratchGPR2 = available
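                    // Copy the old out-of-line properties word by word; they sit at negative
                    // offsets below the butterfly pointer, just past the IndexingHeader.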
1230                     for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1231                         jit.loadPtr(
1232                             CCallHelpers::Address(
1233                                 scratchGPR3,
1234                                 -static_cast<ptrdiff_t>(
1235                                     offset + sizeof(JSValue) + sizeof(void*))),
1236                             scratchGPR2);
1237                         jit.storePtr(
1238                             scratchGPR2,
1239                             CCallHelpers::Address(
1240                                 scratchGPR,
1241                                 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1242                     }
1243                 }
1244             } else {
1245                 // Handle the case where we are allocating out-of-line using an operation.
1246                 RegisterSet extraRegistersToPreserve;
1247                 extraRegistersToPreserve.set(baseGPR);
1248                 extraRegistersToPreserve.set(valueRegs);
1249                 state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1250                 
1251                 jit.store32(
1252                     CCallHelpers::TrustedImm32(
1253                         state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1254                     CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
1255                 
1256                 jit.makeSpaceOnStackForCCall();
1257                 
1258                 if (!reallocating) {
1259                     jit.setupArgumentsWithExecState(baseGPR);
1260                     
1261                     CCallHelpers::Call operationCall = jit.call();
1262                     jit.addLinkTask(
1263                         [=] (LinkBuffer& linkBuffer) {
1264                             linkBuffer.link(
1265                                 operationCall,
1266                                 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1267                         });
1268                 } else {
1269                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1270                     // already had out-of-line property storage).
1271                     jit.setupArgumentsWithExecState(
1272                         baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1273                     
1274                     CCallHelpers::Call operationCall = jit.call();
1275                     jit.addLinkTask(
1276                         [=] (LinkBuffer& linkBuffer) {
1277                             linkBuffer.link(
1278                                 operationCall,
1279                                 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1280                         });
1281                 }
1282                 
1283                 jit.reclaimSpaceOnStackForCCall();
1284                 jit.move(GPRInfo::returnValueGPR, scratchGPR);
1285                 
1286                 CCallHelpers::Jump noException =
1287                     jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1288                 
1289                 state.restoreLiveRegistersFromStackForCallWithThrownException();
1290                 state.emitExplicitExceptionHandler();
1291                 
1292                 noException.link(&jit);
1293                 state.restoreLiveRegistersFromStackForCall();
1294             }
1295         }
1296
1297         if (isInlineOffset(m_offset)) {
1298             jit.storeValue(
1299                 valueRegs,
1300                 CCallHelpers::Address(
1301                     baseGPR,
1302                     JSObject::offsetOfInlineStorage() +
1303                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1304         } else {
1305             if (!allocating)
1306                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1307             jit.storeValue(
1308                 valueRegs,
1309                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1310         }
1311         
1312         // If we had allocated using an operation then we would have already executed the store
1313         // barrier and we would have already stored the butterfly into the object.
1314         if (allocatingInline) {
1315             CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
1316             WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1317             jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1318             slowPath.append(
1319                 jit.branch32(
1320                     CCallHelpers::AboveOrEqual, scratchGPR2,
1321                     CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
1322             
1323             jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1324             jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1325             
1326             jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
1327             // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
1328             jit.storePtr(
1329                 baseGPR,
1330                 CCallHelpers::BaseIndex(
1331                     scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
1332                     static_cast<int32_t>(-sizeof(void*))));
1333             ownerIsRememberedOrInEden.link(&jit);
1334             
1335             // We set the new butterfly and the structure last. Doing it this way ensures that
1336         // whatever we had done up to this point is forgotten if we choose to branch to the slow
1337             // path.
1338             
1339             jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1340         }
1341         
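             // Publishing the new structure ID is what makes the transition visible, so it must come after
             // the value (and, for inline allocation, the new butterfly) has been stored.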
1342         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1343         jit.store32(
1344             CCallHelpers::TrustedImm32(structureBits),
1345             CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1346
1347         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1348         state.succeed();
1349         
1350         // We will have a slow path if we were allocating without the help of an operation.
1351         if (allocatingInline) {
1352             if (allocator.didReuseRegisters()) {
1353                 slowPath.link(&jit);
1354                 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1355                 state.failAndIgnore.append(jit.jump());
1356             } else
1357                 state.failAndIgnore.append(slowPath);
1358         } else
1359             RELEASE_ASSERT(slowPath.empty());
1360         return;
1361     }
1362
1363     case ArrayLength: {
1364         jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1365         jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
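             // The length is an unsigned 32-bit value; if it does not fit in an int32 it reads back negative
             // here, and we fall back to the slow path rather than box it.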
1366         state.failAndIgnore.append(
1367             jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1368         jit.boxInt32(scratchGPR, valueRegs);
1369         state.succeed();
1370         return;
1371     }
1372
1373     case StringLength: {
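             // String lengths are always at most INT32_MAX in JSC, so unlike ArrayLength above there is no
             // need for a range check before boxing.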
1374         jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1375         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
1376         state.succeed();
1377         return;
1378     }
1379         
1380     case IntrinsicGetter: {
1381         RELEASE_ASSERT(isValidOffset(offset()));
1382
1383         // We need to ensure the getter value does not move from under us. Note that GetterSetters
1384         // are immutable, so we just need to watch the property, not any value inside it.
1385         Structure* currStructure;
1386         if (m_conditionSet.isEmpty())
1387             currStructure = structure();
1388         else
1389             currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1390         currStructure->startWatchingPropertyForReplacements(vm, offset());
1391
1392         emitIntrinsicGetter(state);
1393         return;
1394     }
1395
1396     case DirectArgumentsLength:
1397     case ScopedArgumentsLength:
1398     case MegamorphicLoad:
1399         // These need to be handled by generateWithGuard(), since the guard is part of the
1400         // algorithm. We can be sure that nobody will call generate() directly for these since they
1401         // are not guarded by structure checks.
1402         RELEASE_ASSERT_NOT_REACHED();
1403     }
1404     
1405     RELEASE_ASSERT_NOT_REACHED();
1406 }
1407
1408 PolymorphicAccess::PolymorphicAccess() { }
1409 PolymorphicAccess::~PolymorphicAccess() { }
1410
1411 AccessGenerationResult PolymorphicAccess::addCases(
1412     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1413     Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
1414 {
1415     SuperSamplerScope superSamplerScope(false);
1416     
1417     // This method will add the originalCasesToAdd to the list one at a time while preserving the
1418     // invariants:
1419     // - If a newly added case canReplace() any existing case, then the existing case is removed before
1420     //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
1421     //   can be removed via the canReplace() rule.
1422     // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1423     //   cascade through the cases in reverse order, you will get the most recent cases first.
1424     // - If this method fails (returns MadeNoChanges without adding the cases), then both the previous case list
1425     //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
1426     //   add more things after failure.
1427     
1428     // First ensure that the originalCasesToAdd doesn't contain duplicates.
1429     Vector<std::unique_ptr<AccessCase>> casesToAdd;
1430     for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1431         std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1432
1433         // Add it only if it is not replaced by a subsequent case in the list.
1434         bool found = false;
1435         for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1436             if (originalCasesToAdd[j]->canReplace(*myCase)) {
1437                 found = true;
1438                 break;
1439             }
1440         }
1441
1442         if (found)
1443             continue;
1444         
1445         casesToAdd.append(WTFMove(myCase));
1446     }
1447
1448     if (verbose)
1449         dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1450
1451     // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
1452     // new stub that will be identical to the old one. Returning null should tell the caller to just
1453     // keep doing what they were doing before.
1454     if (casesToAdd.isEmpty())
1455         return AccessGenerationResult::MadeNoChanges;
1456
1457     // Now add things to the new list. Note that at this point, we will still have old cases that
1458     // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
1459     for (auto& caseToAdd : casesToAdd) {
1460         commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
1461         m_list.append(WTFMove(caseToAdd));
1462     }
1463     
1464     if (verbose)
1465         dataLog("After addCases: m_list: ", listDump(m_list), "\n");
1466
1467     return AccessGenerationResult::Buffered;
1468 }
1469
1470 AccessGenerationResult PolymorphicAccess::addCase(
1471     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1472     std::unique_ptr<AccessCase> newAccess)
1473 {
1474     Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
1475     newAccesses.append(WTFMove(newAccess));
1476     return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1477 }
1478
1479 bool PolymorphicAccess::visitWeak(VM& vm) const
1480 {
1481     for (unsigned i = 0; i < size(); ++i) {
1482         if (!at(i).visitWeak(vm))
1483             return false;
1484     }
1485     if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1486         for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1487             if (!Heap::isMarked(weakReference.get()))
1488                 return false;
1489         }
1490     }
1491     return true;
1492 }
1493
1494 bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
1495 {
1496     bool result = true;
1497     for (unsigned i = 0; i < size(); ++i)
1498         result &= at(i).propagateTransitions(visitor);
1499     return result;
1500 }
1501
1502 void PolymorphicAccess::dump(PrintStream& out) const
1503 {
1504     out.print(RawPointer(this), ":[");
1505     CommaPrinter comma;
1506     for (auto& entry : m_list)
1507         out.print(comma, *entry);
1508     out.print("]");
1509 }
1510
1511 void PolymorphicAccess::commit(
1512     VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
1513     StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
1514 {
1515     // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
1516     // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
1517     // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
1518     // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
1519     // Those common kinds of JSC object accesses don't hit this case.
1520     
1521     for (WatchpointSet* set : accessCase.commit(vm, ident)) {
1522         Watchpoint* watchpoint =
1523             WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
1524                 watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
1525         
1526         set->add(watchpoint);
1527     }
1528 }
1529
1530 AccessGenerationResult PolymorphicAccess::regenerate(
1531     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
1532 {
1533     SuperSamplerScope superSamplerScope(false);
1534     
1535     if (verbose)
1536         dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
1537     
1538     AccessGenerationState state;
1539
1540     state.access = this;
1541     state.stubInfo = &stubInfo;
1542     state.ident = &ident;
1543     
1544     state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1545     state.valueRegs = stubInfo.valueRegs();
1546
1547     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1548     state.allocator = &allocator;
1549     allocator.lock(state.baseGPR);
1550     allocator.lock(state.valueRegs);
1551 #if USE(JSVALUE32_64)
1552     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1553 #endif
1554
1555     state.scratchGPR = allocator.allocateScratchGPR();
1556     
1557     CCallHelpers jit(&vm, codeBlock);
1558     state.jit = &jit;
1559
1560     state.preservedReusedRegisterState =
1561         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
1562
1563     // Regenerating is our opportunity to figure out what our list of cases should look like. We
1564     // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
1565     // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
1566     // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
1567     // from the code of the current stub (aka previous).
1568     ListType cases;
1569     unsigned srcIndex = 0;
1570     unsigned dstIndex = 0;
1571     while (srcIndex < m_list.size()) {
1572         std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);
1573         
1574         // If the case had been generated, then we have to keep the original in m_list in case we
1575         // fail to regenerate. That case may have data structures that are used by the code that it
1576         // had generated. If the case had not been generated, then we want to remove it from m_list.
1577         bool isGenerated = someCase->state() == AccessCase::Generated;
1578         
1579         [&] () {
1580             if (!someCase->couldStillSucceed())
1581                 return;
1582
1583             // Figure out if this is replaced by any later case.
1584             for (unsigned j = srcIndex; j < m_list.size(); ++j) {
1585                 if (m_list[j]->canReplace(*someCase))
1586                     return;
1587             }
1588             
1589             if (isGenerated)
1590                 cases.append(someCase->clone());
1591             else
1592                 cases.append(WTFMove(someCase));
1593         }();
1594         
1595         if (isGenerated)
1596             m_list[dstIndex++] = WTFMove(someCase);
1597     }
1598     m_list.resize(dstIndex);
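         // At this point m_list retains only the cases that had already been generated (their data may
         // still be referenced by the outgoing stub's code), while 'cases' holds the candidates we will
         // consider compiling into the new stub.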
1599     
1600     if (verbose)
1601         dataLog("In regenerate: cases: ", listDump(cases), "\n");
1602     
1603     // Now that we've removed obviously unnecessary cases, we can check if the megamorphic load
1604     // optimization is applicable. Note that we basically tune megamorphicLoadCost according to code
1605     // size. It would be faster to just allow more repatching with many load cases, and avoid the
1606     // megamorphicLoad optimization, if we had infinite executable memory.
1607     if (cases.size() >= Options::maxAccessVariantListSize()) {
1608         unsigned numSelfLoads = 0;
1609         for (auto& newCase : cases) {
1610             if (newCase->canBeReplacedByMegamorphicLoad())
1611                 numSelfLoads++;
1612         }
1613         
1614         if (numSelfLoads >= Options::megamorphicLoadCost()) {
1615             if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1616                 cases.removeAllMatching(
1617                     [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1618                         return newCase->canBeReplacedByMegamorphicLoad();
1619                     });
1620                 
1621                 cases.append(WTFMove(mega));
1622             }
1623         }
1624     }
1625     
1626     if (verbose)
1627         dataLog("Optimized cases: ", listDump(cases), "\n");
1628     
1629     // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
1630     // won't change that set anymore.
1631     
1632     bool allGuardedByStructureCheck = true;
1633     bool hasJSGetterSetterCall = false;
1634     for (auto& newCase : cases) {
1635         commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
1636         allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
1637         if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
1638             hasJSGetterSetterCall = true;
1639     }
1640
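         // Choose a dispatch strategy: an empty list just jumps to the repatch path, a single case or any
         // case that is not guarded by a plain structure check gets a cascade, and everything else gets a
         // binary switch on the structure ID.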
1641     if (cases.isEmpty()) {
1642         // This is super unlikely, but we make it legal anyway.
1643         state.failAndRepatch.append(jit.jump());
1644     } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1645         // If there are any proxies in the list, we cannot just use a binary switch over the structure.
1646         // We need to resort to a cascade. A cascade also happens to be optimal if we have just
1647         // one case.
1648         CCallHelpers::JumpList fallThrough;
1649
1650         // Cascade through the list, preferring newer entries.
1651         for (unsigned i = cases.size(); i--;) {
1652             fallThrough.link(&jit);
1653             fallThrough.clear();
1654             cases[i]->generateWithGuard(state, fallThrough);
1655         }
1656         state.failAndRepatch.append(fallThrough);
1657     } else {
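             // Every case here is guarded purely by a structure check, so we can dispatch with a binary
             // switch over the structure ID and call generate() directly; the switch itself acts as the
             // guard.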
1658         jit.load32(
1659             CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1660             state.scratchGPR);
1661         
1662         Vector<int64_t> caseValues(cases.size());
1663         for (unsigned i = 0; i < cases.size(); ++i)
1664             caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1665         
1666         BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1667         while (binarySwitch.advance(jit))
1668             cases[binarySwitch.caseIndex()]->generate(state);
1669         state.failAndRepatch.append(binarySwitch.fallThrough());
1670     }
1671
1672     if (!state.failAndIgnore.empty()) {
1673         state.failAndIgnore.link(&jit);
1674         
1675         // Make sure that the inline cache optimization code knows that we are taking the slow path because
1676         // of something that isn't patchable. The slow path will decrement "countdown" and will only
1677         // patch things if the countdown reaches zero. We increment the slow path count here to ensure
1678         // that the slow path does not try to patch.
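             // On x86 we can add to the countdown byte in memory; other targets load it into a register,
             // add, and store it back.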
1679 #if CPU(X86) || CPU(X86_64)
1680         jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
1681         jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
1682 #else
1683         jit.load8(&stubInfo.countdown, state.scratchGPR);
1684         jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1685         jit.store8(state.scratchGPR, &stubInfo.countdown);
1686 #endif
1687     }
1688
1689     CCallHelpers::JumpList failure;
1690     if (allocator.didReuseRegisters()) {
1691         state.failAndRepatch.link(&jit);
1692         state.restoreScratch();
1693     } else
1694         failure = state.failAndRepatch;
1695     failure.append(jit.jump());
1696
1697     CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1698     CallSiteIndex callSiteIndexForExceptionHandling;
1699     if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1700         // Emit the exception handler.
1701         // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1702         // Note also that this is not reachable from a custom getter/setter. Custom getter/setters will have
1703         // their own exception handling logic that doesn't go through genericUnwind.
1704         MacroAssembler::Label makeshiftCatchHandler = jit.label();
1705
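             // Recompute the stack pointer for this frame from the restored call frame: start at the code
             // block's stack pointer offset and subtract whatever this stub pushed (the reused scratch
             // registers plus the registers spilled around the getter/setter call).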
1706         int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1707         stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1708         stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1709
1710         jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1711         jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1712
1713         state.restoreLiveRegistersFromStackForCallWithThrownException();
1714         state.restoreScratch();
1715         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1716
1717         HandlerInfo oldHandler = state.originalExceptionHandler();
1718         CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1719         jit.addLinkTask(
1720             [=] (LinkBuffer& linkBuffer) {
1721                 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1722
1723                 HandlerInfo handlerToRegister = oldHandler;
1724                 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1725                 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1726                 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1727                 codeBlock->appendExceptionHandler(handlerToRegister);
1728             });
1729
1730         // We set these to indicate to the stub to remove itself from the CodeBlock's
1731         // exception handler table when it is deallocated.
1732         codeBlockThatOwnsExceptionHandlers = codeBlock;
1733         ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1734         callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1735     }
1736
1737     LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1738     if (linkBuffer.didFailToAllocate()) {
1739         if (verbose)
1740             dataLog("Did fail to allocate.\n");
1741         return AccessGenerationResult::GaveUp;
1742     }
1743
1744     CodeLocationLabel successLabel = stubInfo.doneLocation();
1745         
1746     linkBuffer.link(state.success, successLabel);
1747
1748     linkBuffer.link(failure, stubInfo.slowPathStartLocation());
1749     
1750     if (verbose)
1751         dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1752
1753     MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1754         codeBlock, linkBuffer,
1755         ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1756
1757     bool doesCalls = false;
1758     Vector<JSCell*> cellsToMark;
1759     for (auto& entry : cases)
1760         doesCalls |= entry->doesCalls(&cellsToMark);
1761     
1762     m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1763     m_watchpoints = WTFMove(state.watchpoints);
1764     if (!state.weakReferences.isEmpty())
1765         m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1766     if (verbose)
1767         dataLog("Returning: ", code.code(), "\n");
1768     
1769     m_list = WTFMove(cases);
1770     
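         // Once the list has reached the maximum size we report GeneratedFinalCode, which signals the
         // caller not to keep trying to add cases; otherwise new cases can still be buffered and picked
         // up by a later regenerate.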
1771     AccessGenerationResult::Kind resultKind;
1772     if (m_list.size() >= Options::maxAccessVariantListSize())
1773         resultKind = AccessGenerationResult::GeneratedFinalCode;
1774     else
1775         resultKind = AccessGenerationResult::GeneratedNewCode;
1776     
1777     return AccessGenerationResult(resultKind, code.code());
1778 }
1779
1780 void PolymorphicAccess::aboutToDie()
1781 {
1782     if (m_stubRoutine)
1783         m_stubRoutine->aboutToDie();
1784 }
1785
1786 } // namespace JSC
1787
1788 namespace WTF {
1789
1790 using namespace JSC;
1791
1792 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1793 {
1794     switch (kind) {
1795     case AccessGenerationResult::MadeNoChanges:
1796         out.print("MadeNoChanges");
1797         return;
1798     case AccessGenerationResult::GaveUp:
1799         out.print("GaveUp");
1800         return;
1801     case AccessGenerationResult::Buffered:
1802         out.print("Buffered");
1803         return;
1804     case AccessGenerationResult::GeneratedNewCode:
1805         out.print("GeneratedNewCode");
1806         return;
1807     case AccessGenerationResult::GeneratedFinalCode:
1808         out.print("GeneratedFinalCode");
1809         return;
1810     }
1811     
1812     RELEASE_ASSERT_NOT_REACHED();
1813 }
1814
1815 void printInternal(PrintStream& out, AccessCase::AccessType type)
1816 {
1817     switch (type) {
1818     case AccessCase::Load:
1819         out.print("Load");
1820         return;
1821     case AccessCase::MegamorphicLoad:
1822         out.print("MegamorphicLoad");
1823         return;
1824     case AccessCase::Transition:
1825         out.print("Transition");
1826         return;
1827     case AccessCase::Replace:
1828         out.print("Replace");
1829         return;
1830     case AccessCase::Miss:
1831         out.print("Miss");
1832         return;
1833     case AccessCase::GetGetter:
1834         out.print("GetGetter");
1835         return;
1836     case AccessCase::Getter:
1837         out.print("Getter");
1838         return;
1839     case AccessCase::Setter:
1840         out.print("Setter");
1841         return;
1842     case AccessCase::CustomValueGetter:
1843         out.print("CustomValueGetter");
1844         return;
1845     case AccessCase::CustomAccessorGetter:
1846         out.print("CustomAccessorGetter");
1847         return;
1848     case AccessCase::CustomValueSetter:
1849         out.print("CustomValueSetter");
1850         return;
1851     case AccessCase::CustomAccessorSetter:
1852         out.print("CustomAccessorSetter");
1853         return;
1854     case AccessCase::IntrinsicGetter:
1855         out.print("IntrinsicGetter");
1856         return;
1857     case AccessCase::InHit:
1858         out.print("InHit");
1859         return;
1860     case AccessCase::InMiss:
1861         out.print("InMiss");
1862         return;
1863     case AccessCase::ArrayLength:
1864         out.print("ArrayLength");
1865         return;
1866     case AccessCase::StringLength:
1867         out.print("StringLength");
1868         return;
1869     case AccessCase::DirectArgumentsLength:
1870         out.print("DirectArgumentsLength");
1871         return;
1872     case AccessCase::ScopedArgumentsLength:
1873         out.print("ScopedArgumentsLength");
1874         return;
1875     }
1876
1877     RELEASE_ASSERT_NOT_REACHED();
1878 }
1879
1880 void printInternal(PrintStream& out, AccessCase::State state)
1881 {
1882     switch (state) {
1883     case AccessCase::Primordial:
1884         out.print("Primordial");
1885         return;
1886     case AccessCase::Committed:
1887         out.print("Committed");
1888         return;
1889     case AccessCase::Generated:
1890         out.print("Generated");
1891         return;
1892     }
1893
1894     RELEASE_ASSERT_NOT_REACHED();
1895 }
1896
1897 } // namespace WTF
1898
1899 #endif // ENABLE(JIT)
1900
1901