/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PolymorphicAccess.h"

#if ENABLE(JIT)

#include "BinarySwitch.h"
#include "CCallHelpers.h"
#include "CodeBlock.h"
#include "DirectArguments.h"
#include "GetterSetter.h"
#include "Heap.h"
#include "JITOperations.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "StructureStubClearingWatchpoint.h"
#include "StructureStubInfo.h"
#include <wtf/CommaPrinter.h>
#include <wtf/ListDump.h>

namespace JSC {

static const bool verbose = false;

// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiled for ARM EABI, it must be aligned on an even-numbered register (r0, r2, or [sp]).
// To prevent the assembler from using the wrong registers, occupy r1 or r3 with a dummy argument when necessary.
#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
#define EABI_32BIT_DUMMY_ARG      CCallHelpers::TrustedImm32(0),
#else
#define EABI_32BIT_DUMMY_ARG
#endif

void AccessGenerationResult::dump(PrintStream& out) const
{
    out.print(m_kind);
    if (m_code)
        out.print(":", m_code);
}

Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
{
    return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
        watchpoints, jit->codeBlock(), stubInfo, condition);
}

void AccessGenerationState::restoreScratch()
{
    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
}

void AccessGenerationState::succeed()
{
    restoreScratch();
    success.append(jit->jump());
}

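// Computes, once per stub, the register sets needed around a call: the registers live at the
// exception-handling call site (which must be preserved if the call can throw), plus whatever
// the scratch allocator is using, minus registers the JS call convention does not require us
// to save, plus any extra registers the caller asks for.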
void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling(const RegisterSet& extra)
{
    if (!m_calculatedRegistersForCallAndExceptionHandling) {
        m_calculatedRegistersForCallAndExceptionHandling = true;

        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
        if (m_needsToRestoreRegistersIfException)
            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));

        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
        m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
        m_liveRegistersForCall.merge(extra);
    }
}

void AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra)
{
    calculateLiveRegistersForCallAndExceptionHandling(extra);

    unsigned extraStackPadding = 0;
    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
    if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
        RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
    m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
{
    RegisterSet dontRestore;
    if (isGetter) {
        // This is the result value. We don't want to overwrite the result with what we stored to the stack.
        // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
        dontRestore.set(valueRegs);
    }
    restoreLiveRegistersFromStackForCall(dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
{
    // Even if we're a getter, we don't want to ignore the result value like we normally do
    // because the getter threw, and therefore, didn't return a value that means anything.
    // Instead, we want to restore that register to what it was upon entering the getter
    // inline cache. The subtlety here is if the base and the result are the same register,
    // and the getter threw, we want OSR exit to see the original base value, not the result
    // of the getter call.
    RegisterSet dontRestore = liveRegistersForCall();
    // As an optimization here, we only need to restore what is live for exception handling.
    // We can construct the dontRestore set to accomplish this goal by having it contain only
    // what is live for call but not live for exception handling. By ignoring things that are
    // only live at the call but not the exception handler, we will only restore things live
    // at the exception handler.
    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
    restoreLiveRegistersFromStackForCall(dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
{
    unsigned extraStackPadding = 0;
    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
}

CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
{
    RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);

    if (!m_calculatedCallSiteIndex) {
        m_calculatedCallSiteIndex = true;

        if (m_needsToRestoreRegistersIfException)
            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
        else
            m_callSiteIndex = originalCallSiteIndex();
    }

    return m_callSiteIndex;
}

const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
{
    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
    RELEASE_ASSERT(exceptionHandler);
    return *exceptionHandler;
}

CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }

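// Emits the handler for an exception that we detected ourselves (rather than one delivered
// through genericUnwind). If registers were live at the exception-handling call site, we mimic
// what genericUnwind would have done and jump to the original OSR-exit handler; otherwise we
// ask lookupExceptionHandler for the handler and jump to it.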
void AccessGenerationState::emitExplicitExceptionHandler()
{
    restoreScratch();
    jit->copyCalleeSavesToVMCalleeSavesBuffer();
    if (needsToRestoreRegistersIfException()) {
        // The JIT that produced the original exception-handling call site expects the OSR
        // exit to be reached via genericUnwind. Therefore we must model what genericUnwind
        // does here, i.e., set callFrameForCatch and copy the callee saves.

        jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();

        // We don't need to insert a new exception handler in the table
        // because we're doing a manual exception check here, i.e., we'll
        // never arrive here from genericUnwind().
        HandlerInfo originalHandler = originalExceptionHandler();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
            });
    } else {
        jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
        CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
            });
        jit->jumpToExceptionHandler();
    }
}

AccessCase::AccessCase()
{
}

std::unique_ptr<AccessCase> AccessCase::tryGet(
    VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
    const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;

    if (viaProxy || additionalSet) {
        result->m_rareData = std::make_unique<RareData>();
        result->m_rareData->viaProxy = viaProxy;
        result->m_rareData->additionalSet = additionalSet;
    }

    return result;
}

std::unique_ptr<AccessCase> AccessCase::get(
    VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
    const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
    PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;

    if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
        result->m_rareData = std::make_unique<RareData>();
        result->m_rareData->viaProxy = viaProxy;
        result->m_rareData->additionalSet = additionalSet;
        result->m_rareData->customAccessor.getter = customGetter;
        result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
    }

    return result;
}

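// MegamorphicLoad probes the property table inline, which burns several scratch registers
// (see generateWithGuard below), so we only create this case on targets with enough GPRs.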
std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
{
    UNUSED_PARAM(vm);
    UNUSED_PARAM(owner);

    if (GPRInfo::numberOfRegisters < 9)
        return nullptr;

    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = MegamorphicLoad;

    return result;
}

std::unique_ptr<AccessCase> AccessCase::replace(
    VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = Replace;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, structure);

    return result;
}

std::unique_ptr<AccessCase> AccessCase::transition(
    VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
    const ObjectPropertyConditionSet& conditionSet)
{
    RELEASE_ASSERT(oldStructure == newStructure->previousID());

    // Skip optimizing the case where we need a realloc, if we don't have
    // enough registers to make it happen.
    if (GPRInfo::numberOfRegisters < 6
        && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
        && oldStructure->outOfLineCapacity()) {
        return nullptr;
    }

    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = Transition;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, newStructure);
    result->m_conditionSet = conditionSet;

    return result;
}

std::unique_ptr<AccessCase> AccessCase::setter(
    VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
    const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
    JSObject* customSlotBase)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;
    result->m_rareData = std::make_unique<RareData>();
    result->m_rareData->customAccessor.setter = customSetter;
    result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);

    return result;
}

std::unique_ptr<AccessCase> AccessCase::in(
    VM& vm, JSCell* owner, AccessType type, Structure* structure,
    const ObjectPropertyConditionSet& conditionSet)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;

    return result;
}

std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;

    return result;
}

std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
    VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
    Structure* structure, const ObjectPropertyConditionSet& conditionSet)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = IntrinsicGetter;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;
    result->m_offset = offset;

    result->m_rareData = std::make_unique<RareData>();
    result->m_rareData->intrinsicFunction.set(vm, owner, getter);

    return result;
}

AccessCase::~AccessCase()
{
}

std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
    VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
{
    switch (stubInfo.cacheType) {
    case CacheType::GetByIdSelf:
        return get(
            vm, owner, Load, stubInfo.u.byIdSelf.offset,
            stubInfo.u.byIdSelf.baseObjectStructure.get());

    case CacheType::PutByIdReplace:
        return replace(
            vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);

    default:
        return nullptr;
    }
}

std::unique_ptr<AccessCase> AccessCase::clone() const
{
    std::unique_ptr<AccessCase> result(new AccessCase());
    result->m_type = m_type;
    result->m_offset = m_offset;
    result->m_structure = m_structure;
    result->m_conditionSet = m_conditionSet;
    if (RareData* rareData = m_rareData.get()) {
        result->m_rareData = std::make_unique<RareData>();
        result->m_rareData->viaProxy = rareData->viaProxy;
        result->m_rareData->additionalSet = rareData->additionalSet;
        // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
        result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
        result->m_rareData->customSlotBase = rareData->customSlotBase;
        result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
    }
    return result;
}

Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
{
    // It's fine to commit something that is already committed. That arises when we switch to using
    // newly allocated watchpoints. When it happens, it's not efficient - but we think that's OK
    // because most AccessCases have no extra watchpoints anyway.
    RELEASE_ASSERT(m_state == Primordial || m_state == Committed);

    Vector<WatchpointSet*, 2> result;

    if ((structure() && structure()->needImpurePropertyWatchpoint())
        || m_conditionSet.needImpurePropertyWatchpoint())
        result.append(vm.ensureWatchpointSetForImpureProperty(ident));

    if (additionalSet())
        result.append(additionalSet());

    m_state = Committed;

    return result;
}

bool AccessCase::guardedByStructureCheck() const
{
    if (viaProxy())
        return false;

    switch (m_type) {
    case MegamorphicLoad:
    case ArrayLength:
    case StringLength:
    case DirectArgumentsLength:
    case ScopedArgumentsLength:
        return false;
    default:
        return true;
    }
}

JSObject* AccessCase::alternateBase() const
{
    if (customSlotBase())
        return customSlotBase();
    return conditionSet().slotBaseCondition().object();
}

bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
{
    switch (type()) {
    case Getter:
    case Setter:
    case CustomValueGetter:
    case CustomAccessorGetter:
    case CustomValueSetter:
    case CustomAccessorSetter:
        return true;
    case Transition:
        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
            && structure()->couldHaveIndexingHeader()) {
            if (cellsToMark)
                cellsToMark->append(newStructure());
            return true;
        }
        return false;
    default:
        return false;
    }
}

bool AccessCase::couldStillSucceed() const
{
    return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
}

bool AccessCase::canBeReplacedByMegamorphicLoad() const
{
    if (type() == MegamorphicLoad)
        return true;

    return type() == Load
        && !viaProxy()
        && conditionSet().isEmpty()
        && !additionalSet()
        && !customSlotBase();
}

bool AccessCase::canReplace(const AccessCase& other) const
{
    // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
    // It's fine for this to return false if it's in doubt.

    switch (type()) {
    case MegamorphicLoad:
        return other.canBeReplacedByMegamorphicLoad();
    case ArrayLength:
    case StringLength:
    case DirectArgumentsLength:
    case ScopedArgumentsLength:
        return other.type() == type();
    default:
        if (!guardedByStructureCheck() || !other.guardedByStructureCheck())
            return false;

        return structure() == other.structure();
    }
}

void AccessCase::dump(PrintStream& out) const
{
    out.print(m_type, ":(");

    CommaPrinter comma;

    out.print(comma, m_state);

    if (m_type == Transition)
        out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
    else if (m_structure)
        out.print(comma, "structure = ", pointerDump(m_structure.get()));

    if (isValidOffset(m_offset))
        out.print(comma, "offset = ", m_offset);
    if (!m_conditionSet.isEmpty())
        out.print(comma, "conditions = ", m_conditionSet);

    if (RareData* rareData = m_rareData.get()) {
        if (rareData->viaProxy)
            out.print(comma, "viaProxy = ", rareData->viaProxy);
        if (rareData->additionalSet)
            out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
        if (rareData->callLinkInfo)
            out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
        if (rareData->customAccessor.opaque)
            out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
        if (rareData->customSlotBase)
            out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
    }

    out.print(")");
}

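// Returns false if any cell this case weakly depends on (its structure, the objects in its
// condition set, its custom slot base, or its intrinsic function) has died, which tells the
// caller that the case must be discarded.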
bool AccessCase::visitWeak(VM& vm) const
{
    if (m_structure && !Heap::isMarked(m_structure.get()))
        return false;
    if (!m_conditionSet.areStillLive())
        return false;
    if (m_rareData) {
        if (m_rareData->callLinkInfo)
            m_rareData->callLinkInfo->visitWeak(vm);
        if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
            return false;
        if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
            return false;
    }
    return true;
}

void AccessCase::generateWithGuard(
    AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
{
    SuperSamplerScope superSamplerScope(false);

    RELEASE_ASSERT(m_state == Committed);
    m_state = Generated;

    CCallHelpers& jit = *state.jit;
    VM& vm = *jit.vm();
    const Identifier& ident = *state.ident;
    StructureStubInfo& stubInfo = *state.stubInfo;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

    UNUSED_PARAM(vm);

    switch (m_type) {
    case ArrayLength: {
        ASSERT(!viaProxy());
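        // The array-length fast path requires the cell's indexing-type byte to have the
        // IsArray bit set and a non-empty indexing shape; anything else falls through to
        // the next case.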
        jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
        fallThrough.append(
            jit.branchTest32(
                CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
        fallThrough.append(
            jit.branchTest32(
                CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
        break;
    }

    case StringLength: {
        ASSERT(!viaProxy());
        fallThrough.append(
            jit.branch8(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                CCallHelpers::TrustedImm32(StringType)));
        break;
    }

    case DirectArgumentsLength: {
        ASSERT(!viaProxy());
        fallThrough.append(
            jit.branch8(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                CCallHelpers::TrustedImm32(DirectArgumentsType)));

        fallThrough.append(
            jit.branchTestPtr(
                CCallHelpers::NonZero,
                CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
        jit.load32(
            CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
            valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
        state.succeed();
        return;
    }

    case ScopedArgumentsLength: {
        ASSERT(!viaProxy());
        fallThrough.append(
            jit.branch8(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                CCallHelpers::TrustedImm32(ScopedArgumentsType)));

        fallThrough.append(
            jit.branchTest8(
                CCallHelpers::NonZero,
                CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
        jit.load32(
            CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
            valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
        state.succeed();
        return;
    }

    case MegamorphicLoad: {
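        // A megamorphic load probes the structure's property table directly in JIT code:
        // load the table, mask the identifier's precomputed hash into the index vector,
        // and probe linearly until we either find the key (then check its attributes and
        // load the property) or hit an empty slot (then fall through). Failures that should
        // not repatch, such as the absence of a property table, go to failAndIgnore.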
        UniquedStringImpl* key = ident.impl();
        unsigned hash = IdentifierRepHash::hash(key);

        ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
        allocator.lock(baseGPR);
#if USE(JSVALUE32_64)
        allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif
        allocator.lock(valueRegs);
        allocator.lock(scratchGPR);

        GPRReg intermediateGPR = scratchGPR;
        GPRReg maskGPR = allocator.allocateScratchGPR();
        GPRReg maskedHashGPR = allocator.allocateScratchGPR();
        GPRReg indexGPR = allocator.allocateScratchGPR();
        GPRReg offsetGPR = allocator.allocateScratchGPR();

        if (verbose) {
            dataLog("baseGPR = ", baseGPR, "\n");
            dataLog("valueRegs = ", valueRegs, "\n");
            dataLog("scratchGPR = ", scratchGPR, "\n");
            dataLog("intermediateGPR = ", intermediateGPR, "\n");
            dataLog("maskGPR = ", maskGPR, "\n");
            dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
            dataLog("indexGPR = ", indexGPR, "\n");
            dataLog("offsetGPR = ", offsetGPR, "\n");
        }

        ScratchRegisterAllocator::PreservedState preservedState =
            allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);

        CCallHelpers::JumpList myFailAndIgnore;
        CCallHelpers::JumpList myFallThrough;

        jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
        jit.loadPtr(
            CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
            intermediateGPR);

        myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));

        jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
        jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
        jit.load32(
            CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
            intermediateGPR);

        jit.move(maskGPR, maskedHashGPR);
        jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
        jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
        jit.addPtr(indexGPR, intermediateGPR);

        CCallHelpers::Label loop = jit.label();

        jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);

        myFallThrough.append(
            jit.branch32(
                CCallHelpers::Equal,
                offsetGPR,
                CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));

        jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
        jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
        jit.addPtr(intermediateGPR, offsetGPR);

        CCallHelpers::Jump collision = jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
            CCallHelpers::TrustedImmPtr(key));

        // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
        // Check them and then attempt the load.

        myFallThrough.append(
            jit.branchTest32(
                CCallHelpers::NonZero,
                CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
                CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));

        jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);

        jit.loadProperty(baseGPR, offsetGPR, valueRegs);

        allocator.restoreReusedRegistersByPopping(jit, preservedState);
        state.succeed();

        collision.link(&jit);

        jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);

        // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
        // around isn't super common so we could, for example, recompute the mask from the difference between
        // the table and index. But before we do that we should probably make it easier to multiply and
        // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
        // to have a power-of-2 size.
        jit.and32(maskGPR, maskedHashGPR);
        jit.jump().linkTo(loop, &jit);

        if (allocator.didReuseRegisters()) {
            myFailAndIgnore.link(&jit);
            allocator.restoreReusedRegistersByPopping(jit, preservedState);
            state.failAndIgnore.append(jit.jump());

            myFallThrough.link(&jit);
            allocator.restoreReusedRegistersByPopping(jit, preservedState);
            fallThrough.append(jit.jump());
        } else {
            state.failAndIgnore.append(myFailAndIgnore);
            fallThrough.append(myFallThrough);
        }
        return;
    }

    default: {
        if (viaProxy()) {
            fallThrough.append(
                jit.branch8(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                    CCallHelpers::TrustedImm32(PureForwardingProxyType)));

            jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);

            fallThrough.append(
                jit.branchStructure(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
                    structure()));
        } else {
            fallThrough.append(
                jit.branchStructure(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
                    structure()));
        }
        break;
    }
    }

    generateImpl(state);
}

void AccessCase::generate(AccessGenerationState& state)
{
    RELEASE_ASSERT(m_state == Committed);
    m_state = Generated;

    generateImpl(state);
}

void AccessCase::generateImpl(AccessGenerationState& state)
{
    SuperSamplerScope superSamplerScope(false);
    if (verbose)
        dataLog("Generating code for: ", *this, "\n");

    ASSERT(m_state == Generated); // We rely on the callers setting this for us.

    CCallHelpers& jit = *state.jit;
    VM& vm = *jit.vm();
    CodeBlock* codeBlock = jit.codeBlock();
    StructureStubInfo& stubInfo = *state.stubInfo;
    const Identifier& ident = *state.ident;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

    ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());

    for (const ObjectPropertyCondition& condition : m_conditionSet) {
        Structure* structure = condition.object()->structure();

        if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
            structure->addTransitionWatchpoint(state.addWatchpoint(condition));
            continue;
        }

        if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
            // The reason why this cannot happen is that we require that PolymorphicAccess calls
            // AccessCase::generate() only after it has verified that
            // AccessCase::couldStillSucceed() returned true.

            dataLog("This condition is no longer met: ", condition, "\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        // We will emit code that has a weak reference that isn't otherwise listed anywhere.
        state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));

        jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
        state.failAndRepatch.append(
            jit.branchStructure(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
                structure));
    }

    switch (m_type) {
    case InHit:
    case InMiss:
        jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
        state.succeed();
        return;

    case Miss:
        jit.moveTrustedValue(jsUndefined(), valueRegs);
        state.succeed();
        return;

    case Load:
    case GetGetter:
    case Getter:
    case Setter:
    case CustomValueGetter:
    case CustomAccessorGetter:
    case CustomValueSetter:
    case CustomAccessorSetter: {
        if (isValidOffset(m_offset)) {
            Structure* currStructure;
            if (m_conditionSet.isEmpty())
                currStructure = structure();
            else
                currStructure = m_conditionSet.slotBaseCondition().object()->structure();
            currStructure->startWatchingPropertyForReplacements(vm, offset());
        }

        GPRReg baseForGetGPR;
        if (viaProxy()) {
            baseForGetGPR = valueRegs.payloadGPR();
            jit.loadPtr(
                CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
                baseForGetGPR);
        } else
            baseForGetGPR = baseGPR;

        GPRReg baseForAccessGPR;
        if (!m_conditionSet.isEmpty()) {
            jit.move(
                CCallHelpers::TrustedImmPtr(alternateBase()),
                scratchGPR);
            baseForAccessGPR = scratchGPR;
        } else
            baseForAccessGPR = baseForGetGPR;

        GPRReg loadedValueGPR = InvalidGPRReg;
        if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
            if (m_type == Load || m_type == GetGetter)
                loadedValueGPR = valueRegs.payloadGPR();
            else
                loadedValueGPR = scratchGPR;

            GPRReg storageGPR;
            if (isInlineOffset(m_offset))
                storageGPR = baseForAccessGPR;
            else {
                jit.loadPtr(
                    CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
                    loadedValueGPR);
                storageGPR = loadedValueGPR;
            }

#if USE(JSVALUE64)
            jit.load64(
                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
#else
            if (m_type == Load || m_type == GetGetter) {
                jit.load32(
                    CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
                    valueRegs.tagGPR());
            }
            jit.load32(
                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
                loadedValueGPR);
#endif
        }

        if (m_type == Load || m_type == GetGetter) {
            state.succeed();
            return;
        }

        // Stuff for custom getters/setters.
        CCallHelpers::Call operationCall;

        // Stuff for JS getters/setters.
        CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
        CCallHelpers::Call fastPathCall;
        CCallHelpers::Call slowPathCall;

        CCallHelpers::Jump success;
        CCallHelpers::Jump fail;

        // This also does the necessary calculations of whether or not we're an
        // exception handling call site.
        state.preserveLiveRegistersToStackForCall();

        jit.store32(
            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
            CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));

        if (m_type == Getter || m_type == Setter) {
            // Create a JS call using a JS call inline cache. Assume that:
            //
            // - SP is aligned and represents the extent of the calling compiler's stack usage.
            //
            // - FP is set correctly (i.e. it points to the caller's call frame header).
            //
            // - SP - FP is an aligned difference.
            //
            // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
            //   code.
            //
            // Therefore, we temporarily grow the stack for the purpose of the call and then
            // shrink it after.

            RELEASE_ASSERT(!m_rareData->callLinkInfo);
            m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();

            // FIXME: If we generated a polymorphic call stub that jumped back to the getter
            // stub, which then jumped back to the main code, then we'd have a reachability
            // situation that the GC doesn't know about. The GC would ensure that the polymorphic
            // call stub stayed alive, and it would ensure that the main code stayed alive, but
            // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
            // be GC objects, and then we'd be able to say that the polymorphic call stub has a
            // reference to the getter stub.
            // https://bugs.webkit.org/show_bug.cgi?id=148914
            m_rareData->callLinkInfo->disallowStubs();

            m_rareData->callLinkInfo->setUpCall(
                CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);

            CCallHelpers::JumpList done;

            // There is a "this" argument.
            unsigned numberOfParameters = 1;
            // ... and a value argument if we're calling a setter.
            if (m_type == Setter)
                numberOfParameters++;

            // Get the accessor; if there ain't one then the result is jsUndefined().
            if (m_type == Setter) {
                jit.loadPtr(
                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
                    loadedValueGPR);
            } else {
                jit.loadPtr(
                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
                    loadedValueGPR);
            }

            CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
                CCallHelpers::Zero, loadedValueGPR);

            unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;

            unsigned numberOfBytesForCall =
                numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

            unsigned alignedNumberOfBytesForCall =
                WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

            jit.subPtr(
                CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
                CCallHelpers::stackPointerRegister);

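            // The callee's frame will begin sizeof(CallerFrameAndPC) bytes below the current
            // stack pointer (the call and the callee's prologue fill that header in), so we
            // address the header and argument slots of the new frame relative to that point.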
            CCallHelpers::Address calleeFrame = CCallHelpers::Address(
                CCallHelpers::stackPointerRegister,
                -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));

            jit.store32(
                CCallHelpers::TrustedImm32(numberOfParameters),
                calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));

            jit.storeCell(
                loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));

            jit.storeCell(
                baseForGetGPR,
                calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));

            if (m_type == Setter) {
                jit.storeValue(
                    valueRegs,
                    calleeFrame.withOffset(
                        virtualRegisterForArgument(1).offset() * sizeof(Register)));
            }

            CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
                CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
                CCallHelpers::TrustedImmPtr(0));

            fastPathCall = jit.nearCall();
            if (m_type == Getter)
                jit.setupResults(valueRegs);
            done.append(jit.jump());

            slowCase.link(&jit);
            jit.move(loadedValueGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
            // We *always* know that the getter/setter, if non-null, is a cell.
            jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
            jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
            slowPathCall = jit.nearCall();
            if (m_type == Getter)
                jit.setupResults(valueRegs);
            done.append(jit.jump());

            returnUndefined.link(&jit);
            if (m_type == Getter)
                jit.moveTrustedValue(jsUndefined(), valueRegs);

            done.link(&jit);

            jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
                GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            state.restoreLiveRegistersFromStackForCall(isGetter());

            jit.addLinkTask(
                [=, &vm] (LinkBuffer& linkBuffer) {
                    m_rareData->callLinkInfo->setCallLocations(
                        linkBuffer.locationOfNearCall(slowPathCall),
                        linkBuffer.locationOf(addressOfLinkFunctionCheck),
                        linkBuffer.locationOfNearCall(fastPathCall));

                    linkBuffer.link(
                        slowPathCall,
                        CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
                });
        } else {
            // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
            // hard to track if someone did spillage or not, so we just assume that we always need
            // to make some space here.
            jit.makeSpaceOnStackForCCall();

            // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
            // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
            // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (receiver).
            GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
#if USE(JSVALUE64)
            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
                jit.setupArgumentsWithExecState(
                    baseForCustomValue,
                    CCallHelpers::TrustedImmPtr(ident.impl()));
            } else
                jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
#else
            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
                jit.setupArgumentsWithExecState(
                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
                    CCallHelpers::TrustedImm32(JSValue::CellTag),
                    CCallHelpers::TrustedImmPtr(ident.impl()));
            } else {
                jit.setupArgumentsWithExecState(
                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
                    CCallHelpers::TrustedImm32(JSValue::CellTag),
                    valueRegs.payloadGPR(), valueRegs.tagGPR());
            }
#endif
            jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);

            operationCall = jit.call();
            jit.addLinkTask(
                [=] (LinkBuffer& linkBuffer) {
                    linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
                });

            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
                jit.setupResults(valueRegs);
            jit.reclaimSpaceOnStackForCCall();

            CCallHelpers::Jump noException =
                jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);

            state.restoreLiveRegistersFromStackForCallWithThrownException();
            state.emitExplicitExceptionHandler();

            noException.link(&jit);
            state.restoreLiveRegistersFromStackForCall(isGetter());
        }
        state.succeed();
        return;
    }

    case Replace: {
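        // Replace overwrites the property's existing slot. If the property has an inferred
        // type, first guard that the incoming value matches it; a mismatch takes the
        // failAndRepatch path so the slow path can re-examine the inference.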
        if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
            if (verbose)
                dataLog("Have type: ", type->descriptor(), "\n");
            state.failAndRepatch.append(
                jit.branchIfNotType(
                    valueRegs, scratchGPR, type->descriptor(), CCallHelpers::HaveTagRegisters));
        } else if (verbose)
            dataLog("Don't have type.\n");

        if (isInlineOffset(m_offset)) {
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(
                    baseGPR,
                    JSObject::offsetOfInlineStorage() +
                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
        } else {
            jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(
                    scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
        }
        state.succeed();
        return;
    }

    case Transition: {
        // AccessCase::transition() should have returned null if this wasn't true.
        RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());

        if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
            if (verbose)
                dataLog("Have type: ", type->descriptor(), "\n");
            state.failAndRepatch.append(
                jit.branchIfNotType(
                    valueRegs, scratchGPR, type->descriptor(), CCallHelpers::HaveTagRegisters));
        } else if (verbose)
            dataLog("Don't have type.\n");

        // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
        // exactly when this would make calls.
        bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
        bool reallocating = allocating && structure()->outOfLineCapacity();
        bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
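        // allocating: the transition changes the out-of-line capacity, so we need a new
        // butterfly. reallocating: there is existing out-of-line storage to copy over.
        // allocatingInline: we can bump-allocate the new butterfly right here; objects that
        // could have an indexing header take the C-call path below instead, since the inline
        // copy loop only understands plain out-of-line property storage.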

        ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
        allocator.lock(baseGPR);
#if USE(JSVALUE32_64)
        allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif
        allocator.lock(valueRegs);
        allocator.lock(scratchGPR);

        GPRReg scratchGPR2 = InvalidGPRReg;
        GPRReg scratchGPR3 = InvalidGPRReg;
        if (allocatingInline) {
            scratchGPR2 = allocator.allocateScratchGPR();
            scratchGPR3 = allocator.allocateScratchGPR();
        }

        ScratchRegisterAllocator::PreservedState preservedState =
            allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);

        CCallHelpers::JumpList slowPath;

        ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());

        if (allocating) {
            size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);

            if (allocatingInline) {
                CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();

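                // Bump-allocate newSize bytes straight out of the copied space: decrement
                // m_currentRemaining (a signed underflow means the block is full, so we take
                // the slow path) and derive the new butterfly pointer from m_currentPayloadEnd.
                // Out-of-line properties are then stored at negative offsets from that pointer.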
1198                 if (!reallocating) {
1199                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1200                     slowPath.append(
1201                         jit.branchSubPtr(
1202                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1203                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1204                     jit.negPtr(scratchGPR);
1205                     jit.addPtr(
1206                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1207                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1208                 } else {
1209                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1210                     // already had out-of-line property storage).
1211                     size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1212                     ASSERT(newSize > oldSize);
1213             
1214                     jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1215                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1216                     slowPath.append(
1217                         jit.branchSubPtr(
1218                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1219                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1220                     jit.negPtr(scratchGPR);
1221                     jit.addPtr(
1222                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1223                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1224                     // We have scratchGPR = new storage, scratchGPR3 = old storage,
1225                     // scratchGPR2 = available
1226                     for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1227                         jit.loadPtr(
1228                             CCallHelpers::Address(
1229                                 scratchGPR3,
1230                                 -static_cast<ptrdiff_t>(
1231                                     offset + sizeof(JSValue) + sizeof(void*))),
1232                             scratchGPR2);
1233                         jit.storePtr(
1234                             scratchGPR2,
1235                             CCallHelpers::Address(
1236                                 scratchGPR,
1237                                 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1238                     }
1239                 }
1240             } else {
1241                 // Handle the case where we are allocating out-of-line using an operation.
1242                 RegisterSet extraRegistersToPreserve;
1243                 extraRegistersToPreserve.set(baseGPR);
1244                 extraRegistersToPreserve.set(valueRegs);
1245                 state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1246                 
1247                 jit.store32(
1248                     CCallHelpers::TrustedImm32(
1249                         state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1250                     CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
1251                 
1252                 jit.makeSpaceOnStackForCCall();
1253                 
1254                 if (!reallocating) {
1255                     jit.setupArgumentsWithExecState(baseGPR);
1256                     
1257                     CCallHelpers::Call operationCall = jit.call();
1258                     jit.addLinkTask(
1259                         [=] (LinkBuffer& linkBuffer) {
1260                             linkBuffer.link(
1261                                 operationCall,
1262                                 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1263                         });
1264                 } else {
1265                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1266                     // already had out-of-line property storage).
1267                     jit.setupArgumentsWithExecState(
1268                         baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1269                     
1270                     CCallHelpers::Call operationCall = jit.call();
1271                     jit.addLinkTask(
1272                         [=] (LinkBuffer& linkBuffer) {
1273                             linkBuffer.link(
1274                                 operationCall,
1275                                 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1276                         });
1277                 }
1278                 
1279                 jit.reclaimSpaceOnStackForCCall();
1280                 jit.move(GPRInfo::returnValueGPR, scratchGPR);
1281                 
                CCallHelpers::Jump noException =
                    jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
                
                state.restoreLiveRegistersFromStackForCallWithThrownException();
                state.emitExplicitExceptionHandler();
                
                noException.link(&jit);
                state.restoreLiveRegistersFromStackForCall();
            }
        }

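        // Now store the value itself, either into the object's inline storage or into the
        // (possibly freshly allocated) butterfly, whose pointer is in scratchGPR.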
        if (isInlineOffset(m_offset)) {
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(
                    baseGPR,
                    JSObject::offsetOfInlineStorage() +
                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
        } else {
            if (!allocating)
                jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
        }
        
        // If we had allocated using an operation then we would have already executed the store
        // barrier and we would have already stored the butterfly into the object.
        if (allocatingInline) {
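            // Inline GC write barrier: objects that are already remembered (or are in eden)
            // need no barrier. Otherwise, append the object to the write barrier buffer,
            // bailing to the slow path if the buffer is full.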
            CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
            WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
            jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
            slowPath.append(
                jit.branch32(
                    CCallHelpers::AboveOrEqual, scratchGPR2,
                    CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
            
            jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
            jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
            
            jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
            // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
            jit.storePtr(
                baseGPR,
                CCallHelpers::BaseIndex(
                    scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
                    static_cast<int32_t>(-sizeof(void*))));
            ownerIsRememberedOrInEden.link(&jit);
            
            // We set the new butterfly and the structure last. Doing it this way ensures that
            // whatever we had done up to this point is forgotten if we choose to branch to slow
            // path.
            
            jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
        }
        
        uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
        jit.store32(
            CCallHelpers::TrustedImm32(structureBits),
            CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));

        allocator.restoreReusedRegistersByPopping(jit, preservedState);
        state.succeed();
        
        // We will have a slow path if we were allocating without the help of an operation.
        if (allocatingInline) {
            if (allocator.didReuseRegisters()) {
                slowPath.link(&jit);
                allocator.restoreReusedRegistersByPopping(jit, preservedState);
                state.failAndIgnore.append(jit.jump());
            } else
                state.failAndIgnore.append(slowPath);
        } else
            RELEASE_ASSERT(slowPath.empty());
        return;
    }

    case ArrayLength: {
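        // The array length lives in the butterfly. A length >= 2^31 would read as a negative
        // int32, so we fail to the generic path in that case rather than box a bogus value.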
        jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
        jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
        state.failAndIgnore.append(
            jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
        jit.boxInt32(scratchGPR, valueRegs);
        state.succeed();
        return;
    }

    case StringLength: {
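        // JSString's length always fits in an int32 (string lengths are capped below 2^31),
        // so no range check is needed before boxing.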
        jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
        state.succeed();
        return;
    }
        
    case IntrinsicGetter: {
        RELEASE_ASSERT(isValidOffset(offset()));

        // We need to ensure the getter value does not move from under us. Note that GetterSetters
        // are immutable, so we only need to watch the property, not any value inside it.
        Structure* currStructure;
        if (m_conditionSet.isEmpty())
            currStructure = structure();
        else
            currStructure = m_conditionSet.slotBaseCondition().object()->structure();
        currStructure->startWatchingPropertyForReplacements(vm, offset());

        emitIntrinsicGetter(state);
        return;
    }

    case DirectArgumentsLength:
    case ScopedArgumentsLength:
    case MegamorphicLoad:
        // These need to be handled by generateWithGuard(), since the guard is part of the
        // algorithm. We can be sure that nobody will call generate() directly for these since they
        // are not guarded by structure checks.
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    RELEASE_ASSERT_NOT_REACHED();
}

PolymorphicAccess::PolymorphicAccess() { }
PolymorphicAccess::~PolymorphicAccess() { }

AccessGenerationResult PolymorphicAccess::addCases(
    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
    Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
{
    SuperSamplerScope superSamplerScope(false);
    
    // This method will add the originalCasesToAdd to the list one at a time while preserving the
    // invariants:
    // - If a newly added case canReplace() any existing case, then the existing case is removed before
    //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
    //   can be removed via the canReplace() rule.
    // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
    //   cascade through the cases in reverse order, you will get the most recent cases first.
    // - If this method fails (returns MadeNoChanges without adding the cases), then both the previous
    //   case list and the previous stub are kept intact and the new cases are destroyed. It's OK to
    //   attempt to add more things after failure.
    
    // First ensure that the originalCasesToAdd doesn't contain duplicates.
    Vector<std::unique_ptr<AccessCase>> casesToAdd;
    for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
        std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);

        // Add it only if it is not replaced by a subsequent case in the list.
        bool found = false;
        for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
            if (originalCasesToAdd[j]->canReplace(*myCase)) {
                found = true;
                break;
            }
        }

        if (found)
            continue;
        
        casesToAdd.append(WTFMove(myCase));
    }

    if (verbose)
        dataLog("casesToAdd: ", listDump(casesToAdd), "\n");

    // If there aren't any cases to add, then fail on the grounds that there's no point in generating a
    // new stub that would be identical to the old one. Returning MadeNoChanges tells the caller to just
    // keep doing what they were doing before.
    if (casesToAdd.isEmpty())
        return AccessGenerationResult::MadeNoChanges;

    // Now add things to the new list. Note that at this point, we will still have old cases that
    // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
    for (auto& caseToAdd : casesToAdd) {
        commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
        m_list.append(WTFMove(caseToAdd));
    }
    
    if (verbose)
        dataLog("After addCases: m_list: ", listDump(m_list), "\n");

    return AccessGenerationResult::Buffered;
}

AccessGenerationResult PolymorphicAccess::addCase(
    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
    std::unique_ptr<AccessCase> newAccess)
{
    Vector<std::unique_ptr<AccessCase>> newAccesses;
    newAccesses.append(WTFMove(newAccess));
    return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
}

bool PolymorphicAccess::visitWeak(VM& vm) const
{
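    // Report false if any case, or any weak reference recorded while generating the stub,
    // points at a dead cell; the caller is expected to discard the whole stub in response.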
    for (unsigned i = 0; i < size(); ++i) {
        if (!at(i).visitWeak(vm))
            return false;
    }
    if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
        for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
            if (!Heap::isMarked(weakReference.get()))
                return false;
        }
    }
    return true;
}

void PolymorphicAccess::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":[");
    CommaPrinter comma;
    for (auto& entry : m_list)
        out.print(comma, *entry);
    out.print("]");
}

void PolymorphicAccess::commit(
    VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
    StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
{
    // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
    // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
    // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
    // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
    // Those common kinds of JSC object accesses don't hit this case.
    
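    // Each watchpoint set this case depends on gets a watchpoint that, when fired, clears
    // the stub (see StructureStubClearingWatchpoint) so that it can be regenerated without
    // the invalidated case.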
    for (WatchpointSet* set : accessCase.commit(vm, ident)) {
        Watchpoint* watchpoint =
            WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
                watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
        
        set->add(watchpoint);
    }
}

AccessGenerationResult PolymorphicAccess::regenerate(
    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
{
    SuperSamplerScope superSamplerScope(false);
    
    if (verbose)
        dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
    
    AccessGenerationState state;

    state.access = this;
    state.stubInfo = &stubInfo;
    state.ident = &ident;
    
    state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
    state.valueRegs = JSValueRegs(
#if USE(JSVALUE32_64)
        static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
#endif
        static_cast<GPRReg>(stubInfo.patch.valueGPR));

    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    state.allocator = &allocator;
    allocator.lock(state.baseGPR);
    allocator.lock(state.valueRegs);
#if USE(JSVALUE32_64)
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif

    state.scratchGPR = allocator.allocateScratchGPR();
    
    CCallHelpers jit(&vm, codeBlock);
    state.jit = &jit;

    state.preservedReusedRegisterState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

    // Regenerating is our opportunity to figure out what our list of cases should look like. We
    // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
    // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
    // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
    // from the code of the current stub (aka previous).
    ListType cases;
    unsigned srcIndex = 0;
    unsigned dstIndex = 0;
    while (srcIndex < m_list.size()) {
        std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);
        
        // If the case had been generated, then we have to keep the original in m_list in case we
        // fail to regenerate. That case may have data structures that are used by the code that it
        // had generated. If the case had not been generated, then we want to remove it from m_list.
        bool isGenerated = someCase->state() == AccessCase::Generated;
        
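        // This immediately invoked lambda lets the filtering logic bail out with 'return'
        // while still falling through to the m_list compaction step below.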
        [&] () {
            if (!someCase->couldStillSucceed())
                return;

            // Figure out if this is replaced by any later case.
            for (unsigned j = srcIndex; j < m_list.size(); ++j) {
                if (m_list[j]->canReplace(*someCase))
                    return;
            }
            
            if (isGenerated)
                cases.append(someCase->clone());
            else
                cases.append(WTFMove(someCase));
        }();
        
        if (isGenerated)
            m_list[dstIndex++] = WTFMove(someCase);
    }
    m_list.resize(dstIndex);
    
    if (verbose)
        dataLog("In regenerate: cases: ", listDump(cases), "\n");
    
    // Now that we've removed obviously unnecessary cases, we can check if the megamorphic load
    // optimization is applicable. Note that we basically tune megamorphicLoadCost according to code
    // size. It would be faster to just allow more repatching with many load cases, and avoid the
    // megamorphicLoad optimization, if we had infinite executable memory.
    if (cases.size() >= Options::maxAccessVariantListSize()) {
        unsigned numSelfLoads = 0;
        for (auto& newCase : cases) {
            if (newCase->canBeReplacedByMegamorphicLoad())
                numSelfLoads++;
        }
        
        if (numSelfLoads >= Options::megamorphicLoadCost()) {
            if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
                cases.removeAllMatching(
                    [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
                        return newCase->canBeReplacedByMegamorphicLoad();
                    });
                
                cases.append(WTFMove(mega));
            }
        }
    }
    
    if (verbose)
        dataLog("Optimized cases: ", listDump(cases), "\n");
    
    // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
    // won't change that set anymore.
    
    bool allGuardedByStructureCheck = true;
    bool hasJSGetterSetterCall = false;
    for (auto& newCase : cases) {
        commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
        allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
        if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
            hasJSGetterSetterCall = true;
    }

    if (cases.isEmpty()) {
        // This is super unlikely, but we make it legal anyway.
        state.failAndRepatch.append(jit.jump());
    } else if (!allGuardedByStructureCheck || cases.size() == 1) {
        // If there are any proxies in the list, we cannot just use a binary switch over the structure.
        // We need to resort to a cascade. A cascade also happens to be optimal if we have just one
        // case.
        CCallHelpers::JumpList fallThrough;

        // Cascade through the list, preferring newer entries.
        for (unsigned i = cases.size(); i--;) {
            fallThrough.link(&jit);
            cases[i]->generateWithGuard(state, fallThrough);
        }
        state.failAndRepatch.append(fallThrough);
    } else {
        jit.load32(
            CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
            state.scratchGPR);
        
        Vector<int64_t> caseValues(cases.size());
        for (unsigned i = 0; i < cases.size(); ++i)
            caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
        
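        // Emit a binary switch on the structure ID: matching IDs dispatch to that case's
        // generated code, and unmatched IDs fall through to the repatch path.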
        BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
        while (binarySwitch.advance(jit))
            cases[binarySwitch.caseIndex()]->generate(state);
        state.failAndRepatch.append(binarySwitch.fallThrough());
    }

    if (!state.failAndIgnore.empty()) {
        state.failAndIgnore.link(&jit);
        
        // Make sure that the inline cache optimization code knows that we are taking slow path because
        // of something that isn't patchable. The slow path will decrement "countdown" and will only
        // patch things if the countdown reaches zero. We increment the slow path count here to ensure
        // that the slow path does not try to patch.
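        // x86 can add to a byte in memory directly; other targets must load, modify, and store.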
#if CPU(X86) || CPU(X86_64)
        jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
        jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
#else
        jit.load8(&stubInfo.countdown, state.scratchGPR);
        jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
        jit.store8(state.scratchGPR, &stubInfo.countdown);
#endif
    }

    CCallHelpers::JumpList failure;
    if (allocator.didReuseRegisters()) {
        state.failAndRepatch.link(&jit);
        state.restoreScratch();
    } else
        failure = state.failAndRepatch;
    failure.append(jit.jump());

    CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
    CallSiteIndex callSiteIndexForExceptionHandling;
    if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
        // Emit the exception handler.
        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
        // Note also that this is not reachable from a custom getter/setter. Custom getter/setters will
        // have their own exception handling logic that doesn't go through genericUnwind.
        MacroAssembler::Label makeshiftCatchHandler = jit.label();

        int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
        stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
        stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();

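        // Recover the frame pointer from the VM and recompute the stack pointer, accounting
        // for both the registers this stub pushed and any preservation done for the call.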
        jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
        jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

        state.restoreLiveRegistersFromStackForCallWithThrownException();
        state.restoreScratch();
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();

        HandlerInfo oldHandler = state.originalExceptionHandler();
        CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);

                HandlerInfo handlerToRegister = oldHandler;
                handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
                handlerToRegister.start = newExceptionHandlingCallSite.bits();
                handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
                codeBlock->appendExceptionHandler(handlerToRegister);
            });

        // We set these to indicate to the stub to remove itself from the CodeBlock's
        // exception handler table when it is deallocated.
        codeBlockThatOwnsExceptionHandlers = codeBlock;
        ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
        callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
    }

    LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate()) {
        if (verbose)
            dataLog("Did fail to allocate.\n");
        return AccessGenerationResult::GaveUp;
    }

    CodeLocationLabel successLabel =
        stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
        
    linkBuffer.link(state.success, successLabel);

    linkBuffer.link(
        failure,
        stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
    
    if (verbose)
        dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");

    MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
        codeBlock, linkBuffer,
        ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));

    bool doesCalls = false;
    Vector<JSCell*> cellsToMark;
    for (auto& entry : cases)
        doesCalls |= entry->doesCalls(&cellsToMark);
    
    m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
    m_watchpoints = WTFMove(state.watchpoints);
    if (!state.weakReferences.isEmpty())
        m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
    if (verbose)
        dataLog("Returning: ", code.code(), "\n");
    
    m_list = WTFMove(cases);
    
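    // Once the case list has reached its maximum size we report the code as final, which
    // tells the caller to stop attempting to regenerate this stub with additional cases.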
    AccessGenerationResult::Kind resultKind;
    if (m_list.size() >= Options::maxAccessVariantListSize())
        resultKind = AccessGenerationResult::GeneratedFinalCode;
    else
        resultKind = AccessGenerationResult::GeneratedNewCode;
    
    return AccessGenerationResult(resultKind, code.code());
}

void PolymorphicAccess::aboutToDie()
{
    if (m_stubRoutine)
        m_stubRoutine->aboutToDie();
}

} // namespace JSC

namespace WTF {

using namespace JSC;

void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
{
    switch (kind) {
    case AccessGenerationResult::MadeNoChanges:
        out.print("MadeNoChanges");
        return;
    case AccessGenerationResult::GaveUp:
        out.print("GaveUp");
        return;
    case AccessGenerationResult::Buffered:
        out.print("Buffered");
        return;
    case AccessGenerationResult::GeneratedNewCode:
        out.print("GeneratedNewCode");
        return;
    case AccessGenerationResult::GeneratedFinalCode:
        out.print("GeneratedFinalCode");
        return;
    }
    
    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::AccessType type)
{
    switch (type) {
    case AccessCase::Load:
        out.print("Load");
        return;
    case AccessCase::MegamorphicLoad:
        out.print("MegamorphicLoad");
        return;
    case AccessCase::Transition:
        out.print("Transition");
        return;
    case AccessCase::Replace:
        out.print("Replace");
        return;
    case AccessCase::Miss:
        out.print("Miss");
        return;
    case AccessCase::GetGetter:
        out.print("GetGetter");
        return;
    case AccessCase::Getter:
        out.print("Getter");
        return;
    case AccessCase::Setter:
        out.print("Setter");
        return;
    case AccessCase::CustomValueGetter:
        out.print("CustomValueGetter");
        return;
    case AccessCase::CustomAccessorGetter:
        out.print("CustomAccessorGetter");
        return;
    case AccessCase::CustomValueSetter:
        out.print("CustomValueSetter");
        return;
    case AccessCase::CustomAccessorSetter:
        out.print("CustomAccessorSetter");
        return;
    case AccessCase::IntrinsicGetter:
        out.print("IntrinsicGetter");
        return;
    case AccessCase::InHit:
        out.print("InHit");
        return;
    case AccessCase::InMiss:
        out.print("InMiss");
        return;
    case AccessCase::ArrayLength:
        out.print("ArrayLength");
        return;
    case AccessCase::StringLength:
        out.print("StringLength");
        return;
    case AccessCase::DirectArgumentsLength:
        out.print("DirectArgumentsLength");
        return;
    case AccessCase::ScopedArgumentsLength:
        out.print("ScopedArgumentsLength");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::State state)
{
    switch (state) {
    case AccessCase::Primordial:
        out.print("Primordial");
        return;
    case AccessCase::Committed:
        out.print("Committed");
        return;
    case AccessCase::Generated:
        out.print("Generated");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace WTF

#endif // ENABLE(JIT)