1 /*
2  * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "PolymorphicAccess.h"
28
29 #if ENABLE(JIT)
30
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "DOMJITAccessCasePatchpointParams.h"
35 #include "DOMJITCallDOMGetterPatchpoint.h"
36 #include "DirectArguments.h"
37 #include "GetterSetter.h"
38 #include "Heap.h"
39 #include "JITOperations.h"
40 #include "JSCInlines.h"
41 #include "LinkBuffer.h"
42 #include "ScopedArguments.h"
43 #include "ScratchRegisterAllocator.h"
44 #include "StructureStubClearingWatchpoint.h"
45 #include "StructureStubInfo.h"
46 #include <wtf/CommaPrinter.h>
47 #include <wtf/ListDump.h>
48
49 namespace JSC {
50
51 static const bool verbose = false;
52
53 void AccessGenerationResult::dump(PrintStream& out) const
54 {
55     out.print(m_kind);
56     if (m_code)
57         out.print(":", m_code);
58 }
59
60 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
61 {
62     return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
63         watchpoints, jit->codeBlock(), stubInfo, condition);
64 }
65
66 void AccessGenerationState::restoreScratch()
67 {
68     allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
69 }
70
71 void AccessGenerationState::succeed()
72 {
73     restoreScratch();
74     success.append(jit->jump());
75 }
76
77 const RegisterSet& AccessGenerationState::liveRegistersForCall()
78 {
79     if (!m_calculatedRegistersForCallAndExceptionHandling)
80         calculateLiveRegistersForCallAndExceptionHandling();
81     return m_liveRegistersForCall;
82 }
83
84 const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
85 {
86     if (!m_calculatedRegistersForCallAndExceptionHandling)
87         calculateLiveRegistersForCallAndExceptionHandling();
88     return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
89 }
90
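// Registers that do not need to be saved around either a JS call or a C call: the callee
// (or the calling convention) preserves them for us.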
91 static RegisterSet calleeSaveRegisters()
92 {
93     RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
94     result.filter(RegisterSet::registersToNotSaveForCCall());
95     return result;
96 }
97
98 const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
99 {
100     if (!m_calculatedRegistersForCallAndExceptionHandling) {
101         m_calculatedRegistersForCallAndExceptionHandling = true;
102
103         m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
104         m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
105         if (m_needsToRestoreRegistersIfException)
106             RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
107
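        // The registers we must preserve around a call are those live at the exception handling
        // call site plus whatever the stub itself has allocated, minus the callee-saves, which
        // survive the call anyway.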
108         m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
109         m_liveRegistersForCall.exclude(calleeSaveRegisters());
110     }
111     return m_liveRegistersForCall;
112 }
113
114 auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
115 {
116     RegisterSet liveRegisters = liveRegistersForCall();
117     liveRegisters.merge(extra);
118     
119     unsigned extraStackPadding = 0;
120     unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
121     return SpillState {
122         WTFMove(liveRegisters),
123         numberOfStackBytesUsedForRegisterPreservation
124     };
125 }
126
127 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
128 {
129     // Even if we're a getter, we can't skip restoring the result register the way we
130     // normally do: the getter threw, so it didn't return a value that means anything.
131     // Instead, we want to restore that register to what it was upon entering the getter
132     // inline cache. The subtlety here is that if the base and the result are the same
133     // register and the getter threw, we want OSR exit to see the original base value,
134     // not the result of the getter call.
135     RegisterSet dontRestore = spillState.spilledRegisters;
136     // As an optimization here, we only need to restore what is live for exception handling.
137     // We can construct the dontRestore set to accomplish this goal by having it contain only
138     // what is live for call but not live for exception handling. By ignoring things that are
139     // only live at the call but not the exception handler, we will only restore things live
140     // at the exception handler.
141     dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
142     restoreLiveRegistersFromStackForCall(spillState, dontRestore);
143 }
144
145 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
146 {
147     unsigned extraStackPadding = 0;
148     ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
149 }
150
151 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
152 {
153     if (!m_calculatedRegistersForCallAndExceptionHandling)
154         calculateLiveRegistersForCallAndExceptionHandling();
155
156     if (!m_calculatedCallSiteIndex) {
157         m_calculatedCallSiteIndex = true;
158
159         if (m_needsToRestoreRegistersIfException)
160             m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
161         else
162             m_callSiteIndex = originalCallSiteIndex();
163     }
164
165     return m_callSiteIndex;
166 }
167
168 const HandlerInfo& AccessGenerationState::originalExceptionHandler()
169 {
170     if (!m_calculatedRegistersForCallAndExceptionHandling)
171         calculateLiveRegistersForCallAndExceptionHandling();
172
173     RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
174     HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
175     RELEASE_ASSERT(exceptionHandler);
176     return *exceptionHandler;
177 }
178
179 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
180
181 void AccessGenerationState::emitExplicitExceptionHandler()
182 {
183     restoreScratch();
184     jit->copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
185     if (needsToRestoreRegistersIfException()) {
186         // The JIT that produced the original exception handling call site
187         // expects the OSR exit to be reached via genericUnwind. Therefore we must
188         // model what genericUnwind does here, i.e. set callFrameForCatch and copy
189         // the callee saves.
190
191         jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
192         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
193
194         // We don't need to insert a new exception handler in the table
195         // because we're doing a manual exception check here, i.e. we'll
196         // never arrive here from genericUnwind().
197         HandlerInfo originalHandler = originalExceptionHandler();
198         jit->addLinkTask(
199             [=] (LinkBuffer& linkBuffer) {
200                 linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
201             });
202     } else {
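        // Nothing needs to be restored for the exception handler here, so we just ask the
        // runtime to look up the handler and then jump straight to it.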
203         jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
204         CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
205         jit->addLinkTask(
206             [=] (LinkBuffer& linkBuffer) {
207                 linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
208             });
209         jit->jumpToExceptionHandler();
210     }
211 }
212
213 AccessCase::AccessCase()
214 {
215 }
216
217 std::unique_ptr<AccessCase> AccessCase::tryGet(
218     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
219     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
220 {
221     std::unique_ptr<AccessCase> result(new AccessCase());
222
223     result->m_type = type;
224     result->m_offset = offset;
225     result->m_structure.set(vm, owner, structure);
226     result->m_conditionSet = conditionSet;
227
228     if (viaProxy || additionalSet) {
229         result->m_rareData = std::make_unique<RareData>();
230         result->m_rareData->viaProxy = viaProxy;
231         result->m_rareData->additionalSet = additionalSet;
232     }
233
234     return result;
235 }
236
237 std::unique_ptr<AccessCase> AccessCase::get(
238     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
239     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
240     PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase, DOMJIT::GetterSetter* domJIT)
241 {
242     std::unique_ptr<AccessCase> result(new AccessCase());
243
244     result->m_type = type;
245     result->m_offset = offset;
246     result->m_structure.set(vm, owner, structure);
247     result->m_conditionSet = conditionSet;
248
249     if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase || domJIT) {
250         result->m_rareData = std::make_unique<RareData>();
251         result->m_rareData->viaProxy = viaProxy;
252         result->m_rareData->additionalSet = additionalSet;
253         result->m_rareData->customAccessor.getter = customGetter;
254         result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
255         result->m_rareData->domJIT = domJIT;
256     }
257
258     return result;
259 }
260
261 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
262 {
263     UNUSED_PARAM(vm);
264     UNUSED_PARAM(owner);
265     
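    // The megamorphic load stub emits an inline probe of the structure's property table
    // (see the MegamorphicLoad case in generateWithGuard), which needs several scratch
    // registers on top of the base and result registers, so bail out on register-starved
    // targets.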
266     if (GPRInfo::numberOfRegisters < 9)
267         return nullptr;
268     
269     std::unique_ptr<AccessCase> result(new AccessCase());
270     
271     result->m_type = MegamorphicLoad;
272     
273     return result;
274 }
275
276 std::unique_ptr<AccessCase> AccessCase::replace(
277     VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
278 {
279     std::unique_ptr<AccessCase> result(new AccessCase());
280
281     result->m_type = Replace;
282     result->m_offset = offset;
283     result->m_structure.set(vm, owner, structure);
284
285     return result;
286 }
287
288 std::unique_ptr<AccessCase> AccessCase::transition(
289     VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
290     const ObjectPropertyConditionSet& conditionSet)
291 {
292     RELEASE_ASSERT(oldStructure == newStructure->previousID());
293
294     // Skip optimizing the case where we need a realloc if we don't have
295     // enough registers to make it happen.
296     if (GPRInfo::numberOfRegisters < 6
297         && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
298         && oldStructure->outOfLineCapacity()) {
299         return nullptr;
300     }
301
302     std::unique_ptr<AccessCase> result(new AccessCase());
303
304     result->m_type = Transition;
305     result->m_offset = offset;
306     result->m_structure.set(vm, owner, newStructure);
307     result->m_conditionSet = conditionSet;
308
309     return result;
310 }
311
312 std::unique_ptr<AccessCase> AccessCase::setter(
313     VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
314     const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
315     JSObject* customSlotBase)
316 {
317     std::unique_ptr<AccessCase> result(new AccessCase());
318
319     result->m_type = type;
320     result->m_offset = offset;
321     result->m_structure.set(vm, owner, structure);
322     result->m_conditionSet = conditionSet;
323     result->m_rareData = std::make_unique<RareData>();
324     result->m_rareData->customAccessor.setter = customSetter;
325     result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
326
327     return result;
328 }
329
330 std::unique_ptr<AccessCase> AccessCase::in(
331     VM& vm, JSCell* owner, AccessType type, Structure* structure,
332     const ObjectPropertyConditionSet& conditionSet)
333 {
334     std::unique_ptr<AccessCase> result(new AccessCase());
335
336     result->m_type = type;
337     result->m_structure.set(vm, owner, structure);
338     result->m_conditionSet = conditionSet;
339
340     return result;
341 }
342
343 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
344 {
345     std::unique_ptr<AccessCase> result(new AccessCase());
346
347     result->m_type = type;
348
349     return result;
350 }
351
352 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
353     VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
354     Structure* structure, const ObjectPropertyConditionSet& conditionSet)
355 {
356     std::unique_ptr<AccessCase> result(new AccessCase());
357
358     result->m_type = IntrinsicGetter;
359     result->m_structure.set(vm, owner, structure);
360     result->m_conditionSet = conditionSet;
361     result->m_offset = offset;
362
363     result->m_rareData = std::make_unique<RareData>();
364     result->m_rareData->intrinsicFunction.set(vm, owner, getter);
365
366     return result;
367 }
368
369 AccessCase::~AccessCase()
370 {
371 }
372
373 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
374     VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
375 {
376     switch (stubInfo.cacheType) {
377     case CacheType::GetByIdSelf:
378         return get(
379             vm, owner, Load, stubInfo.u.byIdSelf.offset,
380             stubInfo.u.byIdSelf.baseObjectStructure.get());
381
382     case CacheType::PutByIdReplace:
383         return replace(
384             vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
385
386     default:
387         return nullptr;
388     }
389 }
390
391 std::unique_ptr<AccessCase> AccessCase::clone() const
392 {
393     std::unique_ptr<AccessCase> result(new AccessCase());
394     result->m_type = m_type;
395     result->m_offset = m_offset;
396     result->m_structure = m_structure;
397     result->m_conditionSet = m_conditionSet;
398     if (RareData* rareData = m_rareData.get()) {
399         result->m_rareData = std::make_unique<RareData>();
400         result->m_rareData->viaProxy = rareData->viaProxy;
401         result->m_rareData->additionalSet = rareData->additionalSet;
402         // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
403         result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
404         result->m_rareData->customSlotBase = rareData->customSlotBase;
405         result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
406         result->m_rareData->domJIT = rareData->domJIT;
407     }
408     return result;
409 }
410
411 Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
412 {
413     // It's fine to commit something that is already committed. That arises when we switch to using
414     // newly allocated watchpoints. When it happens, it's not efficient, but we think that's OK
415     // because most AccessCases have no extra watchpoints anyway.
416     RELEASE_ASSERT(m_state == Primordial || m_state == Committed);
417     
418     Vector<WatchpointSet*, 2> result;
419     
420     if ((structure() && structure()->needImpurePropertyWatchpoint())
421         || m_conditionSet.needImpurePropertyWatchpoint())
422         result.append(vm.ensureWatchpointSetForImpureProperty(ident));
423
424     if (additionalSet())
425         result.append(additionalSet());
426     
427     m_state = Committed;
428     
429     return result;
430 }
431
432 bool AccessCase::guardedByStructureCheck() const
433 {
434     if (viaProxy())
435         return false;
436
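    // These cases are guarded by type checks on the cell (its JSType or indexing type) rather
    // than by a structure check, so PolymorphicAccess cannot dispatch to them through the
    // structure switch.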
437     switch (m_type) {
438     case MegamorphicLoad:
439     case ArrayLength:
440     case StringLength:
441     case DirectArgumentsLength:
442     case ScopedArgumentsLength:
443         return false;
444     default:
445         return true;
446     }
447 }
448
449 JSObject* AccessCase::alternateBase() const
450 {
451     if (customSlotBase())
452         return customSlotBase();
453     return conditionSet().slotBaseCondition().object();
454 }
455
456 bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
457 {
458     switch (type()) {
459     case Getter:
460     case Setter:
461     case CustomValueGetter:
462     case CustomAccessorGetter:
463     case CustomValueSetter:
464     case CustomAccessorSetter:
465         return true;
466     case Transition:
467         if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
468             && structure()->couldHaveIndexingHeader()) {
469             if (cellsToMark)
470                 cellsToMark->append(newStructure());
471             return true;
472         }
473         return false;
474     default:
475         return false;
476     }
477 }
478
479 bool AccessCase::couldStillSucceed() const
480 {
481     return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
482 }
483
484 bool AccessCase::canBeReplacedByMegamorphicLoad() const
485 {
486     if (type() == MegamorphicLoad)
487         return true;
488     
489     return type() == Load
490         && !viaProxy()
491         && conditionSet().isEmpty()
492         && !additionalSet()
493         && !customSlotBase();
494 }
495
496 bool AccessCase::canReplace(const AccessCase& other) const
497 {
498     // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
499     // It's fine for this to return false if it's in doubt.
500
501     switch (type()) {
502     case MegamorphicLoad:
503         return other.canBeReplacedByMegamorphicLoad();
504     case ArrayLength:
505     case StringLength:
506     case DirectArgumentsLength:
507     case ScopedArgumentsLength:
508         return other.type() == type();
509     default:
510         if (!guardedByStructureCheck() || !other.guardedByStructureCheck())
511             return false;
512         
513         return structure() == other.structure();
514     }
515 }
516
517 void AccessCase::dump(PrintStream& out) const
518 {
519     out.print(m_type, ":(");
520
521     CommaPrinter comma;
522     
523     out.print(comma, m_state);
524
525     if (m_type == Transition)
526         out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
527     else if (m_structure)
528         out.print(comma, "structure = ", pointerDump(m_structure.get()));
529
530     if (isValidOffset(m_offset))
531         out.print(comma, "offset = ", m_offset);
532     if (!m_conditionSet.isEmpty())
533         out.print(comma, "conditions = ", m_conditionSet);
534
535     if (RareData* rareData = m_rareData.get()) {
536         if (rareData->viaProxy)
537             out.print(comma, "viaProxy = ", rareData->viaProxy);
538         if (rareData->additionalSet)
539             out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
540         if (rareData->callLinkInfo)
541             out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
542         if (rareData->customAccessor.opaque)
543             out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
544         if (rareData->customSlotBase)
545             out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
546     }
547
548     out.print(")");
549 }
550
551 bool AccessCase::visitWeak(VM& vm) const
552 {
553     if (m_structure && !Heap::isMarked(m_structure.get()))
554         return false;
555     if (!m_conditionSet.areStillLive())
556         return false;
557     if (m_rareData) {
558         if (m_rareData->callLinkInfo)
559             m_rareData->callLinkInfo->visitWeak(vm);
560         if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
561             return false;
562         if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
563             return false;
564     }
565     return true;
566 }
567
568 bool AccessCase::propagateTransitions(SlotVisitor& visitor) const
569 {
570     bool result = true;
571     
572     if (m_structure)
573         result &= m_structure->markIfCheap(visitor);
574     
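    // A Transition case wants to keep the new structure alive for as long as the old structure
    // is alive. If the old structure hasn't been marked yet, we may have to revisit, so report
    // that we aren't done yet.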
575     switch (m_type) {
576     case Transition:
577         if (Heap::isMarkedConcurrently(m_structure->previousID()))
578             visitor.appendUnbarriered(m_structure.get());
579         else
580             result = false;
581         break;
582     default:
583         break;
584     }
585     
586     return result;
587 }
588
589 void AccessCase::generateWithGuard(
590     AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
591 {
592     SuperSamplerScope superSamplerScope(false);
593
594     RELEASE_ASSERT(m_state == Committed);
595     m_state = Generated;
596     
597     CCallHelpers& jit = *state.jit;
598     VM& vm = *jit.vm();
599     const Identifier& ident = *state.ident;
600     StructureStubInfo& stubInfo = *state.stubInfo;
601     JSValueRegs valueRegs = state.valueRegs;
602     GPRReg baseGPR = state.baseGPR;
603     GPRReg scratchGPR = state.scratchGPR;
604     
605     UNUSED_PARAM(vm);
606
607     switch (m_type) {
608     case ArrayLength: {
609         ASSERT(!viaProxy());
610         jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeAndMiscOffset()), scratchGPR);
611         fallThrough.append(
612             jit.branchTest32(
613                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
614         fallThrough.append(
615             jit.branchTest32(
616                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
617         break;
618     }
619
620     case StringLength: {
621         ASSERT(!viaProxy());
622         fallThrough.append(
623             jit.branch8(
624                 CCallHelpers::NotEqual,
625                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
626                 CCallHelpers::TrustedImm32(StringType)));
627         break;
628     }
629         
630     case DirectArgumentsLength: {
631         ASSERT(!viaProxy());
632         fallThrough.append(
633             jit.branch8(
634                 CCallHelpers::NotEqual,
635                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
636                 CCallHelpers::TrustedImm32(DirectArgumentsType)));
637
638         fallThrough.append(
639             jit.branchTestPtr(
640                 CCallHelpers::NonZero,
641                 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
642         jit.load32(
643             CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
644             valueRegs.payloadGPR());
645         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
646         state.succeed();
647         return;
648     }
649         
650     case ScopedArgumentsLength: {
651         ASSERT(!viaProxy());
652         fallThrough.append(
653             jit.branch8(
654                 CCallHelpers::NotEqual,
655                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
656                 CCallHelpers::TrustedImm32(ScopedArgumentsType)));
657
658         fallThrough.append(
659             jit.branchTest8(
660                 CCallHelpers::NonZero,
661                 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
662         jit.load32(
663             CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
664             valueRegs.payloadGPR());
665         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
666         state.succeed();
667         return;
668     }
669         
670     case MegamorphicLoad: {
671         UniquedStringImpl* key = ident.impl();
672         unsigned hash = IdentifierRepHash::hash(key);
673         
674         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
675         allocator.lock(baseGPR);
676 #if USE(JSVALUE32_64)
677         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
678 #endif
679         allocator.lock(valueRegs);
680         allocator.lock(scratchGPR);
681         
682         GPRReg intermediateGPR = scratchGPR;
683         GPRReg maskGPR = allocator.allocateScratchGPR();
684         GPRReg maskedHashGPR = allocator.allocateScratchGPR();
685         GPRReg indexGPR = allocator.allocateScratchGPR();
686         GPRReg offsetGPR = allocator.allocateScratchGPR();
687         
688         if (verbose) {
689             dataLog("baseGPR = ", baseGPR, "\n");
690             dataLog("valueRegs = ", valueRegs, "\n");
691             dataLog("scratchGPR = ", scratchGPR, "\n");
692             dataLog("intermediateGPR = ", intermediateGPR, "\n");
693             dataLog("maskGPR = ", maskGPR, "\n");
694             dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
695             dataLog("indexGPR = ", indexGPR, "\n");
696             dataLog("offsetGPR = ", offsetGPR, "\n");
697         }
698
699         ScratchRegisterAllocator::PreservedState preservedState =
700             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
701
702         CCallHelpers::JumpList myFailAndIgnore;
703         CCallHelpers::JumpList myFallThrough;
704         
705         jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
706         jit.loadPtr(
707             CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
708             intermediateGPR);
709         
710         myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
711         
712         jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
713         jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
714         jit.load32(
715             CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
716             intermediateGPR);
717
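        // The PropertyMapEntry array lives immediately after the 32-bit index vector, so
        // table = index + (indexSize << 2). Probe the index with linear probing: start at
        // hash & mask and, on a collision, advance by one and wrap around using the mask.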
718         jit.move(maskGPR, maskedHashGPR);
719         jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
720         jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
721         jit.addPtr(indexGPR, intermediateGPR);
722         
723         CCallHelpers::Label loop = jit.label();
724         
725         jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
726         
727         myFallThrough.append(
728             jit.branch32(
729                 CCallHelpers::Equal,
730                 offsetGPR,
731                 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
732         
733         jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
734         jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
735         jit.addPtr(intermediateGPR, offsetGPR);
736         
737         CCallHelpers::Jump collision =  jit.branchPtr(
738             CCallHelpers::NotEqual,
739             CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
740             CCallHelpers::TrustedImmPtr(key));
741         
742         // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
743         // Check them and then attempt the load.
744         
745         myFallThrough.append(
746             jit.branchTest32(
747                 CCallHelpers::NonZero,
748                 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
749                 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
750         
751         jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
752         
753         jit.loadProperty(baseGPR, offsetGPR, valueRegs);
754         
755         allocator.restoreReusedRegistersByPopping(jit, preservedState);
756         state.succeed();
757         
758         collision.link(&jit);
759
760         jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
761         
762         // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
763         // around isn't super common so we could, for example, recompute the mask from the difference between
764         // the table and index. But before we do that we should probably make it easier to multiply and
765         // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
766         // to have a power-of-2 size.
767         jit.and32(maskGPR, maskedHashGPR);
768         jit.jump().linkTo(loop, &jit);
769         
770         if (allocator.didReuseRegisters()) {
771             myFailAndIgnore.link(&jit);
772             allocator.restoreReusedRegistersByPopping(jit, preservedState);
773             state.failAndIgnore.append(jit.jump());
774             
775             myFallThrough.link(&jit);
776             allocator.restoreReusedRegistersByPopping(jit, preservedState);
777             fallThrough.append(jit.jump());
778         } else {
779             state.failAndIgnore.append(myFailAndIgnore);
780             fallThrough.append(myFallThrough);
781         }
782         return;
783     }
784
785     default: {
786         if (viaProxy()) {
787             fallThrough.append(
788                 jit.branch8(
789                     CCallHelpers::NotEqual,
790                     CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
791                     CCallHelpers::TrustedImm32(PureForwardingProxyType)));
792
793             jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
794
795             fallThrough.append(
796                 jit.branchStructure(
797                     CCallHelpers::NotEqual,
798                     CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
799                     structure()));
800         } else {
801             fallThrough.append(
802                 jit.branchStructure(
803                     CCallHelpers::NotEqual,
804                     CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
805                     structure()));
806         }
807         break;
808     } };
809
810     generateImpl(state);
811 }
812
813 void AccessCase::generate(AccessGenerationState& state)
814 {
815     RELEASE_ASSERT(m_state == Committed);
816     m_state = Generated;
817     
818     generateImpl(state);
819 }
820
821 void AccessCase::generateImpl(AccessGenerationState& state)
822 {
823     SuperSamplerScope superSamplerScope(false);
824     if (verbose)
825         dataLog("Generating code for: ", *this, "\n");
826     
827     ASSERT(m_state == Generated); // We rely on the callers setting this for us.
828     
829     CCallHelpers& jit = *state.jit;
830     VM& vm = *jit.vm();
831     CodeBlock* codeBlock = jit.codeBlock();
832     StructureStubInfo& stubInfo = *state.stubInfo;
833     const Identifier& ident = *state.ident;
834     JSValueRegs valueRegs = state.valueRegs;
835     GPRReg baseGPR = state.baseGPR;
836     GPRReg scratchGPR = state.scratchGPR;
837
838     ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
839
840     for (const ObjectPropertyCondition& condition : m_conditionSet) {
841         Structure* structure = condition.object()->structure();
842
843         if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
844             structure->addTransitionWatchpoint(state.addWatchpoint(condition));
845             continue;
846         }
847
848         if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
849             // The reason why this cannot happen is that we require that PolymorphicAccess calls
850             // AccessCase::generate() only after it has verified that
851             // AccessCase::couldStillSucceed() returned true.
852             
853             dataLog("This condition is no longer met: ", condition, "\n");
854             RELEASE_ASSERT_NOT_REACHED();
855         }
856
857         // We will emit code that has a weak reference that isn't otherwise listed anywhere.
858         state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
859         
860         jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
861         state.failAndRepatch.append(
862             jit.branchStructure(
863                 CCallHelpers::NotEqual,
864                 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
865                 structure));
866     }
867
868     switch (m_type) {
869     case InHit:
870     case InMiss:
871         jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
872         state.succeed();
873         return;
874
875     case Miss:
876         jit.moveTrustedValue(jsUndefined(), valueRegs);
877         state.succeed();
878         return;
879
880     case Load:
881     case GetGetter:
882     case Getter:
883     case Setter:
884     case CustomValueGetter:
885     case CustomAccessorGetter:
886     case CustomValueSetter:
887     case CustomAccessorSetter: {
888         GPRReg valueRegsPayloadGPR = valueRegs.payloadGPR();
889         
890         if (isValidOffset(m_offset)) {
891             Structure* currStructure;
892             if (m_conditionSet.isEmpty())
893                 currStructure = structure();
894             else
895                 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
896             currStructure->startWatchingPropertyForReplacements(vm, offset());
897         }
898
899         GPRReg baseForGetGPR;
900         if (viaProxy()) {
901             ASSERT(m_type != CustomValueSetter || m_type != CustomAccessorSetter); // Because setters need to not trash valueRegsPayloadGPR.
902             if (m_type == Getter || m_type == Setter)
903                 baseForGetGPR = scratchGPR;
904             else
905                 baseForGetGPR = valueRegsPayloadGPR;
906
907             ASSERT((m_type != Getter && m_type != Setter) || baseForGetGPR != baseGPR);
908             ASSERT(m_type != Setter || baseForGetGPR != valueRegsPayloadGPR);
909
910             jit.loadPtr(
911                 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
912                 baseForGetGPR);
913         } else
914             baseForGetGPR = baseGPR;
915
916         GPRReg baseForAccessGPR;
917         if (!m_conditionSet.isEmpty()) {
918             jit.move(
919                 CCallHelpers::TrustedImmPtr(alternateBase()),
920                 scratchGPR);
921             baseForAccessGPR = scratchGPR;
922         } else
923             baseForAccessGPR = baseForGetGPR;
924
925         GPRReg loadedValueGPR = InvalidGPRReg;
926         if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
927             if (m_type == Load || m_type == GetGetter)
928                 loadedValueGPR = valueRegsPayloadGPR;
929             else
930                 loadedValueGPR = scratchGPR;
931
932             ASSERT((m_type != Getter && m_type != Setter) || loadedValueGPR != baseGPR);
933             ASSERT(m_type != Setter || loadedValueGPR != valueRegsPayloadGPR);
934
935             GPRReg storageGPR;
936             if (isInlineOffset(m_offset))
937                 storageGPR = baseForAccessGPR;
938             else {
939                 jit.loadPtr(
940                     CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
941                     loadedValueGPR);
942                 storageGPR = loadedValueGPR;
943             }
944
945 #if USE(JSVALUE64)
946             jit.load64(
947                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
948 #else
949             if (m_type == Load || m_type == GetGetter) {
950                 jit.load32(
951                     CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
952                     valueRegs.tagGPR());
953             }
954             jit.load32(
955                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
956                 loadedValueGPR);
957 #endif
958         }
959
960         if (m_type == Load || m_type == GetGetter) {
961             state.succeed();
962             return;
963         }
964
965         if (Options::useDOMJIT() && m_type == CustomAccessorGetter && m_rareData->domJIT) {
966             // We do not need to emit a CheckDOM operation, since the structure check
967             // ensures that the structure of the given base value is structure(). So all
968             // we need to do is perform the CheckDOM check here, at IC compile time.
969             if (structure()->classInfo()->isSubClassOf(m_rareData->domJIT->thisClassInfo())) {
970                 emitDOMJITGetter(state, baseForGetGPR);
971                 return;
972             }
973         }
974
975         // Stuff for custom getters/setters.
976         CCallHelpers::Call operationCall;
977
978         // Stuff for JS getters/setters.
979         CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
980         CCallHelpers::Call fastPathCall;
981         CCallHelpers::Call slowPathCall;
982
983         // This also does the necessary calculations of whether or not we're an
984         // exception handling call site.
985         AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall();
986
987         auto restoreLiveRegistersFromStackForCall = [&](AccessGenerationState::SpillState& spillState, bool callHasReturnValue) {
988             RegisterSet dontRestore;
989             if (callHasReturnValue) {
990                 // This is the result value. We don't want to overwrite the result with what we stored to the stack.
991                 // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
992                 dontRestore.set(valueRegs);
993             }
994             state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);
995         };
996
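        // Stash the call site index in the tag of the argument-count slot; that is where the
        // exception handling machinery expects to find it.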
997         jit.store32(
998             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
999             CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
1000
1001         if (m_type == Getter || m_type == Setter) {
1002             ASSERT(baseGPR != loadedValueGPR);
1003             ASSERT(m_type != Setter || (baseGPR != valueRegsPayloadGPR && loadedValueGPR != valueRegsPayloadGPR));
1004
1005             // Create a JS call using a JS call inline cache. Assume that:
1006             //
1007             // - SP is aligned and represents the extent of the calling compiler's stack usage.
1008             //
1009             // - FP is set correctly (i.e. it points to the caller's call frame header).
1010             //
1011             // - SP - FP is an aligned difference.
1012             //
1013             // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
1014             //   code.
1015             //
1016             // Therefore, we temporarily grow the stack for the purpose of the call and then
1017             // shrink it after.
1018
1019             state.setSpillStateForJSGetterSetter(spillState);
1020
1021             RELEASE_ASSERT(!m_rareData->callLinkInfo);
1022             m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
1023             
1024             // FIXME: If we generated a polymorphic call stub that jumped back to the getter
1025             // stub, which then jumped back to the main code, then we'd have a reachability
1026             // situation that the GC doesn't know about. The GC would ensure that the polymorphic
1027             // call stub stayed alive, and it would ensure that the main code stayed alive, but
1028             // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
1029             // be GC objects, and then we'd be able to say that the polymorphic call stub has a
1030             // reference to the getter stub.
1031             // https://bugs.webkit.org/show_bug.cgi?id=148914
1032             m_rareData->callLinkInfo->disallowStubs();
1033             
1034             m_rareData->callLinkInfo->setUpCall(
1035                 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
1036
1037             CCallHelpers::JumpList done;
1038
1039             // There is a "this" argument.
1040             unsigned numberOfParameters = 1;
1041             // ... and a value argument if we're calling a setter.
1042             if (m_type == Setter)
1043                 numberOfParameters++;
1044
1045             // Get the accessor; if there ain't one then the result is jsUndefined().
1046             if (m_type == Setter) {
1047                 jit.loadPtr(
1048                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
1049                     loadedValueGPR);
1050             } else {
1051                 jit.loadPtr(
1052                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
1053                     loadedValueGPR);
1054             }
1055
1056             CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
1057                 CCallHelpers::Zero, loadedValueGPR);
1058
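            // Build the callee's frame just below the current stack pointer. The CallerFrameAndPC
            // slots are not written here; they are filled in by the call instruction and the
            // callee's prologue.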
1059             unsigned numberOfRegsForCall = CallFrame::headerSizeInRegisters + numberOfParameters;
1060
1061             unsigned numberOfBytesForCall =
1062                 numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
1063
1064             unsigned alignedNumberOfBytesForCall =
1065                 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
1066
1067             jit.subPtr(
1068                 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
1069                 CCallHelpers::stackPointerRegister);
1070
1071             CCallHelpers::Address calleeFrame = CCallHelpers::Address(
1072                 CCallHelpers::stackPointerRegister,
1073                 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
1074
1075             jit.store32(
1076                 CCallHelpers::TrustedImm32(numberOfParameters),
1077                 calleeFrame.withOffset(CallFrameSlot::argumentCount * sizeof(Register) + PayloadOffset));
1078
1079             jit.storeCell(
1080                 loadedValueGPR, calleeFrame.withOffset(CallFrameSlot::callee * sizeof(Register)));
1081
1082             jit.storeCell(
1083                 baseGPR,
1084                 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
1085
1086             if (m_type == Setter) {
1087                 jit.storeValue(
1088                     valueRegs,
1089                     calleeFrame.withOffset(
1090                         virtualRegisterForArgument(1).offset() * sizeof(Register)));
1091             }
1092
1093             CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
1094                 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
1095                 CCallHelpers::TrustedImmPtr(0));
1096
1097             fastPathCall = jit.nearCall();
1098             if (m_type == Getter)
1099                 jit.setupResults(valueRegs);
1100             done.append(jit.jump());
1101
1102             slowCase.link(&jit);
1103             jit.move(loadedValueGPR, GPRInfo::regT0);
1104 #if USE(JSVALUE32_64)
1105             // We *always* know that the getter/setter, if non-null, is a cell.
1106             jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
1107 #endif
1108             jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
1109             slowPathCall = jit.nearCall();
1110             if (m_type == Getter)
1111                 jit.setupResults(valueRegs);
1112             done.append(jit.jump());
1113
1114             returnUndefined.link(&jit);
1115             if (m_type == Getter)
1116                 jit.moveTrustedValue(jsUndefined(), valueRegs);
1117
1118             done.link(&jit);
1119
1120             jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation),
1121                 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1122             bool callHasReturnValue = isGetter();
1123             restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
1124
1125             jit.addLinkTask(
1126                 [=, &vm] (LinkBuffer& linkBuffer) {
1127                     m_rareData->callLinkInfo->setCallLocations(
1128                         CodeLocationLabel(linkBuffer.locationOfNearCall(slowPathCall)),
1129                         CodeLocationLabel(linkBuffer.locationOf(addressOfLinkFunctionCheck)),
1130                         linkBuffer.locationOfNearCall(fastPathCall));
1131
1132                     linkBuffer.link(
1133                         slowPathCall,
1134                         CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
1135                 });
1136         } else {
1137             ASSERT(m_type == CustomValueGetter || m_type == CustomAccessorGetter || m_type == CustomValueSetter || m_type == CustomAccessorSetter);
1138
1139             // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
1140             // hard to track if someone did spillage or not, so we just assume that we always need
1141             // to make some space here.
1142             jit.makeSpaceOnStackForCCall();
1143
1144             // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
1145             // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
1146             // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (the receiver).
1147             // FIXME: Remove this difference between custom values and custom accessors.
1148             // https://bugs.webkit.org/show_bug.cgi?id=158014
1149             GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
1150 #if USE(JSVALUE64)
1151             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1152                 jit.setupArgumentsWithExecState(
1153                     baseForCustomValue,
1154                     CCallHelpers::TrustedImmPtr(ident.impl()));
1155             } else
1156                 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
1157 #else
1158             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1159                 jit.setupArgumentsWithExecState(
1160                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1161                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1162                     CCallHelpers::TrustedImmPtr(ident.impl()));
1163             } else {
1164                 jit.setupArgumentsWithExecState(
1165                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1166                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1167                     valueRegs.payloadGPR(), valueRegs.tagGPR());
1168             }
1169 #endif
1170             jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
1171
1172             operationCall = jit.call();
1173             jit.addLinkTask(
1174                 [=] (LinkBuffer& linkBuffer) {
1175                     linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
1176                 });
1177
1178             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
1179                 jit.setupResults(valueRegs);
1180             jit.reclaimSpaceOnStackForCCall();
1181
1182             CCallHelpers::Jump noException =
1183                 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1184
1185             state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
1186             state.emitExplicitExceptionHandler();
1187         
1188             noException.link(&jit);
1189             bool callHasReturnValue = isGetter();
1190             restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
1191         }
1192         state.succeed();
1193         return;
1194     }
1195
1196     case Replace: {
1197         if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
1198             if (verbose)
1199                 dataLog("Have type: ", type->descriptor(), "\n");
1200             state.failAndRepatch.append(
1201                 jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
1202         } else if (verbose)
1203             dataLog("Don't have type.\n");
1204         
1205         if (isInlineOffset(m_offset)) {
1206             jit.storeValue(
1207                 valueRegs,
1208                 CCallHelpers::Address(
1209                     baseGPR,
1210                     JSObject::offsetOfInlineStorage() +
1211                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1212         } else {
1213             jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1214             jit.storeValue(
1215                 valueRegs,
1216                 CCallHelpers::Address(
1217                     scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1218         }
1219         state.succeed();
1220         return;
1221     }
1222
1223     case Transition: {
1224         // AccessCase::transition() should have returned null if this wasn't true.
1225         RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1226
1227         if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1228             if (verbose)
1229                 dataLog("Have type: ", type->descriptor(), "\n");
1230             state.failAndRepatch.append(
1231                 jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
1232         } else if (verbose)
1233             dataLog("Don't have type.\n");
1234         
1235         // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
1236         // exactly when this would make calls.
1237         bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
1238         bool reallocating = allocating && structure()->outOfLineCapacity();
1239         bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
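        // allocating: the new structure needs a larger out-of-line backing store. reallocating:
        // there was already out-of-line storage to copy from. allocatingInline: we can do the
        // allocation in generated code; if the object could have an indexing header, we have to
        // call out to a C++ operation instead, since the whole butterfly (including any indexing
        // storage) would have to be moved.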
1240
1241         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1242         allocator.lock(baseGPR);
1243 #if USE(JSVALUE32_64)
1244         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1245 #endif
1246         allocator.lock(valueRegs);
1247         allocator.lock(scratchGPR);
1248
1249         GPRReg scratchGPR2 = InvalidGPRReg;
1250         GPRReg scratchGPR3 = InvalidGPRReg;
1251         if (allocatingInline) {
1252             scratchGPR2 = allocator.allocateScratchGPR();
1253             scratchGPR3 = allocator.allocateScratchGPR();
1254         }
1255
1256         ScratchRegisterAllocator::PreservedState preservedState =
1257             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1258         
1259         CCallHelpers::JumpList slowPath;
1260
1261         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1262
1263         if (allocating) {
1264             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1265             
1266             if (allocatingInline) {
1267                 MarkedAllocator* allocator = vm.heap.allocatorForAuxiliaryData(newSize);
1268                 
1269                 if (!allocator) {
1270                     // Yuck, this case would suck!
1271                     slowPath.append(jit.jump());
1272                 }
1273                 
1274                 jit.move(CCallHelpers::TrustedImmPtr(allocator), scratchGPR2);
1275                 jit.emitAllocate(scratchGPR, allocator, scratchGPR2, scratchGPR3, slowPath);
1276                 jit.addPtr(CCallHelpers::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR);
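                // The butterfly pointer points just past the out-of-line property storage and the
                // indexing header; properties live at negative offsets from it, hence the
                // adjustment by newSize + sizeof(IndexingHeader) above.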
1277                 
1278                 size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1279                 ASSERT(newSize > oldSize);
1280                 
1281                 if (reallocating) {
1282                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1283                     // already had out-of-line property storage).
1284                     
1285                     jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1286                     
1287                     // We have scratchGPR = new storage, scratchGPR3 = old storage,
1288                     // scratchGPR2 = available
1289                     for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1290                         jit.loadPtr(
1291                             CCallHelpers::Address(
1292                                 scratchGPR3,
1293                                 -static_cast<ptrdiff_t>(
1294                                     offset + sizeof(JSValue) + sizeof(void*))),
1295                             scratchGPR2);
1296                         jit.storePtr(
1297                             scratchGPR2,
1298                             CCallHelpers::Address(
1299                                 scratchGPR,
1300                                 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1301                     }
1302                 }
1303                 
1304                 for (size_t offset = oldSize; offset < newSize; offset += sizeof(void*))
1305                     jit.storePtr(CCallHelpers::TrustedImmPtr(0), CCallHelpers::Address(scratchGPR, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1306             } else {
1307                 // Handle the case where we are allocating out-of-line using an operation.
1308                 RegisterSet extraRegistersToPreserve;
1309                 extraRegistersToPreserve.set(baseGPR);
1310                 extraRegistersToPreserve.set(valueRegs);
1311                 AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1312                 
1313                 jit.store32(
1314                     CCallHelpers::TrustedImm32(
1315                         state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1316                     CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
1317                 
1318                 jit.makeSpaceOnStackForCCall();
1319                 
1320                 if (!reallocating) {
1321                     jit.setupArgumentsWithExecState(baseGPR);
1322                     
1323                     CCallHelpers::Call operationCall = jit.call();
1324                     jit.addLinkTask(
1325                         [=] (LinkBuffer& linkBuffer) {
1326                             linkBuffer.link(
1327                                 operationCall,
1328                                 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1329                         });
1330                 } else {
1331                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1332                     // already had out-of-line property storage).
1333                     jit.setupArgumentsWithExecState(
1334                         baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1335                     
1336                     CCallHelpers::Call operationCall = jit.call();
1337                     jit.addLinkTask(
1338                         [=] (LinkBuffer& linkBuffer) {
1339                             linkBuffer.link(
1340                                 operationCall,
1341                                 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1342                         });
1343                 }
1344                 
1345                 jit.reclaimSpaceOnStackForCCall();
1346                 jit.move(GPRInfo::returnValueGPR, scratchGPR);
1347                 
1348                 CCallHelpers::Jump noException =
1349                     jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1350                 
1351                 state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
1352                 state.emitExplicitExceptionHandler();
1353                 
1354                 noException.link(&jit);
1355                 state.restoreLiveRegistersFromStackForCall(spillState);
1356             }
1357         }
1358
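             // Store the new value into its slot: either the object's inline storage, or the butterfly we just
             // loaded or allocated above.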
1359         if (isInlineOffset(m_offset)) {
1360             jit.storeValue(
1361                 valueRegs,
1362                 CCallHelpers::Address(
1363                     baseGPR,
1364                     JSObject::offsetOfInlineStorage() +
1365                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1366         } else {
1367             if (!allocating)
1368                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1369             jit.storeValue(
1370                 valueRegs,
1371                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1372         }
1373         
1374         if (allocatingInline) {
1375             // We set the new butterfly and the structure last. Doing it this way ensures that
1376             // whatever we had done up to this point is forgotten if we choose to branch to slow
1377             // path.
1378             jit.nukeStructureAndStoreButterfly(scratchGPR, baseGPR);
1379         }
1380         
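             // Publish the transition by writing the new structure ID last. Until this store, the object still
             // appears to have its old structure (or a nuked one, on the inline allocation path above).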
1381         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1382         jit.store32(
1383             CCallHelpers::TrustedImm32(structureBits),
1384             CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1385
1386         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1387         state.succeed();
1388         
1389         // We will have a slow path if we were allocating without the help of an operation.
1390         if (allocatingInline) {
1391             if (allocator.didReuseRegisters()) {
1392                 slowPath.link(&jit);
1393                 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1394                 state.failAndIgnore.append(jit.jump());
1395             } else
1396                 state.failAndIgnore.append(slowPath);
1397         } else
1398             RELEASE_ASSERT(slowPath.empty());
1399         return;
1400     }
1401
1402     case ArrayLength: {
1403         jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1404         jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
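             // A length that does not fit in an int32 reads as negative here, in which case we give up on this
             // fast path.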
1405         state.failAndIgnore.append(
1406             jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1407         jit.boxInt32(scratchGPR, valueRegs);
1408         state.succeed();
1409         return;
1410     }
1411
1412     case StringLength: {
1413         jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1414         jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
1415         state.succeed();
1416         return;
1417     }
1418         
1419     case IntrinsicGetter: {
1420         RELEASE_ASSERT(isValidOffset(offset()));
1421
1422         // We need to ensure the getter value does not move from under us. Note that GetterSetters
1423         // are immutable, so we just need to watch the property, not any value inside it.
1424         Structure* currStructure;
1425         if (m_conditionSet.isEmpty())
1426             currStructure = structure();
1427         else
1428             currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1429         currStructure->startWatchingPropertyForReplacements(vm, offset());
1430
1431         emitIntrinsicGetter(state);
1432         return;
1433     }
1434
1435     case DirectArgumentsLength:
1436     case ScopedArgumentsLength:
1437     case MegamorphicLoad:
1438         // These need to be handled by generateWithGuard(), since the guard is part of the
1439         // algorithm. We can be sure that nobody will call generate() directly for these since they
1440         // are not guarded by structure checks.
1441         RELEASE_ASSERT_NOT_REACHED();
1442     }
1443     
1444     RELEASE_ASSERT_NOT_REACHED();
1445 }
1446
1447 void AccessCase::emitDOMJITGetter(AccessGenerationState& state, GPRReg baseForGetGPR)
1448 {
1449     CCallHelpers& jit = *state.jit;
1450     StructureStubInfo& stubInfo = *state.stubInfo;
1451     JSValueRegs valueRegs = state.valueRegs;
1452     GPRReg baseGPR = state.baseGPR;
1453     GPRReg scratchGPR = state.scratchGPR;
1454
1455     // We construct the environment that can execute the DOMJIT::Patchpoint here.
1456     Ref<DOMJIT::CallDOMGetterPatchpoint> patchpoint = m_rareData->domJIT->callDOMGetter();
1457
1458     Vector<GPRReg> gpScratch;
1459     Vector<FPRReg> fpScratch;
1460     Vector<DOMJIT::Value> regs;
1461
1462     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1463     allocator.lock(baseGPR);
1464 #if USE(JSVALUE32_64)
1465     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1466 #endif
1467     allocator.lock(valueRegs);
1468     allocator.lock(scratchGPR);
1469
1470     GPRReg paramBaseGPR = InvalidGPRReg;
1471     GPRReg paramGlobalObjectGPR = InvalidGPRReg;
1472     JSValueRegs paramValueRegs = valueRegs;
1473     GPRReg remainingScratchGPR = InvalidGPRReg;
1474
1475     // valueRegs and baseForGetGPR may be the same. For example, in the Baseline JIT we pass the same regT0 for baseGPR and valueRegs.
1476     // In the FTL there is no guarantee that baseForGetGPR interferes with the result. To keep the implementation simple,
1477     // DOMJIT::Patchpoint assumes that result registers always early-interfere with input registers, in this case
1478     // baseForGetGPR. So we move baseForGetGPR to another register if baseForGetGPR == valueRegs.
1479     if (baseForGetGPR != valueRegs.payloadGPR()) {
1480         paramBaseGPR = baseForGetGPR;
1481         if (!patchpoint->requireGlobalObject)
1482             remainingScratchGPR = scratchGPR;
1483         else
1484             paramGlobalObjectGPR = scratchGPR;
1485     } else {
1486         jit.move(valueRegs.payloadGPR(), scratchGPR);
1487         paramBaseGPR = scratchGPR;
1488         if (patchpoint->requireGlobalObject)
1489             paramGlobalObjectGPR = allocator.allocateScratchGPR();
1490     }
1491
1492     JSGlobalObject* globalObjectForDOMJIT = structure()->globalObject();
1493
1494     regs.append(paramValueRegs);
1495     regs.append(paramBaseGPR);
1496     if (patchpoint->requireGlobalObject) {
1497         ASSERT(paramGlobalObjectGPR != InvalidGPRReg);
1498         regs.append(DOMJIT::Value(paramGlobalObjectGPR, globalObjectForDOMJIT));
1499     }
1500
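         // Hand the patchpoint the scratch registers it asked for, reusing the leftover scratchGPR when possible.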
1501     if (patchpoint->numGPScratchRegisters) {
1502         unsigned i = 0;
1503         if (remainingScratchGPR != InvalidGPRReg) {
1504             gpScratch.append(remainingScratchGPR);
1505             ++i;
1506         }
1507         for (; i < patchpoint->numGPScratchRegisters; ++i)
1508             gpScratch.append(allocator.allocateScratchGPR());
1509     }
1510
1511     for (unsigned i = 0; i < patchpoint->numFPScratchRegisters; ++i)
1512         fpScratch.append(allocator.allocateScratchFPR());
1513
1514     // Spill the reused registers to the stack. After this point, the allocated scratch registers are safe to use.
1515     ScratchRegisterAllocator::PreservedState preservedState =
1516         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1517
1518     if (verbose) {
1519         dataLog("baseGPR = ", baseGPR, "\n");
1520         dataLog("valueRegs = ", valueRegs, "\n");
1521         dataLog("scratchGPR = ", scratchGPR, "\n");
1522         dataLog("paramBaseGPR = ", paramBaseGPR, "\n");
1523         if (paramGlobalObjectGPR != InvalidGPRReg)
1524             dataLog("paramGlobalObjectGPR = ", paramGlobalObjectGPR, "\n");
1525         dataLog("paramValueRegs = ", paramValueRegs, "\n");
1526         for (unsigned i = 0; i < patchpoint->numGPScratchRegisters; ++i)
1527             dataLog("gpScratch[", i, "] = ", gpScratch[i], "\n");
1528     }
1529
1530     if (patchpoint->requireGlobalObject)
1531         jit.move(CCallHelpers::TrustedImmPtr(globalObjectForDOMJIT), paramGlobalObjectGPR);
1532
1533     // We only spill the registers used by the DOMJIT::Patchpoint here. Any register we do not spill explicitly
1534     // must be in the used-register set passed by the caller (Baseline, DFG, or FTL) if it needs to be preserved.
1535     // Some registers may be locked yet absent from the used-register set. For example, the caller could alias baseGPR
1536     // to valueRegs and omit it from the used registers, since it is going to be overwritten anyway.
1537     RegisterSet registersToSpillForCCall;
1538     for (auto& value : regs) {
1539         DOMJIT::Reg reg = value.reg();
1540         if (reg.isJSValueRegs())
1541             registersToSpillForCCall.set(reg.jsValueRegs());
1542         else if (reg.isGPR())
1543             registersToSpillForCCall.set(reg.gpr());
1544         else
1545             registersToSpillForCCall.set(reg.fpr());
1546     }
1547     for (GPRReg reg : gpScratch)
1548         registersToSpillForCCall.set(reg);
1549     for (FPRReg reg : fpScratch)
1550         registersToSpillForCCall.set(reg);
1551     registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());
1552
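         // Run the patchpoint generator with the register environment built above. Any slow path calls that the
         // generator requested are emitted after the fast path's state.succeed() below.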
1553     DOMJITAccessCasePatchpointParams params(WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
1554     patchpoint->generator()->run(jit, params);
1555     allocator.restoreReusedRegistersByPopping(jit, preservedState);
1556     state.succeed();
1557
1558     CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
1559     if (!exceptions.empty()) {
1560         exceptions.link(&jit);
1561         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1562         state.emitExplicitExceptionHandler();
1563     }
1564 }
1565
1566 PolymorphicAccess::PolymorphicAccess() { }
1567 PolymorphicAccess::~PolymorphicAccess() { }
1568
1569 AccessGenerationResult PolymorphicAccess::addCases(
1570     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1571     Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
1572 {
1573     SuperSamplerScope superSamplerScope(false);
1574     
1575     // This method will add the originalCasesToAdd to the list one at a time while preserving the
1576     // invariants:
1577     // - If a newly added case canReplace() any existing case, then the existing case is removed before
1578     //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
1579     //   can be removed via the canReplace() rule.
1580     // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1581     //   cascade through the cases in reverse order, you will get the most recent cases first.
1582     // - If this method fails (returns null, doesn't add the cases), then both the previous case list
1583     //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
1584     //   add more things after failure.
1585     
1586     // First ensure that the originalCasesToAdd doesn't contain duplicates.
1587     Vector<std::unique_ptr<AccessCase>> casesToAdd;
1588     for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1589         std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1590
1591         // Add it only if it is not replaced by a subsequent case in the list.
1592         bool found = false;
1593         for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1594             if (originalCasesToAdd[j]->canReplace(*myCase)) {
1595                 found = true;
1596                 break;
1597             }
1598         }
1599
1600         if (found)
1601             continue;
1602         
1603         casesToAdd.append(WTFMove(myCase));
1604     }
1605
1606     if (verbose)
1607         dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1608
1609     // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
1610     // new stub that will be identical to the old one. Returning null should tell the caller to just
1611     // keep doing what they were doing before.
1612     if (casesToAdd.isEmpty())
1613         return AccessGenerationResult::MadeNoChanges;
1614
1615     // Now add things to the new list. Note that at this point, we will still have old cases that
1616     // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
1617     for (auto& caseToAdd : casesToAdd) {
1618         commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
1619         m_list.append(WTFMove(caseToAdd));
1620     }
1621     
1622     if (verbose)
1623         dataLog("After addCases: m_list: ", listDump(m_list), "\n");
1624
1625     return AccessGenerationResult::Buffered;
1626 }
1627
1628 AccessGenerationResult PolymorphicAccess::addCase(
1629     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1630     std::unique_ptr<AccessCase> newAccess)
1631 {
1632     Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
1633     newAccesses.append(WTFMove(newAccess));
1634     return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1635 }
1636
1637 bool PolymorphicAccess::visitWeak(VM& vm) const
1638 {
1639     for (unsigned i = 0; i < size(); ++i) {
1640         if (!at(i).visitWeak(vm))
1641             return false;
1642     }
1643     if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1644         for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1645             if (!Heap::isMarked(weakReference.get()))
1646                 return false;
1647         }
1648     }
1649     return true;
1650 }
1651
1652 bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
1653 {
1654     bool result = true;
1655     for (unsigned i = 0; i < size(); ++i)
1656         result &= at(i).propagateTransitions(visitor);
1657     return result;
1658 }
1659
1660 void PolymorphicAccess::dump(PrintStream& out) const
1661 {
1662     out.print(RawPointer(this), ":[");
1663     CommaPrinter comma;
1664     for (auto& entry : m_list)
1665         out.print(comma, *entry);
1666     out.print("]");
1667 }
1668
1669 void PolymorphicAccess::commit(
1670     VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
1671     StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
1672 {
1673     // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
1674     // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
1675     // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
1676     // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
1677     // Those common kinds of JSC object accesses don't hit this case.
1678     
1679     for (WatchpointSet* set : accessCase.commit(vm, ident)) {
1680         Watchpoint* watchpoint =
1681             WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
1682                 watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
1683         
1684         set->add(watchpoint);
1685     }
1686 }
1687
1688 AccessGenerationResult PolymorphicAccess::regenerate(
1689     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
1690 {
1691     SuperSamplerScope superSamplerScope(false);
1692     
1693     if (verbose)
1694         dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
1695     
1696     AccessGenerationState state;
1697
1698     state.access = this;
1699     state.stubInfo = &stubInfo;
1700     state.ident = &ident;
1701     
1702     state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1703     state.valueRegs = stubInfo.valueRegs();
1704
1705     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1706     state.allocator = &allocator;
1707     allocator.lock(state.baseGPR);
1708     allocator.lock(state.valueRegs);
1709 #if USE(JSVALUE32_64)
1710     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1711 #endif
1712
1713     state.scratchGPR = allocator.allocateScratchGPR();
1714     
1715     CCallHelpers jit(&vm, codeBlock);
1716     state.jit = &jit;
1717
1718     state.preservedReusedRegisterState =
1719         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
1720
1721     // Regenerating is our opportunity to figure out what our list of cases should look like. We
1722     // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
1723     // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
1724     // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
1725     // from the code of the current stub (aka previous).
1726     ListType cases;
1727     unsigned srcIndex = 0;
1728     unsigned dstIndex = 0;
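         // srcIndex walks the old list while dstIndex compacts the already-generated cases back into m_list;
         // 'cases' accumulates the list we will actually regenerate from.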
1729     while (srcIndex < m_list.size()) {
1730         std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);
1731         
1732         // If the case had been generated, then we have to keep the original in m_list in case we
1733         // fail to regenerate. That case may have data structures that are used by the code that it
1734         // had generated. If the case had not been generated, then we want to remove it from m_list.
1735         bool isGenerated = someCase->state() == AccessCase::Generated;
1736         
1737         [&] () {
1738             if (!someCase->couldStillSucceed())
1739                 return;
1740
1741             // Figure out if this is replaced by any later case.
1742             for (unsigned j = srcIndex; j < m_list.size(); ++j) {
1743                 if (m_list[j]->canReplace(*someCase))
1744                     return;
1745             }
1746             
1747             if (isGenerated)
1748                 cases.append(someCase->clone());
1749             else
1750                 cases.append(WTFMove(someCase));
1751         }();
1752         
1753         if (isGenerated)
1754             m_list[dstIndex++] = WTFMove(someCase);
1755     }
1756     m_list.resize(dstIndex);
1757     
1758     if (verbose)
1759         dataLog("In regenerate: cases: ", listDump(cases), "\n");
1760     
1761     // Now that we've removed obviously unnecessary cases, we can check if the megamorphic load
1762     // optimization is applicable. Note that we basically tune megamorphicLoadCost according to code
1763     // size. It would be faster to just allow more repatching with many load cases, and avoid the
1764     // megamorphicLoad optimization, if we had infinite executable memory.
1765     if (cases.size() >= Options::maxAccessVariantListSize()) {
1766         unsigned numSelfLoads = 0;
1767         for (auto& newCase : cases) {
1768             if (newCase->canBeReplacedByMegamorphicLoad())
1769                 numSelfLoads++;
1770         }
1771         
1772         if (numSelfLoads >= Options::megamorphicLoadCost()) {
1773             if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1774                 cases.removeAllMatching(
1775                     [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1776                         return newCase->canBeReplacedByMegamorphicLoad();
1777                     });
1778                 
1779                 cases.append(WTFMove(mega));
1780             }
1781         }
1782     }
1783     
1784     if (verbose)
1785         dataLog("Optimized cases: ", listDump(cases), "\n");
1786     
1787     // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
1788     // won't change that set anymore.
1789     
1790     bool allGuardedByStructureCheck = true;
1791     bool hasJSGetterSetterCall = false;
1792     for (auto& newCase : cases) {
1793         commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
1794         allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
1795         if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
1796             hasJSGetterSetterCall = true;
1797     }
1798
1799     if (cases.isEmpty()) {
1800         // This is super unlikely, but we make it legal anyway.
1801         state.failAndRepatch.append(jit.jump());
1802     } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1803         // If any case in the list is not guarded by a structure check (e.g. a proxy), we cannot use a binary
1804         // switch over the structure ID and must resort to a cascade. A cascade also happens to be optimal when
1805         // there is just one case.
1806         CCallHelpers::JumpList fallThrough;
1807
1808         // Cascade through the list, preferring newer entries.
1809         for (unsigned i = cases.size(); i--;) {
1810             fallThrough.link(&jit);
1811             fallThrough.clear();
1812             cases[i]->generateWithGuard(state, fallThrough);
1813         }
1814         state.failAndRepatch.append(fallThrough);
1815     } else {
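             // Every case is guarded by a structure check, so we can dispatch with a binary switch on the
             // structure ID.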
1816         jit.load32(
1817             CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1818             state.scratchGPR);
1819         
1820         Vector<int64_t> caseValues(cases.size());
1821         for (unsigned i = 0; i < cases.size(); ++i)
1822             caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1823         
1824         BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1825         while (binarySwitch.advance(jit))
1826             cases[binarySwitch.caseIndex()]->generate(state);
1827         state.failAndRepatch.append(binarySwitch.fallThrough());
1828     }
1829
1830     if (!state.failAndIgnore.empty()) {
1831         state.failAndIgnore.link(&jit);
1832         
1833         // Make sure that the inline cache optimization code knows that we are taking slow path because
1834         // of something that isn't patchable. The slow path will decrement "countdown" and will only
1835         // patch things if the countdown reaches zero. We increment the slow path count here to ensure
1836         // that the slow path does not try to patch.
1837 #if CPU(X86) || CPU(X86_64)
1838         jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
1839         jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
1840 #else
1841         jit.load8(&stubInfo.countdown, state.scratchGPR);
1842         jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1843         jit.store8(state.scratchGPR, &stubInfo.countdown);
1844 #endif
1845     }
1846
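         // If we reused registers, failAndRepatch must restore them before branching to the slow path;
         // otherwise those jumps can be linked straight to the slow path.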
1847     CCallHelpers::JumpList failure;
1848     if (allocator.didReuseRegisters()) {
1849         state.failAndRepatch.link(&jit);
1850         state.restoreScratch();
1851     } else
1852         failure = state.failAndRepatch;
1853     failure.append(jit.jump());
1854
1855     CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1856     CallSiteIndex callSiteIndexForExceptionHandling;
1857     if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1858         // Emit the exception handler.
1859         // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1860         // Note also that this is not reachable from custom getters/setters, because custom getters/setters have
1861         // their own exception handling logic that doesn't go through genericUnwind.
1862         MacroAssembler::Label makeshiftCatchHandler = jit.label();
1863
1864         int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1865         AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
1866         ASSERT(!spillStateForJSGetterSetter.isEmpty());
1867         stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1868         stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;
1869
1870         jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1871         jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1872
1873         state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
1874         state.restoreScratch();
1875         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1876
1877         HandlerInfo oldHandler = state.originalExceptionHandler();
1878         CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1879         jit.addLinkTask(
1880             [=] (LinkBuffer& linkBuffer) {
1881                 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1882
1883                 HandlerInfo handlerToRegister = oldHandler;
1884                 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1885                 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1886                 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1887                 codeBlock->appendExceptionHandler(handlerToRegister);
1888             });
1889
1890         // We set these to indicate to the stub to remove itself from the CodeBlock's
1891         // exception handler table when it is deallocated.
1892         codeBlockThatOwnsExceptionHandlers = codeBlock;
1893         ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1894         callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1895     }
1896
1897     LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1898     if (linkBuffer.didFailToAllocate()) {
1899         if (verbose)
1900             dataLog("Did fail to allocate.\n");
1901         return AccessGenerationResult::GaveUp;
1902     }
1903
1904     CodeLocationLabel successLabel = stubInfo.doneLocation();
1905         
1906     linkBuffer.link(state.success, successLabel);
1907
1908     linkBuffer.link(failure, stubInfo.slowPathStartLocation());
1909     
1910     if (verbose)
1911         dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1912
1913     MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1914         codeBlock, linkBuffer,
1915         ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1916
1917     bool doesCalls = false;
1918     Vector<JSCell*> cellsToMark;
1919     for (auto& entry : cases)
1920         doesCalls |= entry->doesCalls(&cellsToMark);
1921     
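         // Create the stub routine with the cells it must keep alive and, if we built a makeshift catch handler
         // above, the information it needs to unregister that handler when it is deallocated.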
1922     m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1923     m_watchpoints = WTFMove(state.watchpoints);
1924     if (!state.weakReferences.isEmpty())
1925         m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1926     if (verbose)
1927         dataLog("Returning: ", code.code(), "\n");
1928     
1929     m_list = WTFMove(cases);
1930     
1931     AccessGenerationResult::Kind resultKind;
1932     if (m_list.size() >= Options::maxAccessVariantListSize())
1933         resultKind = AccessGenerationResult::GeneratedFinalCode;
1934     else
1935         resultKind = AccessGenerationResult::GeneratedNewCode;
1936     
1937     return AccessGenerationResult(resultKind, code.code());
1938 }
1939
1940 void PolymorphicAccess::aboutToDie()
1941 {
1942     if (m_stubRoutine)
1943         m_stubRoutine->aboutToDie();
1944 }
1945
1946 } // namespace JSC
1947
1948 namespace WTF {
1949
1950 using namespace JSC;
1951
1952 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1953 {
1954     switch (kind) {
1955     case AccessGenerationResult::MadeNoChanges:
1956         out.print("MadeNoChanges");
1957         return;
1958     case AccessGenerationResult::GaveUp:
1959         out.print("GaveUp");
1960         return;
1961     case AccessGenerationResult::Buffered:
1962         out.print("Buffered");
1963         return;
1964     case AccessGenerationResult::GeneratedNewCode:
1965         out.print("GeneratedNewCode");
1966         return;
1967     case AccessGenerationResult::GeneratedFinalCode:
1968         out.print("GeneratedFinalCode");
1969         return;
1970     }
1971     
1972     RELEASE_ASSERT_NOT_REACHED();
1973 }
1974
1975 void printInternal(PrintStream& out, AccessCase::AccessType type)
1976 {
1977     switch (type) {
1978     case AccessCase::Load:
1979         out.print("Load");
1980         return;
1981     case AccessCase::MegamorphicLoad:
1982         out.print("MegamorphicLoad");
1983         return;
1984     case AccessCase::Transition:
1985         out.print("Transition");
1986         return;
1987     case AccessCase::Replace:
1988         out.print("Replace");
1989         return;
1990     case AccessCase::Miss:
1991         out.print("Miss");
1992         return;
1993     case AccessCase::GetGetter:
1994         out.print("GetGetter");
1995         return;
1996     case AccessCase::Getter:
1997         out.print("Getter");
1998         return;
1999     case AccessCase::Setter:
2000         out.print("Setter");
2001         return;
2002     case AccessCase::CustomValueGetter:
2003         out.print("CustomValueGetter");
2004         return;
2005     case AccessCase::CustomAccessorGetter:
2006         out.print("CustomAccessorGetter");
2007         return;
2008     case AccessCase::CustomValueSetter:
2009         out.print("CustomValueSetter");
2010         return;
2011     case AccessCase::CustomAccessorSetter:
2012         out.print("CustomAccessorSetter");
2013         return;
2014     case AccessCase::IntrinsicGetter:
2015         out.print("IntrinsicGetter");
2016         return;
2017     case AccessCase::InHit:
2018         out.print("InHit");
2019         return;
2020     case AccessCase::InMiss:
2021         out.print("InMiss");
2022         return;
2023     case AccessCase::ArrayLength:
2024         out.print("ArrayLength");
2025         return;
2026     case AccessCase::StringLength:
2027         out.print("StringLength");
2028         return;
2029     case AccessCase::DirectArgumentsLength:
2030         out.print("DirectArgumentsLength");
2031         return;
2032     case AccessCase::ScopedArgumentsLength:
2033         out.print("ScopedArgumentsLength");
2034         return;
2035     }
2036
2037     RELEASE_ASSERT_NOT_REACHED();
2038 }
2039
2040 void printInternal(PrintStream& out, AccessCase::State state)
2041 {
2042     switch (state) {
2043     case AccessCase::Primordial:
2044         out.print("Primordial");
2045         return;
2046     case AccessCase::Committed:
2047         out.print("Committed");
2048         return;
2049     case AccessCase::Generated:
2050         out.print("Generated");
2051         return;
2052     }
2053
2054     RELEASE_ASSERT_NOT_REACHED();
2055 }
2056
2057 } // namespace WTF
2058
2059 #endif // ENABLE(JIT)
2060
2061