Debug JSC test failure: stress/multi-put-by-offset-reallocation-butterfly-cse.js...
Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PolymorphicAccess.h"

#if ENABLE(JIT)

#include "BinarySwitch.h"
#include "CCallHelpers.h"
#include "CodeBlock.h"
#include "DirectArguments.h"
#include "GetterSetter.h"
#include "Heap.h"
#include "JITOperations.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "StructureStubClearingWatchpoint.h"
#include "StructureStubInfo.h"
#include <wtf/CommaPrinter.h>
#include <wtf/ListDump.h>

namespace JSC {

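// When true, dataLog()s the details of each access case and the scratch registers
// chosen while generating it.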
static const bool verbose = false;

void AccessGenerationResult::dump(PrintStream& out) const
{
    out.print(m_kind);
    if (m_code)
        out.print(":", m_code);
}

Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
{
    return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
        watchpoints, jit->codeBlock(), stubInfo, condition);
}

void AccessGenerationState::restoreScratch()
{
    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
}

void AccessGenerationState::succeed()
{
    restoreScratch();
    success.append(jit->jump());
}

void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling(const RegisterSet& extra)
{
    if (!m_calculatedRegistersForCallAndExceptionHandling) {
        m_calculatedRegistersForCallAndExceptionHandling = true;

        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
        if (m_needsToRestoreRegistersIfException)
            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));

        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
        m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
        // Merging after the exclusion guarantees that the caller's extra registers survive it.
        m_liveRegistersForCall.merge(extra);
    }
}

void AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra)
{
    calculateLiveRegistersForCallAndExceptionHandling(extra);

    unsigned extraStackPadding = 0;
    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
    if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
        RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
    m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
{
    RegisterSet dontRestore;
    if (isGetter) {
        // This is the result value. We don't want to overwrite the result with what we stored to the stack.
        // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
        dontRestore.set(valueRegs);
    }
    restoreLiveRegistersFromStackForCall(dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
{
    // Even if we're a getter, we don't want to ignore the result value like we normally do
    // because the getter threw, and therefore, didn't return a value that means anything.
    // Instead, we want to restore that register to what it was upon entering the getter
    // inline cache. The subtlety here is if the base and the result are the same register,
    // and the getter threw, we want OSR exit to see the original base value, not the result
    // of the getter call.
    RegisterSet dontRestore = liveRegistersForCall();
    // As an optimization here, we only need to restore what is live for exception handling.
    // We can construct the dontRestore set to accomplish this goal by having it contain only
    // what is live for call but not live for exception handling. By ignoring things that are
    // only live at the call but not the exception handler, we will only restore things live
    // at the exception handler.
    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
    restoreLiveRegistersFromStackForCall(dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
{
    unsigned extraStackPadding = 0;
    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
}

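// Returns the call site index to use for calls emitted inside the stub: a fresh
// exception handling call site index if live registers must be restored when an
// exception is thrown, and the stub's original call site index otherwise.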
CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
{
    RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);

    if (!m_calculatedCallSiteIndex) {
        m_calculatedCallSiteIndex = true;

        if (m_needsToRestoreRegistersIfException)
            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
        else
            m_callSiteIndex = originalCallSiteIndex();
    }

    return m_callSiteIndex;
}

const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
{
    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
    RELEASE_ASSERT(exceptionHandler);
    return *exceptionHandler;
}

CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }

void AccessGenerationState::emitExplicitExceptionHandler()
{
    restoreScratch();
    jit->copyCalleeSavesToVMCalleeSavesBuffer();
    if (needsToRestoreRegistersIfException()) {
        // The JIT that produced the original exception handling call site
        // expects the OSR exit to be arrived at from genericUnwind. Therefore
        // we must model what genericUnwind does here, i.e., set
        // callFrameForCatch and copy the callee saves.

        jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();

        // We don't need to insert a new exception handler in the table
        // because we're doing a manual exception check here, i.e., we'll
        // never arrive here from genericUnwind().
        HandlerInfo originalHandler = originalExceptionHandler();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
            });
    } else {
        jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
        CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
            });
        jit->jumpToExceptionHandler();
    }
}

AccessCase::AccessCase()
{
}

std::unique_ptr<AccessCase> AccessCase::tryGet(
    VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
    const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;

    if (viaProxy || additionalSet) {
        result->m_rareData = std::make_unique<RareData>();
        result->m_rareData->viaProxy = viaProxy;
        result->m_rareData->additionalSet = additionalSet;
    }

    return result;
}

std::unique_ptr<AccessCase> AccessCase::get(
    VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
    const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
    PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;

    if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
        result->m_rareData = std::make_unique<RareData>();
        result->m_rareData->viaProxy = viaProxy;
        result->m_rareData->additionalSet = additionalSet;
        result->m_rareData->customAccessor.getter = customGetter;
        result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
    }

    return result;
}

std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
{
    UNUSED_PARAM(vm);
    UNUSED_PARAM(owner);

    if (GPRInfo::numberOfRegisters < 9)
        return nullptr;

    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = MegamorphicLoad;

    return result;
}

std::unique_ptr<AccessCase> AccessCase::replace(
    VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = Replace;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, structure);

    return result;
}

std::unique_ptr<AccessCase> AccessCase::transition(
    VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
    const ObjectPropertyConditionSet& conditionSet)
{
    RELEASE_ASSERT(oldStructure == newStructure->previousID());

    // Skip optimizing the case where we need a realloc, if we don't have
    // enough registers to make it happen.
    if (GPRInfo::numberOfRegisters < 6
        && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
        && oldStructure->outOfLineCapacity()) {
        return nullptr;
    }

    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = Transition;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, newStructure);
    result->m_conditionSet = conditionSet;

    return result;
}

std::unique_ptr<AccessCase> AccessCase::setter(
    VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
    const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
    JSObject* customSlotBase)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;
    result->m_offset = offset;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;
    result->m_rareData = std::make_unique<RareData>();
    result->m_rareData->customAccessor.setter = customSetter;
    result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);

    return result;
}

std::unique_ptr<AccessCase> AccessCase::in(
    VM& vm, JSCell* owner, AccessType type, Structure* structure,
    const ObjectPropertyConditionSet& conditionSet)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;

    return result;
}

std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = type;

    return result;
}

std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
    VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
    Structure* structure, const ObjectPropertyConditionSet& conditionSet)
{
    std::unique_ptr<AccessCase> result(new AccessCase());

    result->m_type = IntrinsicGetter;
    result->m_structure.set(vm, owner, structure);
    result->m_conditionSet = conditionSet;
    result->m_offset = offset;

    result->m_rareData = std::make_unique<RareData>();
    result->m_rareData->intrinsicFunction.set(vm, owner, getter);

    return result;
}

AccessCase::~AccessCase()
{
}

std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
    VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
{
    switch (stubInfo.cacheType) {
    case CacheType::GetByIdSelf:
        return get(
            vm, owner, Load, stubInfo.u.byIdSelf.offset,
            stubInfo.u.byIdSelf.baseObjectStructure.get());

    case CacheType::PutByIdReplace:
        return replace(
            vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);

    default:
        return nullptr;
    }
}

std::unique_ptr<AccessCase> AccessCase::clone() const
{
    std::unique_ptr<AccessCase> result(new AccessCase());
    result->m_type = m_type;
    result->m_offset = m_offset;
    result->m_structure = m_structure;
    result->m_conditionSet = m_conditionSet;
    if (RareData* rareData = m_rareData.get()) {
        result->m_rareData = std::make_unique<RareData>();
        result->m_rareData->viaProxy = rareData->viaProxy;
        result->m_rareData->additionalSet = rareData->additionalSet;
        // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
        result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
        result->m_rareData->customSlotBase = rareData->customSlotBase;
        result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
    }
    return result;
}

bool AccessCase::guardedByStructureCheck() const
{
    if (viaProxy())
        return false;

    switch (m_type) {
    case MegamorphicLoad:
    case ArrayLength:
    case StringLength:
    case DirectArgumentsLength:
    case ScopedArgumentsLength:
        return false;
    default:
        return true;
    }
}

JSObject* AccessCase::alternateBase() const
{
    if (customSlotBase())
        return customSlotBase();
    return conditionSet().slotBaseCondition().object();
}

bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
{
    switch (type()) {
    case Getter:
    case Setter:
    case CustomValueGetter:
    case CustomAccessorGetter:
    case CustomValueSetter:
    case CustomAccessorSetter:
        return true;
    case Transition:
        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
            && structure()->couldHaveIndexingHeader()) {
            if (cellsToMark)
                cellsToMark->append(newStructure());
            return true;
        }
        return false;
    default:
        return false;
    }
}

bool AccessCase::couldStillSucceed() const
{
    return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
}

bool AccessCase::canBeReplacedByMegamorphicLoad() const
{
    return type() == Load
        && !viaProxy()
        && conditionSet().isEmpty()
        && !additionalSet()
        && !customSlotBase();
}

bool AccessCase::canReplace(const AccessCase& other) const
{
    // We could do a lot better here, but for now we just do something obvious.

    if (type() == MegamorphicLoad && other.canBeReplacedByMegamorphicLoad())
        return true;

    if (!guardedByStructureCheck() || !other.guardedByStructureCheck()) {
        // FIXME: Implement this!
        return false;
    }

    return structure() == other.structure();
}

void AccessCase::dump(PrintStream& out) const
{
    out.print(m_type, ":(");

    CommaPrinter comma;

    if (m_type == Transition)
        out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
    else if (m_structure)
        out.print(comma, "structure = ", pointerDump(m_structure.get()));

    if (isValidOffset(m_offset))
        out.print(comma, "offset = ", m_offset);
    if (!m_conditionSet.isEmpty())
        out.print(comma, "conditions = ", m_conditionSet);

    if (RareData* rareData = m_rareData.get()) {
        if (rareData->viaProxy)
            out.print(comma, "viaProxy = ", rareData->viaProxy);
        if (rareData->additionalSet)
            out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
        if (rareData->callLinkInfo)
            out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
        if (rareData->customAccessor.opaque)
            out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
        if (rareData->customSlotBase)
            out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
    }

    out.print(")");
}

bool AccessCase::visitWeak(VM& vm) const
{
    if (m_structure && !Heap::isMarked(m_structure.get()))
        return false;
    if (!m_conditionSet.areStillLive())
        return false;
    if (m_rareData) {
        if (m_rareData->callLinkInfo)
            m_rareData->callLinkInfo->visitWeak(vm);
        if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
            return false;
        if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
            return false;
    }
    return true;
}

void AccessCase::generateWithGuard(
    AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
{
    CCallHelpers& jit = *state.jit;
    VM& vm = *jit.vm();
    const Identifier& ident = *state.ident;
    StructureStubInfo& stubInfo = *state.stubInfo;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

    UNUSED_PARAM(vm);

    switch (m_type) {
    case ArrayLength: {
        ASSERT(!viaProxy());
        jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
        fallThrough.append(
            jit.branchTest32(
                CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
        fallThrough.append(
            jit.branchTest32(
                CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
        break;
    }

    case StringLength: {
        ASSERT(!viaProxy());
        fallThrough.append(
            jit.branch8(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                CCallHelpers::TrustedImm32(StringType)));
        break;
    }

    case DirectArgumentsLength: {
        ASSERT(!viaProxy());
        fallThrough.append(
            jit.branch8(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                CCallHelpers::TrustedImm32(DirectArgumentsType)));

        fallThrough.append(
            jit.branchTestPtr(
                CCallHelpers::NonZero,
                CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
        jit.load32(
            CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
            valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    case ScopedArgumentsLength: {
        ASSERT(!viaProxy());
        fallThrough.append(
            jit.branch8(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                CCallHelpers::TrustedImm32(ScopedArgumentsType)));

        fallThrough.append(
            jit.branchTest8(
                CCallHelpers::NonZero,
                CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
        jit.load32(
            CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
            valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    case MegamorphicLoad: {
        UniquedStringImpl* key = ident.impl();
        unsigned hash = IdentifierRepHash::hash(key);

        ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
        allocator.lock(baseGPR);
#if USE(JSVALUE32_64)
        allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif
        allocator.lock(valueRegs);
        allocator.lock(scratchGPR);

        GPRReg intermediateGPR = scratchGPR;
        GPRReg maskGPR = allocator.allocateScratchGPR();
        GPRReg maskedHashGPR = allocator.allocateScratchGPR();
        GPRReg indexGPR = allocator.allocateScratchGPR();
        GPRReg offsetGPR = allocator.allocateScratchGPR();

        if (verbose) {
            dataLog("baseGPR = ", baseGPR, "\n");
            dataLog("valueRegs = ", valueRegs, "\n");
            dataLog("scratchGPR = ", scratchGPR, "\n");
            dataLog("intermediateGPR = ", intermediateGPR, "\n");
            dataLog("maskGPR = ", maskGPR, "\n");
            dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
            dataLog("indexGPR = ", indexGPR, "\n");
            dataLog("offsetGPR = ", offsetGPR, "\n");
        }

        ScratchRegisterAllocator::PreservedState preservedState =
            allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);

        CCallHelpers::JumpList myFailAndIgnore;
        CCallHelpers::JumpList myFallThrough;

        jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
        jit.loadPtr(
            CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
            intermediateGPR);

        myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));

        jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
        jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
        jit.load32(
            CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
            intermediateGPR);

        jit.move(maskGPR, maskedHashGPR);
        jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
        jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
        jit.addPtr(indexGPR, intermediateGPR);

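        // What follows is an inline probe of the structure's PropertyTable index:
        // load the entry at maskedHash, fall through to the generic IC path on an
        // empty entry, otherwise compare keys and, on a collision, advance the
        // probe to (maskedHash + 1) & mask.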
        CCallHelpers::Label loop = jit.label();

        jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);

        myFallThrough.append(
            jit.branch32(
                CCallHelpers::Equal,
                offsetGPR,
                CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));

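        // Index entries are biased by one so that PropertyTable::EmptyEntryIndex can
        // mean "empty": undo the bias, then scale by sizeof(PropertyMapEntry) to get
        // a pointer to the entry itself.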
        jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
        jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
        jit.addPtr(intermediateGPR, offsetGPR);

        CCallHelpers::Jump collision = jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
            CCallHelpers::TrustedImmPtr(key));

        // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
        // Check them and then attempt the load.

        myFallThrough.append(
            jit.branchTest32(
                CCallHelpers::NonZero,
                CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
                CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));

        jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);

        jit.loadProperty(baseGPR, offsetGPR, valueRegs);

        allocator.restoreReusedRegistersByPopping(jit, preservedState);
        state.succeed();

        collision.link(&jit);

        jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);

        // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
        // around isn't super common so we could, for example, recompute the mask from the difference between
        // the table and index. But before we do that we should probably make it easier to multiply and
        // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
        // to have a power-of-2 size.
        jit.and32(maskGPR, maskedHashGPR);
        jit.jump().linkTo(loop, &jit);

        if (allocator.didReuseRegisters()) {
            myFailAndIgnore.link(&jit);
            allocator.restoreReusedRegistersByPopping(jit, preservedState);
            state.failAndIgnore.append(jit.jump());

            myFallThrough.link(&jit);
            allocator.restoreReusedRegistersByPopping(jit, preservedState);
            fallThrough.append(jit.jump());
        } else {
            state.failAndIgnore.append(myFailAndIgnore);
            fallThrough.append(myFallThrough);
        }
        return;
    }

    default: {
        if (viaProxy()) {
            fallThrough.append(
                jit.branch8(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                    CCallHelpers::TrustedImm32(PureForwardingProxyType)));

            jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);

            fallThrough.append(
                jit.branchStructure(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
                    structure()));
        } else {
            fallThrough.append(
                jit.branchStructure(
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
                    structure()));
        }
        break;
    } };

    generate(state);
}

// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiled for ARM EABI, it must be aligned to an even-numbered register (r0, r2) or [sp].
// To prevent the assembler from using the wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
#define EABI_32BIT_DUMMY_ARG      CCallHelpers::TrustedImm32(0),
#else
#define EABI_32BIT_DUMMY_ARG
#endif

void AccessCase::generate(AccessGenerationState& state)
{
    if (verbose)
        dataLog("Generating code for: ", *this, "\n");

    CCallHelpers& jit = *state.jit;
    VM& vm = *jit.vm();
    CodeBlock* codeBlock = jit.codeBlock();
    StructureStubInfo& stubInfo = *state.stubInfo;
    const Identifier& ident = *state.ident;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

    ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());

    if ((structure() && structure()->needImpurePropertyWatchpoint())
        || m_conditionSet.needImpurePropertyWatchpoint())
        vm.registerWatchpointForImpureProperty(ident, state.addWatchpoint());

    if (additionalSet())
        additionalSet()->add(state.addWatchpoint());

    for (const ObjectPropertyCondition& condition : m_conditionSet) {
        Structure* structure = condition.object()->structure();

        if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
            structure->addTransitionWatchpoint(state.addWatchpoint(condition));
            continue;
        }

        if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
            dataLog("This condition is no longer met: ", condition, "\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        // We will emit code that has a weak reference that isn't otherwise listed anywhere.
        state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));

        jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
        state.failAndRepatch.append(
            jit.branchStructure(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
                structure));
    }

    switch (m_type) {
    case InHit:
    case InMiss:
        jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
        state.succeed();
        return;

    case Miss:
        jit.moveTrustedValue(jsUndefined(), valueRegs);
        state.succeed();
        return;

    case Load:
    case GetGetter:
    case Getter:
    case Setter:
    case CustomValueGetter:
    case CustomAccessorGetter:
    case CustomValueSetter:
    case CustomAccessorSetter: {
        if (isValidOffset(m_offset)) {
            Structure* currStructure;
            if (m_conditionSet.isEmpty())
                currStructure = structure();
            else
                currStructure = m_conditionSet.slotBaseCondition().object()->structure();
            currStructure->startWatchingPropertyForReplacements(vm, offset());
        }

        GPRReg baseForGetGPR;
        if (viaProxy()) {
            baseForGetGPR = valueRegs.payloadGPR();
            jit.loadPtr(
                CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
                baseForGetGPR);
        } else
            baseForGetGPR = baseGPR;

        GPRReg baseForAccessGPR;
        if (!m_conditionSet.isEmpty()) {
            jit.move(
                CCallHelpers::TrustedImmPtr(alternateBase()),
                scratchGPR);
            baseForAccessGPR = scratchGPR;
        } else
            baseForAccessGPR = baseForGetGPR;

        GPRReg loadedValueGPR = InvalidGPRReg;
        if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
            if (m_type == Load || m_type == GetGetter)
                loadedValueGPR = valueRegs.payloadGPR();
            else
                loadedValueGPR = scratchGPR;

            GPRReg storageGPR;
            if (isInlineOffset(m_offset))
                storageGPR = baseForAccessGPR;
            else {
                jit.loadPtr(
                    CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
                    loadedValueGPR);
                storageGPR = loadedValueGPR;
            }

#if USE(JSVALUE64)
            jit.load64(
                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
#else
            if (m_type == Load || m_type == GetGetter) {
                jit.load32(
                    CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
                    valueRegs.tagGPR());
            }
            jit.load32(
                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
                loadedValueGPR);
#endif
        }

        if (m_type == Load || m_type == GetGetter) {
            state.succeed();
            return;
        }

        // Stuff for custom getters/setters.
        CCallHelpers::Call operationCall;

        // Stuff for JS getters/setters.
        CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
        CCallHelpers::Call fastPathCall;
        CCallHelpers::Call slowPathCall;

        CCallHelpers::Jump success;
        CCallHelpers::Jump fail;

        // This also does the necessary calculations of whether or not we're an
        // exception handling call site.
        state.preserveLiveRegistersToStackForCall();

        jit.store32(
            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
            CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));

        if (m_type == Getter || m_type == Setter) {
            // Create a JS call using a JS call inline cache. Assume that:
            //
            // - SP is aligned and represents the extent of the calling compiler's stack usage.
            //
            // - FP is set correctly (i.e. it points to the caller's call frame header).
            //
            // - SP - FP is an aligned difference.
            //
            // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
            //   code.
            //
            // Therefore, we temporarily grow the stack for the purpose of the call and then
            // shrink it after.

            RELEASE_ASSERT(!m_rareData->callLinkInfo);
            m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();

            // FIXME: If we generated a polymorphic call stub that jumped back to the getter
            // stub, which then jumped back to the main code, then we'd have a reachability
            // situation that the GC doesn't know about. The GC would ensure that the polymorphic
            // call stub stayed alive, and it would ensure that the main code stayed alive, but
            // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
            // be GC objects, and then we'd be able to say that the polymorphic call stub has a
            // reference to the getter stub.
            // https://bugs.webkit.org/show_bug.cgi?id=148914
            m_rareData->callLinkInfo->disallowStubs();

            m_rareData->callLinkInfo->setUpCall(
                CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);

            CCallHelpers::JumpList done;

            // There is a "this" argument.
            unsigned numberOfParameters = 1;
            // ... and a value argument if we're calling a setter.
            if (m_type == Setter)
                numberOfParameters++;

            // Get the accessor; if there ain't one then the result is jsUndefined().
            if (m_type == Setter) {
                jit.loadPtr(
                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
                    loadedValueGPR);
            } else {
                jit.loadPtr(
                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
                    loadedValueGPR);
            }

            CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
                CCallHelpers::Zero, loadedValueGPR);

            unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;

            unsigned numberOfBytesForCall =
                numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);

            unsigned alignedNumberOfBytesForCall =
                WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

            jit.subPtr(
                CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
                CCallHelpers::stackPointerRegister);

            CCallHelpers::Address calleeFrame = CCallHelpers::Address(
                CCallHelpers::stackPointerRegister,
                -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));

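            // calleeFrame addresses the frame being assembled just below the stack
            // pointer; the CallerFrameAndPC slot gets filled in by the call and the
            // callee's prologue, so here we only populate the ArgumentCount, Callee,
            // and argument slots.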
            jit.store32(
                CCallHelpers::TrustedImm32(numberOfParameters),
                calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));

            jit.storeCell(
                loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));

            jit.storeCell(
                baseForGetGPR,
                calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));

            if (m_type == Setter) {
                jit.storeValue(
                    valueRegs,
                    calleeFrame.withOffset(
                        virtualRegisterForArgument(1).offset() * sizeof(Register)));
            }

            CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
                CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
                CCallHelpers::TrustedImmPtr(0));

            fastPathCall = jit.nearCall();
            if (m_type == Getter)
                jit.setupResults(valueRegs);
            done.append(jit.jump());

            slowCase.link(&jit);
            jit.move(loadedValueGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
            // We *always* know that the getter/setter, if non-null, is a cell.
            jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
            jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
            slowPathCall = jit.nearCall();
            if (m_type == Getter)
                jit.setupResults(valueRegs);
            done.append(jit.jump());

            returnUndefined.link(&jit);
            if (m_type == Getter)
                jit.moveTrustedValue(jsUndefined(), valueRegs);

            done.link(&jit);

            jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
                GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            state.restoreLiveRegistersFromStackForCall(isGetter());

            jit.addLinkTask(
                [=, &vm] (LinkBuffer& linkBuffer) {
                    m_rareData->callLinkInfo->setCallLocations(
                        linkBuffer.locationOfNearCall(slowPathCall),
                        linkBuffer.locationOf(addressOfLinkFunctionCheck),
                        linkBuffer.locationOfNearCall(fastPathCall));

                    linkBuffer.link(
                        slowPathCall,
                        CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
                });
        } else {
            // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
            // hard to track if someone did spillage or not, so we just assume that we always need
            // to make some space here.
            jit.makeSpaceOnStackForCCall();

            // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
            // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
            // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (the receiver).
            GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
#if USE(JSVALUE64)
            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
                jit.setupArgumentsWithExecState(
                    baseForCustomValue,
                    CCallHelpers::TrustedImmPtr(ident.impl()));
            } else
                jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
#else
            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
                jit.setupArgumentsWithExecState(
                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
                    CCallHelpers::TrustedImm32(JSValue::CellTag),
                    CCallHelpers::TrustedImmPtr(ident.impl()));
            } else {
                jit.setupArgumentsWithExecState(
                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
                    CCallHelpers::TrustedImm32(JSValue::CellTag),
                    valueRegs.payloadGPR(), valueRegs.tagGPR());
            }
#endif
            jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);

            operationCall = jit.call();
            jit.addLinkTask(
                [=] (LinkBuffer& linkBuffer) {
                    linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
                });

            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
                jit.setupResults(valueRegs);
            jit.reclaimSpaceOnStackForCCall();

            CCallHelpers::Jump noException =
                jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);

            state.restoreLiveRegistersFromStackForCallWithThrownException();
            state.emitExplicitExceptionHandler();

            noException.link(&jit);
            state.restoreLiveRegistersFromStackForCall(isGetter());
        }
        state.succeed();
        return;
    }

    case Replace: {
        if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
            if (verbose)
                dataLog("Have type: ", type->descriptor(), "\n");
            state.failAndRepatch.append(
                jit.branchIfNotType(
                    valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
        } else if (verbose)
            dataLog("Don't have type.\n");

        if (isInlineOffset(m_offset)) {
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(
                    baseGPR,
                    JSObject::offsetOfInlineStorage() +
                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
        } else {
            jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
            jit.storeValue(
                valueRegs,
                CCallHelpers::Address(
                    scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
        }
        state.succeed();
        return;
    }

    case Transition: {
        // AccessCase::transition() should have returned null if this wasn't true.
        RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());

        if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
            if (verbose)
                dataLog("Have type: ", type->descriptor(), "\n");
            state.failAndRepatch.append(
                jit.branchIfNotType(
                    valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
        } else if (verbose)
            dataLog("Don't have type.\n");

        // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
        // exactly when this would make calls.
        bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
        bool reallocating = allocating && structure()->outOfLineCapacity();
        bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();

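        // Three shapes follow: no allocation (store into the existing storage), an
        // inline fast-path allocation out of the copied-space allocator, or a
        // slow-path C call that reallocates the butterfly when the object could have
        // an indexing header. In the operation case, the callee also stores the new
        // butterfly and performs the store barrier (see below).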
1125         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1126         allocator.lock(baseGPR);
1127 #if USE(JSVALUE32_64)
1128         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1129 #endif
1130         allocator.lock(valueRegs);
1131         allocator.lock(scratchGPR);
1132
1133         GPRReg scratchGPR2 = InvalidGPRReg;
1134         GPRReg scratchGPR3 = InvalidGPRReg;
1135         if (allocatingInline) {
1136             scratchGPR2 = allocator.allocateScratchGPR();
1137             scratchGPR3 = allocator.allocateScratchGPR();
1138         }
1139
1140         ScratchRegisterAllocator::PreservedState preservedState =
1141             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1142         
1143         CCallHelpers::JumpList slowPath;
1144
1145         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1146
1147         if (allocating) {
1148             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1149             
1150             if (allocatingInline) {
1151                 CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
1152
1153                 if (!reallocating) {
1154                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1155                     slowPath.append(
1156                         jit.branchSubPtr(
1157                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1158                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1159                     jit.negPtr(scratchGPR);
1160                     jit.addPtr(
1161                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1162                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1163                 } else {
1164                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1165                     // already had out-of-line property storage).
1166                     size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1167                     ASSERT(newSize > oldSize);
1168             
1169                     jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1170                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1171                     slowPath.append(
1172                         jit.branchSubPtr(
1173                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1174                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1175                     jit.negPtr(scratchGPR);
1176                     jit.addPtr(
1177                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1178                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1179                     // We have scratchGPR = new storage, scratchGPR3 = old storage,
1180                     // scratchGPR2 = available
1181                     for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1182                         jit.loadPtr(
1183                             CCallHelpers::Address(
1184                                 scratchGPR3,
1185                                 -static_cast<ptrdiff_t>(
1186                                     offset + sizeof(JSValue) + sizeof(void*))),
1187                             scratchGPR2);
1188                         jit.storePtr(
1189                             scratchGPR2,
1190                             CCallHelpers::Address(
1191                                 scratchGPR,
1192                                 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1193                     }
1194                 }
1195             } else {
1196                 // Handle the case where we are allocating out-of-line using an operation.
1197                 RegisterSet extraRegistersToPreserve;
1198                 extraRegistersToPreserve.set(baseGPR);
1199                 extraRegistersToPreserve.set(valueRegs);
1200                 state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1201                 
1202                 jit.store32(
1203                     CCallHelpers::TrustedImm32(
1204                         state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1205                     CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
1206                 
1207                 jit.makeSpaceOnStackForCCall();
1208                 
1209                 if (!reallocating) {
1210                     jit.setupArgumentsWithExecState(baseGPR);
1211                     
1212                     CCallHelpers::Call operationCall = jit.call();
1213                     jit.addLinkTask(
1214                         [=] (LinkBuffer& linkBuffer) {
1215                             linkBuffer.link(
1216                                 operationCall,
1217                                 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1218                         });
1219                 } else {
1220                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1221                     // already had out-of-line property storage).
1222                     jit.setupArgumentsWithExecState(
1223                         baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1224                     
1225                     CCallHelpers::Call operationCall = jit.call();
1226                     jit.addLinkTask(
1227                         [=] (LinkBuffer& linkBuffer) {
1228                             linkBuffer.link(
1229                                 operationCall,
1230                                 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1231                         });
1232                 }
1233                 
1234                 jit.reclaimSpaceOnStackForCCall();
1235                 jit.move(GPRInfo::returnValueGPR, scratchGPR);
1236                 
1237                 CCallHelpers::Jump noException =
1238                     jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1239                 
1240                 state.restoreLiveRegistersFromStackForCallWithThrownException();
1241                 state.emitExplicitExceptionHandler();
1242                 
1243                 noException.link(&jit);
1244                 state.restoreLiveRegistersFromStackForCall();
1245             }
1246         }
1247
1248         if (isInlineOffset(m_offset)) {
1249             jit.storeValue(
1250                 valueRegs,
1251                 CCallHelpers::Address(
1252                     baseGPR,
1253                     JSObject::offsetOfInlineStorage() +
1254                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1255         } else {
1256             if (!allocating)
1257                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1258             jit.storeValue(
1259                 valueRegs,
1260                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1261         }
1262         
1263         // If we had allocated using an operation then we would have already executed the store
1264         // barrier and we would have already stored the butterfly into the object.
1265         if (allocatingInline) {
1266             CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
1267             WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1268             jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1269             slowPath.append(
1270                 jit.branch32(
1271                     CCallHelpers::AboveOrEqual, scratchGPR2,
1272                     CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
1273             
1274             jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1275             jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1276             
1277             jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
1278             // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
1279             jit.storePtr(
1280                 baseGPR,
1281                 CCallHelpers::BaseIndex(
1282                     scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
1283                     static_cast<int32_t>(-sizeof(void*))));
1284             ownerIsRememberedOrInEden.link(&jit);
1285             
1286             // We set the new butterfly and the structure last. Doing it this way ensures that
1287             // whatever we had done up to this point is forgotten if we choose to branch to the
1288             // slow path.
1289             
1290             jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1291         }
1292         
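        // Installing the new structure ID last publishes the transition: until this store, the
        // object still appears to have its old structure.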
1293         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1294         jit.store32(
1295             CCallHelpers::TrustedImm32(structureBits),
1296             CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1297
1298         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1299         state.succeed();
1300         
1301         // We will have a slow path if we were allocating without the help of an operation.
1302         if (allocatingInline) {
1303             if (allocator.didReuseRegisters()) {
1304                 slowPath.link(&jit);
1305                 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1306                 state.failAndIgnore.append(jit.jump());
1307             } else
1308                 state.failAndIgnore.append(slowPath);
1309         } else
1310             RELEASE_ASSERT(slowPath.empty());
1311         return;
1312     }
1313
1314     case ArrayLength: {
1315         jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1316         jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
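        // The length is a uint32; a value >= 2^31 reads back negative here and cannot be boxed
        // as an int32, so we send it to the slow path.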
1317         state.failAndIgnore.append(
1318             jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1319         jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1320         state.succeed();
1321         return;
1322     }
1323
1324     case StringLength: {
1325         jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
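        // Unlike array lengths, string lengths are capped at INT32_MAX, so no range check is
        // needed before boxing.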
1326         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1327         state.succeed();
1328         return;
1329     }
1330         
1331     case IntrinsicGetter: {
1332         RELEASE_ASSERT(isValidOffset(offset()));
1333
1334         // We need to ensure that the getter value does not move out from under us. Note that
1335         // GetterSetters are immutable, so we just need to watch the property, not any value inside it.
1336         Structure* currStructure;
1337         if (m_conditionSet.isEmpty())
1338             currStructure = structure();
1339         else
1340             currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1341         currStructure->startWatchingPropertyForReplacements(vm, offset());
1342
1343         emitIntrinsicGetter(state);
1344         return;
1345     }
1346
1347     case DirectArgumentsLength:
1348     case ScopedArgumentsLength:
1349     case MegamorphicLoad:
1350         // These need to be handled by generateWithGuard(), since the guard is part of the
1351         // algorithm. We can be sure that nobody will call generate() directly for these since they
1352         // are not guarded by structure checks.
1353         RELEASE_ASSERT_NOT_REACHED();
1354     }
1355     
1356     RELEASE_ASSERT_NOT_REACHED();
1357 }
1358
1359 PolymorphicAccess::PolymorphicAccess() { }
1360 PolymorphicAccess::~PolymorphicAccess() { }
1361
1362 AccessGenerationResult PolymorphicAccess::regenerateWithCases(
1363     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1364     Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
1365 {
1366     // This method will add the originalCasesToAdd to the list one at a time while preserving the
1367     // invariants:
1368     // - If a newly added case canReplace() any existing case, then the existing case is removed before
1369     //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
1370     //   can be removed via the canReplace() rule.
1371     // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1372     //   cascade through the cases in reverse order, you will get the most recent cases first.
1373     // - If this method fails (returns GaveUp, doesn't add the cases), then both the previous case
1374     //   list and the previous stub are kept intact and the new cases are destroyed. It's OK to
1375     //   attempt to add more things after failure.
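    // For example, if the list is [A, B] and we add a case C with C.canReplace(B), the resulting
    // list is [A, C].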
1376     
1377     // First ensure that the originalCasesToAdd doesn't contain duplicates.
1378     Vector<std::unique_ptr<AccessCase>> casesToAdd;
1379     for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1380         std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1381
1382         // Add it only if it is not replaced by the subsequent cases in the list.
1383         bool found = false;
1384         for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1385             if (originalCasesToAdd[j]->canReplace(*myCase)) {
1386                 found = true;
1387                 break;
1388             }
1389         }
1390
1391         if (found)
1392             continue;
1393         
1394         casesToAdd.append(WTFMove(myCase));
1395     }
1396
1397     if (verbose)
1398         dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1399
1400     // If there aren't any cases to add, then fail on the grounds that there's no point in generating a
1401     // new stub that will be identical to the old one. Returning MadeNoChanges tells the caller to just
1402     // keep doing what they were doing before.
1403     if (casesToAdd.isEmpty())
1404         return AccessGenerationResult::MadeNoChanges;
1405
1406     // Now construct the list of cases as they should appear if we are successful. This means putting
1407     // all of the previous cases in this list in order but excluding those that can be replaced, and
1408     // then adding the new cases.
1409     ListType newCases;
1410     for (auto& oldCase : m_list) {
1411         // Ignore old cases that cannot possibly succeed anymore.
1412         if (!oldCase->couldStillSucceed())
1413             continue;
1414
1415         // Figure out if this is replaced by any new cases.
1416         bool found = false;
1417         for (auto& caseToAdd : casesToAdd) {
1418             if (caseToAdd->canReplace(*oldCase)) {
1419                 found = true;
1420                 break;
1421             }
1422         }
1423         if (found)
1424             continue;
1425         
1426         newCases.append(oldCase->clone());
1427     }
1428     for (auto& caseToAdd : casesToAdd)
1429         newCases.append(WTFMove(caseToAdd));
1430
1431     if (verbose)
1432         dataLog("newCases: ", listDump(newCases), "\n");
1433     
1434     // See if we are close to having too many cases and if some of those cases can be subsumed by a
1435     // megamorphic load.
1436     if (newCases.size() >= Options::maxAccessVariantListSize()) {
1437         unsigned numSelfLoads = 0;
1438         for (auto& newCase : newCases) {
1439             if (newCase->canBeReplacedByMegamorphicLoad())
1440                 numSelfLoads++;
1441         }
1442         
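        // Only collapse to a megamorphic load if enough plain self loads would be subsumed to
        // make the generic megamorphic lookup worthwhile; megamorphicLoadCost() is that threshold.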
1443         if (numSelfLoads >= Options::megamorphicLoadCost()) {
1444             if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1445                 newCases.removeAllMatching(
1446                     [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1447                         return newCase->canBeReplacedByMegamorphicLoad();
1448                     });
1449                 
1450                 newCases.append(WTFMove(mega));
1451             }
1452         }
1453     }
1454
1455     if (newCases.size() > Options::maxAccessVariantListSize()) {
1456         if (verbose)
1457             dataLog("Too many cases.\n");
1458         return AccessGenerationResult::GaveUp;
1459     }
1460
1461     MacroAssemblerCodePtr result = regenerate(vm, codeBlock, stubInfo, ident, newCases);
1462     if (!result)
1463         return AccessGenerationResult::GaveUp;
1464
1465     m_list = WTFMove(newCases);
1466     return result;
1467 }
1468
1469 AccessGenerationResult PolymorphicAccess::regenerateWithCase(
1470     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1471     std::unique_ptr<AccessCase> newAccess)
1472 {
1473     Vector<std::unique_ptr<AccessCase>> newAccesses;
1474     newAccesses.append(WTFMove(newAccess));
1475     return regenerateWithCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1476 }
1477
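// Returns false if any access case or weak reference points at a dead cell, telling the owner
// (the StructureStubInfo) that this access is stale and should be reset.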
1478 bool PolymorphicAccess::visitWeak(VM& vm) const
1479 {
1480     for (unsigned i = 0; i < size(); ++i) {
1481         if (!at(i).visitWeak(vm))
1482             return false;
1483     }
1484     if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1485         for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1486             if (!Heap::isMarked(weakReference.get()))
1487                 return false;
1488         }
1489     }
1490     return true;
1491 }
1492
1493 void PolymorphicAccess::dump(PrintStream& out) const
1494 {
1495     out.print(RawPointer(this), ":[");
1496     CommaPrinter comma;
1497     for (auto& entry : m_list)
1498         out.print(comma, *entry);
1499     out.print("]");
1500 }
1501
1502 MacroAssemblerCodePtr PolymorphicAccess::regenerate(
1503     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1504     PolymorphicAccess::ListType& cases)
1505 {
1506     if (verbose)
1507         dataLog("Generating code for cases: ", listDump(cases), "\n");
1508     
1509     AccessGenerationState state;
1510
1511     state.access = this;
1512     state.stubInfo = &stubInfo;
1513     state.ident = &ident;
1514     
1515     state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1516     state.valueRegs = JSValueRegs(
1517 #if USE(JSVALUE32_64)
1518         static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
1519 #endif
1520         static_cast<GPRReg>(stubInfo.patch.valueGPR));
1521
1522     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1523     state.allocator = &allocator;
1524     allocator.lock(state.baseGPR);
1525     allocator.lock(state.valueRegs);
1526 #if USE(JSVALUE32_64)
1527     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1528 #endif
1529
1530     state.scratchGPR = allocator.allocateScratchGPR();
1531     
1532     CCallHelpers jit(&vm, codeBlock);
1533     state.jit = &jit;
1534
1535     state.preservedReusedRegisterState =
1536         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
1537
1538     bool allGuardedByStructureCheck = true;
1539     bool hasJSGetterSetterCall = false;
1540     for (auto& entry : cases) {
1541         allGuardedByStructureCheck &= entry->guardedByStructureCheck();
1542         if (entry->type() == AccessCase::Getter || entry->type() == AccessCase::Setter)
1543             hasJSGetterSetterCall = true;
1544     }
1545
1546     if (cases.isEmpty()) {
1547         // This is super unlikely, but we make it legal anyway.
1548         state.failAndRepatch.append(jit.jump());
1549     } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1550         // If there are any proxies in the list, we cannot just use a binary switch over the structure.
1551         // We need to resort to a cascade. A cascade also happens to be optimal if we have just
1552         // one case.
1553         CCallHelpers::JumpList fallThrough;
1554
1555         // Cascade through the list, preferring newer entries.
1556         for (unsigned i = cases.size(); i--;) {
1557             fallThrough.link(&jit);
1558             cases[i]->generateWithGuard(state, fallThrough);
1559         }
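        // Whatever remains in fallThrough at this point is the oldest case's guard failure,
        // meaning no case matched; route it to the repatch path below.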
1560         state.failAndRepatch.append(fallThrough);
1561     } else {
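        // Every remaining case is guarded purely by a structure check, so we can dispatch with a
        // single binary switch over the structure ID instead of a linear cascade.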
1562         jit.load32(
1563             CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1564             state.scratchGPR);
1565         
1566         Vector<int64_t> caseValues(cases.size());
1567         for (unsigned i = 0; i < cases.size(); ++i)
1568             caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1569         
1570         BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1571         while (binarySwitch.advance(jit))
1572             cases[binarySwitch.caseIndex()]->generate(state);
1573         state.failAndRepatch.append(binarySwitch.fallThrough());
1574     }
1575
1576     if (!state.failAndIgnore.empty()) {
1577         state.failAndIgnore.link(&jit);
1578         
1579         // Make sure that the inline cache optimization code knows that we are taking the slow path because
1580         // of something that isn't patchable. The slow path will decrement "countdown" and will only
1581         // patch things if the countdown reaches zero. We increment the slow path count here to ensure
1582         // that the slow path does not try to patch.
1583         jit.load8(&stubInfo.countdown, state.scratchGPR);
1584         jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1585         jit.store8(state.scratchGPR, &stubInfo.countdown);
1586     }
1587
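    // failAndRepatch jumps may have been taken with reused registers still pushed; if so, pop
    // them before jumping to the slow case. Otherwise those jumps can be linked straight out.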
1588     CCallHelpers::JumpList failure;
1589     if (allocator.didReuseRegisters()) {
1590         state.failAndRepatch.link(&jit);
1591         state.restoreScratch();
1592     } else
1593         failure = state.failAndRepatch;
1594     failure.append(jit.jump());
1595
1596     CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1597     CallSiteIndex callSiteIndexForExceptionHandling;
1598     if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1599         // Emit the exception handler.
1600         // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1601         // It is not reachable from a custom getter/setter: custom getters/setters have their own
1602         // exception handling logic that doesn't go through genericUnwind.
1603         MacroAssembler::Label makeshiftCatchHandler = jit.label();
1604
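        // Compute the stack pointer this stub expects: the frame's canonical stack pointer
        // offset, minus the bytes we pushed for reused registers and the live-register spill area.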
1605         int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1606         stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1607         stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1608
1609         jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1610         jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1611
1612         state.restoreLiveRegistersFromStackForCallWithThrownException();
1613         state.restoreScratch();
1614         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1615
1616         HandlerInfo oldHandler = state.originalExceptionHandler();
1617         CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1618         jit.addLinkTask(
1619             [=] (LinkBuffer& linkBuffer) {
1620                 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1621
1622                 HandlerInfo handlerToRegister = oldHandler;
1623                 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1624                 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1625                 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1626                 codeBlock->appendExceptionHandler(handlerToRegister);
1627             });
1628
1629         // We set these to indicate that the stub should remove itself from the CodeBlock's
1630         // exception handler table when it is deallocated.
1631         codeBlockThatOwnsExceptionHandlers = codeBlock;
1632         ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1633         callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1634     }
1635
1636     LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1637     if (linkBuffer.didFailToAllocate()) {
1638         if (verbose)
1639             dataLog("Did fail to allocate.\n");
1640         return MacroAssemblerCodePtr();
1641     }
1642
1643     CodeLocationLabel successLabel =
1644         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
1645         
1646     linkBuffer.link(state.success, successLabel);
1647
1648     linkBuffer.link(
1649         failure,
1650         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
1651     
1652     if (verbose)
1653         dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1654
1655     MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1656         codeBlock, linkBuffer,
1657         ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1658
1659     bool doesCalls = false;
1660     Vector<JSCell*> cellsToMark;
1661     for (auto& entry : cases)
1662         doesCalls |= entry->doesCalls(&cellsToMark);
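    // If any case makes a call, the stub may be live on the stack when it is replaced, so it
    // must be a GC-aware routine whose deallocation is deferred; cellsToMark lists the cells it
    // must keep alive.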
1663     
1664     m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1665     m_watchpoints = WTFMove(state.watchpoints);
1666     if (!state.weakReferences.isEmpty())
1667         m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1668     if (verbose)
1669         dataLog("Returning: ", code.code(), "\n");
1670     return code.code();
1671 }
1672
1673 void PolymorphicAccess::aboutToDie()
1674 {
1675     m_stubRoutine->aboutToDie();
1676 }
1677
1678 } // namespace JSC
1679
1680 namespace WTF {
1681
1682 using namespace JSC;
1683
1684 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1685 {
1686     switch (kind) {
1687     case AccessGenerationResult::MadeNoChanges:
1688         out.print("MadeNoChanges");
1689         return;
1690     case AccessGenerationResult::GaveUp:
1691         out.print("GaveUp");
1692         return;
1693     case AccessGenerationResult::GeneratedNewCode:
1694         out.print("GeneratedNewCode");
1695         return;
1696     }
1697     
1698     RELEASE_ASSERT_NOT_REACHED();
1699 }
1700
1701 void printInternal(PrintStream& out, AccessCase::AccessType type)
1702 {
1703     switch (type) {
1704     case AccessCase::Load:
1705         out.print("Load");
1706         return;
1707     case AccessCase::MegamorphicLoad:
1708         out.print("MegamorphicLoad");
1709         return;
1710     case AccessCase::Transition:
1711         out.print("Transition");
1712         return;
1713     case AccessCase::Replace:
1714         out.print("Replace");
1715         return;
1716     case AccessCase::Miss:
1717         out.print("Miss");
1718         return;
1719     case AccessCase::GetGetter:
1720         out.print("GetGetter");
1721         return;
1722     case AccessCase::Getter:
1723         out.print("Getter");
1724         return;
1725     case AccessCase::Setter:
1726         out.print("Setter");
1727         return;
1728     case AccessCase::CustomValueGetter:
1729         out.print("CustomValueGetter");
1730         return;
1731     case AccessCase::CustomAccessorGetter:
1732         out.print("CustomAccessorGetter");
1733         return;
1734     case AccessCase::CustomValueSetter:
1735         out.print("CustomValueSetter");
1736         return;
1737     case AccessCase::CustomAccessorSetter:
1738         out.print("CustomAccessorSetter");
1739         return;
1740     case AccessCase::IntrinsicGetter:
1741         out.print("IntrinsicGetter");
1742         return;
1743     case AccessCase::InHit:
1744         out.print("InHit");
1745         return;
1746     case AccessCase::InMiss:
1747         out.print("InMiss");
1748         return;
1749     case AccessCase::ArrayLength:
1750         out.print("ArrayLength");
1751         return;
1752     case AccessCase::StringLength:
1753         out.print("StringLength");
1754         return;
1755     case AccessCase::DirectArgumentsLength:
1756         out.print("DirectArgumentsLength");
1757         return;
1758     case AccessCase::ScopedArgumentsLength:
1759         out.print("ScopedArgumentsLength");
1760         return;
1761     }
1762
1763     RELEASE_ASSERT_NOT_REACHED();
1764 }
1765
1766 } // namespace WTF
1767
1768 #endif // ENABLE(JIT)
1769
1770