1 /*
2  * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "PolymorphicAccess.h"
28
29 #if ENABLE(JIT)
30
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "DirectArguments.h"
35 #include "GetterSetter.h"
36 #include "Heap.h"
37 #include "JITOperations.h"
38 #include "JSCInlines.h"
39 #include "LinkBuffer.h"
40 #include "ScopedArguments.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "StructureStubClearingWatchpoint.h"
43 #include "StructureStubInfo.h"
44 #include <wtf/CommaPrinter.h>
45 #include <wtf/ListDump.h>
46
47 namespace JSC {
48
49 static const bool verbose = false;
50
51 void AccessGenerationResult::dump(PrintStream& out) const
52 {
53     out.print(m_kind);
54     if (m_code)
55         out.print(":", m_code);
56 }
57
58 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
59 {
60     return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
61         watchpoints, jit->codeBlock(), stubInfo, condition);
62 }
63
64 void AccessGenerationState::restoreScratch()
65 {
66     allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
67 }
68
69 void AccessGenerationState::succeed()
70 {
71     restoreScratch();
72     success.append(jit->jump());
73 }
74
75 void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling(const RegisterSet& extra)
76 {
77     if (!m_calculatedRegistersForCallAndExceptionHandling) {
78         m_calculatedRegistersForCallAndExceptionHandling = true;
79
80         m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
81         m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
82         if (m_needsToRestoreRegistersIfException)
83             RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
84
85         m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
86         m_liveRegistersForCall.merge(extra);
87         m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
88         m_liveRegistersForCall.merge(extra);
89     }
90 }
91
92 void AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra)
93 {
94     calculateLiveRegistersForCallAndExceptionHandling(extra);
95     
96     unsigned extraStackPadding = 0;
97     unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
98     if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
99         RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
100     m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
101 }
102
103 void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
104 {
105     RegisterSet dontRestore;
106     if (isGetter) {
107         // This is the result value. We don't want to overwrite the result with what we stored to the stack.
108         // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
109         dontRestore.set(valueRegs);
110     }
111     restoreLiveRegistersFromStackForCall(dontRestore);
112 }
113
114 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
115 {
116     // Even if we're a getter, we don't want to ignore the result value like we normally do
117     // because the getter threw, and therefore, didn't return a value that means anything.
118     // Instead, we want to restore that register to what it was upon entering the getter
119     // inline cache. The subtlety here is if the base and the result are the same register,
120     // and the getter threw, we want OSR exit to see the original base value, not the result
121     // of the getter call.
122     RegisterSet dontRestore = liveRegistersForCall();
123     // As an optimization here, we only need to restore what is live for exception handling.
124     // We can construct the dontRestore set to accomplish this goal by having it contain only
125     // what is live for call but not live for exception handling. By ignoring things that are
126     // only live at the call but not the exception handler, we will only restore things live
127     // at the exception handler.
128     dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
129     restoreLiveRegistersFromStackForCall(dontRestore);
130 }
131
132 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
133 {
134     unsigned extraStackPadding = 0;
135     ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
136 }
137
138 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
139 {
140     RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
141
142     if (!m_calculatedCallSiteIndex) {
143         m_calculatedCallSiteIndex = true;
144
145         if (m_needsToRestoreRegistersIfException)
146             m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
147         else
148             m_callSiteIndex = originalCallSiteIndex();
149     }
150
151     return m_callSiteIndex;
152 }
153
154 const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
155 {
156     RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
157     HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
158     RELEASE_ASSERT(exceptionHandler);
159     return *exceptionHandler;
160 }
161
162 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
163
164 void AccessGenerationState::emitExplicitExceptionHandler()
165 {
166     restoreScratch();
167     jit->copyCalleeSavesToVMCalleeSavesBuffer();
168     if (needsToRestoreRegistersIfException()) {
169         // The JIT that produced the original exception handling call
170         // site expects the OSR exit to be arrived at from genericUnwind.
171         // Therefore we must model what genericUnwind does here, i.e.,
172         // set callFrameForCatch and copy the callee saves.
173
174         jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
175         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
176
177         // We don't need to insert a new exception handler in the table
178         // because we're doing a manual exception check here, i.e., we'll
179         // never arrive here from genericUnwind().
180         HandlerInfo originalHandler = originalExceptionHandler();
181         jit->addLinkTask(
182             [=] (LinkBuffer& linkBuffer) {
183                 linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
184             });
185     } else {
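        // No live registers need restoring on this path, so just call lookupExceptionHandler()
        // to set up the VM's handler state and then jump to the exception handler.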
186         jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
187         CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
188         jit->addLinkTask(
189             [=] (LinkBuffer& linkBuffer) {
190                 linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
191             });
192         jit->jumpToExceptionHandler();
193     }
194 }
195
196 AccessCase::AccessCase()
197 {
198 }
199
200 std::unique_ptr<AccessCase> AccessCase::tryGet(
201     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
202     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
203 {
204     std::unique_ptr<AccessCase> result(new AccessCase());
205
206     result->m_type = type;
207     result->m_offset = offset;
208     result->m_structure.set(vm, owner, structure);
209     result->m_conditionSet = conditionSet;
210
211     if (viaProxy || additionalSet) {
212         result->m_rareData = std::make_unique<RareData>();
213         result->m_rareData->viaProxy = viaProxy;
214         result->m_rareData->additionalSet = additionalSet;
215     }
216
217     return result;
218 }
219
220 std::unique_ptr<AccessCase> AccessCase::get(
221     VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
222     const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
223     PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
224 {
225     std::unique_ptr<AccessCase> result(new AccessCase());
226
227     result->m_type = type;
228     result->m_offset = offset;
229     result->m_structure.set(vm, owner, structure);
230     result->m_conditionSet = conditionSet;
231
232     if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
233         result->m_rareData = std::make_unique<RareData>();
234         result->m_rareData->viaProxy = viaProxy;
235         result->m_rareData->additionalSet = additionalSet;
236         result->m_rareData->customAccessor.getter = customGetter;
237         result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
238     }
239
240     return result;
241 }
242
243 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
244 {
245     UNUSED_PARAM(vm);
246     UNUSED_PARAM(owner);
247     
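    // The megamorphic load stub needs several scratch registers for the property table probe,
    // so on register-poor targets we simply decline to create this case.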
248     if (GPRInfo::numberOfRegisters < 9)
249         return nullptr;
250     
251     std::unique_ptr<AccessCase> result(new AccessCase());
252     
253     result->m_type = MegamorphicLoad;
254     
255     return result;
256 }
257
258 std::unique_ptr<AccessCase> AccessCase::replace(
259     VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
260 {
261     std::unique_ptr<AccessCase> result(new AccessCase());
262
263     result->m_type = Replace;
264     result->m_offset = offset;
265     result->m_structure.set(vm, owner, structure);
266
267     return result;
268 }
269
270 std::unique_ptr<AccessCase> AccessCase::transition(
271     VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
272     const ObjectPropertyConditionSet& conditionSet)
273 {
274     RELEASE_ASSERT(oldStructure == newStructure->previousID());
275
276     // Skip optimizing the case where we need a realloc if we don't have
277     // enough registers to make it happen.
278     if (GPRInfo::numberOfRegisters < 6
279         && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
280         && oldStructure->outOfLineCapacity()) {
281         return nullptr;
282     }
283
284     std::unique_ptr<AccessCase> result(new AccessCase());
285
286     result->m_type = Transition;
287     result->m_offset = offset;
288     result->m_structure.set(vm, owner, newStructure);
289     result->m_conditionSet = conditionSet;
290
291     return result;
292 }
293
294 std::unique_ptr<AccessCase> AccessCase::setter(
295     VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
296     const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
297     JSObject* customSlotBase)
298 {
299     std::unique_ptr<AccessCase> result(new AccessCase());
300
301     result->m_type = type;
302     result->m_offset = offset;
303     result->m_structure.set(vm, owner, structure);
304     result->m_conditionSet = conditionSet;
305     result->m_rareData = std::make_unique<RareData>();
306     result->m_rareData->customAccessor.setter = customSetter;
307     result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
308
309     return result;
310 }
311
312 std::unique_ptr<AccessCase> AccessCase::in(
313     VM& vm, JSCell* owner, AccessType type, Structure* structure,
314     const ObjectPropertyConditionSet& conditionSet)
315 {
316     std::unique_ptr<AccessCase> result(new AccessCase());
317
318     result->m_type = type;
319     result->m_structure.set(vm, owner, structure);
320     result->m_conditionSet = conditionSet;
321
322     return result;
323 }
324
325 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
326 {
327     std::unique_ptr<AccessCase> result(new AccessCase());
328
329     result->m_type = type;
330
331     return result;
332 }
333
334 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
335     VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
336     Structure* structure, const ObjectPropertyConditionSet& conditionSet)
337 {
338     std::unique_ptr<AccessCase> result(new AccessCase());
339
340     result->m_type = IntrinsicGetter;
341     result->m_structure.set(vm, owner, structure);
342     result->m_conditionSet = conditionSet;
343     result->m_offset = offset;
344
345     result->m_rareData = std::make_unique<RareData>();
346     result->m_rareData->intrinsicFunction.set(vm, owner, getter);
347
348     return result;
349 }
350
351 AccessCase::~AccessCase()
352 {
353 }
354
355 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
356     VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
357 {
358     switch (stubInfo.cacheType) {
359     case CacheType::GetByIdSelf:
360         return get(
361             vm, owner, Load, stubInfo.u.byIdSelf.offset,
362             stubInfo.u.byIdSelf.baseObjectStructure.get());
363
364     case CacheType::PutByIdReplace:
365         return replace(
366             vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
367
368     default:
369         return nullptr;
370     }
371 }
372
373 std::unique_ptr<AccessCase> AccessCase::clone() const
374 {
375     std::unique_ptr<AccessCase> result(new AccessCase());
376     result->m_type = m_type;
377     result->m_offset = m_offset;
378     result->m_structure = m_structure;
379     result->m_conditionSet = m_conditionSet;
380     if (RareData* rareData = m_rareData.get()) {
381         result->m_rareData = std::make_unique<RareData>();
382         result->m_rareData->viaProxy = rareData->viaProxy;
383         result->m_rareData->additionalSet = rareData->additionalSet;
384         // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
385         result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
386         result->m_rareData->customSlotBase = rareData->customSlotBase;
387         result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
388     }
389     return result;
390 }
391
392 bool AccessCase::guardedByStructureCheck() const
393 {
394     if (viaProxy())
395         return false;
396
397     switch (m_type) {
398     case MegamorphicLoad:
399     case ArrayLength:
400     case StringLength:
401     case DirectArgumentsLength:
402     case ScopedArgumentsLength:
403         return false;
404     default:
405         return true;
406     }
407 }
408
409 JSObject* AccessCase::alternateBase() const
410 {
411     if (customSlotBase())
412         return customSlotBase();
413     return conditionSet().slotBaseCondition().object();
414 }
415
416 bool AccessCase::couldStillSucceed() const
417 {
418     return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
419 }
420
421 bool AccessCase::canBeReplacedByMegamorphicLoad() const
422 {
423     return type() == Load
424         && !viaProxy()
425         && conditionSet().isEmpty()
426         && !additionalSet()
427         && !customSlotBase();
428 }
429
430 bool AccessCase::canReplace(const AccessCase& other) const
431 {
432     // We could do a lot better here, but for now we just do something obvious.
433     
434     if (type() == MegamorphicLoad && other.canBeReplacedByMegamorphicLoad())
435         return true;
436
437     if (!guardedByStructureCheck() || !other.guardedByStructureCheck()) {
438         // FIXME: Implement this!
439         return false;
440     }
441
442     return structure() == other.structure();
443 }
444
445 void AccessCase::dump(PrintStream& out) const
446 {
447     out.print(m_type, ":(");
448
449     CommaPrinter comma;
450
451     if (m_type == Transition)
452         out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
453     else if (m_structure)
454         out.print(comma, "structure = ", pointerDump(m_structure.get()));
455
456     if (isValidOffset(m_offset))
457         out.print(comma, "offset = ", m_offset);
458     if (!m_conditionSet.isEmpty())
459         out.print(comma, "conditions = ", m_conditionSet);
460
461     if (RareData* rareData = m_rareData.get()) {
462         if (rareData->viaProxy)
463             out.print(comma, "viaProxy = ", rareData->viaProxy);
464         if (rareData->additionalSet)
465             out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
466         if (rareData->callLinkInfo)
467             out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
468         if (rareData->customAccessor.opaque)
469             out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
470         if (rareData->customSlotBase)
471             out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
472     }
473
474     out.print(")");
475 }
476
477 bool AccessCase::visitWeak(VM& vm) const
478 {
479     if (m_structure && !Heap::isMarked(m_structure.get()))
480         return false;
481     if (!m_conditionSet.areStillLive())
482         return false;
483     if (m_rareData) {
484         if (m_rareData->callLinkInfo)
485             m_rareData->callLinkInfo->visitWeak(vm);
486         if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
487             return false;
488         if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
489             return false;
490     }
491     return true;
492 }
493
494 void AccessCase::generateWithGuard(
495     AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
496 {
497     CCallHelpers& jit = *state.jit;
498     VM& vm = *jit.vm();
499     const Identifier& ident = *state.ident;
500     StructureStubInfo& stubInfo = *state.stubInfo;
501     JSValueRegs valueRegs = state.valueRegs;
502     GPRReg baseGPR = state.baseGPR;
503     GPRReg scratchGPR = state.scratchGPR;
504     
505     UNUSED_PARAM(vm);
506
507     switch (m_type) {
508     case ArrayLength: {
509         ASSERT(!viaProxy());
510         jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
511         fallThrough.append(
512             jit.branchTest32(
513                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
514         fallThrough.append(
515             jit.branchTest32(
516                 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
517         break;
518     }
519
520     case StringLength: {
521         ASSERT(!viaProxy());
522         fallThrough.append(
523             jit.branch8(
524                 CCallHelpers::NotEqual,
525                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
526                 CCallHelpers::TrustedImm32(StringType)));
527         break;
528     }
529         
530     case DirectArgumentsLength: {
531         ASSERT(!viaProxy());
532         fallThrough.append(
533             jit.branch8(
534                 CCallHelpers::NotEqual,
535                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
536                 CCallHelpers::TrustedImm32(DirectArgumentsType)));
537
538         fallThrough.append(
539             jit.branchTestPtr(
540                 CCallHelpers::NonZero,
541                 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
542         jit.load32(
543             CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
544             valueRegs.payloadGPR());
545         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
546         state.succeed();
547         return;
548     }
549         
550     case ScopedArgumentsLength: {
551         ASSERT(!viaProxy());
552         fallThrough.append(
553             jit.branch8(
554                 CCallHelpers::NotEqual,
555                 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
556                 CCallHelpers::TrustedImm32(ScopedArgumentsType)));
557
558         fallThrough.append(
559             jit.branchTest8(
560                 CCallHelpers::NonZero,
561                 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
562         jit.load32(
563             CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
564             valueRegs.payloadGPR());
565         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
566         state.succeed();
567         return;
568     }
569         
570     case MegamorphicLoad: {
571         UniquedStringImpl* key = ident.impl();
572         unsigned hash = IdentifierRepHash::hash(key);
573         
574         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
575         allocator.lock(baseGPR);
576 #if USE(JSVALUE32_64)
577         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
578 #endif
579         allocator.lock(valueRegs);
580         allocator.lock(scratchGPR);
581         
582         GPRReg intermediateGPR = scratchGPR;
583         GPRReg maskGPR = allocator.allocateScratchGPR();
584         GPRReg maskedHashGPR = allocator.allocateScratchGPR();
585         GPRReg indexGPR = allocator.allocateScratchGPR();
586         GPRReg offsetGPR = allocator.allocateScratchGPR();
587         
588         if (verbose) {
589             dataLog("baseGPR = ", baseGPR, "\n");
590             dataLog("valueRegs = ", valueRegs, "\n");
591             dataLog("scratchGPR = ", scratchGPR, "\n");
592             dataLog("intermediateGPR = ", intermediateGPR, "\n");
593             dataLog("maskGPR = ", maskGPR, "\n");
594             dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
595             dataLog("indexGPR = ", indexGPR, "\n");
596             dataLog("offsetGPR = ", offsetGPR, "\n");
597         }
598
599         ScratchRegisterAllocator::PreservedState preservedState =
600             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
601
602         CCallHelpers::JumpList myFailAndIgnore;
603         CCallHelpers::JumpList myFallThrough;
604         
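        // Probe the structure's PropertyTable directly: load the table pointer out of the
        // structure, mask the identifier's precomputed hash into a bucket of the index vector,
        // and linearly probe from there until we find the key or hit an empty slot.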
605         jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
606         jit.loadPtr(
607             CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
608             intermediateGPR);
609         
610         myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
611         
612         jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
613         jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
614         jit.load32(
615             CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
616             intermediateGPR);
617
618         jit.move(maskGPR, maskedHashGPR);
619         jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
620         jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
621         jit.addPtr(indexGPR, intermediateGPR);
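        // Register state at this point: maskedHashGPR holds hash & mask (the starting bucket),
        // indexGPR points at the index vector, and intermediateGPR points at the PropertyMapEntry
        // array, which is stored right after the index vector's indexSize 32-bit slots.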
622         
623         CCallHelpers::Label loop = jit.label();
624         
625         jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
626         
627         myFallThrough.append(
628             jit.branch32(
629                 CCallHelpers::Equal,
630                 offsetGPR,
631                 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
632         
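        // A bucket that is not EmptyEntryIndex holds a 1-based entry index; convert it to a
        // 0-based index and scale it into a byte offset into the PropertyMapEntry array.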
633         jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
634         jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
635         jit.addPtr(intermediateGPR, offsetGPR);
636         
637         CCallHelpers::Jump collision =  jit.branchPtr(
638             CCallHelpers::NotEqual,
639             CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
640             CCallHelpers::TrustedImmPtr(key));
641         
642         // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
643         // Check them and then attempt the load.
644         
645         myFallThrough.append(
646             jit.branchTest32(
647                 CCallHelpers::NonZero,
648                 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
649                 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
650         
651         jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
652         
653         jit.loadProperty(baseGPR, offsetGPR, valueRegs);
654         
655         allocator.restoreReusedRegistersByPopping(jit, preservedState);
656         state.succeed();
657         
658         collision.link(&jit);
659
660         jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
661         
662         // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
663         // around isn't super common so we could, for example, recompute the mask from the difference between
664         // the table and index. But before we do that we should probably make it easier to multiply and
665         // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
666         // to have a power-of-2 size.
667         jit.and32(maskGPR, maskedHashGPR);
668         jit.jump().linkTo(loop, &jit);
669         
670         if (allocator.didReuseRegisters()) {
671             myFailAndIgnore.link(&jit);
672             allocator.restoreReusedRegistersByPopping(jit, preservedState);
673             state.failAndIgnore.append(jit.jump());
674             
675             myFallThrough.link(&jit);
676             allocator.restoreReusedRegistersByPopping(jit, preservedState);
677             fallThrough.append(jit.jump());
678         } else {
679             state.failAndIgnore.append(myFailAndIgnore);
680             fallThrough.append(myFallThrough);
681         }
682         return;
683     }
684
685     default: {
686         if (viaProxy()) {
687             fallThrough.append(
688                 jit.branch8(
689                     CCallHelpers::NotEqual,
690                     CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
691                     CCallHelpers::TrustedImm32(PureForwardingProxyType)));
692
693             jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
694
695             fallThrough.append(
696                 jit.branchStructure(
697                     CCallHelpers::NotEqual,
698                     CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
699                     structure()));
700         } else {
701             fallThrough.append(
702                 jit.branchStructure(
703                     CCallHelpers::NotEqual,
704                     CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
705                     structure()));
706         }
707         break;
708     } };
709
710     generate(state);
711 }
712
713 // EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiled for ARM EABI, it must be aligned on an even-numbered register (r0, r2, or [sp]).
714 // To prevent the assembler from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
715 #if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
716 #define EABI_32BIT_DUMMY_ARG      CCallHelpers::TrustedImm32(0),
717 #else
718 #define EABI_32BIT_DUMMY_ARG
719 #endif
720
721 void AccessCase::generate(AccessGenerationState& state)
722 {
723     if (verbose)
724         dataLog("Generating code for: ", *this, "\n");
725     
726     CCallHelpers& jit = *state.jit;
727     VM& vm = *jit.vm();
728     CodeBlock* codeBlock = jit.codeBlock();
729     StructureStubInfo& stubInfo = *state.stubInfo;
730     const Identifier& ident = *state.ident;
731     JSValueRegs valueRegs = state.valueRegs;
732     GPRReg baseGPR = state.baseGPR;
733     GPRReg scratchGPR = state.scratchGPR;
734
735     ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
736
737     if ((structure() && structure()->needImpurePropertyWatchpoint())
738         || m_conditionSet.needImpurePropertyWatchpoint())
739         vm.registerWatchpointForImpureProperty(ident, state.addWatchpoint());
740
741     if (additionalSet())
742         additionalSet()->add(state.addWatchpoint());
743
744     for (const ObjectPropertyCondition& condition : m_conditionSet) {
745         Structure* structure = condition.object()->structure();
746
747         if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
748             structure->addTransitionWatchpoint(state.addWatchpoint(condition));
749             continue;
750         }
751
752         if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
753             dataLog("This condition is no longer met: ", condition, "\n");
754             RELEASE_ASSERT_NOT_REACHED();
755         }
756
757         // We will emit code that has a weak reference that isn't otherwise listed anywhere.
758         state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
759         
760         jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
761         state.failAndRepatch.append(
762             jit.branchStructure(
763                 CCallHelpers::NotEqual,
764                 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
765                 structure));
766     }
767
768     switch (m_type) {
769     case InHit:
770     case InMiss:
771         jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
772         state.succeed();
773         return;
774
775     case Miss:
776         jit.moveTrustedValue(jsUndefined(), valueRegs);
777         state.succeed();
778         return;
779
780     case Load:
781     case GetGetter:
782     case Getter:
783     case Setter:
784     case CustomValueGetter:
785     case CustomAccessorGetter:
786     case CustomValueSetter:
787     case CustomAccessorSetter: {
788         if (isValidOffset(m_offset)) {
789             Structure* currStructure;
790             if (m_conditionSet.isEmpty())
791                 currStructure = structure();
792             else
793                 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
794             currStructure->startWatchingPropertyForReplacements(vm, offset());
795         }
796
797         GPRReg baseForGetGPR;
798         if (viaProxy()) {
799             baseForGetGPR = valueRegs.payloadGPR();
800             jit.loadPtr(
801                 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
802                 baseForGetGPR);
803         } else
804             baseForGetGPR = baseGPR;
805
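        // If the access is guarded by a condition set, the property actually lives on a prototype
        // (the slot base), so the load happens off alternateBase() rather than the receiver.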
806         GPRReg baseForAccessGPR;
807         if (!m_conditionSet.isEmpty()) {
808             jit.move(
809                 CCallHelpers::TrustedImmPtr(alternateBase()),
810                 scratchGPR);
811             baseForAccessGPR = scratchGPR;
812         } else
813             baseForAccessGPR = baseForGetGPR;
814
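        // For plain loads and GetGetter the loaded value is the result, so it goes straight into the
        // result payload register; for JS getter/setter calls the loaded GetterSetter cell goes into
        // scratchGPR instead. Custom accessors don't load anything here.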
815         GPRReg loadedValueGPR = InvalidGPRReg;
816         if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
817             if (m_type == Load || m_type == GetGetter)
818                 loadedValueGPR = valueRegs.payloadGPR();
819             else
820                 loadedValueGPR = scratchGPR;
821
822             GPRReg storageGPR;
823             if (isInlineOffset(m_offset))
824                 storageGPR = baseForAccessGPR;
825             else {
826                 jit.loadPtr(
827                     CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
828                     loadedValueGPR);
829                 storageGPR = loadedValueGPR;
830             }
831
832 #if USE(JSVALUE64)
833             jit.load64(
834                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
835 #else
836             if (m_type == Load || m_type == GetGetter) {
837                 jit.load32(
838                     CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
839                     valueRegs.tagGPR());
840             }
841             jit.load32(
842                 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
843                 loadedValueGPR);
844 #endif
845         }
846
847         if (m_type == Load || m_type == GetGetter) {
848             state.succeed();
849             return;
850         }
851
852         // Stuff for custom getters/setters.
853         CCallHelpers::Call operationCall;
854
855         // Stuff for JS getters/setters.
856         CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
857         CCallHelpers::Call fastPathCall;
858         CCallHelpers::Call slowPathCall;
859
860         CCallHelpers::Jump success;
861         CCallHelpers::Jump fail;
862
863         // This also does the necessary calculations of whether or not we're an
864         // exception handling call site.
865         state.preserveLiveRegistersToStackForCall();
866
867         jit.store32(
868             CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
869             CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
870
871         if (m_type == Getter || m_type == Setter) {
872             // Create a JS call using a JS call inline cache. Assume that:
873             //
874             // - SP is aligned and represents the extent of the calling compiler's stack usage.
875             //
876             // - FP is set correctly (i.e. it points to the caller's call frame header).
877             //
878             // - SP - FP is an aligned difference.
879             //
880             // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
881             //   code.
882             //
883             // Therefore, we temporarily grow the stack for the purpose of the call and then
884             // shrink it after.
885
886             RELEASE_ASSERT(!m_rareData->callLinkInfo);
887             m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
888             
889             // FIXME: If we generated a polymorphic call stub that jumped back to the getter
890             // stub, which then jumped back to the main code, then we'd have a reachability
891             // situation that the GC doesn't know about. The GC would ensure that the polymorphic
892             // call stub stayed alive, and it would ensure that the main code stayed alive, but
893             // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
894             // be GC objects, and then we'd be able to say that the polymorphic call stub has a
895             // reference to the getter stub.
896             // https://bugs.webkit.org/show_bug.cgi?id=148914
897             m_rareData->callLinkInfo->disallowStubs();
898             
899             m_rareData->callLinkInfo->setUpCall(
900                 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
901
902             CCallHelpers::JumpList done;
903
904             // There is a "this" argument.
905             unsigned numberOfParameters = 1;
906             // ... and a value argument if we're calling a setter.
907             if (m_type == Setter)
908                 numberOfParameters++;
909
910             // Get the accessor; if there ain't one then the result is jsUndefined().
911             if (m_type == Setter) {
912                 jit.loadPtr(
913                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
914                     loadedValueGPR);
915             } else {
916                 jit.loadPtr(
917                     CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
918                     loadedValueGPR);
919             }
920
921             CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
922                 CCallHelpers::Zero, loadedValueGPR);
923
924             unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
925
926             unsigned numberOfBytesForCall =
927                 numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);
928
929             unsigned alignedNumberOfBytesForCall =
930                 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
931
932             jit.subPtr(
933                 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
934                 CCallHelpers::stackPointerRegister);
935
936             CCallHelpers::Address calleeFrame = CCallHelpers::Address(
937                 CCallHelpers::stackPointerRegister,
938                 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
939
940             jit.store32(
941                 CCallHelpers::TrustedImm32(numberOfParameters),
942                 calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
943
944             jit.storeCell(
945                 loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
946
947             jit.storeCell(
948                 baseForGetGPR,
949                 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
950
951             if (m_type == Setter) {
952                 jit.storeValue(
953                     valueRegs,
954                     calleeFrame.withOffset(
955                         virtualRegisterForArgument(1).offset() * sizeof(Register)));
956             }
957
958             CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
959                 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
960                 CCallHelpers::TrustedImmPtr(0));
961
962             fastPathCall = jit.nearCall();
963             if (m_type == Getter)
964                 jit.setupResults(valueRegs);
965             done.append(jit.jump());
966
967             slowCase.link(&jit);
968             jit.move(loadedValueGPR, GPRInfo::regT0);
969 #if USE(JSVALUE32_64)
970             // We *always* know that the getter/setter, if non-null, is a cell.
971             jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
972 #endif
973             jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
974             slowPathCall = jit.nearCall();
975             if (m_type == Getter)
976                 jit.setupResults(valueRegs);
977             done.append(jit.jump());
978
979             returnUndefined.link(&jit);
980             if (m_type == Getter)
981                 jit.moveTrustedValue(jsUndefined(), valueRegs);
982
983             done.link(&jit);
984
985             jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
986                 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
987             state.restoreLiveRegistersFromStackForCall(isGetter());
988
989             jit.addLinkTask(
990                 [=, &vm] (LinkBuffer& linkBuffer) {
991                     m_rareData->callLinkInfo->setCallLocations(
992                         linkBuffer.locationOfNearCall(slowPathCall),
993                         linkBuffer.locationOf(addressOfLinkFunctionCheck),
994                         linkBuffer.locationOfNearCall(fastPathCall));
995
996                     linkBuffer.link(
997                         slowPathCall,
998                         CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
999                 });
1000         } else {
1001             // Need to make room for the C call so that none of our stack spillage gets overwritten. It's
1002             // hard to track if someone did spillage or not, so we just assume that we always need
1003             // to make some space here.
1004             jit.makeSpaceOnStackForCCall();
1005
1006             // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
1007             // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
1008             // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (the receiver).
1009             GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
1010 #if USE(JSVALUE64)
1011             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1012                 jit.setupArgumentsWithExecState(
1013                     baseForCustomValue,
1014                     CCallHelpers::TrustedImmPtr(ident.impl()));
1015             } else
1016                 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
1017 #else
1018             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1019                 jit.setupArgumentsWithExecState(
1020                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1021                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1022                     CCallHelpers::TrustedImmPtr(ident.impl()));
1023             } else {
1024                 jit.setupArgumentsWithExecState(
1025                     EABI_32BIT_DUMMY_ARG baseForCustomValue,
1026                     CCallHelpers::TrustedImm32(JSValue::CellTag),
1027                     valueRegs.payloadGPR(), valueRegs.tagGPR());
1028             }
1029 #endif
1030             jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
1031
1032             operationCall = jit.call();
1033             jit.addLinkTask(
1034                 [=] (LinkBuffer& linkBuffer) {
1035                     linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
1036                 });
1037
1038             if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
1039                 jit.setupResults(valueRegs);
1040             jit.reclaimSpaceOnStackForCCall();
1041
1042             CCallHelpers::Jump noException =
1043                 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1044
1045             state.restoreLiveRegistersFromStackForCallWithThrownException();
1046             state.emitExplicitExceptionHandler();
1047         
1048             noException.link(&jit);
1049             state.restoreLiveRegistersFromStackForCall(isGetter());
1050         }
1051         state.succeed();
1052         return;
1053     }
1054
1055     case Replace: {
1056         if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
1057             if (verbose)
1058                 dataLog("Have type: ", type->descriptor(), "\n");
1059             state.failAndRepatch.append(
1060                 jit.branchIfNotType(
1061                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1062         } else if (verbose)
1063             dataLog("Don't have type.\n");
1064         
1065         if (isInlineOffset(m_offset)) {
1066             jit.storeValue(
1067                 valueRegs,
1068                 CCallHelpers::Address(
1069                     baseGPR,
1070                     JSObject::offsetOfInlineStorage() +
1071                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1072         } else {
1073             jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1074             jit.storeValue(
1075                 valueRegs,
1076                 CCallHelpers::Address(
1077                     scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1078         }
1079         state.succeed();
1080         return;
1081     }
1082
1083     case Transition: {
1084         // AccessCase::transition() should have returned null if this wasn't true.
1085         RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1086
1087         if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1088             if (verbose)
1089                 dataLog("Have type: ", type->descriptor(), "\n");
1090             state.failAndRepatch.append(
1091                 jit.branchIfNotType(
1092                     valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1093         } else if (verbose)
1094             dataLog("Don't have type.\n");
1095         
1096         bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
1097         bool reallocating = allocating && structure()->outOfLineCapacity();
1098         bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
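        // allocating: the transition needs new out-of-line property storage. reallocating: the old
        // structure already had out-of-line storage whose contents must be copied over. allocatingInline:
        // we can bump-allocate the new butterfly with inline JIT code because the old structure could
        // not have an indexing header; otherwise we defer to a C++ operation below.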
1099
1100         ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1101         allocator.lock(baseGPR);
1102 #if USE(JSVALUE32_64)
1103         allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1104 #endif
1105         allocator.lock(valueRegs);
1106         allocator.lock(scratchGPR);
1107
1108         GPRReg scratchGPR2 = InvalidGPRReg;
1109         GPRReg scratchGPR3 = InvalidGPRReg;
1110         if (allocatingInline) {
1111             scratchGPR2 = allocator.allocateScratchGPR();
1112             scratchGPR3 = allocator.allocateScratchGPR();
1113         }
1114
1115         ScratchRegisterAllocator::PreservedState preservedState =
1116             allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1117         
1118         CCallHelpers::JumpList slowPath;
1119
1120         ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1121
1122         if (allocating) {
1123             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1124             
1125             if (allocatingInline) {
1126                 CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
1127
1128                 if (!reallocating) {
1129                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1130                     slowPath.append(
1131                         jit.branchSubPtr(
1132                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1133                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1134                     jit.negPtr(scratchGPR);
1135                     jit.addPtr(
1136                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1137                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1138                 } else {
1139                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1140                     // already had out-of-line property storage).
1141                     size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1142                     ASSERT(newSize > oldSize);
1143             
1144                     jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1145                     jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1146                     slowPath.append(
1147                         jit.branchSubPtr(
1148                             CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1149                     jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1150                     jit.negPtr(scratchGPR);
1151                     jit.addPtr(
1152                         CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1153                     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1154                     // We have scratchGPR = new storage, scratchGPR3 = old storage,
1155                     // scratchGPR2 = available
1156                     for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1157                         jit.loadPtr(
1158                             CCallHelpers::Address(
1159                                 scratchGPR3,
1160                                 -static_cast<ptrdiff_t>(
1161                                     offset + sizeof(JSValue) + sizeof(void*))),
1162                             scratchGPR2);
1163                         jit.storePtr(
1164                             scratchGPR2,
1165                             CCallHelpers::Address(
1166                                 scratchGPR,
1167                                 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1168                     }
1169                 }
1170             } else {
1171                 // Handle the case where we are allocating out-of-line using an operation.
1172                 RegisterSet extraRegistersToPreserve;
1173                 extraRegistersToPreserve.set(baseGPR);
1174                 extraRegistersToPreserve.set(valueRegs);
1175                 state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1176                 
1177                 jit.store32(
1178                     CCallHelpers::TrustedImm32(
1179                         state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1180                     CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
1181                 
1182                 jit.makeSpaceOnStackForCCall();
1183                 
1184                 if (!reallocating) {
1185                     jit.setupArgumentsWithExecState(baseGPR);
1186                     
1187                     CCallHelpers::Call operationCall = jit.call();
1188                     jit.addLinkTask(
1189                         [=] (LinkBuffer& linkBuffer) {
1190                             linkBuffer.link(
1191                                 operationCall,
1192                                 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1193                         });
1194                 } else {
1195                     // Handle the case where we are reallocating (i.e. the old structure/butterfly
1196                     // already had out-of-line property storage).
1197                     jit.setupArgumentsWithExecState(
1198                         baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1199                     
1200                     CCallHelpers::Call operationCall = jit.call();
1201                     jit.addLinkTask(
1202                         [=] (LinkBuffer& linkBuffer) {
1203                             linkBuffer.link(
1204                                 operationCall,
1205                                 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1206                         });
1207                 }
1208                 
1209                 jit.reclaimSpaceOnStackForCCall();
1210                 jit.move(GPRInfo::returnValueGPR, scratchGPR);
1211                 
1212                 CCallHelpers::Jump noException =
1213                     jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1214                 
1215                 state.restoreLiveRegistersFromStackForCallWithThrownException();
1216                 state.emitExplicitExceptionHandler();
1217                 
1218                 noException.link(&jit);
1219                 state.restoreLiveRegistersFromStackForCall();
1220             }
1221         }
1222
1223         if (isInlineOffset(m_offset)) {
1224             jit.storeValue(
1225                 valueRegs,
1226                 CCallHelpers::Address(
1227                     baseGPR,
1228                     JSObject::offsetOfInlineStorage() +
1229                     offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1230         } else {
1231             if (!allocating)
1232                 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1233             jit.storeValue(
1234                 valueRegs,
1235                 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1236         }
1237         
1238         // If we had allocated using an operation then we would have already executed the store
1239         // barrier and we would have already stored the butterfly into the object.
1240         if (allocatingInline) {
1241             CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
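            // The object hasn't been remembered yet, so record it in the VM's write barrier buffer;
            // if the buffer is full, take the slow path so the barrier can run out of line.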
1242             WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1243             jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1244             slowPath.append(
1245                 jit.branch32(
1246                     CCallHelpers::AboveOrEqual, scratchGPR2,
1247                     CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
1248             
1249             jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1250             jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1251             
1252             jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
1253             // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
1254             jit.storePtr(
1255                 baseGPR,
1256                 CCallHelpers::BaseIndex(
1257                     scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
1258                     static_cast<int32_t>(-sizeof(void*))));
1259             ownerIsRememberedOrInEden.link(&jit);
1260             
1261             // We set the new butterfly and the structure last. Doing it this way ensures that
1262             // whatever we had done up to this point is forgotten if we choose to branch to
1263             // the slow path.
1264             
1265             jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1266         }
1267         
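             // Publish the transition by storing the new structure ID last, so the object never appears to
             // have the new structure while its storage is still being set up.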
1268         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1269         jit.store32(
1270             CCallHelpers::TrustedImm32(structureBits),
1271             CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1272
1273         allocator.restoreReusedRegistersByPopping(jit, preservedState);
1274         state.succeed();
1275         
1276         // We will have a slow path if we were allocating without the help of an operation.
1277         if (allocatingInline) {
1278             if (allocator.didReuseRegisters()) {
1279                 slowPath.link(&jit);
1280                 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1281                 state.failAndIgnore.append(jit.jump());
1282             } else
1283                 state.failAndIgnore.append(slowPath);
1284         } else
1285             RELEASE_ASSERT(slowPath.empty());
1286         return;
1287     }
1288
1289     case ArrayLength: {
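             // Load the array length out of the butterfly. If it does not fit in an int32 it reads back as
             // negative, in which case we give up and let the non-patching slow path handle it.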
1290         jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1291         jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
1292         state.failAndIgnore.append(
1293             jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1294         jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1295         state.succeed();
1296         return;
1297     }
1298
1299     case StringLength: {
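             // A JSString's length always fits in an int32, so we can load and box it directly.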
1300         jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1301         jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1302         state.succeed();
1303         return;
1304     }
1305         
1306     case IntrinsicGetter: {
1307         RELEASE_ASSERT(isValidOffset(offset()));
1308
1309         // We need to ensure the getter value does not move from under us. Note that GetterSetters
1310         // are immutable, so we just need to watch the property, not any value inside it.
1311         Structure* currStructure;
1312         if (m_conditionSet.isEmpty())
1313             currStructure = structure();
1314         else
1315             currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1316         currStructure->startWatchingPropertyForReplacements(vm, offset());
1317
1318         emitIntrinsicGetter(state);
1319         return;
1320     }
1321
1322     case DirectArgumentsLength:
1323     case ScopedArgumentsLength:
1324     case MegamorphicLoad:
1325         // These need to be handled by generateWithGuard(), since the guard is part of the
1326         // algorithm. We can be sure that nobody will call generate() directly for these since they
1327         // are not guarded by structure checks.
1328         RELEASE_ASSERT_NOT_REACHED();
1329     }
1330     
1331     RELEASE_ASSERT_NOT_REACHED();
1332 }
1333
1334 PolymorphicAccess::PolymorphicAccess() { }
1335 PolymorphicAccess::~PolymorphicAccess() { }
1336
1337 AccessGenerationResult PolymorphicAccess::regenerateWithCases(
1338     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1339     Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
1340 {
1341     // This method will add the originalCasesToAdd to the list one at a time while preserving the
1342     // invariants:
1343     // - If a newly added case canReplace() any existing case, then the existing case is removed before
1344     //   the new case is added. Removal doesn't change the order of the list. Any number of existing cases
1345     //   can be removed via the canReplace() rule.
1346     // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1347     //   cascade through the cases in reverse order, you will get the most recent cases first.
1348     // - If this method fails (returns GaveUp, doesn't add the cases), then both the previous case list
1349     //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
1350     //   add more things after failure.
1351     
1352     // First ensure that the originalCasesToAdd doesn't contain duplicates.
1353     Vector<std::unique_ptr<AccessCase>> casesToAdd;
1354     for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1355         std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1356
1357         // Add it only if it is not replaced by a subsequent case in the list.
1358         bool found = false;
1359         for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1360             if (originalCasesToAdd[j]->canReplace(*myCase)) {
1361                 found = true;
1362                 break;
1363             }
1364         }
1365
1366         if (found)
1367             continue;
1368         
1369         casesToAdd.append(WTFMove(myCase));
1370     }
1371
1372     if (verbose)
1373         dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1374
1375     // If there aren't any cases to add, then fail on the grounds that there's no point in generating a
1376     // new stub that will be identical to the old one. Returning MadeNoChanges tells the caller to just
1377     // keep doing what they were doing before.
1378     if (casesToAdd.isEmpty())
1379         return AccessGenerationResult::MadeNoChanges;
1380
1381     // Now construct the list of cases as they should appear if we are successful. This means putting
1382     // all of the previous cases in this list in order but excluding those that can be replaced, and
1383     // then adding the new cases.
1384     ListType newCases;
1385     for (auto& oldCase : m_list) {
1386         // Ignore old cases that cannot possibly succeed anymore.
1387         if (!oldCase->couldStillSucceed())
1388             continue;
1389
1390         // Figure out if this is replaced by any new cases.
1391         bool found = false;
1392         for (auto& caseToAdd : casesToAdd) {
1393             if (caseToAdd->canReplace(*oldCase)) {
1394                 found = true;
1395                 break;
1396             }
1397         }
1398         if (found)
1399             continue;
1400         
1401         newCases.append(oldCase->clone());
1402     }
1403     for (auto& caseToAdd : casesToAdd)
1404         newCases.append(WTFMove(caseToAdd));
1405
1406     if (verbose)
1407         dataLog("newCases: ", listDump(newCases), "\n");
1408     
1409     // See if we are close to having too many cases and if some of those cases can be subsumed by a
1410     // megamorphic load.
1411     if (newCases.size() >= Options::maxAccessVariantListSize()) {
1412         unsigned numSelfLoads = 0;
1413         for (auto& newCase : newCases) {
1414             if (newCase->canBeReplacedByMegamorphicLoad())
1415                 numSelfLoads++;
1416         }
1417         
1418         if (numSelfLoads >= Options::megamorphicLoadCost()) {
1419             if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1420                 newCases.removeAllMatching(
1421                     [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1422                         return newCase->canBeReplacedByMegamorphicLoad();
1423                     });
1424                 
1425                 newCases.append(WTFMove(mega));
1426             }
1427         }
1428     }
1429
1430     if (newCases.size() > Options::maxAccessVariantListSize()) {
1431         if (verbose)
1432             dataLog("Too many cases.\n");
1433         return AccessGenerationResult::GaveUp;
1434     }
1435
1436     MacroAssemblerCodePtr result = regenerate(vm, codeBlock, stubInfo, ident, newCases);
1437     if (!result)
1438         return AccessGenerationResult::GaveUp;
1439
1440     m_list = WTFMove(newCases);
1441     return result;
1442 }
1443
1444 AccessGenerationResult PolymorphicAccess::regenerateWithCase(
1445     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1446     std::unique_ptr<AccessCase> newAccess)
1447 {
1448     Vector<std::unique_ptr<AccessCase>> newAccesses;
1449     newAccesses.append(WTFMove(newAccess));
1450     return regenerateWithCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1451 }
1452
1453 bool PolymorphicAccess::visitWeak(VM& vm) const
1454 {
1455     for (unsigned i = 0; i < size(); ++i) {
1456         if (!at(i).visitWeak(vm))
1457             return false;
1458     }
1459     if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1460         for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1461             if (!Heap::isMarked(weakReference.get()))
1462                 return false;
1463         }
1464     }
1465     return true;
1466 }
1467
1468 void PolymorphicAccess::dump(PrintStream& out) const
1469 {
1470     out.print(RawPointer(this), ":[");
1471     CommaPrinter comma;
1472     for (auto& entry : m_list)
1473         out.print(comma, *entry);
1474     out.print("]");
1475 }
1476
1477 MacroAssemblerCodePtr PolymorphicAccess::regenerate(
1478     VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1479     PolymorphicAccess::ListType& cases)
1480 {
1481     if (verbose)
1482         dataLog("Generating code for cases: ", listDump(cases), "\n");
1483     
1484     AccessGenerationState state;
1485
1486     state.access = this;
1487     state.stubInfo = &stubInfo;
1488     state.ident = &ident;
1489     
1490     state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1491     state.valueRegs = JSValueRegs(
1492 #if USE(JSVALUE32_64)
1493         static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
1494 #endif
1495         static_cast<GPRReg>(stubInfo.patch.valueGPR));
1496
1497     ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1498     state.allocator = &allocator;
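         // Lock the registers that carry the base and the result so the allocator never hands them out as
         // scratch registers.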
1499     allocator.lock(state.baseGPR);
1500     allocator.lock(state.valueRegs);
1501 #if USE(JSVALUE32_64)
1502     allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1503 #endif
1504
1505     state.scratchGPR = allocator.allocateScratchGPR();
1506     
1507     CCallHelpers jit(&vm, codeBlock);
1508     state.jit = &jit;
1509
1510     state.preservedReusedRegisterState =
1511         allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
1512
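         // Determine whether every case can be selected by a structure check alone (so we can dispatch with a
         // binary switch below) and whether any case makes a JS getter/setter call (which requires the
         // makeshift exception handler emitted further down).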
1513     bool allGuardedByStructureCheck = true;
1514     bool hasJSGetterSetterCall = false;
1515     for (auto& entry : cases) {
1516         allGuardedByStructureCheck &= entry->guardedByStructureCheck();
1517         if (entry->type() == AccessCase::Getter || entry->type() == AccessCase::Setter)
1518             hasJSGetterSetterCall = true;
1519     }
1520
1521     if (cases.isEmpty()) {
1522         // This is super unlikely, but we make it legal anyway.
1523         state.failAndRepatch.append(jit.jump());
1524     } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1525         // If any case is not guarded by a structure check (for example, a proxy), we cannot just use a
1526         // binary switch over the structure. We need to resort to a cascade. A cascade also happens to be
1527         // optimal if we have just one case.
1528         CCallHelpers::JumpList fallThrough;
1529
1530         // Cascade through the list, preferring newer entries.
1531         for (unsigned i = cases.size(); i--;) {
1532             fallThrough.link(&jit);
1533             cases[i]->generateWithGuard(state, fallThrough);
1534         }
1535         state.failAndRepatch.append(fallThrough);
1536     } else {
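             // Every case is guarded purely by a structure check, so load the structure ID once and dispatch
             // with a binary switch over it.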
1537         jit.load32(
1538             CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1539             state.scratchGPR);
1540         
1541         Vector<int64_t> caseValues(cases.size());
1542         for (unsigned i = 0; i < cases.size(); ++i)
1543             caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1544         
1545         BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1546         while (binarySwitch.advance(jit))
1547             cases[binarySwitch.caseIndex()]->generate(state);
1548         state.failAndRepatch.append(binarySwitch.fallThrough());
1549     }
1550
1551     if (!state.failAndIgnore.empty()) {
1552         state.failAndIgnore.link(&jit);
1553         
1554         // Make sure that the inline cache optimization code knows that we are taking the slow path because
1555         // of something that isn't patchable. The slow path will decrement "countdown" and will only
1556         // patch things if the countdown reaches zero. We increment the countdown here to ensure
1557         // that the slow path does not try to patch.
1558         jit.load8(&stubInfo.countdown, state.scratchGPR);
1559         jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1560         jit.store8(state.scratchGPR, &stubInfo.countdown);
1561     }
1562
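         // If we reused registers, failAndRepatch must pop them before leaving, so route it through a small
         // restoring trampoline; otherwise the failure jumps can go straight to the slow case.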
1563     CCallHelpers::JumpList failure;
1564     if (allocator.didReuseRegisters()) {
1565         state.failAndRepatch.link(&jit);
1566         state.restoreScratch();
1567     } else
1568         failure = state.failAndRepatch;
1569     failure.append(jit.jump());
1570
1571     CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1572     CallSiteIndex callSiteIndexForExceptionHandling;
1573     if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1574         // Emit the exception handler.
1575         // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1576         // Note also that this is not reachable from a custom getter/setter. Custom getters/setters have
1577         // their own exception handling logic that doesn't go through genericUnwind.
1578         MacroAssembler::Label makeshiftCatchHandler = jit.label();
1579
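             // Recompute the stack pointer for the catch frame, accounting for the bytes this stub pushed to
             // preserve reused registers and live registers.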
1580         int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1581         stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1582         stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1583
1584         jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1585         jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1586
1587         state.restoreLiveRegistersFromStackForCallWithThrownException();
1588         state.restoreScratch();
1589         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1590
1591         HandlerInfo oldHandler = state.originalExceptionHandler();
1592         CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1593         jit.addLinkTask(
1594             [=] (LinkBuffer& linkBuffer) {
1595                 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1596
1597                 HandlerInfo handlerToRegister = oldHandler;
1598                 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1599                 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1600                 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1601                 codeBlock->appendExceptionHandler(handlerToRegister);
1602             });
1603
1604         // We set these to indicate that the stub should remove itself from the CodeBlock's
1605         // exception handler table when it is deallocated.
1606         codeBlockThatOwnsExceptionHandlers = codeBlock;
1607         ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1608         callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1609     }
1610
1611     LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1612     if (linkBuffer.didFailToAllocate()) {
1613         if (verbose)
1614             dataLog("Did fail to allocate.\n");
1615         return MacroAssemblerCodePtr();
1616     }
1617
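         // Link every success exit to the inline cache's done label, and every failure to its slow case label.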
1618     CodeLocationLabel successLabel =
1619         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
1620         
1621     linkBuffer.link(state.success, successLabel);
1622
1623     linkBuffer.link(
1624         failure,
1625         stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
1626     
1627     if (verbose)
1628         dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1629
1630     MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1631         codeBlock, linkBuffer,
1632         ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1633
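         // Note whether any case makes calls; this determines the kind of stub routine we create below.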
1634     bool doesCalls = false;
1635     for (auto& entry : cases)
1636         doesCalls |= entry->doesCalls();
1637     
1638     m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, nullptr, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1639     m_watchpoints = WTFMove(state.watchpoints);
1640     if (!state.weakReferences.isEmpty())
1641         m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1642     if (verbose)
1643         dataLog("Returning: ", code.code(), "\n");
1644     return code.code();
1645 }
1646
1647 void PolymorphicAccess::aboutToDie()
1648 {
1649     m_stubRoutine->aboutToDie();
1650 }
1651
1652 } // namespace JSC
1653
1654 namespace WTF {
1655
1656 using namespace JSC;
1657
1658 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1659 {
1660     switch (kind) {
1661     case AccessGenerationResult::MadeNoChanges:
1662         out.print("MadeNoChanges");
1663         return;
1664     case AccessGenerationResult::GaveUp:
1665         out.print("GaveUp");
1666         return;
1667     case AccessGenerationResult::GeneratedNewCode:
1668         out.print("GeneratedNewCode");
1669         return;
1670     }
1671     
1672     RELEASE_ASSERT_NOT_REACHED();
1673 }
1674
1675 void printInternal(PrintStream& out, AccessCase::AccessType type)
1676 {
1677     switch (type) {
1678     case AccessCase::Load:
1679         out.print("Load");
1680         return;
1681     case AccessCase::MegamorphicLoad:
1682         out.print("MegamorphicLoad");
1683         return;
1684     case AccessCase::Transition:
1685         out.print("Transition");
1686         return;
1687     case AccessCase::Replace:
1688         out.print("Replace");
1689         return;
1690     case AccessCase::Miss:
1691         out.print("Miss");
1692         return;
1693     case AccessCase::GetGetter:
1694         out.print("GetGetter");
1695         return;
1696     case AccessCase::Getter:
1697         out.print("Getter");
1698         return;
1699     case AccessCase::Setter:
1700         out.print("Setter");
1701         return;
1702     case AccessCase::CustomValueGetter:
1703         out.print("CustomValueGetter");
1704         return;
1705     case AccessCase::CustomAccessorGetter:
1706         out.print("CustomAccessorGetter");
1707         return;
1708     case AccessCase::CustomValueSetter:
1709         out.print("CustomValueSetter");
1710         return;
1711     case AccessCase::CustomAccessorSetter:
1712         out.print("CustomAccessorSetter");
1713         return;
1714     case AccessCase::IntrinsicGetter:
1715         out.print("IntrinsicGetter");
1716         return;
1717     case AccessCase::InHit:
1718         out.print("InHit");
1719         return;
1720     case AccessCase::InMiss:
1721         out.print("InMiss");
1722         return;
1723     case AccessCase::ArrayLength:
1724         out.print("ArrayLength");
1725         return;
1726     case AccessCase::StringLength:
1727         out.print("StringLength");
1728         return;
1729     case AccessCase::DirectArgumentsLength:
1730         out.print("DirectArgumentsLength");
1731         return;
1732     case AccessCase::ScopedArgumentsLength:
1733         out.print("ScopedArgumentsLength");
1734         return;
1735     }
1736
1737     RELEASE_ASSERT_NOT_REACHED();
1738 }
1739
1740 } // namespace WTF
1741
1742 #endif // ENABLE(JIT)
1743
1744