/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include "PolymorphicAccess.h"
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "FullCodeOrigin.h"
36 #include "JITOperations.h"
37 #include "JSCInlines.h"
38 #include "LinkBuffer.h"
39 #include "StructureStubClearingWatchpoint.h"
40 #include "StructureStubInfo.h"
41 #include "SuperSampler.h"
42 #include <wtf/CommaPrinter.h>
43 #include <wtf/ListDump.h>

namespace JSC {

namespace PolymorphicAccessInternal {
static const bool verbose = false;
}

void AccessGenerationResult::dump(PrintStream& out) const
{
    out.print(m_kind);
    if (m_code)
        out.print(":", m_code);
}

Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
{
    return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
        watchpoints, jit->codeBlock(), stubInfo, condition);
}

void AccessGenerationState::restoreScratch()
{
    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
}

void AccessGenerationState::succeed()
{
    restoreScratch();
    success.append(jit->jump());
}

const RegisterSet& AccessGenerationState::liveRegistersForCall()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersForCall;
}

const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
}
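
// Registers that never need to be saved around either a JS call or a C call; these get excluded
// from the set of registers we spill before calling out from the inline cache.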
static RegisterSet calleeSaveRegisters()
{
    RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
    result.filter(RegisterSet::registersToNotSaveForCCall());
    return result;
}

const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling) {
        m_calculatedRegistersForCallAndExceptionHandling = true;

        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
        if (m_needsToRestoreRegistersIfException)
            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));

        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
        m_liveRegistersForCall.exclude(calleeSaveRegisters());
    }
    return m_liveRegistersForCall;
}
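
// Spills every register that is live across an upcoming call (plus any extra registers the caller
// requests) and records how many stack bytes were used, so the call site can restore them later.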
auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
{
    RegisterSet liveRegisters = liveRegistersForCall();
    liveRegisters.merge(extra);

    unsigned extraStackPadding = 0;
    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
    return SpillState(
        WTFMove(liveRegisters),
        numberOfStackBytesUsedForRegisterPreservation);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
{
    // Even if we're a getter, we don't want to ignore the result value like we normally do
    // because the getter threw, and therefore, didn't return a value that means anything.
    // Instead, we want to restore that register to what it was upon entering the getter
    // inline cache. The subtlety here is if the base and the result are the same register,
    // and the getter threw, we want OSR exit to see the original base value, not the result
    // of the getter call.
    RegisterSet dontRestore = spillState.spilledRegisters;
    // As an optimization here, we only need to restore what is live for exception handling.
    // We can construct the dontRestore set to accomplish this goal by having it contain only
    // what is live for call but not live for exception handling. By ignoring things that are
    // only live at the call but not the exception handler, we will only restore things live
    // at the exception handler.
    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
    restoreLiveRegistersFromStackForCall(spillState, dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
{
    unsigned extraStackPadding = 0;
    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
}

CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    if (!m_calculatedCallSiteIndex) {
        m_calculatedCallSiteIndex = true;

        if (m_needsToRestoreRegistersIfException)
            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
        else
            m_callSiteIndex = originalCallSiteIndex();
    }

    return m_callSiteIndex;
}

const HandlerInfo& AccessGenerationState::originalExceptionHandler()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
    RELEASE_ASSERT(exceptionHandler);
    return *exceptionHandler;
}

CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
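
// Emits the code that runs when an operation called from this IC has thrown. In an optimizing JIT
// we forward to the original OSR-exit exception handler after modeling what genericUnwind() would
// have done; otherwise we look up the handler and jump to it directly.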
void AccessGenerationState::emitExplicitExceptionHandler()
{
    restoreScratch();
    jit->copyCalleeSavesToEntryFrameCalleeSavesBuffer(m_vm.topEntryFrame);
    if (needsToRestoreRegistersIfException()) {
        // The JIT that produced the original exception handling call site expects the OSR exit
        // to be arrived at from genericUnwind. Therefore we must model what genericUnwind does
        // here, i.e., set callFrameForCatch and copy callee saves.

        jit->storePtr(GPRInfo::callFrameRegister, m_vm.addressOfCallFrameForCatch());
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();

        // We don't need to insert a new exception handler in the table
        // because we're doing a manual exception check here, i.e., we'll
        // never arrive here from genericUnwind().
        HandlerInfo originalHandler = originalExceptionHandler();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
            });
    } else {
        jit->setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&m_vm), GPRInfo::callFrameRegister);
        CCallHelpers::Call lookupExceptionHandlerCall = jit->call(NoPtrTag);
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler, NoPtrTag);
            });
        jit->jumpToExceptionHandler(m_vm);
    }
}

PolymorphicAccess::PolymorphicAccess() { }
PolymorphicAccess::~PolymorphicAccess() { }

AccessGenerationResult PolymorphicAccess::addCases(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
    const Identifier& ident, Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
{
    SuperSamplerScope superSamplerScope(false);

    // This method will add the originalCasesToAdd to the list one at a time while preserving the
    // invariants:
    // - If a newly added case canReplace() any existing case, then the existing case is removed before
    //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
    //   can be removed via the canReplace() rule.
    // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
    //   cascade through the cases in reverse order, you will get the most recent cases first.
    // - If this method fails (returns null, doesn't add the cases), then both the previous case list
    //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
    //   add more things after failure.

    // First ensure that the originalCasesToAdd doesn't contain duplicates.
    Vector<std::unique_ptr<AccessCase>> casesToAdd;
    for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
        std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);

        // Add it only if it is not replaced by the subsequent cases in the list.
        bool found = false;
        for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
            if (originalCasesToAdd[j]->canReplace(*myCase)) {
                found = true;
                break;
            }
        }

        if (found)
            continue;

        casesToAdd.append(WTFMove(myCase));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("casesToAdd: ", listDump(casesToAdd), "\n");

    // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
    // new stub that will be identical to the old one. Returning null should tell the caller to just
    // keep doing what they were doing before.
    if (casesToAdd.isEmpty())
        return AccessGenerationResult::MadeNoChanges;

    bool shouldReset = false;
    AccessGenerationResult resetResult(AccessGenerationResult::ResetStubAndFireWatchpoints);
    auto considerPolyProtoReset = [&] (Structure* a, Structure* b) {
        if (Structure::shouldConvertToPolyProto(a, b)) {
            // For now, we only reset if this is our first time invalidating this watchpoint.
            // The reason we don't immediately fire this watchpoint is that we may already be
            // watching the poly proto watchpoint, which, if fired, would destroy us. We let
            // whoever handles the result do a delayed fire.
            ASSERT(a->rareData()->sharedPolyProtoWatchpoint().get() == b->rareData()->sharedPolyProtoWatchpoint().get());
            if (a->rareData()->sharedPolyProtoWatchpoint()->isStillValid()) {
                shouldReset = true;
                resetResult.addWatchpointToFire(*a->rareData()->sharedPolyProtoWatchpoint(), StringFireDetail("Detected poly proto optimization opportunity."));
            }
        }
    };

    for (auto& caseToAdd : casesToAdd) {
        for (auto& existingCase : m_list) {
            Structure* a = caseToAdd->structure();
            Structure* b = existingCase->structure();
            considerPolyProtoReset(a, b);
        }
    }
    for (unsigned i = 0; i < casesToAdd.size(); ++i) {
        for (unsigned j = i + 1; j < casesToAdd.size(); ++j) {
            Structure* a = casesToAdd[i]->structure();
            Structure* b = casesToAdd[j]->structure();
            considerPolyProtoReset(a, b);
        }
    }

    if (shouldReset)
        return resetResult;

    // Now add things to the new list. Note that at this point, we will still have old cases that
    // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
    for (auto& caseToAdd : casesToAdd) {
        commit(locker, vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
        m_list.append(WTFMove(caseToAdd));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("After addCases: m_list: ", listDump(m_list), "\n");

    return AccessGenerationResult::Buffered;
}

AccessGenerationResult PolymorphicAccess::addCase(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
    const Identifier& ident, std::unique_ptr<AccessCase> newAccess)
{
    Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
    newAccesses.append(WTFMove(newAccess));
    return addCases(locker, vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
}
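
// Returns false if any structure or cell that the cases reference weakly has been collected, which
// tells the owner that the stub should be cleared.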
bool PolymorphicAccess::visitWeak(VM& vm) const
{
    for (unsigned i = 0; i < size(); ++i) {
        if (!at(i).visitWeak(vm))
            return false;
    }
    if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
        for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
            if (!Heap::isMarked(weakReference.get()))
                return false;
        }
    }
    return true;
}

bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
{
    bool result = true;
    for (unsigned i = 0; i < size(); ++i)
        result &= at(i).propagateTransitions(visitor);
    return result;
}

void PolymorphicAccess::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":[");
    CommaPrinter comma;
    for (auto& entry : m_list)
        out.print(comma, *entry);
    out.print("]");
}

void PolymorphicAccess::commit(
    const GCSafeConcurrentJSLocker&, VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
    StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
{
    // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
    // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
    // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
    // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
    // Those common kinds of JSC object accesses don't hit this case.

    for (WatchpointSet* set : accessCase.commit(vm, ident)) {
        Watchpoint* watchpoint =
            WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
                watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());

        set->add(watchpoint);
    }
}
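
// Compiles the currently buffered cases into a single stub. On success the stub is installed in
// m_stubRoutine and the surviving cases become the new m_list; on failure the previous list and
// stub are left untouched.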
AccessGenerationResult PolymorphicAccess::regenerate(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
{
    SuperSamplerScope superSamplerScope(false);

    if (PolymorphicAccessInternal::verbose)
        dataLog("Regenerate with m_list: ", listDump(m_list), "\n");

    AccessGenerationState state(vm, codeBlock->globalObject());

    state.access = this;
    state.stubInfo = &stubInfo;
    state.ident = &ident;

    state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
    state.thisGPR = static_cast<GPRReg>(stubInfo.patch.thisGPR);
    state.valueRegs = stubInfo.valueRegs();

    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    state.allocator = &allocator;
    allocator.lock(state.baseGPR);
    if (state.thisGPR != InvalidGPRReg)
        allocator.lock(state.thisGPR);
    allocator.lock(state.valueRegs);
#if USE(JSVALUE32_64)
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif

    state.scratchGPR = allocator.allocateScratchGPR();

    CCallHelpers jit(codeBlock);
    state.jit = &jit;

    state.preservedReusedRegisterState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

    // Regenerating is our opportunity to figure out what our list of cases should look like. We
    // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
    // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
    // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
    // from the code of the current stub (aka previous).
    ListType cases;
    unsigned srcIndex = 0;
    unsigned dstIndex = 0;
    while (srcIndex < m_list.size()) {
        std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);

        // If the case had been generated, then we have to keep the original in m_list in case we
        // fail to regenerate. That case may have data structures that are used by the code that it
        // had generated. If the case had not been generated, then we want to remove it from m_list.
        bool isGenerated = someCase->state() == AccessCase::Generated;

        [&] () {
            if (!someCase->couldStillSucceed())
                return;

            // Figure out if this is replaced by any later case.
            for (unsigned j = srcIndex; j < m_list.size(); ++j) {
                if (m_list[j]->canReplace(*someCase))
                    return;
            }

            if (isGenerated)
                cases.append(someCase->clone());
            else
                cases.append(WTFMove(someCase));
        }();

        if (isGenerated)
            m_list[dstIndex++] = WTFMove(someCase);
    }
    m_list.resize(dstIndex);

    if (PolymorphicAccessInternal::verbose)
        dataLog("Optimized cases: ", listDump(cases), "\n");

    // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
    // won't change that set anymore.

    bool allGuardedByStructureCheck = true;
    bool hasJSGetterSetterCall = false;
    for (auto& newCase : cases) {
        commit(locker, vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
        allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
        if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
            hasJSGetterSetterCall = true;
    }
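
    // Pick a code generation strategy: with no cases we simply jump to the slow path; if any case
    // needs a guard other than a structure check (or there is only one case) we emit a cascade of
    // guarded cases; otherwise we binary-switch on the structure ID.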
    if (cases.isEmpty()) {
        // This is super unlikely, but we make it legal anyway.
        state.failAndRepatch.append(jit.jump());
    } else if (!allGuardedByStructureCheck || cases.size() == 1) {
        // If there are any proxies in the list, we cannot just use a binary switch over the structure.
        // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
        // one case.
        CCallHelpers::JumpList fallThrough;

        // Cascade through the list, preferring newer entries.
        for (unsigned i = cases.size(); i--;) {
            fallThrough.link(&jit);
            fallThrough.clear();
            cases[i]->generateWithGuard(state, fallThrough);
        }
        state.failAndRepatch.append(fallThrough);
    } else {
        jit.load32(
            CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
            state.scratchGPR);

        Vector<int64_t> caseValues(cases.size());
        for (unsigned i = 0; i < cases.size(); ++i)
            caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());

        BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
        while (binarySwitch.advance(jit))
            cases[binarySwitch.caseIndex()]->generate(state);
        state.failAndRepatch.append(binarySwitch.fallThrough());
    }

    if (!state.failAndIgnore.empty()) {
        state.failAndIgnore.link(&jit);

        // Make sure that the inline cache optimization code knows that we are taking slow path because
        // of something that isn't patchable. The slow path will decrement "countdown" and will only
        // patch things if the countdown reaches zero. We increment the slow path count here to ensure
        // that the slow path does not try to patch.
#if CPU(X86) || CPU(X86_64)
        jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
        jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
#else
        jit.load8(&stubInfo.countdown, state.scratchGPR);
        jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
        jit.store8(state.scratchGPR, &stubInfo.countdown);
#endif
    }

    CCallHelpers::JumpList failure;
    if (allocator.didReuseRegisters()) {
        state.failAndRepatch.link(&jit);
        state.restoreScratch();
    } else
        failure = state.failAndRepatch;
    failure.append(jit.jump());

    CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
    CallSiteIndex callSiteIndexForExceptionHandling;
    if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
        // Emit the exception handler.
        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
        // Note also that this is not reachable from custom getters/setters. Custom getters/setters will have
        // their own exception handling logic that doesn't go through genericUnwind.
        MacroAssembler::Label makeshiftCatchHandler = jit.label();

        int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
        AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
        ASSERT(!spillStateForJSGetterSetter.isEmpty());
        stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
        stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;

        jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
        jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

        state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
        state.restoreScratch();
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();

        HandlerInfo oldHandler = state.originalExceptionHandler();
        CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);

                HandlerInfo handlerToRegister = oldHandler;
                handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler, NoPtrTag);
                handlerToRegister.start = newExceptionHandlingCallSite.bits();
                handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
                codeBlock->appendExceptionHandler(handlerToRegister);
            });

        // We set these to indicate to the stub to remove itself from the CodeBlock's
        // exception handler table when it is deallocated.
        codeBlockThatOwnsExceptionHandlers = codeBlock;
        ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
        callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
    }
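
    // Link the stub. Linking can fail to allocate executable memory; in that case we give up and the
    // caller keeps using the slow path.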
    LinkBuffer linkBuffer(jit, codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate()) {
        if (PolymorphicAccessInternal::verbose)
            dataLog("Did fail to allocate.\n");
        return AccessGenerationResult::GaveUp;
    }

    CodeLocationLabel successLabel = stubInfo.doneLocation();

    linkBuffer.link(state.success, successLabel);

    linkBuffer.link(failure, stubInfo.slowPathStartLocation());

    if (PolymorphicAccessInternal::verbose)
        dataLog(FullCodeOrigin(codeBlock, stubInfo.codeOrigin), ": Generating polymorphic access stub for ", listDump(cases), "\n");

    MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
        codeBlock, linkBuffer, NoPtrTag,
        "%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data());

    bool doesCalls = false;
    Vector<JSCell*> cellsToMark;
    for (auto& entry : cases)
        doesCalls |= entry->doesCalls(&cellsToMark);

    m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
    m_watchpoints = WTFMove(state.watchpoints);
    if (!state.weakReferences.isEmpty())
        m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
    if (PolymorphicAccessInternal::verbose)
        dataLog("Returning: ", code.code(), "\n");

    m_list = WTFMove(cases);

    AccessGenerationResult::Kind resultKind;
    if (m_list.size() >= Options::maxAccessVariantListSize())
        resultKind = AccessGenerationResult::GeneratedFinalCode;
    else
        resultKind = AccessGenerationResult::GeneratedNewCode;

    return AccessGenerationResult(resultKind, code.code());
}
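
// Gives the stub routine a chance to clean up (for example, to remove the exception handlers it
// registered with the CodeBlock) before this access is destroyed.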
void PolymorphicAccess::aboutToDie()
{
    if (m_stubRoutine)
        m_stubRoutine->aboutToDie();
}

} // namespace JSC

namespace WTF {

using namespace JSC;

void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
{
    switch (kind) {
    case AccessGenerationResult::MadeNoChanges:
        out.print("MadeNoChanges");
        return;
    case AccessGenerationResult::GaveUp:
        out.print("GaveUp");
        return;
    case AccessGenerationResult::Buffered:
        out.print("Buffered");
        return;
    case AccessGenerationResult::GeneratedNewCode:
        out.print("GeneratedNewCode");
        return;
    case AccessGenerationResult::GeneratedFinalCode:
        out.print("GeneratedFinalCode");
        return;
    case AccessGenerationResult::ResetStubAndFireWatchpoints:
        out.print("ResetStubAndFireWatchpoints");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::AccessType type)
{
    switch (type) {
    case AccessCase::Load:
        out.print("Load");
        return;
    case AccessCase::Transition:
        out.print("Transition");
        return;
    case AccessCase::Replace:
        out.print("Replace");
        return;
    case AccessCase::Miss:
        out.print("Miss");
        return;
    case AccessCase::GetGetter:
        out.print("GetGetter");
        return;
    case AccessCase::Getter:
        out.print("Getter");
        return;
    case AccessCase::Setter:
        out.print("Setter");
        return;
    case AccessCase::CustomValueGetter:
        out.print("CustomValueGetter");
        return;
    case AccessCase::CustomAccessorGetter:
        out.print("CustomAccessorGetter");
        return;
    case AccessCase::CustomValueSetter:
        out.print("CustomValueSetter");
        return;
    case AccessCase::CustomAccessorSetter:
        out.print("CustomAccessorSetter");
        return;
    case AccessCase::IntrinsicGetter:
        out.print("IntrinsicGetter");
        return;
    case AccessCase::InHit:
        out.print("InHit");
        return;
    case AccessCase::InMiss:
        out.print("InMiss");
        return;
    case AccessCase::ArrayLength:
        out.print("ArrayLength");
        return;
    case AccessCase::StringLength:
        out.print("StringLength");
        return;
    case AccessCase::DirectArgumentsLength:
        out.print("DirectArgumentsLength");
        return;
    case AccessCase::ScopedArgumentsLength:
        out.print("ScopedArgumentsLength");
        return;
    case AccessCase::ModuleNamespaceLoad:
        out.print("ModuleNamespaceLoad");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::State state)
{
    switch (state) {
    case AccessCase::Primordial:
        out.print("Primordial");
        return;
    case AccessCase::Committed:
        out.print("Committed");
        return;
    case AccessCase::Generated:
        out.print("Generated");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace WTF

#endif // ENABLE(JIT)