2 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "PolymorphicAccess.h"
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "DirectArguments.h"
35 #include "GetterSetter.h"
37 #include "JITOperations.h"
38 #include "JSCInlines.h"
39 #include "LinkBuffer.h"
40 #include "ScopedArguments.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "StructureStubClearingWatchpoint.h"
43 #include "StructureStubInfo.h"
44 #include <wtf/CommaPrinter.h>
45 #include <wtf/ListDump.h>
49 static const bool verbose = false;
51 // EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiled for ARM EABI, it must be aligned on an even-numbered register (r0, r2 or [sp]).
52 // To prevent the assembler from using the wrong registers, we occupy r1 or r3 with a dummy argument when necessary.
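// For example (a sketch mirroring the custom-getter call later in this file), the dummy argument keeps
// the 64-bit EncodedJSValue in an even-numbered register pair:
//     jit.setupArgumentsWithExecState(
//         EABI_32BIT_DUMMY_ARG baseForCustomValue,
//         CCallHelpers::TrustedImm32(JSValue::CellTag),
//         CCallHelpers::TrustedImmPtr(ident.impl()));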
53 #if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
54 #define EABI_32BIT_DUMMY_ARG CCallHelpers::TrustedImm32(0),
56 #define EABI_32BIT_DUMMY_ARG
59 void AccessGenerationResult::dump(PrintStream& out) const
63 out.print(":", m_code);
66 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
68 return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
69 watchpoints, jit->codeBlock(), stubInfo, condition);
72 void AccessGenerationState::restoreScratch()
74 allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
77 void AccessGenerationState::succeed()
80 success.append(jit->jump());
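// Computes (once, then caches) the register sets needed around any call the stub makes: the registers
// that are live at the exception handling call site, and the registers that must be preserved across
// the call itself.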
83 void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling(const RegisterSet& extra)
85 if (!m_calculatedRegistersForCallAndExceptionHandling) {
86 m_calculatedRegistersForCallAndExceptionHandling = true;
88 m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
89 m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
90 if (m_needsToRestoreRegistersIfException)
91 RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
93 m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
95 m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
96 m_liveRegistersForCall.merge(extra);
100 void AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra)
102 calculateLiveRegistersForCallAndExceptionHandling(extra);
104 unsigned extraStackPadding = 0;
105 unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
106 if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
107 RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
108 m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
111 void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
113 RegisterSet dontRestore;
115 // This is the result value. We don't want to overwrite the result with what we stored to the stack.
116 // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
117 dontRestore.set(valueRegs);
119 restoreLiveRegistersFromStackForCall(dontRestore);
122 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
124 // Even if we're a getter, we don't want to ignore the result value like we normally do
125 // because the getter threw, and therefore, didn't return a value that means anything.
126 // Instead, we want to restore that register to what it was upon entering the getter
127 // inline cache. The subtlety here is if the base and the result are the same register,
128 // and the getter threw, we want OSR exit to see the original base value, not the result
129 // of the getter call.
130 RegisterSet dontRestore = liveRegistersForCall();
131 // As an optimization here, we only need to restore what is live for exception handling.
132 // We can construct the dontRestore set to accomplish this goal by having it contain only
133 // what is live for call but not live for exception handling. By ignoring things that are
134 // only live at the call but not the exception handler, we will only restore things live
135 // at the exception handler.
136 dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
137 restoreLiveRegistersFromStackForCall(dontRestore);
140 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
142 unsigned extraStackPadding = 0;
143 ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
146 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
148 RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
150 if (!m_calculatedCallSiteIndex) {
151 m_calculatedCallSiteIndex = true;
153 if (m_needsToRestoreRegistersIfException)
154 m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
156 m_callSiteIndex = originalCallSiteIndex();
159 return m_callSiteIndex;
162 const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
164 RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
165 HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
166 RELEASE_ASSERT(exceptionHandler);
167 return *exceptionHandler;
170 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
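// Emits the out-of-line handler used when a call made by this stub throws. If the original call site
// has registers to restore on exception, we model what genericUnwind does (set callFrameForCatch, copy
// callee saves) and jump to the original OSR exit handler; otherwise we call lookupExceptionHandler and
// jump to whatever handler it finds.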
172 void AccessGenerationState::emitExplicitExceptionHandler()
175 jit->copyCalleeSavesToVMCalleeSavesBuffer();
176 if (needsToRestoreRegistersIfException()) {
177 // The JIT that produced the original exception handling
178 // call site expects the OSR exit to be arrived
179 // at from genericUnwind. Therefore we must model what genericUnwind
180 // does here, i.e., set callFrameForCatch and copy the callee saves.
182 jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
183 CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
185 // We don't need to insert a new exception handler in the table
186 // because we're doing a manual exception check here, i.e., we'll
187 // never arrive here from genericUnwind().
188 HandlerInfo originalHandler = originalExceptionHandler();
190 [=] (LinkBuffer& linkBuffer) {
191 linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
194 jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
195 CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
197 [=] (LinkBuffer& linkBuffer) {
198 linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
200 jit->jumpToExceptionHandler();
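// The factory functions below allocate an AccessCase and initialize only the fields that the given kind
// of access needs. A RareData is created only for cases that involve proxies, extra watchpoint sets,
// custom accessors, intrinsic getters, or anything else that makes calls.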
204 AccessCase::AccessCase()
208 std::unique_ptr<AccessCase> AccessCase::tryGet(
209 VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
210 const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
212 std::unique_ptr<AccessCase> result(new AccessCase());
214 result->m_type = type;
215 result->m_offset = offset;
216 result->m_structure.set(vm, owner, structure);
217 result->m_conditionSet = conditionSet;
219 if (viaProxy || additionalSet) {
220 result->m_rareData = std::make_unique<RareData>();
221 result->m_rareData->viaProxy = viaProxy;
222 result->m_rareData->additionalSet = additionalSet;
228 std::unique_ptr<AccessCase> AccessCase::get(
229 VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
230 const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
231 PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
233 std::unique_ptr<AccessCase> result(new AccessCase());
235 result->m_type = type;
236 result->m_offset = offset;
237 result->m_structure.set(vm, owner, structure);
238 result->m_conditionSet = conditionSet;
240 if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
241 result->m_rareData = std::make_unique<RareData>();
242 result->m_rareData->viaProxy = viaProxy;
243 result->m_rareData->additionalSet = additionalSet;
244 result->m_rareData->customAccessor.getter = customGetter;
245 result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
251 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
256 if (GPRInfo::numberOfRegisters < 9)
259 std::unique_ptr<AccessCase> result(new AccessCase());
261 result->m_type = MegamorphicLoad;
266 std::unique_ptr<AccessCase> AccessCase::replace(
267 VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
269 std::unique_ptr<AccessCase> result(new AccessCase());
271 result->m_type = Replace;
272 result->m_offset = offset;
273 result->m_structure.set(vm, owner, structure);
278 std::unique_ptr<AccessCase> AccessCase::transition(
279 VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
280 const ObjectPropertyConditionSet& conditionSet)
282 RELEASE_ASSERT(oldStructure == newStructure->previousID());
284 // Skip optimizing the case where we need a realloc, if we don't have
285 // enough registers to make it happen.
286 if (GPRInfo::numberOfRegisters < 6
287 && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
288 && oldStructure->outOfLineCapacity()) {
292 std::unique_ptr<AccessCase> result(new AccessCase());
294 result->m_type = Transition;
295 result->m_offset = offset;
296 result->m_structure.set(vm, owner, newStructure);
297 result->m_conditionSet = conditionSet;
302 std::unique_ptr<AccessCase> AccessCase::setter(
303 VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
304 const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
305 JSObject* customSlotBase)
307 std::unique_ptr<AccessCase> result(new AccessCase());
309 result->m_type = type;
310 result->m_offset = offset;
311 result->m_structure.set(vm, owner, structure);
312 result->m_conditionSet = conditionSet;
313 result->m_rareData = std::make_unique<RareData>();
314 result->m_rareData->customAccessor.setter = customSetter;
315 result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
320 std::unique_ptr<AccessCase> AccessCase::in(
321 VM& vm, JSCell* owner, AccessType type, Structure* structure,
322 const ObjectPropertyConditionSet& conditionSet)
324 std::unique_ptr<AccessCase> result(new AccessCase());
326 result->m_type = type;
327 result->m_structure.set(vm, owner, structure);
328 result->m_conditionSet = conditionSet;
333 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
335 std::unique_ptr<AccessCase> result(new AccessCase());
337 result->m_type = type;
342 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
343 VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
344 Structure* structure, const ObjectPropertyConditionSet& conditionSet)
346 std::unique_ptr<AccessCase> result(new AccessCase());
348 result->m_type = IntrinsicGetter;
349 result->m_structure.set(vm, owner, structure);
350 result->m_conditionSet = conditionSet;
351 result->m_offset = offset;
353 result->m_rareData = std::make_unique<RareData>();
354 result->m_rareData->intrinsicFunction.set(vm, owner, getter);
359 AccessCase::~AccessCase()
363 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
364 VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
366 switch (stubInfo.cacheType) {
367 case CacheType::GetByIdSelf:
369 vm, owner, Load, stubInfo.u.byIdSelf.offset,
370 stubInfo.u.byIdSelf.baseObjectStructure.get());
372 case CacheType::PutByIdReplace:
374 vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
381 std::unique_ptr<AccessCase> AccessCase::clone() const
383 std::unique_ptr<AccessCase> result(new AccessCase());
384 result->m_type = m_type;
385 result->m_offset = m_offset;
386 result->m_structure = m_structure;
387 result->m_conditionSet = m_conditionSet;
388 if (RareData* rareData = m_rareData.get()) {
389 result->m_rareData = std::make_unique<RareData>();
390 result->m_rareData->viaProxy = rareData->viaProxy;
391 result->m_rareData->additionalSet = rareData->additionalSet;
392 // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
393 result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
394 result->m_rareData->customSlotBase = rareData->customSlotBase;
395 result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
400 Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
402 // It's fine to commit something that is already committed. That arises when we switch to using
403 // newly allocated watchpoints. When it happens, it's not efficient - but we think that's OK
404 // because most AccessCases have no extra watchpoints anyway.
405 RELEASE_ASSERT(m_state == Primordial || m_state == Committed);
407 Vector<WatchpointSet*, 2> result;
409 if ((structure() && structure()->needImpurePropertyWatchpoint())
410 || m_conditionSet.needImpurePropertyWatchpoint())
411 result.append(vm.ensureWatchpointSetForImpureProperty(ident));
414 result.append(additionalSet());
421 bool AccessCase::guardedByStructureCheck() const
427 case MegamorphicLoad:
430 case DirectArgumentsLength:
431 case ScopedArgumentsLength:
438 JSObject* AccessCase::alternateBase() const
440 if (customSlotBase())
441 return customSlotBase();
442 return conditionSet().slotBaseCondition().object();
445 bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
450 case CustomValueGetter:
451 case CustomAccessorGetter:
452 case CustomValueSetter:
453 case CustomAccessorSetter:
456 if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
457 && structure()->couldHaveIndexingHeader()) {
459 cellsToMark->append(newStructure());
468 bool AccessCase::couldStillSucceed() const
470 return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
473 bool AccessCase::canBeReplacedByMegamorphicLoad() const
475 if (type() == MegamorphicLoad)
478 return type() == Load
480 && conditionSet().isEmpty()
482 && !customSlotBase();
485 bool AccessCase::canReplace(const AccessCase& other) const
487 // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
488 // It's fine for this to return false if it's in doubt.
491 case MegamorphicLoad:
492 return other.canBeReplacedByMegamorphicLoad();
495 case DirectArgumentsLength:
496 case ScopedArgumentsLength:
497 return other.type() == type();
499 if (!guardedByStructureCheck() || !other.guardedByStructureCheck())
502 return structure() == other.structure();
506 void AccessCase::dump(PrintStream& out) const
508 out.print(m_type, ":(");
512 out.print(comma, m_state);
514 if (m_type == Transition)
515 out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
516 else if (m_structure)
517 out.print(comma, "structure = ", pointerDump(m_structure.get()));
519 if (isValidOffset(m_offset))
520 out.print(comma, "offset = ", m_offset);
521 if (!m_conditionSet.isEmpty())
522 out.print(comma, "conditions = ", m_conditionSet);
524 if (RareData* rareData = m_rareData.get()) {
525 if (rareData->viaProxy)
526 out.print(comma, "viaProxy = ", rareData->viaProxy);
527 if (rareData->additionalSet)
528 out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
529 if (rareData->callLinkInfo)
530 out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
531 if (rareData->customAccessor.opaque)
532 out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
533 if (rareData->customSlotBase)
534 out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
540 bool AccessCase::visitWeak(VM& vm) const
542 if (m_structure && !Heap::isMarked(m_structure.get()))
544 if (!m_conditionSet.areStillLive())
547 if (m_rareData->callLinkInfo)
548 m_rareData->callLinkInfo->visitWeak(vm);
549 if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
551 if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
557 void AccessCase::generateWithGuard(
558 AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
560 SuperSamplerScope superSamplerScope(false);
562 RELEASE_ASSERT(m_state == Committed);
565 CCallHelpers& jit = *state.jit;
567 const Identifier& ident = *state.ident;
568 StructureStubInfo& stubInfo = *state.stubInfo;
569 JSValueRegs valueRegs = state.valueRegs;
570 GPRReg baseGPR = state.baseGPR;
571 GPRReg scratchGPR = state.scratchGPR;
578 jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
581 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
584 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
592 CCallHelpers::NotEqual,
593 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
594 CCallHelpers::TrustedImm32(StringType)));
598 case DirectArgumentsLength: {
602 CCallHelpers::NotEqual,
603 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
604 CCallHelpers::TrustedImm32(DirectArgumentsType)));
608 CCallHelpers::NonZero,
609 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
611 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
612 valueRegs.payloadGPR());
613 jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
618 case ScopedArgumentsLength: {
622 CCallHelpers::NotEqual,
623 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
624 CCallHelpers::TrustedImm32(ScopedArgumentsType)));
628 CCallHelpers::NonZero,
629 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
631 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
632 valueRegs.payloadGPR());
633 jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
638 case MegamorphicLoad: {
639 UniquedStringImpl* key = ident.impl();
640 unsigned hash = IdentifierRepHash::hash(key);
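// What follows is an inline probe of the structure's PropertyTable: mask the precomputed hash to get an
// index, re-probe on a key mismatch by bumping the masked hash, and give up (fall through) on an empty
// slot or on an entry whose attributes include Accessor | CustomAccessor.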
642 ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
643 allocator.lock(baseGPR);
644 #if USE(JSVALUE32_64)
645 allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
647 allocator.lock(valueRegs);
648 allocator.lock(scratchGPR);
650 GPRReg intermediateGPR = scratchGPR;
651 GPRReg maskGPR = allocator.allocateScratchGPR();
652 GPRReg maskedHashGPR = allocator.allocateScratchGPR();
653 GPRReg indexGPR = allocator.allocateScratchGPR();
654 GPRReg offsetGPR = allocator.allocateScratchGPR();
657 dataLog("baseGPR = ", baseGPR, "\n");
658 dataLog("valueRegs = ", valueRegs, "\n");
659 dataLog("scratchGPR = ", scratchGPR, "\n");
660 dataLog("intermediateGPR = ", intermediateGPR, "\n");
661 dataLog("maskGPR = ", maskGPR, "\n");
662 dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
663 dataLog("indexGPR = ", indexGPR, "\n");
664 dataLog("offsetGPR = ", offsetGPR, "\n");
667 ScratchRegisterAllocator::PreservedState preservedState =
668 allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
670 CCallHelpers::JumpList myFailAndIgnore;
671 CCallHelpers::JumpList myFallThrough;
673 jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
675 CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
678 myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
680 jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
681 jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
683 CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
686 jit.move(maskGPR, maskedHashGPR);
687 jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
688 jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
689 jit.addPtr(indexGPR, intermediateGPR);
691 CCallHelpers::Label loop = jit.label();
693 jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
695 myFallThrough.append(
699 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
701 jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
702 jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
703 jit.addPtr(intermediateGPR, offsetGPR);
705 CCallHelpers::Jump collision = jit.branchPtr(
706 CCallHelpers::NotEqual,
707 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
708 CCallHelpers::TrustedImmPtr(key));
710 // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
711 // Check them and then attempt the load.
713 myFallThrough.append(
715 CCallHelpers::NonZero,
716 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
717 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
719 jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
721 jit.loadProperty(baseGPR, offsetGPR, valueRegs);
723 allocator.restoreReusedRegistersByPopping(jit, preservedState);
726 collision.link(&jit);
728 jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
730 // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
731 // around isn't super common so we could, for example, recompute the mask from the difference between
732 // the table and index. But before we do that we should probably make it easier to multiply and
733 // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
734 // to have a power-of-2 size.
735 jit.and32(maskGPR, maskedHashGPR);
736 jit.jump().linkTo(loop, &jit);
738 if (allocator.didReuseRegisters()) {
739 myFailAndIgnore.link(&jit);
740 allocator.restoreReusedRegistersByPopping(jit, preservedState);
741 state.failAndIgnore.append(jit.jump());
743 myFallThrough.link(&jit);
744 allocator.restoreReusedRegistersByPopping(jit, preservedState);
745 fallThrough.append(jit.jump());
747 state.failAndIgnore.append(myFailAndIgnore);
748 fallThrough.append(myFallThrough);
757 CCallHelpers::NotEqual,
758 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
759 CCallHelpers::TrustedImm32(PureForwardingProxyType)));
761 jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
765 CCallHelpers::NotEqual,
766 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
771 CCallHelpers::NotEqual,
772 CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
781 void AccessCase::generate(AccessGenerationState& state)
783 RELEASE_ASSERT(m_state == Committed);
789 void AccessCase::generateImpl(AccessGenerationState& state)
791 SuperSamplerScope superSamplerScope(false);
793 dataLog("Generating code for: ", *this, "\n");
795 ASSERT(m_state == Generated); // We rely on the callers setting this for us.
797 CCallHelpers& jit = *state.jit;
799 CodeBlock* codeBlock = jit.codeBlock();
800 StructureStubInfo& stubInfo = *state.stubInfo;
801 const Identifier& ident = *state.ident;
802 JSValueRegs valueRegs = state.valueRegs;
803 GPRReg baseGPR = state.baseGPR;
804 GPRReg scratchGPR = state.scratchGPR;
806 ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
808 for (const ObjectPropertyCondition& condition : m_conditionSet) {
809 Structure* structure = condition.object()->structure();
811 if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
812 structure->addTransitionWatchpoint(state.addWatchpoint(condition));
816 if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
817 // The reason why this cannot happen is that we require that PolymorphicAccess calls
818 // AccessCase::generate() only after it has verified that
819 // AccessCase::couldStillSucceed() returned true.
821 dataLog("This condition is no longer met: ", condition, "\n");
822 RELEASE_ASSERT_NOT_REACHED();
825 // We will emit code that has a weak reference that isn't otherwise listed anywhere.
826 state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
828 jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
829 state.failAndRepatch.append(
831 CCallHelpers::NotEqual,
832 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
839 jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
844 jit.moveTrustedValue(jsUndefined(), valueRegs);
852 case CustomValueGetter:
853 case CustomAccessorGetter:
854 case CustomValueSetter:
855 case CustomAccessorSetter: {
856 if (isValidOffset(m_offset)) {
857 Structure* currStructure;
858 if (m_conditionSet.isEmpty())
859 currStructure = structure();
861 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
862 currStructure->startWatchingPropertyForReplacements(vm, offset());
865 GPRReg baseForGetGPR;
867 baseForGetGPR = valueRegs.payloadGPR();
869 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
872 baseForGetGPR = baseGPR;
874 GPRReg baseForAccessGPR;
875 if (!m_conditionSet.isEmpty()) {
877 CCallHelpers::TrustedImmPtr(alternateBase()),
879 baseForAccessGPR = scratchGPR;
881 baseForAccessGPR = baseForGetGPR;
883 GPRReg loadedValueGPR = InvalidGPRReg;
884 if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
885 if (m_type == Load || m_type == GetGetter)
886 loadedValueGPR = valueRegs.payloadGPR();
888 loadedValueGPR = scratchGPR;
891 if (isInlineOffset(m_offset))
892 storageGPR = baseForAccessGPR;
895 CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
897 storageGPR = loadedValueGPR;
902 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
904 if (m_type == Load || m_type == GetGetter) {
906 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
910 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
915 if (m_type == Load || m_type == GetGetter) {
920 // Stuff for custom getters/setters.
921 CCallHelpers::Call operationCall;
923 // Stuff for JS getters/setters.
924 CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
925 CCallHelpers::Call fastPathCall;
926 CCallHelpers::Call slowPathCall;
928 CCallHelpers::Jump success;
929 CCallHelpers::Jump fail;
931 // This also does the necessary calculations of whether or not we're an
932 // exception handling call site.
933 state.preserveLiveRegistersToStackForCall();
936 CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
937 CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
939 if (m_type == Getter || m_type == Setter) {
940 // Create a JS call using a JS call inline cache. Assume that:
942 // - SP is aligned and represents the extent of the calling compiler's stack usage.
944 // - FP is set correctly (i.e. it points to the caller's call frame header).
946 // - SP - FP is an aligned difference.
948 // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
951 // Therefore, we temporarily grow the stack for the purpose of the call and then
954 RELEASE_ASSERT(!m_rareData->callLinkInfo);
955 m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
957 // FIXME: If we generated a polymorphic call stub that jumped back to the getter
958 // stub, which then jumped back to the main code, then we'd have a reachability
959 // situation that the GC doesn't know about. The GC would ensure that the polymorphic
960 // call stub stayed alive, and it would ensure that the main code stayed alive, but
961 // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
962 // be GC objects, and then we'd be able to say that the polymorphic call stub has a
963 // reference to the getter stub.
964 // https://bugs.webkit.org/show_bug.cgi?id=148914
965 m_rareData->callLinkInfo->disallowStubs();
967 m_rareData->callLinkInfo->setUpCall(
968 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
970 CCallHelpers::JumpList done;
972 // There is a "this" argument.
973 unsigned numberOfParameters = 1;
974 // ... and a value argument if we're calling a setter.
975 if (m_type == Setter)
976 numberOfParameters++;
978 // Get the accessor; if there ain't one then the result is jsUndefined().
979 if (m_type == Setter) {
981 CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
985 CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
989 CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
990 CCallHelpers::Zero, loadedValueGPR);
992 unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
994 unsigned numberOfBytesForCall =
995 numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
997 unsigned alignedNumberOfBytesForCall =
998 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
1001 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
1002 CCallHelpers::stackPointerRegister);
1004 CCallHelpers::Address calleeFrame = CCallHelpers::Address(
1005 CCallHelpers::stackPointerRegister,
1006 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
1009 CCallHelpers::TrustedImm32(numberOfParameters),
1010 calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
1013 loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
1017 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
1019 if (m_type == Setter) {
1022 calleeFrame.withOffset(
1023 virtualRegisterForArgument(1).offset() * sizeof(Register)));
1026 CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
1027 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
1028 CCallHelpers::TrustedImmPtr(0));
1030 fastPathCall = jit.nearCall();
1031 if (m_type == Getter)
1032 jit.setupResults(valueRegs);
1033 done.append(jit.jump());
1035 slowCase.link(&jit);
1036 jit.move(loadedValueGPR, GPRInfo::regT0);
1037 #if USE(JSVALUE32_64)
1038 // We *always* know that the getter/setter, if non-null, is a cell.
1039 jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
1041 jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
1042 slowPathCall = jit.nearCall();
1043 if (m_type == Getter)
1044 jit.setupResults(valueRegs);
1045 done.append(jit.jump());
1047 returnUndefined.link(&jit);
1048 if (m_type == Getter)
1049 jit.moveTrustedValue(jsUndefined(), valueRegs);
1053 jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
1054 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1055 state.restoreLiveRegistersFromStackForCall(isGetter());
1058 [=, &vm] (LinkBuffer& linkBuffer) {
1059 m_rareData->callLinkInfo->setCallLocations(
1060 linkBuffer.locationOfNearCall(slowPathCall),
1061 linkBuffer.locationOf(addressOfLinkFunctionCheck),
1062 linkBuffer.locationOfNearCall(fastPathCall));
1066 CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
1069 // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
1070 // hard to track if someone did spillage or not, so we just assume that we always need
1071 // to make some space here.
1072 jit.makeSpaceOnStackForCCall();
1074 // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
1075 // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
1076 // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (receiver).
1077 GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
1079 if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1080 jit.setupArgumentsWithExecState(
1082 CCallHelpers::TrustedImmPtr(ident.impl()));
1084 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
1086 if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1087 jit.setupArgumentsWithExecState(
1088 EABI_32BIT_DUMMY_ARG baseForCustomValue,
1089 CCallHelpers::TrustedImm32(JSValue::CellTag),
1090 CCallHelpers::TrustedImmPtr(ident.impl()));
1092 jit.setupArgumentsWithExecState(
1093 EABI_32BIT_DUMMY_ARG baseForCustomValue,
1094 CCallHelpers::TrustedImm32(JSValue::CellTag),
1095 valueRegs.payloadGPR(), valueRegs.tagGPR());
1098 jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
1100 operationCall = jit.call();
1102 [=] (LinkBuffer& linkBuffer) {
1103 linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
1106 if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
1107 jit.setupResults(valueRegs);
1108 jit.reclaimSpaceOnStackForCCall();
1110 CCallHelpers::Jump noException =
1111 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1113 state.restoreLiveRegistersFromStackForCallWithThrownException();
1114 state.emitExplicitExceptionHandler();
1116 noException.link(&jit);
1117 state.restoreLiveRegistersFromStackForCall(isGetter());
1124 if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
1126 dataLog("Have type: ", type->descriptor(), "\n");
1127 state.failAndRepatch.append(
1128 jit.branchIfNotType(
1129 valueRegs, scratchGPR, type->descriptor(), CCallHelpers::HaveTagRegisters));
1131 dataLog("Don't have type.\n");
1133 if (isInlineOffset(m_offset)) {
1136 CCallHelpers::Address(
1138 JSObject::offsetOfInlineStorage() +
1139 offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1141 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1144 CCallHelpers::Address(
1145 scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1152 // AccessCase::transition() should have returned null if this wasn't true.
1153 RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1155 if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1157 dataLog("Have type: ", type->descriptor(), "\n");
1158 state.failAndRepatch.append(
1159 jit.branchIfNotType(
1160 valueRegs, scratchGPR, type->descriptor(), CCallHelpers::HaveTagRegisters));
1162 dataLog("Don't have type.\n");
1164 // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
1165 // exactly when this would make calls.
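// allocating: the transition changes the out-of-line capacity. reallocating: there was already
// out-of-line storage whose contents must be copied over. allocatingInline: the new storage can be
// carved out of the copied space right here, without calling an operation.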
1166 bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
1167 bool reallocating = allocating && structure()->outOfLineCapacity();
1168 bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
1170 ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1171 allocator.lock(baseGPR);
1172 #if USE(JSVALUE32_64)
1173 allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1175 allocator.lock(valueRegs);
1176 allocator.lock(scratchGPR);
1178 GPRReg scratchGPR2 = InvalidGPRReg;
1179 GPRReg scratchGPR3 = InvalidGPRReg;
1180 if (allocatingInline) {
1181 scratchGPR2 = allocator.allocateScratchGPR();
1182 scratchGPR3 = allocator.allocateScratchGPR();
1185 ScratchRegisterAllocator::PreservedState preservedState =
1186 allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1188 CCallHelpers::JumpList slowPath;
1190 ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1193 size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1195 if (allocatingInline) {
1196 CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
1198 if (!reallocating) {
1199 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1202 CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1203 jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1204 jit.negPtr(scratchGPR);
1206 CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1207 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1209 // Handle the case where we are reallocating (i.e. the old structure/butterfly
1210 // already had out-of-line property storage).
1211 size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1212 ASSERT(newSize > oldSize);
1214 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1215 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1218 CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1219 jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1220 jit.negPtr(scratchGPR);
1222 CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1223 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1224 // We have scratchGPR = new storage, scratchGPR3 = old storage,
1225 // scratchGPR2 = available
1226 for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1228 CCallHelpers::Address(
1230 -static_cast<ptrdiff_t>(
1231 offset + sizeof(JSValue) + sizeof(void*))),
1235 CCallHelpers::Address(
1237 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1241 // Handle the case where we are allocating out-of-line using an operation.
1242 RegisterSet extraRegistersToPreserve;
1243 extraRegistersToPreserve.set(baseGPR);
1244 extraRegistersToPreserve.set(valueRegs);
1245 state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1248 CCallHelpers::TrustedImm32(
1249 state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1250 CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
1252 jit.makeSpaceOnStackForCCall();
1254 if (!reallocating) {
1255 jit.setupArgumentsWithExecState(baseGPR);
1257 CCallHelpers::Call operationCall = jit.call();
1259 [=] (LinkBuffer& linkBuffer) {
1262 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1265 // Handle the case where we are reallocating (i.e. the old structure/butterfly
1266 // already had out-of-line property storage).
1267 jit.setupArgumentsWithExecState(
1268 baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1270 CCallHelpers::Call operationCall = jit.call();
1272 [=] (LinkBuffer& linkBuffer) {
1275 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1279 jit.reclaimSpaceOnStackForCCall();
1280 jit.move(GPRInfo::returnValueGPR, scratchGPR);
1282 CCallHelpers::Jump noException =
1283 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1285 state.restoreLiveRegistersFromStackForCallWithThrownException();
1286 state.emitExplicitExceptionHandler();
1288 noException.link(&jit);
1289 state.restoreLiveRegistersFromStackForCall();
1293 if (isInlineOffset(m_offset)) {
1296 CCallHelpers::Address(
1298 JSObject::offsetOfInlineStorage() +
1299 offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1302 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1305 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1308 // If we had allocated using an operation then we would have already executed the store
1309 // barrier and we would have already stored the butterfly into the object.
1310 if (allocatingInline) {
1311 CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
1312 WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1313 jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1316 CCallHelpers::AboveOrEqual, scratchGPR2,
1317 CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
1319 jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1320 jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1322 jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
1323 // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
1326 CCallHelpers::BaseIndex(
1327 scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
1328 static_cast<int32_t>(-sizeof(void*))));
1329 ownerIsRememberedOrInEden.link(&jit);
1331 // We set the new butterfly and the structure last. Doing it this way ensures that
1332 // whatever we had done up to this point is forgotten if we choose to branch to slow
1335 jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1338 uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1340 CCallHelpers::TrustedImm32(structureBits),
1341 CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1343 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1346 // We will have a slow path if we were allocating without the help of an operation.
1347 if (allocatingInline) {
1348 if (allocator.didReuseRegisters()) {
1349 slowPath.link(&jit);
1350 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1351 state.failAndIgnore.append(jit.jump());
1353 state.failAndIgnore.append(slowPath);
1355 RELEASE_ASSERT(slowPath.empty());
1360 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1361 jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
1362 state.failAndIgnore.append(
1363 jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1364 jit.boxInt32(scratchGPR, valueRegs);
1369 case StringLength: {
1370 jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1371 jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
1376 case IntrinsicGetter: {
1377 RELEASE_ASSERT(isValidOffset(offset()));
1379 // We need to ensure the getter value does not move from under us. Note that GetterSetters
1380 // are immutable so we just need to watch the property not any value inside it.
1381 Structure* currStructure;
1382 if (m_conditionSet.isEmpty())
1383 currStructure = structure();
1385 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1386 currStructure->startWatchingPropertyForReplacements(vm, offset());
1388 emitIntrinsicGetter(state);
1392 case DirectArgumentsLength:
1393 case ScopedArgumentsLength:
1394 case MegamorphicLoad:
1395 // These need to be handled by generateWithGuard(), since the guard is part of the
1396 // algorithm. We can be sure that nobody will call generate() directly for these since they
1397 // are not guarded by structure checks.
1398 RELEASE_ASSERT_NOT_REACHED();
1401 RELEASE_ASSERT_NOT_REACHED();
1404 PolymorphicAccess::PolymorphicAccess() { }
1405 PolymorphicAccess::~PolymorphicAccess() { }
1407 AccessGenerationResult PolymorphicAccess::addCases(
1408 VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1409 Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
1411 SuperSamplerScope superSamplerScope(false);
1413 // This method will add the originalCasesToAdd to the list one at a time while preserving the
1415 // - If a newly added case canReplace() any existing case, then the existing case is removed before
1416 // the new case is added. Removal doesn't change order of the list. Any number of existing cases
1417 // can be removed via the canReplace() rule.
1418 // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1419 // cascade through the cases in reverse order, you will get the most recent cases first.
1420 // - If this method fails (returns null, doesn't add the cases), then both the previous case list
1421 // and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
1422 // add more things after failure.
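// For example, starting from the list [A, B], adding a case C with C.canReplace(A) yields [B, C];
// then adding a case D that nothing replaces yields [B, C, D], so walking the list in reverse always
// visits the newest cases first.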
1424 // First ensure that the originalCasesToAdd doesn't contain duplicates.
1425 Vector<std::unique_ptr<AccessCase>> casesToAdd;
1426 for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1427 std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1429 // Add it only if it is not replaced by the subsequent cases in the list.
1431 for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1432 if (originalCasesToAdd[j]->canReplace(*myCase)) {
1441 casesToAdd.append(WTFMove(myCase));
1445 dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1447 // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
1448 // new stub that will be identical to the old one. Returning MadeNoChanges tells the caller to just
1449 // keep doing what they were doing before.
1450 if (casesToAdd.isEmpty())
1451 return AccessGenerationResult::MadeNoChanges;
1453 // Now add things to the new list. Note that at this point, we will still have old cases that
1454 // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
1455 for (auto& caseToAdd : casesToAdd) {
1456 commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
1457 m_list.append(WTFMove(caseToAdd));
1461 dataLog("After addCases: m_list: ", listDump(m_list), "\n");
1463 return AccessGenerationResult::Buffered;
1466 AccessGenerationResult PolymorphicAccess::addCase(
1467 VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1468 std::unique_ptr<AccessCase> newAccess)
1470 Vector<std::unique_ptr<AccessCase>> newAccesses;
1471 newAccesses.append(WTFMove(newAccess));
1472 return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
1475 bool PolymorphicAccess::visitWeak(VM& vm) const
1477 for (unsigned i = 0; i < size(); ++i) {
1478 if (!at(i).visitWeak(vm))
1481 if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1482 for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1483 if (!Heap::isMarked(weakReference.get()))
1490 void PolymorphicAccess::dump(PrintStream& out) const
1492 out.print(RawPointer(this), ":[");
1494 for (auto& entry : m_list)
1495 out.print(comma, *entry);
1499 void PolymorphicAccess::commit(
1500 VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
1501 StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
1503 // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
1504 // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
1505 // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
1506 // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
1507 // Those common kinds of JSC object accesses don't hit this case.
1509 for (WatchpointSet* set : accessCase.commit(vm, ident)) {
1510 Watchpoint* watchpoint =
1511 WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
1512 watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
1514 set->add(watchpoint);
1518 AccessGenerationResult PolymorphicAccess::regenerate(
1519 VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
1521 SuperSamplerScope superSamplerScope(false);
1524 dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
1526 AccessGenerationState state;
1528 state.access = this;
1529 state.stubInfo = &stubInfo;
1530 state.ident = &ident;
1532 state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1533 state.valueRegs = JSValueRegs(
1534 #if USE(JSVALUE32_64)
1535 static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
1537 static_cast<GPRReg>(stubInfo.patch.valueGPR));
1539 ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1540 state.allocator = &allocator;
1541 allocator.lock(state.baseGPR);
1542 allocator.lock(state.valueRegs);
1543 #if USE(JSVALUE32_64)
1544 allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1547 state.scratchGPR = allocator.allocateScratchGPR();
1549 CCallHelpers jit(&vm, codeBlock);
1552 state.preservedReusedRegisterState =
1553 allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
1555 // Regenerating is our opportunity to figure out what our list of cases should look like. We
1556 // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
1557 // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
1558 // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
1559 // from the code of the current stub (aka previous).
1561 unsigned srcIndex = 0;
1562 unsigned dstIndex = 0;
1563 while (srcIndex < m_list.size()) {
1564 std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);
1566 // If the case had been generated, then we have to keep the original in m_list in case we
1567 // fail to regenerate. That case may have data structures that are used by the code that it
1568 // had generated. If the case had not been generated, then we want to remove it from m_list.
1569 bool isGenerated = someCase->state() == AccessCase::Generated;
1572 if (!someCase->couldStillSucceed())
1575 // Figure out if this is replaced by any later case.
1576 for (unsigned j = srcIndex; j < m_list.size(); ++j) {
1577 if (m_list[j]->canReplace(*someCase))
1582 cases.append(someCase->clone());
1584 cases.append(WTFMove(someCase));
1588 m_list[dstIndex++] = WTFMove(someCase);
1590 m_list.resize(dstIndex);
1593 dataLog("In regenerate: cases: ", listDump(cases), "\n");
1595 // Now that we've removed obviously unnecessary cases, we can check if the megamorphic load
1596 // optimization is applicable. Note that we basically tune megamorphicLoadCost according to code
1597 // size. It would be faster to just allow more repatching with many load cases, and avoid the
1598 // megamorphicLoad optimization, if we had infinite executable memory.
1599 if (cases.size() >= Options::maxAccessVariantListSize()) {
1600 unsigned numSelfLoads = 0;
1601 for (auto& newCase : cases) {
1602 if (newCase->canBeReplacedByMegamorphicLoad())
1606 if (numSelfLoads >= Options::megamorphicLoadCost()) {
1607 if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1608 cases.removeAllMatching(
1609 [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1610 return newCase->canBeReplacedByMegamorphicLoad();
1613 cases.append(WTFMove(mega));
1619 dataLog("Optimized cases: ", listDump(cases), "\n");
1621 // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
1622 // won't change that set anymore.
1624 bool allGuardedByStructureCheck = true;
1625 bool hasJSGetterSetterCall = false;
1626 for (auto& newCase : cases) {
1627 commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
1628 allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
1629 if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
1630 hasJSGetterSetterCall = true;
1633 if (cases.isEmpty()) {
1634 // This is super unlikely, but we make it legal anyway.
1635 state.failAndRepatch.append(jit.jump());
1636 } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1637 // If there are any proxies in the list, we cannot just use a binary switch over the structure.
1638 // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
1640 CCallHelpers::JumpList fallThrough;
1642 // Cascade through the list, preferring newer entries.
1643 for (unsigned i = cases.size(); i--;) {
1644 fallThrough.link(&jit);
1645 cases[i]->generateWithGuard(state, fallThrough);
1647 state.failAndRepatch.append(fallThrough);
1650 CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1653 Vector<int64_t> caseValues(cases.size());
1654 for (unsigned i = 0; i < cases.size(); ++i)
1655 caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1657 BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1658 while (binarySwitch.advance(jit))
1659 cases[binarySwitch.caseIndex()]->generate(state);
1660 state.failAndRepatch.append(binarySwitch.fallThrough());
1663 if (!state.failAndIgnore.empty()) {
1664 state.failAndIgnore.link(&jit);
1666 // Make sure that the inline cache optimization code knows that we are taking slow path because
1667 // of something that isn't patchable. The slow path will decrement "countdown" and will only
1668 // patch things if the countdown reaches zero. We increment the slow path count here to ensure
1669 // that the slow path does not try to patch.
1670 #if CPU(X86) || CPU(X86_64)
1671 jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
1672 jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
1674 jit.load8(&stubInfo.countdown, state.scratchGPR);
1675 jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1676 jit.store8(state.scratchGPR, &stubInfo.countdown);
1680 CCallHelpers::JumpList failure;
1681 if (allocator.didReuseRegisters()) {
1682 state.failAndRepatch.link(&jit);
1683 state.restoreScratch();
1685 failure = state.failAndRepatch;
1686 failure.append(jit.jump());
1688 CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1689 CallSiteIndex callSiteIndexForExceptionHandling;
1690 if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1691 // Emit the exception handler.
1692 // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
1693 // Note also that this is not reachable from custom getter/setter. Custom getter/setters will have
1694 // their own exception handling logic that doesn't go through genericUnwind.
1695 MacroAssembler::Label makeshiftCatchHandler = jit.label();
1697 int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1698 stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1699 stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1701 jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1702 jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1704 state.restoreLiveRegistersFromStackForCallWithThrownException();
1705 state.restoreScratch();
1706 CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1708 HandlerInfo oldHandler = state.originalExceptionHandler();
1709 CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1711 [=] (LinkBuffer& linkBuffer) {
1712 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
1714 HandlerInfo handlerToRegister = oldHandler;
1715 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1716 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1717 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1718 codeBlock->appendExceptionHandler(handlerToRegister);
1721 // We set these to indicate to the stub to remove itself from the CodeBlock's
1722 // exception handler table when it is deallocated.
1723 codeBlockThatOwnsExceptionHandlers = codeBlock;
1724 ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1725 callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1728 LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1729 if (linkBuffer.didFailToAllocate()) {
1731 dataLog("Did fail to allocate.\n");
1732 return AccessGenerationResult::GaveUp;
1735 CodeLocationLabel successLabel =
1736 stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
1738 linkBuffer.link(state.success, successLabel);
1742 stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
1745 dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1747 MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1748 codeBlock, linkBuffer,
1749 ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
1751 bool doesCalls = false;
1752 Vector<JSCell*> cellsToMark;
1753 for (auto& entry : cases)
1754 doesCalls |= entry->doesCalls(&cellsToMark);
1756 m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1757 m_watchpoints = WTFMove(state.watchpoints);
1758 if (!state.weakReferences.isEmpty())
1759 m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1761 dataLog("Returning: ", code.code(), "\n");
1763 m_list = WTFMove(cases);
1765 AccessGenerationResult::Kind resultKind;
1766 if (m_list.size() >= Options::maxAccessVariantListSize())
1767 resultKind = AccessGenerationResult::GeneratedFinalCode;
1769 resultKind = AccessGenerationResult::GeneratedNewCode;
1771 return AccessGenerationResult(resultKind, code.code());
1774 void PolymorphicAccess::aboutToDie()
1777 m_stubRoutine->aboutToDie();
1784 using namespace JSC;
1786 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1789 case AccessGenerationResult::MadeNoChanges:
1790 out.print("MadeNoChanges");
1792 case AccessGenerationResult::GaveUp:
1793 out.print("GaveUp");
1795 case AccessGenerationResult::Buffered:
1796 out.print("Buffered");
1798 case AccessGenerationResult::GeneratedNewCode:
1799 out.print("GeneratedNewCode");
1801 case AccessGenerationResult::GeneratedFinalCode:
1802 out.print("GeneratedFinalCode");
1806 RELEASE_ASSERT_NOT_REACHED();
1809 void printInternal(PrintStream& out, AccessCase::AccessType type)
1812 case AccessCase::Load:
1815 case AccessCase::MegamorphicLoad:
1816 out.print("MegamorphicLoad");
1818 case AccessCase::Transition:
1819 out.print("Transition");
1821 case AccessCase::Replace:
1822 out.print("Replace");
1824 case AccessCase::Miss:
1827 case AccessCase::GetGetter:
1828 out.print("GetGetter");
1830 case AccessCase::Getter:
1831 out.print("Getter");
1833 case AccessCase::Setter:
1834 out.print("Setter");
1836 case AccessCase::CustomValueGetter:
1837 out.print("CustomValueGetter");
1839 case AccessCase::CustomAccessorGetter:
1840 out.print("CustomAccessorGetter");
1842 case AccessCase::CustomValueSetter:
1843 out.print("CustomValueSetter");
1845 case AccessCase::CustomAccessorSetter:
1846 out.print("CustomAccessorSetter");
1848 case AccessCase::IntrinsicGetter:
1849 out.print("IntrinsicGetter");
1851 case AccessCase::InHit:
1854 case AccessCase::InMiss:
1855 out.print("InMiss");
1857 case AccessCase::ArrayLength:
1858 out.print("ArrayLength");
1860 case AccessCase::StringLength:
1861 out.print("StringLength");
1863 case AccessCase::DirectArgumentsLength:
1864 out.print("DirectArgumentsLength");
1866 case AccessCase::ScopedArgumentsLength:
1867 out.print("ScopedArgumentsLength");
1871 RELEASE_ASSERT_NOT_REACHED();
1874 void printInternal(PrintStream& out, AccessCase::State state)
1877 case AccessCase::Primordial:
1878 out.print("Primordial");
1880 case AccessCase::Committed:
1881 out.print("Committed");
1883 case AccessCase::Generated:
1884 out.print("Generated");
1888 RELEASE_ASSERT_NOT_REACHED();
1893 #endif // ENABLE(JIT)