 * Copyright (C) 2017 Apple Inc. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "AccessCase.h"

#include "CCallHelpers.h"
#include "CallLinkInfo.h"
#include "DOMJITGetterSetter.h"
#include "DirectArguments.h"
#include "GetterSetter.h"
#include "GetterSetterAccessCase.h"
#include "HeapInlines.h"
#include "IntrinsicGetterAccessCase.h"
#include "JSCJSValueInlines.h"
#include "JSModuleEnvironment.h"
#include "JSModuleNamespaceObject.h"
#include "LinkBuffer.h"
#include "ModuleNamespaceAccessCase.h"
#include "PolymorphicAccess.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "SlotVisitorInlines.h"
#include "StructureStubInfo.h"
#include "SuperSampler.h"
#include "ThunkGenerators.h"

namespace AccessCaseInternal {
static const bool verbose = false;

AccessCase::AccessCase(VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, std::unique_ptr<PolyProtoAccessChain> prototypeAccessChain)
    , m_polyProtoAccessChain(WTFMove(prototypeAccessChain))
    m_structure.setMayBeNull(vm, owner, structure);
    m_conditionSet = conditionSet;

std::unique_ptr<AccessCase> AccessCase::create(VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, std::unique_ptr<PolyProtoAccessChain> prototypeAccessChain)
    case DirectArgumentsLength:
    case ScopedArgumentsLength:
    case ModuleNamespaceLoad:
        RELEASE_ASSERT(!prototypeAccessChain);
        RELEASE_ASSERT_NOT_REACHED();
    return std::unique_ptr<AccessCase>(new AccessCase(vm, owner, type, offset, structure, conditionSet, WTFMove(prototypeAccessChain)));

std::unique_ptr<AccessCase> AccessCase::create(
    VM& vm, JSCell* owner, PropertyOffset offset, Structure* oldStructure, Structure* newStructure,
    const ObjectPropertyConditionSet& conditionSet, std::unique_ptr<PolyProtoAccessChain> prototypeAccessChain)
    RELEASE_ASSERT(oldStructure == newStructure->previousID());

    // Skip optimizing the case where we need a realloc, if we don't have
    // enough registers to make it happen.
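    // (Rough note: the inline reallocation path emitted in generateImpl()'s Transition case needs
    // the base, the value payload, the default scratch register, and two extra scratch registers,
    // which is roughly where the "fewer than 6 GPRs" cutoff below comes from.)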
    if (GPRInfo::numberOfRegisters < 6
        && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
        && oldStructure->outOfLineCapacity()) {
    return std::unique_ptr<AccessCase>(new AccessCase(vm, owner, Transition, offset, newStructure, conditionSet, WTFMove(prototypeAccessChain)));

AccessCase::~AccessCase()

std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
    VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
    switch (stubInfo.cacheType) {
    case CacheType::GetByIdSelf:
        return ProxyableAccessCase::create(vm, owner, Load, stubInfo.u.byIdSelf.offset, stubInfo.u.byIdSelf.baseObjectStructure.get());

    case CacheType::PutByIdReplace:
        return AccessCase::create(vm, owner, Replace, stubInfo.u.byIdSelf.offset, stubInfo.u.byIdSelf.baseObjectStructure.get());

std::unique_ptr<AccessCase> AccessCase::clone() const
    std::unique_ptr<AccessCase> result(new AccessCase(*this));
    result->resetState();

Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
    // It's fine to commit something that is already committed. That arises when we switch to using
    // newly allocated watchpoints. When it happens, it's not efficient - but we think that's OK
    // because most AccessCases have no extra watchpoints anyway.
    RELEASE_ASSERT(m_state == Primordial || m_state == Committed);

    Vector<WatchpointSet*, 2> result;
    Structure* structure = this->structure();

    if ((structure && structure->needImpurePropertyWatchpoint())
        || m_conditionSet.needImpurePropertyWatchpoint()
        || (m_polyProtoAccessChain && m_polyProtoAccessChain->needImpurePropertyWatchpoint()))
        result.append(vm.ensureWatchpointSetForImpureProperty(ident));

        result.append(additionalSet());

        && structure->hasRareData()
        && structure->rareData()->hasSharedPolyProtoWatchpoint()
        && structure->rareData()->sharedPolyProtoWatchpoint()->isStillValid()) {
        WatchpointSet* set = structure->rareData()->sharedPolyProtoWatchpoint()->inflate();

bool AccessCase::guardedByStructureCheck() const
    if (m_polyProtoAccessChain)
    case DirectArgumentsLength:
    case ScopedArgumentsLength:
    case ModuleNamespaceLoad:

bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
    case CustomValueGetter:
    case CustomAccessorGetter:
    case CustomValueSetter:
    case CustomAccessorSetter:
        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
            && structure()->couldHaveIndexingHeader()) {
                cellsToMark->append(newStructure());

bool AccessCase::couldStillSucceed() const
    return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();

bool AccessCase::canReplace(const AccessCase& other) const
    // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
    // It's fine for this to return false if it's in doubt.
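    // (Roughly: two cases of the same type that are guarded by the same base Structure make each
    // other superfluous, which is what the structure() == other.structure() fallback below captures.)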
    case DirectArgumentsLength:
    case ScopedArgumentsLength:
        return other.type() == type();
    case ModuleNamespaceLoad: {
        if (other.type() != type())
        auto& thisCase = this->as<ModuleNamespaceAccessCase>();
        auto& otherCase = other.as<ModuleNamespaceAccessCase>();
        return thisCase.moduleNamespaceObject() == otherCase.moduleNamespaceObject();

        if (other.type() != type())

        if (m_polyProtoAccessChain) {
            if (!other.m_polyProtoAccessChain)
            // This is the only check we need since PolyProtoAccessChain contains the base structure.
            // If we ever change it to contain only the prototype chain, we'll also need to change
            // this to check the base structure.
            return structure() == other.structure()
                && *m_polyProtoAccessChain == *other.m_polyProtoAccessChain;

        if (!guardedByStructureCheck() || !other.guardedByStructureCheck())

        return structure() == other.structure();

void AccessCase::dump(PrintStream& out) const
    out.print("\n", m_type, ":(");

    out.print(comma, m_state);

    if (isValidOffset(m_offset))
        out.print(comma, "offset = ", m_offset);
    if (!m_conditionSet.isEmpty())
        out.print(comma, "conditions = ", m_conditionSet);

    if (m_polyProtoAccessChain) {
        out.print(comma, "prototype access chain = ");
        m_polyProtoAccessChain->dump(structure(), out);

    if (m_type == Transition)
        out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
    else if (m_structure)
        out.print(comma, "structure = ", pointerDump(m_structure.get()));

    dumpImpl(out, comma);

bool AccessCase::visitWeak(VM& vm) const
    if (m_structure && !Heap::isMarked(m_structure.get()))
    if (m_polyProtoAccessChain) {
        for (Structure* structure : m_polyProtoAccessChain->chain()) {
            if (!Heap::isMarked(structure))
    if (!m_conditionSet.areStillLive())
        auto& accessor = this->as<GetterSetterAccessCase>();
        if (accessor.callLinkInfo())
            accessor.callLinkInfo()->visitWeak(vm);
        if (accessor.customSlotBase() && !Heap::isMarked(accessor.customSlotBase()))
    } else if (type() == IntrinsicGetter) {
        auto& intrinsic = this->as<IntrinsicGetterAccessCase>();
        if (intrinsic.intrinsicFunction() && !Heap::isMarked(intrinsic.intrinsicFunction()))
    } else if (type() == ModuleNamespaceLoad) {
        auto& accessCase = this->as<ModuleNamespaceAccessCase>();
        if (accessCase.moduleNamespaceObject() && !Heap::isMarked(accessCase.moduleNamespaceObject()))
        if (accessCase.moduleEnvironment() && !Heap::isMarked(accessCase.moduleEnvironment()))

bool AccessCase::propagateTransitions(SlotVisitor& visitor) const
        result &= m_structure->markIfCheap(visitor);

    if (m_polyProtoAccessChain) {
        for (Structure* structure : m_polyProtoAccessChain->chain())
            result &= structure->markIfCheap(visitor);

        if (Heap::isMarkedConcurrently(m_structure->previousID()))
            visitor.appendUnbarriered(m_structure.get());

void AccessCase::generateWithGuard(
    AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
    SuperSamplerScope superSamplerScope(false);

    RELEASE_ASSERT(m_state == Committed);

    CCallHelpers& jit = *state.jit;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

        jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeAndMiscOffset()), scratchGPR);
            CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
            CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
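        // (Illustrative note: the two bit tests above fall through to the next case unless the cell
        // is a JSArray (IsArray set) with some indexing storage (non-empty indexing shape). Roughly,
        // this is the guard for a cached array.length load.)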
            CCallHelpers::NotEqual,
            CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
            CCallHelpers::TrustedImm32(StringType)));

    case DirectArgumentsLength: {
            CCallHelpers::NotEqual,
            CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
            CCallHelpers::TrustedImm32(DirectArgumentsType)));

            CCallHelpers::NonZero,
            CCallHelpers::Address(baseGPR, DirectArguments::offsetOfMappedArguments())));
            CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
            valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
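        // (Illustrative note: this is, roughly, the cached fast path for arguments.length on a
        // DirectArguments object. The guards above bail to the next case if the cell is not a
        // DirectArguments, or if mappedArguments is non-null, which indicates the arguments object
        // has had some of its default behavior overridden.)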
    case ScopedArgumentsLength: {
            CCallHelpers::NotEqual,
            CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
            CCallHelpers::TrustedImm32(ScopedArgumentsType)));

            CCallHelpers::NonZero,
            CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
            CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
            valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
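        // (Same idea as DirectArgumentsLength above, but for ScopedArguments: bail unless the cell
        // is a ScopedArguments whose "overrode things" flag is still clear, then box and return the
        // total length.)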
    case ModuleNamespaceLoad: {
        this->as<ModuleNamespaceAccessCase>().emit(state, fallThrough);

    if (m_polyProtoAccessChain) {
        GPRReg baseForAccessGPR = state.scratchGPR;
        jit.move(state.baseGPR, baseForAccessGPR);
        m_polyProtoAccessChain->forEach(structure(), [&] (Structure* structure, bool atEnd) {
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(baseForAccessGPR, JSCell::structureIDOffset()),
                if ((m_type == Miss || m_type == InMiss || m_type == Transition) && structure->hasPolyProto()) {
                    // For a Miss/InMiss/Transition, we must ensure we're at the end when the last item is poly proto.
                    // Transitions must do this because they need to verify there isn't a setter in the chain.
                    // Miss/InMiss need to do this to ensure there isn't a new item at the end of the chain that
                    jit.load64(MacroAssembler::Address(baseForAccessGPR, offsetRelativeToBase(knownPolyProtoOffset)), baseForAccessGPR);
                    fallThrough.append(jit.branch64(CCallHelpers::NotEqual, baseForAccessGPR, CCallHelpers::TrustedImm64(ValueNull)));
                    jit.load32(MacroAssembler::Address(baseForAccessGPR, offsetRelativeToBase(knownPolyProtoOffset) + PayloadOffset), baseForAccessGPR);
                    fallThrough.append(jit.branchTestPtr(CCallHelpers::NonZero, baseForAccessGPR));
                if (structure->hasMonoProto()) {
                    JSValue prototype = structure->prototypeForLookup(state.m_globalObject);
                    RELEASE_ASSERT(prototype.isObject());
                    jit.move(CCallHelpers::TrustedImmPtr(asObject(prototype)), baseForAccessGPR);
                    RELEASE_ASSERT(structure->isObject()); // Primitives must have a stored prototype. We use prototypeForLookup for them.
                    jit.load64(MacroAssembler::Address(baseForAccessGPR, offsetRelativeToBase(knownPolyProtoOffset)), baseForAccessGPR);
                    fallThrough.append(jit.branch64(CCallHelpers::Equal, baseForAccessGPR, CCallHelpers::TrustedImm64(ValueNull)));
                    jit.load32(MacroAssembler::Address(baseForAccessGPR, offsetRelativeToBase(knownPolyProtoOffset) + PayloadOffset), baseForAccessGPR);
                    fallThrough.append(jit.branchTestPtr(CCallHelpers::Zero, baseForAccessGPR));

                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
                    CCallHelpers::TrustedImm32(PureForwardingProxyType)));

            jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);

                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
                    CCallHelpers::NotEqual,
                    CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),

void AccessCase::generate(AccessGenerationState& state)
    RELEASE_ASSERT(m_state == Committed);

void AccessCase::generateImpl(AccessGenerationState& state)
    SuperSamplerScope superSamplerScope(false);
    if (AccessCaseInternal::verbose)
        dataLog("\n\nGenerating code for: ", *this, "\n");

    ASSERT(m_state == Generated); // We rely on the callers setting this for us.

    CCallHelpers& jit = *state.jit;
    CodeBlock* codeBlock = jit.codeBlock();
    StructureStubInfo& stubInfo = *state.stubInfo;
    const Identifier& ident = *state.ident;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg thisGPR = state.thisGPR != InvalidGPRReg ? state.thisGPR : baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

    ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());

    for (const ObjectPropertyCondition& condition : m_conditionSet) {
        RELEASE_ASSERT(!m_polyProtoAccessChain);

        Structure* structure = condition.object()->structure();

        if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
            structure->addTransitionWatchpoint(state.addWatchpoint(condition));

        if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
            // The reason why this cannot happen is that we require that PolymorphicAccess calls
            // AccessCase::generate() only after it has verified that
            // AccessCase::couldStillSucceed() returned true.
            dataLog("This condition is no longer met: ", condition, "\n");
            RELEASE_ASSERT_NOT_REACHED();

        // We will emit code that has a weak reference that isn't otherwise listed anywhere.
        state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));

        jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
        state.failAndRepatch.append(
                CCallHelpers::NotEqual,
                CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),

        jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());

        jit.moveTrustedValue(jsUndefined(), valueRegs);

    case CustomValueGetter:
    case CustomAccessorGetter:
    case CustomValueSetter:
    case CustomAccessorSetter: {
        GPRReg valueRegsPayloadGPR = valueRegs.payloadGPR();

        if (isValidOffset(m_offset)) {
            Structure* currStructure;
            if (m_conditionSet.isEmpty())
                currStructure = structure();
                currStructure = m_conditionSet.slotBaseCondition().object()->structure();
            currStructure->startWatchingPropertyForReplacements(vm, offset());

        GPRReg baseForGetGPR;
            ASSERT(m_type != CustomValueSetter || m_type != CustomAccessorSetter); // Because setters need to not trash valueRegsPayloadGPR.
            if (m_type == Getter || m_type == Setter)
                baseForGetGPR = scratchGPR;
                baseForGetGPR = valueRegsPayloadGPR;

            ASSERT((m_type != Getter && m_type != Setter) || baseForGetGPR != baseGPR);
            ASSERT(m_type != Setter || baseForGetGPR != valueRegsPayloadGPR);

                CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
            baseForGetGPR = baseGPR;

        GPRReg baseForAccessGPR;
        if (m_polyProtoAccessChain) {
            // This isn't pretty, but we know we got here via generateWithGuard,
            // and it left the baseForAccess inside scratchGPR. We could re-derive the base,
            // but it'd require emitting the same code to load the base twice.
            baseForAccessGPR = scratchGPR;
            if (!m_conditionSet.isEmpty()) {
                    CCallHelpers::TrustedImmPtr(alternateBase()), scratchGPR);
                baseForAccessGPR = scratchGPR;
                baseForAccessGPR = baseForGetGPR;

        GPRReg loadedValueGPR = InvalidGPRReg;
        if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
            if (m_type == Load || m_type == GetGetter)
                loadedValueGPR = valueRegsPayloadGPR;
                loadedValueGPR = scratchGPR;

            ASSERT((m_type != Getter && m_type != Setter) || loadedValueGPR != baseGPR);
            ASSERT(m_type != Setter || loadedValueGPR != valueRegsPayloadGPR);
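            // (Illustrative note: for plain Load/GetGetter cases the loaded slot goes straight into
            // the result payload register, while for Getter/Setter it goes into scratchGPR, since
            // the loaded GetterSetter cell is only an intermediate used to set up the call below.)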
            if (isInlineOffset(m_offset))
                storageGPR = baseForAccessGPR;
                    CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
                jit.cage(Gigacage::JSValue, loadedValueGPR);
                storageGPR = loadedValueGPR;

                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
            if (m_type == Load || m_type == GetGetter) {
                    CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),

        if (m_type == Load || m_type == GetGetter) {

        if (m_type == CustomAccessorGetter && this->as<GetterSetterAccessCase>().domAttribute()) {
            auto& access = this->as<GetterSetterAccessCase>();
            // We do not need to emit a CheckDOM operation, since the structure check ensures
            // that the structure of the given base value is structure()! So all we need to do
            // is perform the CheckDOM check here, at IC compile time.
            if (!structure()->classInfo()->isSubClassOf(access.domAttribute()->classInfo)) {
                state.failAndIgnore.append(jit.jump());

            if (Options::useDOMJIT() && access.domAttribute()->domJIT) {
                access.emitDOMJITGetter(state, access.domAttribute()->domJIT, baseForGetGPR);

        // Stuff for custom getters/setters.
        CCallHelpers::Call operationCall;

        // Stuff for JS getters/setters.
        CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
        CCallHelpers::Call fastPathCall;
        CCallHelpers::Call slowPathCall;

        // This also does the necessary calculations of whether or not we're an
        // exception handling call site.
        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall();

        auto restoreLiveRegistersFromStackForCall = [&](AccessGenerationState::SpillState& spillState, bool callHasReturnValue) {
            RegisterSet dontRestore;
            if (callHasReturnValue) {
                // This is the result value. We don't want to overwrite the result with what we stored to the stack.
                // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
                dontRestore.set(valueRegs);
            state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);

            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));

        if (m_type == Getter || m_type == Setter) {
            auto& access = this->as<GetterSetterAccessCase>();
            ASSERT(baseGPR != loadedValueGPR);
            ASSERT(m_type != Setter || (baseGPR != valueRegsPayloadGPR && loadedValueGPR != valueRegsPayloadGPR));

            // Create a JS call using a JS call inline cache. Assume that:
            // - SP is aligned and represents the extent of the calling compiler's stack usage.
            // - FP is set correctly (i.e. it points to the caller's call frame header).
            // - SP - FP is an aligned difference.
            // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
            // Therefore, we temporarily grow the stack for the purpose of the call and then
            state.setSpillStateForJSGetterSetter(spillState);

            RELEASE_ASSERT(!access.callLinkInfo());
            access.m_callLinkInfo = std::make_unique<CallLinkInfo>();

            // FIXME: If we generated a polymorphic call stub that jumped back to the getter
            // stub, which then jumped back to the main code, then we'd have a reachability
            // situation that the GC doesn't know about. The GC would ensure that the polymorphic
            // call stub stayed alive, and it would ensure that the main code stayed alive, but
            // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
            // be GC objects, and then we'd be able to say that the polymorphic call stub has a
            // reference to the getter stub.
            // https://bugs.webkit.org/show_bug.cgi?id=148914
            access.callLinkInfo()->disallowStubs();

            access.callLinkInfo()->setUpCall(
                CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);

            CCallHelpers::JumpList done;

            // There is a "this" argument.
            unsigned numberOfParameters = 1;
            // ... and a value argument if we're calling a setter.
            if (m_type == Setter)
                numberOfParameters++;

            // Get the accessor; if there ain't one then the result is jsUndefined().
            if (m_type == Setter) {
                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),

            CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
                CCallHelpers::Zero, loadedValueGPR);

            unsigned numberOfRegsForCall = CallFrame::headerSizeInRegisters + numberOfParameters;
            unsigned numberOfBytesForCall = numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

            unsigned alignedNumberOfBytesForCall =
                WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
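            // (Worked example, assuming a 64-bit target where sizeof(Register) is 8,
            // CallFrame::headerSizeInRegisters is 5, and sizeof(CallerFrameAndPC) is 16: a getter
            // call passes just "this", so numberOfRegsForCall is 6, numberOfBytesForCall is
            // 6 * 8 - 16 = 32, and with 16-byte stack alignment the aligned size stays 32.)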
                CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
                CCallHelpers::stackPointerRegister);

            CCallHelpers::Address calleeFrame = CCallHelpers::Address(
                CCallHelpers::stackPointerRegister,
                -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));

                CCallHelpers::TrustedImm32(numberOfParameters),
                calleeFrame.withOffset(CallFrameSlot::argumentCount * sizeof(Register) + PayloadOffset));

                loadedValueGPR, calleeFrame.withOffset(CallFrameSlot::callee * sizeof(Register)));

                calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));

            if (m_type == Setter) {
                    calleeFrame.withOffset(
                        virtualRegisterForArgument(1).offset() * sizeof(Register)));

            CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
                CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
                CCallHelpers::TrustedImmPtr(0));

            fastPathCall = jit.nearCall();
            if (m_type == Getter)
                jit.setupResults(valueRegs);
            done.append(jit.jump());

            jit.move(loadedValueGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
            // We *always* know that the getter/setter, if non-null, is a cell.
            jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
            jit.move(CCallHelpers::TrustedImmPtr(access.callLinkInfo()), GPRInfo::regT2);
            slowPathCall = jit.nearCall();
            if (m_type == Getter)
                jit.setupResults(valueRegs);
            done.append(jit.jump());

            returnUndefined.link(&jit);
            if (m_type == Getter)
                jit.moveTrustedValue(jsUndefined(), valueRegs);

            jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation),
                GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            bool callHasReturnValue = isGetter();
            restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);

            jit.addLinkTask([=, &vm] (LinkBuffer& linkBuffer) {
                this->as<GetterSetterAccessCase>().callLinkInfo()->setCallLocations(
                    CodeLocationLabel(linkBuffer.locationOfNearCall(slowPathCall)),
                    CodeLocationLabel(linkBuffer.locationOf(addressOfLinkFunctionCheck)),
                    linkBuffer.locationOfNearCall(fastPathCall));
                    CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));

            ASSERT(m_type == CustomValueGetter || m_type == CustomAccessorGetter || m_type == CustomValueSetter || m_type == CustomAccessorSetter);

            // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
            // hard to track if someone did spillage or not, so we just assume that we always need
            // to make some space here.
            jit.makeSpaceOnStackForCCall();

            // Check if it is a super access
            GPRReg baseForCustomGetGPR = baseGPR != thisGPR ? thisGPR : baseForGetGPR;

            // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
            // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
            // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (receiver).
            // FIXME: Remove this difference between custom values and custom accessors.
            // https://bugs.webkit.org/show_bug.cgi?id=158014
            GPRReg baseForCustom = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForCustomGetGPR;
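            // (Illustrative note: baseForAccessGPR holds the object that actually owns the property
            // after any prototype walk, while baseForCustomGetGPR holds the receiver, or the
            // distinct |this| for a super access, matching the convention described above.)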
            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
                jit.setupArgumentsWithExecState(
                    CCallHelpers::TrustedImmPtr(ident.impl()));
                jit.setupArgumentsWithExecState(baseForCustom, valueRegs.gpr());

            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
                jit.setupArgumentsWithExecState(
                    EABI_32BIT_DUMMY_ARG baseForCustom,
                    CCallHelpers::TrustedImm32(JSValue::CellTag),
                    CCallHelpers::TrustedImmPtr(ident.impl()));
                jit.setupArgumentsWithExecState(
                    EABI_32BIT_DUMMY_ARG baseForCustom,
                    CCallHelpers::TrustedImm32(JSValue::CellTag),
                    valueRegs.payloadGPR(), valueRegs.tagGPR());

            jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);

            operationCall = jit.call();
            jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(operationCall, FunctionPtr(this->as<GetterSetterAccessCase>().m_customAccessor.opaque));

            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
                jit.setupResults(valueRegs);
            jit.reclaimSpaceOnStackForCCall();

            CCallHelpers::Jump noException =
                jit.emitExceptionCheck(vm, CCallHelpers::InvertedExceptionCheck);

            state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
            state.emitExplicitExceptionHandler();

            noException.link(&jit);
            bool callHasReturnValue = isGetter();
            restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);

        if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
            if (AccessCaseInternal::verbose)
                dataLog("Have type: ", type->descriptor(), "\n");
            state.failAndRepatch.append(
                jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
        } else if (AccessCaseInternal::verbose)
            dataLog("Don't have type.\n");

        if (isInlineOffset(m_offset)) {
                CCallHelpers::Address(
                    JSObject::offsetOfInlineStorage() +
                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
            jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
                CCallHelpers::Address(
                    scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
        // The transitioning AccessCase::create() overload should have returned null if this wasn't true.
        RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());

        if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
            if (AccessCaseInternal::verbose)
                dataLog("Have type: ", type->descriptor(), "\n");
            state.failAndRepatch.append(
                jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
        } else if (AccessCaseInternal::verbose)
            dataLog("Don't have type.\n");

        // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
        // exactly when this would make calls.
        bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
        bool reallocating = allocating && structure()->outOfLineCapacity();
        bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
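        // (Illustrative note: these flags select one of three regimes below. No allocation: just
        // store the value. allocatingInline: emit an inline allocation of the new out-of-line
        // storage using the two extra scratch registers. Otherwise: call out to a reallocation
        // operation, which also covers objects that could have an indexing header.)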
        ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
        allocator.lock(baseGPR);
#if USE(JSVALUE32_64)
        allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
        allocator.lock(valueRegs);
        allocator.lock(scratchGPR);

        GPRReg scratchGPR2 = InvalidGPRReg;
        GPRReg scratchGPR3 = InvalidGPRReg;
        if (allocatingInline) {
            scratchGPR2 = allocator.allocateScratchGPR();
            scratchGPR3 = allocator.allocateScratchGPR();

        ScratchRegisterAllocator::PreservedState preservedState =
            allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);

        CCallHelpers::JumpList slowPath;

        ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());

        size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);

        if (allocatingInline) {
            MarkedAllocator* allocator = vm.jsValueGigacageAuxiliarySpace.allocatorFor(newSize);
                // Yuck, this case would suck!
                slowPath.append(jit.jump());

            jit.move(CCallHelpers::TrustedImmPtr(allocator), scratchGPR2);
            jit.emitAllocate(scratchGPR, allocator, scratchGPR2, scratchGPR3, slowPath);
            jit.addPtr(CCallHelpers::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR);

            size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
            ASSERT(newSize > oldSize);

                // Handle the case where we are reallocating (i.e. the old structure/butterfly
                // already had out-of-line property storage).
                jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
                jit.cage(Gigacage::JSValue, scratchGPR3);

                // We have scratchGPR = new storage, scratchGPR3 = old storage,
                // scratchGPR2 = available
                for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
                        CCallHelpers::Address(
                            -static_cast<ptrdiff_t>(
                                offset + sizeof(JSValue) + sizeof(void*))),
                        CCallHelpers::Address(
                            -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));

            for (size_t offset = oldSize; offset < newSize; offset += sizeof(void*))
                jit.storePtr(CCallHelpers::TrustedImmPtr(0), CCallHelpers::Address(scratchGPR, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
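            // (Note on the offset math, stated as an assumption about the butterfly layout:
            // out-of-line property slots sit at negative offsets just below the IndexingHeader,
            // which itself sits just below where the butterfly pointer will point; hence each slot
            // is addressed as -(offset + sizeof(JSValue) + sizeof(void*)), and hence
            // sizeof(IndexingHeader) was added to the raw allocation above.)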
            // Handle the case where we are allocating out-of-line using an operation.
            RegisterSet extraRegistersToPreserve;
            extraRegistersToPreserve.set(baseGPR);
            extraRegistersToPreserve.set(valueRegs);
            AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);

                CCallHelpers::TrustedImm32(
                    state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
                CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));

            jit.makeSpaceOnStackForCCall();

            if (!reallocating) {
                jit.setupArgumentsWithExecState(baseGPR);

                CCallHelpers::Call operationCall = jit.call();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                        FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));

                // Handle the case where we are reallocating (i.e. the old structure/butterfly
                // already had out-of-line property storage).
                jit.setupArgumentsWithExecState(
                    baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));

                CCallHelpers::Call operationCall = jit.call();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                        FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));

            jit.reclaimSpaceOnStackForCCall();
            jit.move(GPRInfo::returnValueGPR, scratchGPR);

            CCallHelpers::Jump noException = jit.emitExceptionCheck(vm, CCallHelpers::InvertedExceptionCheck);

            state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
            state.emitExplicitExceptionHandler();

            noException.link(&jit);
            state.restoreLiveRegistersFromStackForCall(spillState);

        if (isInlineOffset(m_offset)) {
            CCallHelpers::Address(
                JSObject::offsetOfInlineStorage() +
                offsetInInlineStorage(m_offset) * sizeof(JSValue)));
            jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
            jit.cage(Gigacage::JSValue, scratchGPR);
                CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));

        if (allocatingInline) {
            // We set the new butterfly and the structure last. Doing it this way ensures that
            // whatever we had done up to this point is forgotten if we choose to branch to slow
            jit.nukeStructureAndStoreButterfly(vm, scratchGPR, baseGPR);
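            // (Illustrative note, stated as an assumption about nukeStructureAndStoreButterfly: it
            // scribbles a "nuked" structure ID before storing the new butterfly, so a concurrent
            // collector never sees the new butterfly paired with the old structure; the store of
            // the real new structure ID below makes the object well formed again.)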
        uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
            CCallHelpers::TrustedImm32(structureBits),
            CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));

        allocator.restoreReusedRegistersByPopping(jit, preservedState);

        // We will have a slow path if we were allocating without the help of an operation.
        if (allocatingInline) {
            if (allocator.didReuseRegisters()) {
                slowPath.link(&jit);
                allocator.restoreReusedRegistersByPopping(jit, preservedState);
                state.failAndIgnore.append(jit.jump());
                state.failAndIgnore.append(slowPath);
            RELEASE_ASSERT(slowPath.empty());

        jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
        jit.cage(Gigacage::JSValue, scratchGPR);
        jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
        state.failAndIgnore.append(
            jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
        jit.boxInt32(scratchGPR, valueRegs);
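        // (Illustrative note: the length is an unsigned 32-bit value, so a "negative" reading here
        // means it does not fit in an int32 and cannot be boxed with boxInt32; such lengths are
        // punted to the slow path via failAndIgnore above.)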
    case StringLength: {
        jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);

    case IntrinsicGetter: {
        RELEASE_ASSERT(isValidOffset(offset()));

        // We need to ensure the getter value does not move from under us. Note that GetterSetters
        // are immutable so we just need to watch the property not any value inside it.
        Structure* currStructure;
        if (m_conditionSet.isEmpty())
            currStructure = structure();
            currStructure = m_conditionSet.slotBaseCondition().object()->structure();
        currStructure->startWatchingPropertyForReplacements(vm, offset());

        this->as<IntrinsicGetterAccessCase>().emitIntrinsicGetter(state);

    case DirectArgumentsLength:
    case ScopedArgumentsLength:
    case ModuleNamespaceLoad:
        // These need to be handled by generateWithGuard(), since the guard is part of the
        // algorithm. We can be sure that nobody will call generate() directly for these since they
        // are not guarded by structure checks.
        RELEASE_ASSERT_NOT_REACHED();

    RELEASE_ASSERT_NOT_REACHED();