2 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "PolymorphicAccess.h"
31 #include "BinarySwitch.h"
32 #include "CCallHelpers.h"
33 #include "CodeBlock.h"
34 #include "DirectArguments.h"
35 #include "GetterSetter.h"
37 #include "JITOperations.h"
38 #include "JSCInlines.h"
39 #include "LinkBuffer.h"
40 #include "ScopedArguments.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "StructureStubClearingWatchpoint.h"
43 #include "StructureStubInfo.h"
44 #include <wtf/CommaPrinter.h>
45 #include <wtf/ListDump.h>
// File-local debug switch; presumably gates the dataLog() tracing calls
// later in this file — TODO confirm (the guarding `if (verbose)` lines are
// not visible in this listing).
49 static const bool verbose = false;
// Prints a human-readable description of this result to |out|.
// NOTE(review): intermediate lines are missing from this listing; the only
// visible statement appends ":" followed by m_code to whatever was printed
// before it (presumably the result kind).
51 void AccessGenerationResult::dump(PrintStream& out) const
55 out.print(":", m_code);
// Lazily creates this stub's WatchpointsOnStructureStubInfo (via
// ensureReferenceAndAddWatchpoint) and registers a watchpoint for
// |condition| on it. The returned Watchpoint is owned by |watchpoints|.
58 Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
60 return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
61 watchpoints, jit->codeBlock(), stubInfo, condition);
// Undoes the register preservation done when scratch registers were reused:
// pops the values saved by the ScratchRegisterAllocator back into place.
64 void AccessGenerationState::restoreScratch()
66 allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
// Records a jump to the shared success exit of the stub. NOTE(review): a
// restoreScratch() call likely precedes this jump on a line not visible in
// this listing — confirm against the full file.
69 void AccessGenerationState::succeed()
72 success.append(jit->jump());
// Computes (once, memoized via m_calculatedRegistersForCallAndExceptionHandling)
// the register sets needed when this IC makes a call:
//  - m_liveRegistersToPreserveAtExceptionHandlingCallSite: what the exception
//    handler expects to be intact (only non-empty for optimizing JITs, per
//    the RELEASE_ASSERT below);
//  - m_liveRegistersForCall: everything live across the call (exception-live
//    registers plus the allocator's used registers plus |extra|), minus
//    registers that never need saving for a JS call.
75 void AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling(const RegisterSet& extra)
77 if (!m_calculatedRegistersForCallAndExceptionHandling) {
78 m_calculatedRegistersForCallAndExceptionHandling = true;
80 m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
81 m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
82 if (m_needsToRestoreRegistersIfException)
83 RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
85 m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
// NOTE(review): merge(extra) appears both before and after the exclude()
// below. The first merge is redundant — anything it adds that exclude()
// removes is re-added by the second merge, and the final set is identical
// either way. Confirm against upstream and drop one of the two.
86 m_liveRegistersForCall.merge(extra);
87 m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForJSCall());
88 m_liveRegistersForCall.merge(extra);
// Spills all registers live across the upcoming call (see
// calculateLiveRegistersForCallAndExceptionHandling) onto the stack.
// The byte count consumed must be identical every time this is called for
// a given stub, which the RELEASE_ASSERT enforces: m_numberOfStackBytes...
// starts at unsigned max (the "unset" sentinel) and is pinned on first use.
92 void AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra)
94 calculateLiveRegistersForCallAndExceptionHandling(extra);
96 unsigned extraStackPadding = 0;
97 unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
98 if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
99 RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
100 m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
// Restores the registers spilled by preserveLiveRegistersToStackForCall,
// except the result registers when the access produced a value we must keep.
// NOTE(review): dontRestore.set(valueRegs) is presumably guarded by
// `if (isGetter)` on a line not visible in this listing — confirm; as shown
// the |isGetter| parameter would otherwise be unused.
103 void AccessGenerationState::restoreLiveRegistersFromStackForCall(bool isGetter)
105 RegisterSet dontRestore;
107 // This is the result value. We don't want to overwrite the result with what we stored to the stack.
108 // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
109 dontRestore.set(valueRegs);
111 restoreLiveRegistersFromStackForCall(dontRestore);
// Restore path used when the call threw: unlike the normal path we DO
// restore the result register (the thrown-from call's result is garbage),
// and we only restore what the exception handler actually needs.
114 void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException()
116 // Even if we're a getter, we don't want to ignore the result value like we normally do
117 // because the getter threw, and therefore, didn't return a value that means anything.
118 // Instead, we want to restore that register to what it was upon entering the getter
119 // inline cache. The subtlety here is if the base and the result are the same register,
120 // and the getter threw, we want OSR exit to see the original base value, not the result
121 // of the getter call.
122 RegisterSet dontRestore = liveRegistersForCall();
123 // As an optimization here, we only need to restore what is live for exception handling.
124 // We can construct the dontRestore set to accomplish this goal by having it contain only
125 // what is live for call but not live for exception handling. By ignoring things that are
126 // only live at the call but not the exception handler, we will only restore things live
127 // at the exception handler.
128 dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
129 restoreLiveRegistersFromStackForCall(dontRestore);
// Workhorse restore: pops liveRegistersForCall() from the stack, skipping
// anything in |dontRestore|, using the byte count recorded at preserve time.
132 void AccessGenerationState::restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
134 unsigned extraStackPadding = 0;
135 ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
// Returns (memoized) the call site index to use for calls made by this IC:
// a freshly minted exception-handling index when registers must be restored
// on exception, otherwise the stub's original call site index. Must be
// called after calculateLiveRegistersForCallAndExceptionHandling (asserted).
138 CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
140 RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
142 if (!m_calculatedCallSiteIndex) {
143 m_calculatedCallSiteIndex = true;
145 if (m_needsToRestoreRegistersIfException)
146 m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
148 m_callSiteIndex = originalCallSiteIndex();
151 return m_callSiteIndex;
// Looks up the exception handler covering the stub's original call site.
// Only valid when m_needsToRestoreRegistersIfException (asserted), which
// also guarantees a handler exists (second assert).
154 const HandlerInfo& AccessGenerationState::originalExceptionHandler() const
156 RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
157 HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
158 RELEASE_ASSERT(exceptionHandler);
159 return *exceptionHandler;
// The call site index the IC was originally installed at (from the stub info).
162 CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
// Emits the code run when a call made by this IC is observed (via an explicit
// exception check) to have thrown. Two regimes:
//  - If an optimizing-JIT exception handler exists for the original call
//    site, emulate what genericUnwind() would have done (set
//    callFrameForCatch, copy callee saves) and jump straight to that handler.
//  - Otherwise, call lookupExceptionHandler and jump through the generic
//    exception machinery.
// NOTE(review): the addLinkTask(...) call wrapping each link lambda is on
// lines not visible in this listing.
164 void AccessGenerationState::emitExplicitExceptionHandler()
167 jit->copyCalleeSavesToVMCalleeSavesBuffer();
168 if (needsToRestoreRegistersIfException()) {
169 // To the JIT that produces the original exception handling
170 // call site, they will expect the OSR exit to be arrived
171 // at from genericUnwind. Therefore we must model what genericUnwind
172 // does here. I.e, set callFrameForCatch and copy callee saves.
174 jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
175 CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
177 // We don't need to insert a new exception handler in the table
178 // because we're doing a manual exception check here. i.e, we'll
179 // never arrive here from genericUnwind().
180 HandlerInfo originalHandler = originalExceptionHandler();
182 [=] (LinkBuffer& linkBuffer) {
183 linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
186 jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
187 CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
189 [=] (LinkBuffer& linkBuffer) {
190 linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
192 jit->jumpToExceptionHandler();
// Private default constructor; AccessCases are built via the static factory
// functions below (tryGet/get/replace/transition/...). Body not visible in
// this listing.
196 AccessCase::AccessCase()
// Factory for the "try get" family of access cases: records type/offset/
// structure/conditions, allocating RareData only when the proxy flag or an
// additional watchpoint set actually needs storing.
200 std::unique_ptr<AccessCase> AccessCase::tryGet(
201 VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
202 const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
204 std::unique_ptr<AccessCase> result(new AccessCase());
206 result->m_type = type;
207 result->m_offset = offset;
208 result->m_structure.set(vm, owner, structure);
209 result->m_conditionSet = conditionSet;
// RareData is lazily allocated: most cases need neither field.
211 if (viaProxy || additionalSet) {
212 result->m_rareData = std::make_unique<RareData>();
213 result->m_rareData->viaProxy = viaProxy;
214 result->m_rareData->additionalSet = additionalSet;
// Factory for get-style access cases (plain loads, getters, custom getters).
// Same shape as tryGet(), but RareData is additionally needed when the case
// performs calls (doesCalls()) or carries a custom getter / custom slot base.
220 std::unique_ptr<AccessCase> AccessCase::get(
221 VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
222 const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
223 PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase)
225 std::unique_ptr<AccessCase> result(new AccessCase());
227 result->m_type = type;
228 result->m_offset = offset;
229 result->m_structure.set(vm, owner, structure);
230 result->m_conditionSet = conditionSet;
232 if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase) {
233 result->m_rareData = std::make_unique<RareData>();
234 result->m_rareData->viaProxy = viaProxy;
235 result->m_rareData->additionalSet = additionalSet;
236 result->m_rareData->customAccessor.getter = customGetter;
237 result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
// Factory for the MegamorphicLoad case (hash-table probe of the structure's
// property table; see generateWithGuard). Requires at least 9 GPRs — the
// probe loop needs base, value, scratch plus four allocated temporaries —
// so it bails (return path not visible here) on register-poor targets.
243 std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
248 if (GPRInfo::numberOfRegisters < 9)
251 std::unique_ptr<AccessCase> result(new AccessCase());
253 result->m_type = MegamorphicLoad;
// Factory for the Replace case: overwrite an existing property at |offset|
// on objects with |structure|. No condition set or RareData needed.
258 std::unique_ptr<AccessCase> AccessCase::replace(
259 VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
261 std::unique_ptr<AccessCase> result(new AccessCase());
263 result->m_type = Replace;
264 result->m_offset = offset;
265 result->m_structure.set(vm, owner, structure);
// Factory for the Transition case: a put that adds a property, moving the
// object from oldStructure to newStructure (which must be a direct
// transition — asserted via previousID()). Note m_structure stores the NEW
// structure; the old one is recovered through newStructure->previousID().
270 std::unique_ptr<AccessCase> AccessCase::transition(
271 VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
272 const ObjectPropertyConditionSet& conditionSet)
274 RELEASE_ASSERT(oldStructure == newStructure->previousID());
276 // Skip optimizing the case where we need a realloc, if we don't have
277 // enough registers to make it happen.
278 if (GPRInfo::numberOfRegisters < 6
279 && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
280 && oldStructure->outOfLineCapacity()) {
284 std::unique_ptr<AccessCase> result(new AccessCase());
286 result->m_type = Transition;
287 result->m_offset = offset;
288 result->m_structure.set(vm, owner, newStructure);
289 result->m_conditionSet = conditionSet;
// Factory for setter-style cases (JS setters and custom setters). Unlike
// get(), RareData is always allocated here — setters always make calls.
294 std::unique_ptr<AccessCase> AccessCase::setter(
295 VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
296 const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
297 JSObject* customSlotBase)
299 std::unique_ptr<AccessCase> result(new AccessCase());
301 result->m_type = type;
302 result->m_offset = offset;
303 result->m_structure.set(vm, owner, structure);
304 result->m_conditionSet = conditionSet;
305 result->m_rareData = std::make_unique<RareData>();
306 result->m_rareData->customAccessor.setter = customSetter;
307 result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
// Factory for the "in" operator cases (InHit/InMiss): no offset needed,
// just the structure to guard on and the condition set.
312 std::unique_ptr<AccessCase> AccessCase::in(
313 VM& vm, JSCell* owner, AccessType type, Structure* structure,
314 const ObjectPropertyConditionSet& conditionSet)
316 std::unique_ptr<AccessCase> result(new AccessCase());
318 result->m_type = type;
319 result->m_structure.set(vm, owner, structure);
320 result->m_conditionSet = conditionSet;
// Factory for the length cases (ArrayLength / StringLength / arguments
// lengths): guarded by cell type or indexing type rather than structure,
// so VM/owner are unused and only the type is recorded.
325 std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
327 std::unique_ptr<AccessCase> result(new AccessCase());
329 result->m_type = type;
// Factory for IntrinsicGetter: a getter that is a known JSFunction whose
// effect can be inlined. The function is kept alive via a write barrier in
// RareData.
334 std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
335 VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
336 Structure* structure, const ObjectPropertyConditionSet& conditionSet)
338 std::unique_ptr<AccessCase> result(new AccessCase())
340 result->m_type = IntrinsicGetter;
341 result->m_structure.set(vm, owner, structure);
342 result->m_conditionSet = conditionSet;
343 result->m_offset = offset;
345 result->m_rareData = std::make_unique<RareData>();
346 result->m_rareData->intrinsicFunction.set(vm, owner, getter);
// Destructor; body (if any) not visible in this listing. m_rareData and the
// write barriers clean themselves up.
351 AccessCase::~AccessCase()
// Converts a monomorphic (self) stub into the equivalent AccessCase when the
// stub goes polymorphic: GetByIdSelf becomes a Load, PutByIdReplace becomes
// a Replace. Other cache types presumably fall through to a null/default
// result on lines not visible here.
355 std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
356 VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
358 switch (stubInfo.cacheType) {
359 case CacheType::GetByIdSelf:
361 vm, owner, Load, stubInfo.u.byIdSelf.offset,
362 stubInfo.u.byIdSelf.baseObjectStructure.get());
364 case CacheType::PutByIdReplace:
366 vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
// Deep-copies this case so it can be re-used in a regenerated stub. All
// descriptive state is copied; generation-time state (callLinkInfo) is
// deliberately not, since it is recreated when code is generated.
373 std::unique_ptr<AccessCase> AccessCase::clone() const
375 std::unique_ptr<AccessCase> result(new AccessCase());
376 result->m_type = m_type;
377 result->m_offset = m_offset;
378 result->m_structure = m_structure;
379 result->m_conditionSet = m_conditionSet;
380 if (RareData* rareData = m_rareData.get()) {
381 result->m_rareData = std::make_unique<RareData>();
382 result->m_rareData->viaProxy = rareData->viaProxy;
383 result->m_rareData->additionalSet = rareData->additionalSet;
384 // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
385 result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
386 result->m_rareData->customSlotBase = rareData->customSlotBase;
387 result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
// Whether this case is entered through a structure-ID compare (the common
// case) as opposed to a type/indexing-type guard. The listed cases
// (MegamorphicLoad, the arguments-length cases, and presumably others on
// missing lines) use non-structure guards; the return statements are not
// visible here.
392 bool AccessCase::guardedByStructureCheck() const
398 case MegamorphicLoad:
401 case DirectArgumentsLength:
402 case ScopedArgumentsLength:
// The object the access actually reads from when it is not the receiver:
// an explicit custom slot base if one was supplied, otherwise the slot-base
// object implied by the condition set.
409 JSObject* AccessCase::alternateBase() const
411 if (customSlotBase())
412 return customSlotBase();
413 return conditionSet().slotBaseCondition().object();
// Whether generated code for this case performs a call. Custom accessors
// always call out; a Transition calls (into the allocator/slow path) when
// the out-of-line storage must be reallocated and the structure could have
// an indexing header, in which case newStructure() is also reported via
// |cellsToMark| so the GC keeps it alive. (Switch header/returns are on
// lines not visible in this listing.)
416 bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
421 case CustomValueGetter:
422 case CustomAccessorGetter:
423 case CustomValueSetter:
424 case CustomAccessorSetter:
427 if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
428 && structure()->couldHaveIndexingHeader()) {
430 cellsToMark->append(newStructure());
// True if every structure mentioned by the condition set is still in a
// state where the conditions hold (assuming impure-property watchpoints
// fire as expected) — i.e. regenerating a stub with this case is worthwhile.
439 bool AccessCase::couldStillSucceed() const
441 return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
// A plain self load with no conditions and no custom slot base can be
// subsumed by a single MegamorphicLoad case. (Additional conjuncts appear
// to be on lines not visible in this listing.)
444 bool AccessCase::canBeReplacedByMegamorphicLoad() const
446 return type() == Load
448 && conditionSet().isEmpty()
450 && !customSlotBase();
// Whether adding |this| to a stub makes |other| redundant. Deliberately
// conservative: MegamorphicLoad subsumes simple loads; otherwise two
// structure-guarded cases are interchangeable iff they guard the same
// structure, and non-structure-guarded cases are never considered
// replaceable (FIXME below).
453 bool AccessCase::canReplace(const AccessCase& other) const
455 // We could do a lot better here, but for now we just do something obvious.
457 if (type() == MegamorphicLoad && other.canBeReplacedByMegamorphicLoad())
460 if (!guardedByStructureCheck() || !other.guardedByStructureCheck()) {
461 // FIXME: Implement this!
465 return structure() == other.structure();
// Debug printer: type, then a comma-separated list of whichever fields are
// populated (structure or transition pair, offset, conditions, RareData
// contents). Uses CommaPrinter (declared on a line not visible here).
468 void AccessCase::dump(PrintStream& out) const
470 out.print(m_type, ":(");
474 if (m_type == Transition)
475 out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
476 else if (m_structure)
477 out.print(comma, "structure = ", pointerDump(m_structure.get()));
479 if (isValidOffset(m_offset))
480 out.print(comma, "offset = ", m_offset);
481 if (!m_conditionSet.isEmpty())
482 out.print(comma, "conditions = ", m_conditionSet);
484 if (RareData* rareData = m_rareData.get()) {
485 if (rareData->viaProxy)
486 out.print(comma, "viaProxy = ", rareData->viaProxy);
487 if (rareData->additionalSet)
488 out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
489 if (rareData->callLinkInfo)
490 out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
491 if (rareData->customAccessor.opaque)
492 out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
493 if (rareData->customSlotBase)
494 out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
// GC hook: returns false (return statements are on lines not visible here)
// when any weakly-held cell this case depends on — its structure, condition
// set members, custom slot base, or intrinsic function — has died, meaning
// the case must be dropped. Also forwards visitation to the callLinkInfo.
500 bool AccessCase::visitWeak(VM& vm) const
502 if (m_structure && !Heap::isMarked(m_structure.get()))
504 if (!m_conditionSet.areStillLive())
507 if (m_rareData->callLinkInfo)
508 m_rareData->callLinkInfo->visitWeak(vm);
509 if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
511 if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
// Emits the guard for this case, appending to |fallThrough| the jumps taken
// when the guard fails (so the next case can be tried), then generates the
// case body. Most cases guard on a structure-ID compare (tail of this
// function); the special cases below guard on cell type or indexing type
// instead. (The enclosing switch header and several case labels are on
// lines not visible in this listing.)
517 void AccessCase::generateWithGuard(
518 AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
520 CCallHelpers& jit = *state.jit;
522 const Identifier& ident = *state.ident;
523 StructureStubInfo& stubInfo = *state.stubInfo;
524 JSValueRegs valueRegs = state.valueRegs;
525 GPRReg baseGPR = state.baseGPR;
526 GPRReg scratchGPR = state.scratchGPR;
// ArrayLength (label not visible): guard on the indexing type byte — must
// have IsArray set and a non-zero indexing shape.
533 jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR);
536 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
539 CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
// StringLength (label not visible): guard on cell type == StringType.
547 CCallHelpers::NotEqual,
548 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
549 CCallHelpers::TrustedImm32(StringType)));
// DirectArgumentsLength: guard on cell type, fail-and-ignore if the length
// has been overridden, then load and box the int32 length.
553 case DirectArgumentsLength: {
557 CCallHelpers::NotEqual,
558 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
559 CCallHelpers::TrustedImm32(DirectArgumentsType)));
563 CCallHelpers::NonZero,
564 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
566 CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
567 valueRegs.payloadGPR());
568 jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
// ScopedArgumentsLength: same pattern as DirectArgumentsLength but with the
// "overrode things" flag and total length.
573 case ScopedArgumentsLength: {
577 CCallHelpers::NotEqual,
578 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
579 CCallHelpers::TrustedImm32(ScopedArgumentsType)));
583 CCallHelpers::NonZero,
584 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
586 CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
587 valueRegs.payloadGPR());
588 jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
// MegamorphicLoad: probe the structure's PropertyTable at runtime with the
// identifier's precomputed hash, using linear probing on collision.
593 case MegamorphicLoad: {
594 UniquedStringImpl* key = ident.impl();
595 unsigned hash = IdentifierRepHash::hash(key);
597 ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
598 allocator.lock(baseGPR);
599 #if USE(JSVALUE32_64)
600 allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
602 allocator.lock(valueRegs);
603 allocator.lock(scratchGPR);
// Four extra temporaries beyond the usual scratch — this is why
// megamorphicLoad() requires >= 9 GPRs.
605 GPRReg intermediateGPR = scratchGPR;
606 GPRReg maskGPR = allocator.allocateScratchGPR();
607 GPRReg maskedHashGPR = allocator.allocateScratchGPR();
608 GPRReg indexGPR = allocator.allocateScratchGPR();
609 GPRReg offsetGPR = allocator.allocateScratchGPR();
612 dataLog("baseGPR = ", baseGPR, "\n");
613 dataLog("valueRegs = ", valueRegs, "\n");
614 dataLog("scratchGPR = ", scratchGPR, "\n");
615 dataLog("intermediateGPR = ", intermediateGPR, "\n");
616 dataLog("maskGPR = ", maskGPR, "\n");
617 dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
618 dataLog("indexGPR = ", indexGPR, "\n");
619 dataLog("offsetGPR = ", offsetGPR, "\n");
622 ScratchRegisterAllocator::PreservedState preservedState =
623 allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
625 CCallHelpers::JumpList myFailAndIgnore;
626 CCallHelpers::JumpList myFallThrough;
// No property table yet => cannot probe; fail without repatching.
628 jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
630 CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
633 myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
// Load the table's index mask, index vector, and compute the entry base:
// entries start after indexSize 32-bit index slots (hence the << 2).
635 jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
636 jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
638 CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
641 jit.move(maskGPR, maskedHashGPR);
642 jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
643 jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
644 jit.addPtr(indexGPR, intermediateGPR);
646 CCallHelpers::Label loop = jit.label();
648 jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
// Empty slot => property definitely absent; fall through to the next case.
650 myFallThrough.append(
654 CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
// Index entries are 1-based; convert to a PropertyMapEntry pointer.
656 jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
657 jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
658 jit.addPtr(intermediateGPR, offsetGPR);
660 CCallHelpers::Jump collision = jit.branchPtr(
661 CCallHelpers::NotEqual,
662 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
663 CCallHelpers::TrustedImmPtr(key));
665 // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
666 // Check them and then attempt the load.
// Accessor properties can't be handled by a plain load; bail to fallthrough.
668 myFallThrough.append(
670 CCallHelpers::NonZero,
671 CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
672 CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
674 jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
676 jit.loadProperty(baseGPR, offsetGPR, valueRegs);
678 allocator.restoreReusedRegistersByPopping(jit, preservedState);
// Linear probing: bump the masked hash and loop.
681 collision.link(&jit);
683 jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
685 // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
686 // around isn't super common so we could, for example, recompute the mask from the difference between
687 // the table and index. But before we do that we should probably make it easier to multiply and
688 // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
689 // to have a power-of-2 size.
690 jit.and32(maskGPR, maskedHashGPR);
691 jit.jump().linkTo(loop, &jit);
// If registers were spilled, failure paths must restore them before exiting;
// otherwise the local jump lists can be forwarded directly.
693 if (allocator.didReuseRegisters()) {
694 myFailAndIgnore.link(&jit);
695 allocator.restoreReusedRegistersByPopping(jit, preservedState);
696 state.failAndIgnore.append(jit.jump());
698 myFallThrough.link(&jit);
699 allocator.restoreReusedRegistersByPopping(jit, preservedState);
700 fallThrough.append(jit.jump());
702 state.failAndIgnore.append(myFailAndIgnore);
703 fallThrough.append(myFallThrough);
// Default structure-guarded path (switch default not visible): for proxied
// accesses, check the cell is a pure forwarding proxy, load its target, and
// compare the target's structure ID; otherwise compare the base's directly.
712 CCallHelpers::NotEqual,
713 CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
714 CCallHelpers::TrustedImm32(PureForwardingProxyType)));
716 jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
720 CCallHelpers::NotEqual,
721 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
726 CCallHelpers::NotEqual,
727 CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
736 // EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned on an even-numbered register (r0, r2 or [sp]).
737 // To prevent the assembler from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
738 #if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
739 #define EABI_32BIT_DUMMY_ARG CCallHelpers::TrustedImm32(0),
// NOTE(review): the #else separating these two #defines (and the closing
// #endif) are on lines not visible in this listing.
741 #define EABI_32BIT_DUMMY_ARG
744 void AccessCase::generate(AccessGenerationState& state)
747 dataLog("Generating code for: ", *this, "\n");
749 CCallHelpers& jit = *state.jit;
751 CodeBlock* codeBlock = jit.codeBlock();
752 StructureStubInfo& stubInfo = *state.stubInfo;
753 const Identifier& ident = *state.ident;
754 JSValueRegs valueRegs = state.valueRegs;
755 GPRReg baseGPR = state.baseGPR;
756 GPRReg scratchGPR = state.scratchGPR;
758 ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
760 if ((structure() && structure()->needImpurePropertyWatchpoint())
761 || m_conditionSet.needImpurePropertyWatchpoint())
762 vm.registerWatchpointForImpureProperty(ident, state.addWatchpoint());
765 additionalSet()->add(state.addWatchpoint());
767 for (const ObjectPropertyCondition& condition : m_conditionSet) {
768 Structure* structure = condition.object()->structure();
770 if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
771 structure->addTransitionWatchpoint(state.addWatchpoint(condition));
775 if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
776 dataLog("This condition is no longer met: ", condition, "\n");
777 RELEASE_ASSERT_NOT_REACHED();
780 // We will emit code that has a weak reference that isn't otherwise listed anywhere.
781 state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
783 jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
784 state.failAndRepatch.append(
786 CCallHelpers::NotEqual,
787 CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
794 jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
799 jit.moveTrustedValue(jsUndefined(), valueRegs);
807 case CustomValueGetter:
808 case CustomAccessorGetter:
809 case CustomValueSetter:
810 case CustomAccessorSetter: {
811 if (isValidOffset(m_offset)) {
812 Structure* currStructure;
813 if (m_conditionSet.isEmpty())
814 currStructure = structure();
816 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
817 currStructure->startWatchingPropertyForReplacements(vm, offset());
820 GPRReg baseForGetGPR;
822 baseForGetGPR = valueRegs.payloadGPR();
824 CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
827 baseForGetGPR = baseGPR;
829 GPRReg baseForAccessGPR;
830 if (!m_conditionSet.isEmpty()) {
832 CCallHelpers::TrustedImmPtr(alternateBase()),
834 baseForAccessGPR = scratchGPR;
836 baseForAccessGPR = baseForGetGPR;
838 GPRReg loadedValueGPR = InvalidGPRReg;
839 if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
840 if (m_type == Load || m_type == GetGetter)
841 loadedValueGPR = valueRegs.payloadGPR();
843 loadedValueGPR = scratchGPR;
846 if (isInlineOffset(m_offset))
847 storageGPR = baseForAccessGPR;
850 CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
852 storageGPR = loadedValueGPR;
857 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
859 if (m_type == Load || m_type == GetGetter) {
861 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
865 CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
870 if (m_type == Load || m_type == GetGetter) {
875 // Stuff for custom getters/setters.
876 CCallHelpers::Call operationCall;
878 // Stuff for JS getters/setters.
879 CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
880 CCallHelpers::Call fastPathCall;
881 CCallHelpers::Call slowPathCall;
883 CCallHelpers::Jump success;
884 CCallHelpers::Jump fail;
886 // This also does the necessary calculations of whether or not we're an
887 // exception handling call site.
888 state.preserveLiveRegistersToStackForCall();
891 CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
892 CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
894 if (m_type == Getter || m_type == Setter) {
895 // Create a JS call using a JS call inline cache. Assume that:
897 // - SP is aligned and represents the extent of the calling compiler's stack usage.
899 // - FP is set correctly (i.e. it points to the caller's call frame header).
901 // - SP - FP is an aligned difference.
903 // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
906 // Therefore, we temporarily grow the stack for the purpose of the call and then
909 RELEASE_ASSERT(!m_rareData->callLinkInfo);
910 m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
912 // FIXME: If we generated a polymorphic call stub that jumped back to the getter
913 // stub, which then jumped back to the main code, then we'd have a reachability
914 // situation that the GC doesn't know about. The GC would ensure that the polymorphic
915 // call stub stayed alive, and it would ensure that the main code stayed alive, but
916 // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
917 // be GC objects, and then we'd be able to say that the polymorphic call stub has a
918 // reference to the getter stub.
919 // https://bugs.webkit.org/show_bug.cgi?id=148914
920 m_rareData->callLinkInfo->disallowStubs();
922 m_rareData->callLinkInfo->setUpCall(
923 CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
925 CCallHelpers::JumpList done;
927 // There is a "this" argument.
928 unsigned numberOfParameters = 1;
929 // ... and a value argument if we're calling a setter.
930 if (m_type == Setter)
931 numberOfParameters++;
933 // Get the accessor; if there ain't one then the result is jsUndefined().
934 if (m_type == Setter) {
936 CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
940 CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
944 CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
945 CCallHelpers::Zero, loadedValueGPR);
947 unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
949 unsigned numberOfBytesForCall =
950 numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);
952 unsigned alignedNumberOfBytesForCall =
953 WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
956 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
957 CCallHelpers::stackPointerRegister);
959 CCallHelpers::Address calleeFrame = CCallHelpers::Address(
960 CCallHelpers::stackPointerRegister,
961 -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
964 CCallHelpers::TrustedImm32(numberOfParameters),
965 calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
968 loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
972 calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
974 if (m_type == Setter) {
977 calleeFrame.withOffset(
978 virtualRegisterForArgument(1).offset() * sizeof(Register)));
981 CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
982 CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
983 CCallHelpers::TrustedImmPtr(0));
985 fastPathCall = jit.nearCall();
986 if (m_type == Getter)
987 jit.setupResults(valueRegs);
988 done.append(jit.jump());
991 jit.move(loadedValueGPR, GPRInfo::regT0);
992 #if USE(JSVALUE32_64)
993 // We *always* know that the getter/setter, if non-null, is a cell.
994 jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
996 jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
997 slowPathCall = jit.nearCall();
998 if (m_type == Getter)
999 jit.setupResults(valueRegs);
1000 done.append(jit.jump());
1002 returnUndefined.link(&jit);
1003 if (m_type == Getter)
1004 jit.moveTrustedValue(jsUndefined(), valueRegs);
1008 jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - state.numberOfStackBytesUsedForRegisterPreservation()),
1009 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1010 state.restoreLiveRegistersFromStackForCall(isGetter());
1013 [=, &vm] (LinkBuffer& linkBuffer) {
1014 m_rareData->callLinkInfo->setCallLocations(
1015 linkBuffer.locationOfNearCall(slowPathCall),
1016 linkBuffer.locationOf(addressOfLinkFunctionCheck),
1017 linkBuffer.locationOfNearCall(fastPathCall));
1021 CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
1024 // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
1025 // hard to track if someone did spillage or not, so we just assume that we always need
1026 // to make some space here.
1027 jit.makeSpaceOnStackForCCall();
1029 // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
1030 // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
1031 // Custom values are passed the slotBase (the property holder), custom accessors are passed the thisValue (receiver).
1032 GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
1034 if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1035 jit.setupArgumentsWithExecState(
1037 CCallHelpers::TrustedImmPtr(ident.impl()));
1039 jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
1041 if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
1042 jit.setupArgumentsWithExecState(
1043 EABI_32BIT_DUMMY_ARG baseForCustomValue,
1044 CCallHelpers::TrustedImm32(JSValue::CellTag),
1045 CCallHelpers::TrustedImmPtr(ident.impl()));
1047 jit.setupArgumentsWithExecState(
1048 EABI_32BIT_DUMMY_ARG baseForCustomValue,
1049 CCallHelpers::TrustedImm32(JSValue::CellTag),
1050 valueRegs.payloadGPR(), valueRegs.tagGPR());
1053 jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
1055 operationCall = jit.call();
1057 [=] (LinkBuffer& linkBuffer) {
1058 linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
1061 if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
1062 jit.setupResults(valueRegs);
1063 jit.reclaimSpaceOnStackForCCall();
1065 CCallHelpers::Jump noException =
1066 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1068 state.restoreLiveRegistersFromStackForCallWithThrownException();
1069 state.emitExplicitExceptionHandler();
1071 noException.link(&jit);
1072 state.restoreLiveRegistersFromStackForCall(isGetter());
1079 if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
1081 dataLog("Have type: ", type->descriptor(), "\n");
1082 state.failAndRepatch.append(
1083 jit.branchIfNotType(
1084 valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1086 dataLog("Don't have type.\n");
1088 if (isInlineOffset(m_offset)) {
1091 CCallHelpers::Address(
1093 JSObject::offsetOfInlineStorage() +
1094 offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1096 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1099 CCallHelpers::Address(
1100 scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1107 // AccessCase::transition() should have returned null if this wasn't true.
1108 RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
1110 if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
1112 dataLog("Have type: ", type->descriptor(), "\n");
1113 state.failAndRepatch.append(
1114 jit.branchIfNotType(
1115 valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
1117 dataLog("Don't have type.\n");
1119 // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
1120 // exactly when this would make calls.
1121 bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
1122 bool reallocating = allocating && structure()->outOfLineCapacity();
1123 bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
1125 ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1126 allocator.lock(baseGPR);
1127 #if USE(JSVALUE32_64)
1128 allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1130 allocator.lock(valueRegs);
1131 allocator.lock(scratchGPR);
1133 GPRReg scratchGPR2 = InvalidGPRReg;
1134 GPRReg scratchGPR3 = InvalidGPRReg;
1135 if (allocatingInline) {
1136 scratchGPR2 = allocator.allocateScratchGPR();
1137 scratchGPR3 = allocator.allocateScratchGPR();
1140 ScratchRegisterAllocator::PreservedState preservedState =
1141 allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
1143 CCallHelpers::JumpList slowPath;
1145 ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
1148 size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
1150 if (allocatingInline) {
1151 CopiedAllocator* copiedAllocator = &vm.heap.storageAllocator();
1153 if (!reallocating) {
1154 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1157 CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1158 jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1159 jit.negPtr(scratchGPR);
1161 CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1162 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1164 // Handle the case where we are reallocating (i.e. the old structure/butterfly
1165 // already had out-of-line property storage).
1166 size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
1167 ASSERT(newSize > oldSize);
1169 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
1170 jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
1173 CCallHelpers::Signed, CCallHelpers::TrustedImm32(newSize), scratchGPR));
1174 jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
1175 jit.negPtr(scratchGPR);
1177 CCallHelpers::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
1178 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(JSValue)), scratchGPR);
1179 // We have scratchGPR = new storage, scratchGPR3 = old storage,
1180 // scratchGPR2 = available
1181 for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
1183 CCallHelpers::Address(
1185 -static_cast<ptrdiff_t>(
1186 offset + sizeof(JSValue) + sizeof(void*))),
1190 CCallHelpers::Address(
1192 -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
1196 // Handle the case where we are allocating out-of-line using an operation.
1197 RegisterSet extraRegistersToPreserve;
1198 extraRegistersToPreserve.set(baseGPR);
1199 extraRegistersToPreserve.set(valueRegs);
1200 state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
1203 CCallHelpers::TrustedImm32(
1204 state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
1205 CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
1207 jit.makeSpaceOnStackForCCall();
1209 if (!reallocating) {
1210 jit.setupArgumentsWithExecState(baseGPR);
1212 CCallHelpers::Call operationCall = jit.call();
1214 [=] (LinkBuffer& linkBuffer) {
1217 FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
1220 // Handle the case where we are reallocating (i.e. the old structure/butterfly
1221 // already had out-of-line property storage).
1222 jit.setupArgumentsWithExecState(
1223 baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
1225 CCallHelpers::Call operationCall = jit.call();
1227 [=] (LinkBuffer& linkBuffer) {
1230 FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
1234 jit.reclaimSpaceOnStackForCCall();
1235 jit.move(GPRInfo::returnValueGPR, scratchGPR);
1237 CCallHelpers::Jump noException =
1238 jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
1240 state.restoreLiveRegistersFromStackForCallWithThrownException();
1241 state.emitExplicitExceptionHandler();
1243 noException.link(&jit);
1244 state.restoreLiveRegistersFromStackForCall();
1248 if (isInlineOffset(m_offset)) {
1251 CCallHelpers::Address(
1253 JSObject::offsetOfInlineStorage() +
1254 offsetInInlineStorage(m_offset) * sizeof(JSValue)));
1257 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1260 CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
1263 // If we had allocated using an operation then we would have already executed the store
1264 // barrier and we would have already stored the butterfly into the object.
1265 if (allocatingInline) {
1266 CCallHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(baseGPR);
1267 WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
1268 jit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2);
1271 CCallHelpers::AboveOrEqual, scratchGPR2,
1272 CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())));
1274 jit.add32(CCallHelpers::TrustedImm32(1), scratchGPR2);
1275 jit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress());
1277 jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR3);
1278 // We use an offset of -sizeof(void*) because we already added 1 to scratchGPR2.
1281 CCallHelpers::BaseIndex(
1282 scratchGPR3, scratchGPR2, CCallHelpers::ScalePtr,
1283 static_cast<int32_t>(-sizeof(void*))));
1284 ownerIsRememberedOrInEden.link(&jit);
1286 // We set the new butterfly and the structure last. Doing it this way ensures that
1287 // whatever we had done up to this point is forgotten if we choose to branch to slow
1290 jit.storePtr(scratchGPR, CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()));
1293 uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
1295 CCallHelpers::TrustedImm32(structureBits),
1296 CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
1298 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1301 // We will have a slow path if we were allocating without the help of an operation.
1302 if (allocatingInline) {
1303 if (allocator.didReuseRegisters()) {
1304 slowPath.link(&jit);
1305 allocator.restoreReusedRegistersByPopping(jit, preservedState);
1306 state.failAndIgnore.append(jit.jump());
1308 state.failAndIgnore.append(slowPath);
1310 RELEASE_ASSERT(slowPath.empty());
1315 jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
1316 jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
1317 state.failAndIgnore.append(
1318 jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
1319 jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1324 case StringLength: {
1325 jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
1326 jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
1331 case IntrinsicGetter: {
1332 RELEASE_ASSERT(isValidOffset(offset()));
1334 // We need to ensure the getter value does not move from under us. Note that GetterSetters
1335 // are immutable so we just need to watch the property not any value inside it.
1336 Structure* currStructure;
1337 if (m_conditionSet.isEmpty())
1338 currStructure = structure();
1340 currStructure = m_conditionSet.slotBaseCondition().object()->structure();
1341 currStructure->startWatchingPropertyForReplacements(vm, offset());
1343 emitIntrinsicGetter(state);
1347 case DirectArgumentsLength:
1348 case ScopedArgumentsLength:
1349 case MegamorphicLoad:
1350 // These need to be handled by generateWithGuard(), since the guard is part of the
1351 // algorithm. We can be sure that nobody will call generate() directly for these since they
1352 // are not guarded by structure checks.
1353 RELEASE_ASSERT_NOT_REACHED();
1356 RELEASE_ASSERT_NOT_REACHED();
// Empty constructor, defined out-of-line in the .cpp -- presumably so the
// header does not need complete types for the owned members; confirm.
1359 PolymorphicAccess::PolymorphicAccess() { }
// Empty destructor, defined out-of-line in the .cpp -- presumably to anchor
// destruction of members whose types are incomplete in the header; confirm.
1360 PolymorphicAccess::~PolymorphicAccess() { }
// Merges the given new access cases into this access's case list and
// regenerates the stub. Returns MadeNoChanges when deduplication leaves
// nothing to add, GaveUp when the merged list is too large or regenerate()
// fails; on success m_list is replaced by the merged list. The detailed
// ordering/replacement invariants are documented inline below.
1362 AccessGenerationResult PolymorphicAccess::regenerateWithCases(
1363 VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1364 Vector<std::unique_ptr<AccessCase>> originalCasesToAdd)
1366 // This method will add the originalCasesToAdd to the list one at a time while preserving the
1368 // - If a newly added case canReplace() any existing case, then the existing case is removed before
1369 // the new case is added. Removal doesn't change order of the list. Any number of existing cases
1370 // can be removed via the canReplace() rule.
1371 // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
1372 // cascade through the cases in reverse order, you will get the most recent cases first.
1373 // - If this method fails (returns null, doesn't add the cases), then both the previous case list
1374 // and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
1375 // add more things after failure.
1377 // First ensure that the originalCasesToAdd doesn't contain duplicates.
1378 Vector<std::unique_ptr<AccessCase>> casesToAdd;
1379 for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
1380 std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
1382 // Add it only if it is not replaced by the subsequent cases in the list.
1384 for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
1385 if (originalCasesToAdd[j]->canReplace(*myCase)) {
1394 casesToAdd.append(WTFMove(myCase));
1398 dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
1400 // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
1401 // new stub that will be identical to the old one. Returning null should tell the caller to just
1402 // keep doing what they were doing before.
1403 if (casesToAdd.isEmpty())
1404 return AccessGenerationResult::MadeNoChanges;
1406 // Now construct the list of cases as they should appear if we are successful. This means putting
1407 // all of the previous cases in this list in order but excluding those that can be replaced, and
1408 // then adding the new cases.
1410 for (auto& oldCase : m_list) {
1411 // Ignore old cases that cannot possibly succeed anymore.
1412 if (!oldCase->couldStillSucceed())
1415 // Figure out if this is replaced by any new cases.
1417 for (auto& caseToAdd : casesToAdd) {
1418 if (caseToAdd->canReplace(*oldCase)) {
1426 newCases.append(oldCase->clone());
1428 for (auto& caseToAdd : casesToAdd)
1429 newCases.append(WTFMove(caseToAdd));
1432 dataLog("newCases: ", listDump(newCases), "\n");
1434 // See if we are close to having too many cases and if some of those cases can be subsumed by a
1435 // megamorphic load.
1436 if (newCases.size() >= Options::maxAccessVariantListSize()) {
1437 unsigned numSelfLoads = 0;
1438 for (auto& newCase : newCases) {
1439 if (newCase->canBeReplacedByMegamorphicLoad())
// Enough replaceable self-loads to justify the megamorphic-load cost:
// collapse them all into one megamorphic case.
1443 if (numSelfLoads >= Options::megamorphicLoadCost()) {
1444 if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
1445 newCases.removeAllMatching(
1446 [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
1447 return newCase->canBeReplacedByMegamorphicLoad();
1450 newCases.append(WTFMove(mega));
1455 if (newCases.size() > Options::maxAccessVariantListSize()) {
1457 dataLog("Too many cases.\n");
1458 return AccessGenerationResult::GaveUp;
// Regenerate the stub; per the contract above, failure leaves the previous
// case list and stub intact.
1461 MacroAssemblerCodePtr result = regenerate(vm, codeBlock, stubInfo, ident, newCases);
1463 return AccessGenerationResult::GaveUp;
1465 m_list = WTFMove(newCases);
// Convenience overload: wraps the single new case in a Vector and defers to
// regenerateWithCases().
1469 AccessGenerationResult PolymorphicAccess::regenerateWithCase(
1470 VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1471 std::unique_ptr<AccessCase> newAccess)
1473 Vector<std::unique_ptr<AccessCase>> newAccesses;
1474 newAccesses.append(WTFMove(newAccess));
1475 return regenerateWithCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
// GC liveness check: asks each access case to visitWeak() and checks every
// recorded weak reference against the collector's mark state
// (Heap::isMarked). NOTE(review): the early-return handling for a dead
// case/reference is presumably "return false" so the owner can discard this
// stub -- confirm against callers.
1478 bool PolymorphicAccess::visitWeak(VM& vm) const
1480 for (unsigned i = 0; i < size(); ++i) {
1481 if (!at(i).visitWeak(vm))
1484 if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
1485 for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
1486 if (!Heap::isMarked(weakReference.get()))
// Debug printing: emits this object's address followed by a bracketed,
// comma-separated dump of every case in the list.
1493 void PolymorphicAccess::dump(PrintStream& out) const
1495 out.print(RawPointer(this), ":[");
1497 for (auto& entry : m_list)
1498 out.print(comma, *entry);
// Emits the machine-code stub covering every case in "cases": builds an
// AccessGenerationState, dispatches to the cases either via a guard cascade
// or a binary switch on structure ID, emits the shared failure paths (and,
// when JS getter/setter calls are present, a makeshift catch handler), then
// links and finalizes the code. Returns a null MacroAssemblerCodePtr if the
// LinkBuffer fails to allocate.
1502 MacroAssemblerCodePtr PolymorphicAccess::regenerate(
1503 VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
1504 PolymorphicAccess::ListType& cases)
1507 dataLog("Generating code for cases: ", listDump(cases), "\n");
1509 AccessGenerationState state;
1511 state.access = this;
1512 state.stubInfo = &stubInfo;
1513 state.ident = &ident;
1515 state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
1516 state.valueRegs = JSValueRegs(
1517 #if USE(JSVALUE32_64)
1518 static_cast<GPRReg>(stubInfo.patch.valueTagGPR),
1520 static_cast<GPRReg>(stubInfo.patch.valueGPR));
// Lock the registers the IC patchpoint already uses so the scratch
// allocator won't hand them back to us.
1522 ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
1523 state.allocator = &allocator;
1524 allocator.lock(state.baseGPR);
1525 allocator.lock(state.valueRegs);
1526 #if USE(JSVALUE32_64)
1527 allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
1530 state.scratchGPR = allocator.allocateScratchGPR();
1532 CCallHelpers jit(&vm, codeBlock);
1535 state.preservedReusedRegisterState =
1536 allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
// Decide the dispatch strategy: a binary switch over structure IDs is only
// valid if every case is guarded purely by a structure check. Also record
// whether any case performs a JS getter/setter call (affects exception
// handling below).
1538 bool allGuardedByStructureCheck = true;
1539 bool hasJSGetterSetterCall = false;
1540 for (auto& entry : cases) {
1541 allGuardedByStructureCheck &= entry->guardedByStructureCheck();
1542 if (entry->type() == AccessCase::Getter || entry->type() == AccessCase::Setter)
1543 hasJSGetterSetterCall = true;
1546 if (cases.isEmpty()) {
1547 // This is super unlikely, but we make it legal anyway.
1548 state.failAndRepatch.append(jit.jump());
1549 } else if (!allGuardedByStructureCheck || cases.size() == 1) {
1550 // If there are any proxies in the list, we cannot just use a binary switch over the structure.
1551 // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
1553 CCallHelpers::JumpList fallThrough;
1555 // Cascade through the list, preferring newer entries.
1556 for (unsigned i = cases.size(); i--;) {
1557 fallThrough.link(&jit);
1558 cases[i]->generateWithGuard(state, fallThrough);
1560 state.failAndRepatch.append(fallThrough);
// All cases are structure-guarded: load the structure ID once and
// binary-switch on it, generating each case's body at its switch target.
1563 CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
1566 Vector<int64_t> caseValues(cases.size());
1567 for (unsigned i = 0; i < cases.size(); ++i)
1568 caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
1570 BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
1571 while (binarySwitch.advance(jit))
1572 cases[binarySwitch.caseIndex()]->generate(state);
1573 state.failAndRepatch.append(binarySwitch.fallThrough());
1576 if (!state.failAndIgnore.empty()) {
1577 state.failAndIgnore.link(&jit);
1579 // Make sure that the inline cache optimization code knows that we are taking slow path because
1580 // of something that isn't patchable. The slow path will decrement "countdown" and will only
1581 // patch things if the countdown reaches zero. We increment the slow path count here to ensure
1582 // that the slow path does not try to patch.
1583 jit.load8(&stubInfo.countdown, state.scratchGPR);
1584 jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
1585 jit.store8(state.scratchGPR, &stubInfo.countdown);
// Shared failure path: if scratch registers were reused we must restore
// them before jumping out to the repatch path.
1588 CCallHelpers::JumpList failure;
1589 if (allocator.didReuseRegisters()) {
1590 state.failAndRepatch.link(&jit);
1591 state.restoreScratch();
1593 failure = state.failAndRepatch;
1594 failure.append(jit.jump());
1596 CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
1597 CallSiteIndex callSiteIndexForExceptionHandling;
1598 if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
1599 // Emit the exception handler.
1600 // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter .
1601 // Note also that this is not reachable from custom getter/setter. Custom getter/setters will have
1602 // their own exception handling logic that doesn't go through genericUnwind.
1603 MacroAssembler::Label makeshiftCatchHandler = jit.label();
1605 int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
1606 stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
1607 stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
1609 jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
1610 jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1612 state.restoreLiveRegistersFromStackForCallWithThrownException();
1613 state.restoreScratch();
1614 CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
1616 HandlerInfo oldHandler = state.originalExceptionHandler();
1617 CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
1619 [=] (LinkBuffer& linkBuffer) {
1620 linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
// Register our makeshift handler for the new call site range, chaining
// on to the original handler's native code.
1622 HandlerInfo handlerToRegister = oldHandler;
1623 handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
1624 handlerToRegister.start = newExceptionHandlingCallSite.bits();
1625 handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
1626 codeBlock->appendExceptionHandler(handlerToRegister);
1629 // We set these to indicate to the stub to remove itself from the CodeBlock's
1630 // exception handler table when it is deallocated.
1631 codeBlockThatOwnsExceptionHandlers = codeBlock;
1632 ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
1633 callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
1636 LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
1637 if (linkBuffer.didFailToAllocate()) {
1639 dataLog("Did fail to allocate.\n");
1640 return MacroAssemblerCodePtr();
1643 CodeLocationLabel successLabel =
1644 stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
1646 linkBuffer.link(state.success, successLabel);
1650 stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
1653 dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
1655 MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
1656 codeBlock, linkBuffer,
1657 ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
// Collect which cases make calls and which cells they need kept alive;
// both feed into the stub routine we create to own this code.
1659 bool doesCalls = false;
1660 Vector<JSCell*> cellsToMark;
1661 for (auto& entry : cases)
1662 doesCalls |= entry->doesCalls(&cellsToMark);
1664 m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
1665 m_watchpoints = WTFMove(state.watchpoints);
1666 if (!state.weakReferences.isEmpty())
1667 m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
1669 dataLog("Returning: ", code.code(), "\n");
// Forwards the about-to-die notification to the compiled stub routine.
// NOTE(review): m_stubRoutine is dereferenced unconditionally here --
// confirm it is always non-null by the time aboutToDie() is called.
1673 void PolymorphicAccess::aboutToDie()
1675 m_stubRoutine->aboutToDie();
1682 using namespace JSC;
// Pretty-printer for AccessGenerationResult::Kind, hooked into WTF's
// PrintStream machinery; asserts on any unknown enumerator.
1684 void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
1687 case AccessGenerationResult::MadeNoChanges:
1688 out.print("MadeNoChanges");
1690 case AccessGenerationResult::GaveUp:
1691 out.print("GaveUp");
1693 case AccessGenerationResult::GeneratedNewCode:
1694 out.print("GeneratedNewCode");
1698 RELEASE_ASSERT_NOT_REACHED();
// Pretty-printer for AccessCase::AccessType, hooked into WTF's PrintStream
// machinery; one string per enumerator, asserting on any unknown value.
1701 void printInternal(PrintStream& out, AccessCase::AccessType type)
1704 case AccessCase::Load:
1707 case AccessCase::MegamorphicLoad:
1708 out.print("MegamorphicLoad");
1710 case AccessCase::Transition:
1711 out.print("Transition");
1713 case AccessCase::Replace:
1714 out.print("Replace");
1716 case AccessCase::Miss:
1719 case AccessCase::GetGetter:
1720 out.print("GetGetter");
1722 case AccessCase::Getter:
1723 out.print("Getter");
1725 case AccessCase::Setter:
1726 out.print("Setter");
1728 case AccessCase::CustomValueGetter:
1729 out.print("CustomValueGetter");
1731 case AccessCase::CustomAccessorGetter:
1732 out.print("CustomAccessorGetter");
1734 case AccessCase::CustomValueSetter:
1735 out.print("CustomValueSetter");
1737 case AccessCase::CustomAccessorSetter:
1738 out.print("CustomAccessorSetter");
1740 case AccessCase::IntrinsicGetter:
1741 out.print("IntrinsicGetter");
1743 case AccessCase::InHit:
1746 case AccessCase::InMiss:
1747 out.print("InMiss");
1749 case AccessCase::ArrayLength:
1750 out.print("ArrayLength");
1752 case AccessCase::StringLength:
1753 out.print("StringLength");
1755 case AccessCase::DirectArgumentsLength:
1756 out.print("DirectArgumentsLength");
1758 case AccessCase::ScopedArgumentsLength:
1759 out.print("ScopedArgumentsLength");
1763 RELEASE_ASSERT_NOT_REACHED();
1768 #endif // ENABLE(JIT)