1 # Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
6 # 1. Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
# Bytecode dispatch helpers. Each variant fetches the next opcode (narrow,
# wide16, or wide32 encoding), indexes the corresponding global opcode map,
# and jumps through the BytecodePtrTag-signed handler pointer.
# NOTE(review): this extract is missing interior lines (e.g. the narrow
# opcode load / map lookup and the macro `end`s) — do not treat these
# bodies as complete.
27 macro nextInstruction()
30 jmp [t1, t0, PtrSize], BytecodePtrTag
33 macro nextInstructionWide16()
# Offset 1 presumably skips the 1-byte wide16 prefix opcode — TODO confirm.
34 loadh 1[PB, PC, 1], t0
35 leap _g_opcodeMapWide16, t1
36 jmp [t1, t0, PtrSize], BytecodePtrTag
39 macro nextInstructionWide32()
# Offset 1 presumably skips the 1-byte wide32 prefix opcode — TODO confirm.
40 loadi 1[PB, PC, 1], t0
41 leap _g_opcodeMapWide32, t1
42 jmp [t1, t0, PtrSize], BytecodePtrTag
# Operand accessors. An operand lives in the bytecode stream at
# PB + PC + fieldIndex * operandSize (+1 for the wide-prefix byte in the
# wide encodings). The `u` variants zero-extend into dst; the plain
# variants sign-extend to 64 bits (loadbsq/loadhsq/loadis), as needed for
# signed virtual-register operands.
45 macro getuOperandNarrow(opcodeStruct, fieldName, dst)
46 loadb constexpr %opcodeStruct%_%fieldName%_index[PB, PC, 1], dst
49 macro getOperandNarrow(opcodeStruct, fieldName, dst)
50 loadbsq constexpr %opcodeStruct%_%fieldName%_index[PB, PC, 1], dst
# Wide16: 2-byte operands; "+ 1" skips the wide prefix byte.
53 macro getuOperandWide16(opcodeStruct, fieldName, dst)
54 loadh constexpr %opcodeStruct%_%fieldName%_index * 2 + 1[PB, PC, 1], dst
57 macro getOperandWide16(opcodeStruct, fieldName, dst)
58 loadhsq constexpr %opcodeStruct%_%fieldName%_index * 2 + 1[PB, PC, 1], dst
# Wide32: 4-byte operands; "+ 1" skips the wide prefix byte.
61 macro getuOperandWide32(opcodeStruct, fieldName, dst)
62 loadi constexpr %opcodeStruct%_%fieldName%_index * 4 + 1[PB, PC, 1], dst
65 macro getOperandWide32(opcodeStruct, fieldName, dst)
66 loadis constexpr %opcodeStruct%_%fieldName%_index * 4 + 1[PB, PC, 1], dst
# Return-value plumbing for opcode implementations.
# NOTE(review): interior lines of makeReturn/makeReturnProfiled are elided
# in this extract (the fn invocation and dst lookup are not shown).
69 macro makeReturn(get, dispatch, fn)
# Store the produced value (t2) into the destination virtual register slot.
73 storeq t2, [cfr, t1, 8]
# Like makeReturn, but also records the result in the opcode's value profile.
78 macro makeReturnProfiled(opcodeStruct, get, metadata, dispatch, fn)
82 valueProfile(opcodeStruct, t1, t3)
84 storeq t3, [cfr, t1, 8]
# Record `value` into the opcode's value-profile bucket in its metadata.
89 macro valueProfile(opcodeStruct, metadata, value)
90 storeq value, %opcodeStruct%::Metadata::m_profile.m_buckets[metadata]
# After returning from a call: re-derive PC (stashed in the ArgumentCount
# tag slot) and PB from the frame, store the C/JS result (r0) into m_dst,
# and value-profile it before dispatching.
93 macro dispatchAfterCall(size, opcodeStruct, dispatch)
94 loadi ArgumentCount + TagOffset[cfr], PC
95 loadp CodeBlock[cfr], PB
96 loadp CodeBlock::m_instructionsRawPointer[PB], PB
97 get(size, opcodeStruct, m_dst, t1)
98 storeq r0, [cfr, t1, 8]
99 metadata(size, opcodeStruct, t2, t1)
100 valueProfile(opcodeStruct, t2, r0)
# Helpers for calling C++ slow paths from the interpreter. The stack
# pointer must be 16-byte aligned on entry; the magic constants
# (0xbad0c002 etc.) identify the failing call site in crash dumps.
# NOTE(review): the per-architecture call sequences and `end`s are elided
# in this extract; only the C_LOOP branches are visible.
104 macro cCall2(function)
105 checkStackPointerAlignment(t4, 0xbad0c002)
106 if X86_64 or ARM64 or ARM64E
109 # Note: this implementation is only correct if the return type size is > 8 bytes.
110 # See macro cCall2Void for an implementation when the return type <= 8 bytes.
111 # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
112 # On entry rcx (a0), should contain a pointer to this stack space. The other parameters are shifted to the right,
113 # rdx (a1) should contain the first argument, and r8 (a2) should contain the second argument.
114 # On return, rax contains a pointer to this stack value, and we then need to copy the 16 byte return value into rax (r0) and rdx (r1)
115 # since the return value is expected to be split between the two.
116 # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
126 elsif C_LOOP or C_LOOP_WIN
# C loop interpreter: delegate to the cloop's slow-path trampoline.
127 cloopCallSlowPath function, a0, a1
# Two-argument C call with a return type of <= 8 bytes (or void).
133 macro cCall2Void(function)
134 if C_LOOP or C_LOOP_WIN
135 cloopCallSlowPathVoid function, a0, a1
137 # Note: we cannot use the cCall2 macro for Win64 in this case,
138 # as the Win64 cCall2 implementation is only correct when the return type size is > 8 bytes.
139 # On Win64, rcx and rdx are used for passing the first two parameters.
140 # We also need to make room on the stack for all four parameter registers.
141 # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
150 # This barely works. arg3 and arg4 should probably be immediates.
151 macro cCall4(function)
152 checkStackPointerAlignment(t4, 0xbad0c004)
153 if X86_64 or ARM64 or ARM64E
156 # On Win64, rcx, rdx, r8, and r9 are used for passing the first four parameters.
157 # We also need to make room on the stack for all four parameter registers.
158 # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
# VM entry trampoline: builds a VMEntryRecord below the entry frame, checks
# stack capacity, copies the ProtoCallFrame (header + arguments) into a new
# callee frame, publishes it as VM::topCallFrame, invokes makeCall, and
# finally unwinds the record, restoring the previous top frames.
# NOTE(review): labels (.copyHeaderLoop, .copyArgs, .throwStackOverflow,
# etc.) and several interior lines are elided in this extract.
167 macro doVMEntry(makeCall)
173 const protoCallFrame = a2
# Carve out the VMEntryRecord on the stack and link it to the VM's current
# top frames so they can be restored on exit.
175 vmEntryRecord(cfr, sp)
177 checkStackPointerAlignment(t4, 0xbad0dc01)
179 storep vm, VMEntryRecord::m_vm[sp]
180 loadp VM::topCallFrame[vm], t4
181 storep t4, VMEntryRecord::m_prevTopCallFrame[sp]
182 loadp VM::topEntryFrame[vm], t4
183 storep t4, VMEntryRecord::m_prevTopEntryFrame[sp]
184 loadp ProtoCallFrame::calleeValue[protoCallFrame], t4
185 storep t4, VMEntryRecord::m_callee[sp]
# Compute the size of the callee frame: padded arg count + header slots.
187 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4
188 addp CallFrameHeaderSlots, t4, t4
# Overflow if the prospective frame top (t3) is not below sp.
191 bqbeq sp, t3, .throwStackOverflow
193 # Ensure that we have enough additional stack capacity for the incoming args,
194 # and the frame for the JS code we're executing. We need to do this check
195 # before we start copying the args from the protoCallFrame below.
196 if C_LOOP or C_LOOP_WIN
197 bpaeq t3, VM::m_cloopStackLimit[vm], .stackHeightOK
200 cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3
201 bpeq t0, 0, .stackCheckFailed
209 jmp .throwStackOverflow
211 bpb t3, VM::m_softStackLimit[vm], .throwStackOverflow
216 move (constexpr ProtoCallFrame::numberOfRegisters), t3
219 # Copy the CodeBlock/Callee/ArgumentCount/|this| from protoCallFrame into the callee frame.
221 loadq [protoCallFrame, t3, 8], extraTempReg
222 storeq extraTempReg, CodeBlock[sp, t3, 8]
223 btinz t3, .copyHeaderLoop
# Pad missing arguments (actual count t4 < padded count) with undefined.
225 loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4
227 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], extraTempReg
230 bieq t4, extraTempReg, .copyArgs
231 move ValueUndefined, t3
234 storeq t3, ThisArgumentOffset + 8[sp, extraTempReg, 8]
235 bineq t4, extraTempReg, .fillExtraArgsLoop
# Copy the real arguments (backwards, counting t4 down to zero).
238 loadp ProtoCallFrame::args[protoCallFrame], t3
241 btiz t4, .copyArgsDone
243 loadq [t3, t4, 8], extraTempReg
244 storeq extraTempReg, ThisArgumentOffset + 8[sp, t4, 8]
# Publish the new frame and entry frame to the VM, then make the call.
250 storep t4, VM::topCallFrame[vm]
252 storep sp, VM::topCallFrame[vm]
254 storep cfr, VM::topEntryFrame[vm]
256 checkStackPointerAlignment(extraTempReg, 0xbad0dc02)
258 makeCall(entry, t3, t4)
260 # We may have just made a call into a JS function, so we can't rely on sp
261 # for anything but the fact that our own locals (ie the VMEntryRecord) are
262 # not below it. It also still has to be aligned, though.
263 checkStackPointerAlignment(t2, 0xbad0dc03)
# Normal exit: pop the VMEntryRecord and restore the previous top frames.
265 vmEntryRecord(cfr, t4)
267 loadp VMEntryRecord::m_vm[t4], vm
268 loadp VMEntryRecord::m_prevTopCallFrame[t4], t2
269 storep t2, VM::topCallFrame[vm]
270 loadp VMEntryRecord::m_prevTopEntryFrame[t4], t2
271 storep t2, VM::topEntryFrame[vm]
273 subp cfr, CalleeRegisterSaveSize, sp
# Stack-overflow path: report the error, then unwind the record the same way.
281 move protoCallFrame, a1
282 cCall2(_llint_throw_stack_overflow_error)
284 vmEntryRecord(cfr, t4)
286 loadp VMEntryRecord::m_vm[t4], vm
287 loadp VMEntryRecord::m_prevTopCallFrame[t4], extraTempReg
288 storep extraTempReg, VM::topCallFrame[vm]
289 loadp VMEntryRecord::m_prevTopEntryFrame[t4], extraTempReg
290 storep extraTempReg, VM::topEntryFrame[vm]
292 subp cfr, CalleeRegisterSaveSize, sp
# Invoke a JS function entry point (used by doVMEntry). Under the C loop
# interpreter the cloop call primitive is used; otherwise a real call
# through a JSEntryPtrTag-signed pointer.
300 macro makeJavaScriptCall(entry, temp, unused)
302 if C_LOOP or C_LOOP_WIN
303 cloopCallJSFunction entry
305 call entry, JSEntryPtrTag
# Invoke a host (native C++) function entry point. The two `call temp`
# sites presumably correspond to different platform/ABI branches whose
# surrounding lines are elided in this extract — TODO confirm.
310 macro makeHostFunctionCall(entry, temp, unused)
314 if C_LOOP or C_LOOP_WIN
318 # We need to allocate 32 bytes on the stack for the shadow space.
320 call temp, JSEntryPtrTag
323 call temp, JSEntryPtrTag
# Uncaught-exception handler: recover the VM from the callee cell, restore
# callee saves from the entry-frame buffer, clear the catch frame, then
# unwind to the entry frame exactly like the doVMEntry exit path.
327 op(handleUncaughtException, macro ()
# Derive VM* from the callee: cells live in MarkedBlocks, whose footer
# stores the owning VM.
328 loadp Callee[cfr], t3
329 andp MarkedBlockMask, t3
330 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t3], t3
331 restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
332 storep 0, VM::callFrameForCatch[t3]
# Jump back to the entry frame and pop its VMEntryRecord.
334 loadp VM::topEntryFrame[t3], cfr
335 vmEntryRecord(cfr, t2)
337 loadp VMEntryRecord::m_vm[t2], t3
338 loadp VMEntryRecord::m_prevTopCallFrame[t2], extraTempReg
339 storep extraTempReg, VM::topCallFrame[t3]
340 loadp VMEntryRecord::m_prevTopEntryFrame[t2], extraTempReg
341 storep extraTempReg, VM::topEntryFrame[t3]
343 subp cfr, CalleeRegisterSaveSize, sp
# Slow-path call wrappers. prepareStateForCCall/restoreStateAfterCCall
# bracket every C call (their bodies are elided in this extract —
# presumably they marshal PC/PB into a C-visible form and back).
351 macro prepareStateForCCall()
355 macro restoreStateAfterCCall()
# Generic slow-path call: prepare state, call, restore.
360 macro callSlowPath(slowPath)
361 prepareStateForCCall()
365 restoreStateAfterCCall()
# Tracing hooks: report an operand / a value to _llint_trace_* helpers.
368 macro traceOperand(fromWhere, operand)
369 prepareStateForCCall()
374 cCall4(_llint_trace_operand)
375 restoreStateAfterCCall()
378 macro traceValue(fromWhere, operand)
379 prepareStateForCCall()
384 cCall4(_llint_trace_value)
385 restoreStateAfterCCall()
388 # Call a slow path for call call opcodes.
389 macro callCallSlowPath(slowPath, action)
# PC is stashed in the ArgumentCount tag slot across the C call.
390 storei PC, ArgumentCount + TagOffset[cfr]
391 prepareStateForCCall()
# Watchdog/trap check: a nonzero result means an exception/termination was
# requested, so branch to the supplied throw handler.
398 macro callTrapHandler(throwHandler)
399 storei PC, ArgumentCount + TagOffset[cfr]
400 prepareStateForCCall()
403 cCall2(_llint_slow_path_handle_traps)
404 btpnz r0, throwHandler
405 loadi ArgumentCount + TagOffset[cfr], PC
# Loop OSR: ask the JIT for replacement code; nonzero r0 is a code pointer
# to jump to, otherwise resume in the interpreter.
408 macro checkSwitchToJITForLoop()
412 storei PC, ArgumentCount + TagOffset[cfr]
413 prepareStateForCCall()
416 cCall2(_llint_loop_osr)
419 jmp r0, JSEntryPtrTag
421 loadi ArgumentCount + TagOffset[cfr], PC
# Gigacage pointer caging: constrain ptr into the cage identified by
# basePtr/mask. No-op when the Gigacage is disabled or under the C loop.
425 macro cage(basePtr, mask, ptr, scratch)
426 if GIGACAGE_ENABLED and not (C_LOOP or C_LOOP_WIN)
427 loadp basePtr, scratch
# Cage a primitive-Gigacage pointer and (on arm64e, presumably) strip/check
# PAC bits against the length — interior lines elided in this extract.
435 macro cagedPrimitive(ptr, length, scratch, scratch2)
437 const source = scratch2
443 cage(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, source, scratch)
445 const numberOfPACBits = constexpr MacroAssembler::numberOfPACBits
446 bfiq scratch2, 0, 64 - numberOfPACBits, ptr
447 untagArrayPtr length, ptr
# Load a JSValue-Gigacage-caged pointer from `source` into `dest`.
452 macro loadCagedJSValue(source, dest, scratchOrLength)
454 cage(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, dest, scratchOrLength)
# Load the JSValue stored in the virtual register named by fieldName.
457 macro loadVariable(get, fieldName, valueReg)
458 get(fieldName, valueReg)
459 loadq [cfr, valueReg, 8], valueReg
462 # Index and value must be different registers. Index may be clobbered.
# Loads either a local (index < FirstConstantRegisterIndex*) from the call
# frame, or a constant from CodeBlock::m_constantRegisters. One variant per
# bytecode width because the constant-index threshold differs.
463 macro loadConstantOrVariable(size, index, value)
465 bpgteq index, FirstConstantRegisterIndexNarrow, .constant
466 loadq [cfr, index, 8], value
468 # Constant case: index into the constant pool, rebased by the threshold.
469 loadp CodeBlock[cfr], value
470 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value
471 loadq -(FirstConstantRegisterIndexNarrow * 8)[value, index, 8], value
476 bpgteq index, FirstConstantRegisterIndexWide16, .constant
477 loadq [cfr, index, 8], value
480 loadp CodeBlock[cfr], value
481 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value
482 loadq -(FirstConstantRegisterIndexWide16 * 8)[value, index, 8], value
487 bpgteq index, FirstConstantRegisterIndexWide32, .constant
488 loadq [cfr, index, 8], value
491 loadp CodeBlock[cfr], value
492 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value
# Wide32 rebases the index itself instead of folding into the offset
# (the offset would not fit in the immediate).
493 subp FirstConstantRegisterIndexWide32, index
494 loadq [value, index, 8], value
# Select the width-specific loader via the size macro.
498 size(loadNarrow, loadWide16, loadWide32, macro (load) load() end)
# As above, but branch to `slow` unless the value is a boxed int32
# (i.e. >= tagTypeNumber).
501 macro loadConstantOrVariableInt32(size, index, value, slow)
502 loadConstantOrVariable(size, index, value)
503 bqb value, tagTypeNumber, slow
# As above, but branch to `slow` unless the value is a cell (no tag bits).
506 macro loadConstantOrVariableCell(size, index, value, slow)
507 loadConstantOrVariable(size, index, value)
508 btqnz value, tagMask, slow
# GC write barriers. After storing into a cell, notify the GC unless the
# cell is already remembered or in Eden; the slow path calls out to C.
# `reloadAfterSlowPath` lets callers refetch registers the C call clobbers.
511 macro writeBarrierOnOperandWithReload(size, get, cellFieldName, reloadAfterSlowPath)
512 get(cellFieldName, t1)
513 loadConstantOrVariableCell(size, t1, t2, .writeBarrierDone)
514 skipIfIsRememberedOrInEden(
518 move t2, a1 # t2 can be a0 (not on 64 bits, but better safe than sorry)
520 cCall2Void(_llint_write_barrier_slow)
522 reloadAfterSlowPath()
# Common case: no reload needed after the slow path.
527 macro writeBarrierOnOperand(size, get, cellFieldName)
528 writeBarrierOnOperandWithReload(size, get, cellFieldName, macro () end)
# Barrier on the base cell, but only when the stored value is itself a
# cell (non-cells need no barrier).
531 macro writeBarrierOnOperands(size, get, cellFieldName, valueFieldName)
532 get(valueFieldName, t1)
533 loadConstantOrVariableCell(size, t1, t0, .writeBarrierDone)
534 btpz t0, .writeBarrierDone
536 writeBarrierOnOperand(size, get, cellFieldName)
# Barrier for stores into a global object / lexical environment; loadMacro
# computes the cell to barrier.
540 macro writeBarrierOnGlobal(size, get, valueFieldName, loadMacro)
541 get(valueFieldName, t1)
542 loadConstantOrVariableCell(size, t1, t0, .writeBarrierDone)
543 btpz t0, .writeBarrierDone
546 skipIfIsRememberedOrInEden(
552 cCall2Void(_llint_write_barrier_slow)
558 macro writeBarrierOnGlobalObject(size, get, valueFieldName)
559 writeBarrierOnGlobal(size, get, valueFieldName,
560 macro(registerToStoreGlobal)
561 loadp CodeBlock[cfr], registerToStoreGlobal
562 loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
566 macro writeBarrierOnGlobalLexicalEnvironment(size, get, valueFieldName)
567 writeBarrierOnGlobal(size, get, valueFieldName,
568 macro(registerToStoreGlobal)
569 loadp CodeBlock[cfr], registerToStoreGlobal
570 loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
571 loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal
# Decode a (poisoned) StructureID into a Structure*: look up the entropy
# table entry (index = id >> NumberOfStructureIDEntropyBits) and XOR it
# with the shifted id to undo the entropy scrambling. In-place: the
# decoded pointer replaces structureIDThenStructure.
575 macro structureIDToStructureWithScratch(structureIDThenStructure, scratch, scratch2)
576 loadp CodeBlock[cfr], scratch
577 move structureIDThenStructure, scratch2
578 loadp CodeBlock::m_vm[scratch], scratch
579 rshifti NumberOfStructureIDEntropyBits, scratch2
580 loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[scratch], scratch
581 loadp [scratch, scratch2, PtrSize], scratch2
582 lshiftp StructureEntropyBitsShift, structureIDThenStructure
583 xorp scratch2, structureIDThenStructure
# Convenience: read the cell's StructureID header and decode it.
586 macro loadStructureWithScratch(cell, structure, scratch, scratch2)
587 loadi JSCell::m_structureID[cell], structure
588 structureIDToStructureWithScratch(structure, scratch, scratch2)
591 # Entrypoints into the interpreter.
593 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Checks the incoming argument count against m_numParameters. If short,
# calls the slow path, which either throws or returns the number of slots
# (r1) to grow the frame by; the frame (including callee saves) is then
# slid down and the new slots filled with undefined.
# NOTE(review): several labels and interior lines are elided in this
# extract (.copyLoop/.fillLoop bodies are partial).
594 macro functionArityCheck(doneLabel, slowPath)
595 loadi PayloadOffset + ArgumentCount[cfr], t0
596 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
597 prepareStateForCCall()
600 cCall2(slowPath) # This slowPath has the protocol: r0 = 0 => no error, r0 != 0 => error
603 # We're throwing before the frame is fully set up. This frame will be
604 # ignored by the unwinder. So, let's restore the callee saves before we
605 # start unwinding. We need to do this before we change the cfr.
606 restoreCalleeSavesUsedByLLInt()
608 move r1, cfr # r1 contains caller frame
609 jmp _llint_throw_from_slow_path_trampoline
612 move r1, t1 # r1 contains slotsToAdd.
614 loadi PayloadOffset + ArgumentCount[cfr], t2
615 addi CallFrameHeaderSlots, t2
617 // Check if there are some unaligned slots we can use
619 andi StackAlignmentSlots - 1, t3
620 btiz t3, .noExtraSlot
621 move ValueUndefined, t0
623 storeq t0, [cfr, t2, 8]
625 bsubinz 1, t3, .fillExtraSlots
# Round slotsToAdd down to stack alignment.
626 andi ~(StackAlignmentSlots - 1), t1
# On arm64e, authenticate the return address before moving the frame.
633 untagReturnAddress t3
636 // Move frame up t1 slots
# Include the callee-save virtual registers in the region being moved.
639 subp CalleeSaveSpaceAsVirtualRegisters * 8, t3
640 addi CalleeSaveSpaceAsVirtualRegisters, t2
642 # Adds to sp are always 64-bit on arm64 so we need maintain t0's high bits.
648 storeq t0, [t3, t1, 8]
650 bsubinz 1, t2, .copyLoop
652 // Fill new slots with JSUndefined
654 move ValueUndefined, t0
656 storeq t0, [t3, t1, 8]
658 baddinz 1, t2, .fillLoop
667 # Reload CodeBlock and reset PC, since the slow_path clobbered them.
668 loadp CodeBlock[cfr], t1
669 loadp CodeBlock::m_instructionsRawPointer[t1], PB
# Branch to `label` if the VM has a pending exception; fall through to
# .noException otherwise. VM* is recovered from the callee's MarkedBlock
# footer, as in handleUncaughtException.
674 macro branchIfException(label)
675 loadp Callee[cfr], t3
676 andp MarkedBlockMask, t3
677 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t3], t3
678 btpz VM::m_exception[t3], .noException
683 # Instruction implementations
# op_enter: zero-initialize (with ValueUndefined) all local variables,
# skipping the callee-save virtual registers, counting t2 down to zero.
686 checkStackPointerAlignment(t2, 0xdead00e1)
687 loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
688 loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
689 subq CalleeSaveSpaceAsVirtualRegisters, t2
691 subq CalleeSaveSpaceAsVirtualRegisters * 8, t1
692 btiz t2, .opEnterDone
693 move ValueUndefined, t0
697 storeq t0, [t1, t2, 8]
699 btqnz t2, .opEnterLoop
701 callSlowPath(_slow_path_enter)
702 dispatchOp(narrow, op_enter)
# op_get_argument: return argument #index if within ArgumentCount, else
# undefined (no exception for out-of-bounds).
705 llintOpWithProfile(op_get_argument, OpGetArgument, macro (size, get, dispatch, return)
707 loadi PayloadOffset + ArgumentCount[cfr], t0
708 bilteq t0, t2, .opGetArgumentOutOfBounds
709 loadq ThisArgumentOffset[cfr, t2, 8], t0
712 .opGetArgumentOutOfBounds:
713 return(ValueUndefined)
# op_argument_count: ArgumentCount minus |this|, boxed as an int32.
717 llintOpWithReturn(op_argument_count, OpArgumentCount, macro (size, get, dispatch, return)
718 loadi PayloadOffset + ArgumentCount[cfr], t0
720 orq TagTypeNumber, t0
# op_get_scope: return the callee's scope chain head.
725 llintOpWithReturn(op_get_scope, OpGetScope, macro (size, get, dispatch, return)
726 loadp Callee[cfr], t0
727 loadp JSCallee::m_scope[t0], t0
# op_to_this: fast path when |this| is already a final object whose
# structure matches the cached structure ID; anything else goes slow.
732 llintOpWithMetadata(op_to_this, OpToThis, macro (size, get, dispatch, metadata, return)
734 loadq [cfr, t0, 8], t0
735 btqnz t0, tagMask, .opToThisSlow
736 bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
737 loadi JSCell::m_structureID[t0], t1
739 loadi OpToThis::Metadata::m_cachedStructureID[t2], t2
740 bineq t1, t2, .opToThisSlow
744 callSlowPath(_slow_path_to_this)
# op_check_tdz: throw a TDZ error if the register still holds ValueEmpty.
749 llintOp(op_check_tdz, OpCheckTdz, macro (size, get, dispatch)
750 get(m_targetVirtualRegister, t0)
751 loadConstantOrVariable(size, t0, t1)
752 bqneq t1, ValueEmpty, .opNotTDZ
753 callSlowPath(_slow_path_throw_tdz_error)
# op_mov: copy a constant-or-variable into the destination register.
760 llintOpWithReturn(op_mov, OpMov, macro (size, get, dispatch, return)
762 loadConstantOrVariable(size, t1, t2)
# op_not: fast path only for booleans (value ^ low bit stays in the
# boolean encoding); anything else goes slow.
767 llintOpWithReturn(op_not, OpNot, macro (size, get, dispatch, return)
769 loadConstantOrVariable(size, t0, t2)
771 btqnz t2, ~1, .opNotSlow
776 callSlowPath(_slow_path_not)
# Template for eq/neq-style opcodes: fast path requires both operands to
# be boxed int32s; integerComparison computes the raw boolean.
781 macro equalityComparisonOp(opcodeName, opcodeStruct, integerComparison)
782 llintOpWithReturn(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, return)
785 loadConstantOrVariableInt32(size, t0, t1, .slow)
786 loadConstantOrVariableInt32(size, t2, t0, .slow)
787 integerComparison(t0, t1, t0)
792 callSlowPath(_slow_path_%opcodeName%)
# Template for (n)eq_null: cells check MasqueradesAsUndefined (comparing
# the structure's global object against the code block's); immediates
# collapse undefined onto null before comparing.
798 macro equalNullComparisonOp(opcodeName, opcodeStruct, fn)
799 llintOpWithReturn(opcodeName, opcodeStruct, macro (size, get, dispatch, return)
801 loadq [cfr, t0, 8], t0
802 btqnz t0, tagMask, .immediate
803 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
806 .masqueradesAsUndefined:
807 loadStructureWithScratch(t0, t2, t1, t3)
808 loadp CodeBlock[cfr], t0
809 loadp CodeBlock::m_globalObject[t0], t0
810 cpeq Structure::m_globalObject[t2], t0, t0
# Immediate case: masking TagBitUndefined maps undefined to null.
813 andq ~TagBitUndefined, t0
814 cqeq t0, ValueNull, t0
# eq_null boxes the raw 0/1 as a boolean; neq_null additionally inverts it
# (xor with ValueTrue).
821 equalNullComparisonOp(op_eq_null, OpEqNull,
822 macro (value) orq ValueFalse, value end)
825 equalNullComparisonOp(op_neq_null, OpNeqNull,
826 macro (value) xorq ValueTrue, value end)
# op_is_undefined_or_null: undefined and null differ only in
# TagBitUndefined, so mask it off and compare against null.
829 llintOpWithReturn(op_is_undefined_or_null, OpIsUndefinedOrNull, macro (size, get, dispatch, return)
831 loadq [cfr, t0, 8], t0
832 andq ~TagBitUndefined, t0
833 cqeq t0, ValueNull, t0
# Template for stricteq/nstricteq: fast path handles two values that are
# each either a number or a non-cell immediate (cells and mixed
# double/int cases go slow); equalityOperation produces the raw result.
839 macro strictEqOp(opcodeName, opcodeStruct, equalityOperation)
840 llintOpWithReturn(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, return)
843 loadConstantOrVariable(size, t0, t1)
844 loadConstantOrVariable(size, t2, t0)
# If either operand is a cell, bail to the slow path.
847 btqz t2, tagMask, .slow
# Each side must be entirely >= tagTypeNumber (int32) or have no
# tagTypeNumber bits (non-numeric immediate); otherwise slow.
848 bqaeq t0, tagTypeNumber, .leftOK
849 btqnz t0, tagTypeNumber, .slow
851 bqaeq t1, tagTypeNumber, .rightOK
852 btqnz t1, tagTypeNumber, .slow
854 equalityOperation(t0, t1, t0)
859 callSlowPath(_slow_path_%opcodeName%)
865 strictEqOp(stricteq, OpStricteq,
866 macro (left, right, result) cqeq left, right, result end)
869 strictEqOp(nstricteq, OpNstricteq,
870 macro (left, right, result) cqneq left, right, result end)
# Jump-fused variants: same operand checks, but equalityOperation branches
# to the jump target instead of producing a value.
873 macro strictEqualityJumpOp(opcodeName, opcodeStruct, equalityOperation)
874 llintOpWithJump(op_%opcodeName%, opcodeStruct, macro (size, get, jump, dispatch)
877 loadConstantOrVariable(size, t2, t0)
878 loadConstantOrVariable(size, t3, t1)
881 btqz t2, tagMask, .slow
882 bqaeq t0, tagTypeNumber, .leftOK
883 btqnz t0, tagTypeNumber, .slow
885 bqaeq t1, tagTypeNumber, .rightOK
886 btqnz t1, tagTypeNumber, .slow
888 equalityOperation(t0, t1, .jumpTarget)
895 callSlowPath(_llint_slow_path_%opcodeName%)
901 strictEqualityJumpOp(jstricteq, OpJstricteq,
902 macro (left, right, target) bqeq left, right, target end)
905 strictEqualityJumpOp(jnstricteq, OpJnstricteq,
906 macro (left, right, target) bqneq left, right, target end)
# Template for inc/dec-style opcodes: fast path requires a boxed int32;
# arithmeticOperation mutates the payload (branching to slow on overflow),
# then the result is re-boxed and stored back in place.
909 macro preOp(opcodeName, opcodeStruct, arithmeticOperation)
910 llintOp(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch)
912 loadq [cfr, t0, 8], t1
913 bqb t1, tagTypeNumber, .slow
914 arithmeticOperation(t1, .slow)
915 orq tagTypeNumber, t1
916 storeq t1, [cfr, t0, 8]
919 callSlowPath(_slow_path_%opcodeName%)
# op_to_number: any boxed number (int32 or double) passes through
# unchanged; everything else goes slow.
924 llintOpWithProfile(op_to_number, OpToNumber, macro (size, get, dispatch, return)
926 loadConstantOrVariable(size, t0, t2)
927 bqaeq t2, tagTypeNumber, .opToNumberIsImmediate
928 btqz t2, tagTypeNumber, .opToNumberSlow
929 .opToNumberIsImmediate:
933 callSlowPath(_slow_path_to_number)
# op_to_string: fast path only for cells that are already strings.
938 llintOpWithReturn(op_to_string, OpToString, macro (size, get, dispatch, return)
940 loadConstantOrVariable(size, t1, t0)
941 btqnz t0, tagMask, .opToStringSlow
942 bbneq JSCell::m_type[t0], StringType, .opToStringSlow
947 callSlowPath(_slow_path_to_string)
# op_to_object: fast path only for cells whose type is already >= ObjectType.
952 llintOpWithProfile(op_to_object, OpToObject, macro (size, get, dispatch, return)
954 loadConstantOrVariable(size, t0, t2)
955 btqnz t2, tagMask, .opToObjectSlow
956 bbb JSCell::m_type[t2], ObjectType, .opToObjectSlow
960 callSlowPath(_slow_path_to_object)
# op_negate: int32 path (bails on 0 and INT32_MIN, where negation
# overflows or produces -0) and double path (flip the sign bit of the
# unboxed double). Each path records its type in the ArithProfile.
965 llintOpWithMetadata(op_negate, OpNegate, macro (size, get, dispatch, metadata, return)
967 loadConstantOrVariable(size, t0, t3)
969 loadi OpNegate::Metadata::m_arithProfile + ArithProfile::m_bits[t1], t2
970 bqb t3, tagTypeNumber, .opNegateNotInt
# Slow if payload is 0 or 0x80000000 (btiz tests value & 0x7fffffff == 0
# ... NOTE(review): mask semantics per visible code; negation edge cases).
971 btiz t3, 0x7fffffff, .opNegateSlow
973 orq tagTypeNumber, t3
974 ori ArithProfileInt, t2
975 storei t2, OpNegate::Metadata::m_arithProfile + ArithProfile::m_bits[t1]
# Not an int: must still be a boxed double, else slow.
978 btqz t3, tagTypeNumber, .opNegateSlow
# Flip the IEEE sign bit directly on the boxed representation.
979 xorq 0x8000000000000000, t3
980 ori ArithProfileNumber, t2
981 storei t2, OpNegate::Metadata::m_arithProfile + ArithProfile::m_bits[t1]
985 callSlowPath(_slow_path_negate)
# Template for binary arithmetic opcodes. Four fast paths: int/int (via
# integerOperationAndStore, which handles overflow bailout), plus the three
# mixed/double combinations, which unbox (add tagTypeNumber), run
# doubleOperation, and re-box (sub tagTypeNumber). Each path records its
# operand types in the ArithProfile.
# NOTE(review): interior lines (int-to-double conversions, labels, `end`s)
# are elided in this extract.
990 macro binaryOpCustomStore(opcodeName, opcodeStruct, integerOperationAndStore, doubleOperation)
991 llintOpWithMetadata(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, metadata, return)
# `profile` helper: OR the observed type bits into the ArithProfile.
995 ori type, %opcodeStruct%::Metadata::m_arithProfile + ArithProfile::m_bits[t5]
1000 loadConstantOrVariable(size, t0, t1)
1001 loadConstantOrVariable(size, t2, t0)
1002 bqb t0, tagTypeNumber, .op1NotInt
1003 bqb t1, tagTypeNumber, .op2NotInt
1005 integerOperationAndStore(t1, t0, .slow, t2)
1007 profile(ArithProfileIntInt)
1011 # First operand is definitely not an int, the second operand could be anything.
1012 btqz t0, tagTypeNumber, .slow
1013 bqaeq t1, tagTypeNumber, .op1NotIntOp2Int
1014 btqz t1, tagTypeNumber, .slow
1015 addq tagTypeNumber, t1
1017 profile(ArithProfileNumberNumber)
1020 profile(ArithProfileNumberInt)
# Unbox op1, run the double operation, re-box and store the result.
1024 addq tagTypeNumber, t0
1026 doubleOperation(ft1, ft0)
1028 subq tagTypeNumber, t0
1029 storeq t0, [cfr, t2, 8]
1033 # First operand is definitely an int, the second is definitely not.
1035 btqz t1, tagTypeNumber, .slow
1036 profile(ArithProfileIntNumber)
1038 addq tagTypeNumber, t1
1040 doubleOperation(ft1, ft0)
1042 subq tagTypeNumber, t0
1043 storeq t0, [cfr, t2, 8]
1047 callSlowPath(_slow_path_%opcodeName%)
# div uses x86 idiv only on X86_64 variants; the INT_MIN / -1 overflow
# case bails to slow, and the result is re-boxed on store.
1052 if X86_64 or X86_64_WIN
1053 binaryOpCustomStore(div, OpDiv,
1054 macro (left, right, slow, index)
1055 # Assume t3 is scratchable.
1057 bineq left, -1, .notNeg2TwoThe31DivByNeg1
1058 bieq right, -2147483648, .slow
1059 .notNeg2TwoThe31DivByNeg1:
1068 orq tagTypeNumber, t0
1069 storeq t0, [cfr, index, 8]
1071 macro (left, right) divd left, right end)
# mul: overflow-checked multiply into t3 (bails to slow on overflow).
1077 binaryOpCustomStore(mul, OpMul,
1078 macro (left, right, slow, index)
1079 # Assume t3 is scratchable.
1081 bmulio left, t3, slow
1086 orq tagTypeNumber, t3
1087 storeq t3, [cfr, index, 8]
1089 macro (left, right) muld left, right end)
# Simpler template: the integer op writes into `right`, which is then
# boxed and stored — used for add/sub.
1092 macro binaryOp(opcodeName, opcodeStruct, integerOperation, doubleOperation)
1093 binaryOpCustomStore(opcodeName, opcodeStruct,
1094 macro (left, right, slow, index)
1095 integerOperation(left, right, slow)
1096 orq tagTypeNumber, right
1097 storeq right, [cfr, index, 8]
1102 binaryOp(add, OpAdd,
1103 macro (left, right, slow) baddio left, right, slow end,
1104 macro (left, right) addd left, right end)
1107 binaryOp(sub, OpSub,
1108 macro (left, right, slow) bsubio left, right, slow end,
1109 macro (left, right) subd left, right end)
# op_unsigned: reinterpret an int32 as unsigned; negative values (which
# would not fit in an int32 when treated as unsigned) go slow.
1112 llintOpWithReturn(op_unsigned, OpUnsigned, macro (size, get, dispatch, return)
1114 loadConstantOrVariable(size, t1, t2)
1115 bilt t2, 0, .opUnsignedSlow
1118 callSlowPath(_slow_path_unsigned)
# Template for bitwise/shift opcodes: both operands must be boxed int32s;
# `operation` works on the raw payloads and the result is re-boxed.
1123 macro commonBitOp(opKind, opcodeName, opcodeStruct, operation)
1124 opKind(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, return)
1127 loadConstantOrVariable(size, t0, t1)
1128 loadConstantOrVariable(size, t2, t0)
1129 bqb t0, tagTypeNumber, .slow
1130 bqb t1, tagTypeNumber, .slow
1132 orq tagTypeNumber, t0
1136 callSlowPath(_slow_path_%opcodeName%)
# Two flavors: with and without value profiling of the result.
1141 macro bitOp(opcodeName, opcodeStruct, operation)
1142 commonBitOp(llintOpWithReturn, opcodeName, opcodeStruct, operation)
1145 macro bitOpProfiled(opcodeName, opcodeStruct, operation)
1146 commonBitOp(llintOpWithProfile, opcodeName, opcodeStruct, operation)
1149 bitOp(lshift, OpLshift,
1150 macro (left, right) lshifti left, right end)
1153 bitOp(rshift, OpRshift,
1154 macro (left, right) rshifti left, right end)
1157 bitOp(urshift, OpUrshift,
1158 macro (left, right) urshifti left, right end)
1160 bitOpProfiled(bitand, OpBitand,
1161 macro (left, right) andi left, right end)
1163 bitOpProfiled(bitor, OpBitor,
1164 macro (left, right) ori left, right end)
1166 bitOpProfiled(bitxor, OpBitxor,
1167 macro (left, right) xori left, right end)
# op_bitnot: int32 fast path; result re-boxed with tagTypeNumber.
1169 llintOpWithProfile(op_bitnot, OpBitnot, macro (size, get, dispatch, return)
1171 loadConstantOrVariableInt32(size, t0, t3, .opBitNotSlow)
1173 orq tagTypeNumber, t3
1176 callSlowPath(_slow_path_bitnot)
# op_overrides_has_instance: result is true unless hasInstanceValue is the
# default Function.prototype[@@hasInstance] AND the constructor has the
# ImplementsDefaultHasInstance flag.
1181 llintOp(op_overrides_has_instance, OpOverridesHasInstance, macro (size, get, dispatch)
1184 get(m_hasInstanceValue, t1)
1185 loadConstantOrVariable(size, t1, t0)
1186 loadp CodeBlock[cfr], t2
1187 loadp CodeBlock::m_globalObject[t2], t2
1188 loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t2], t2
1189 bqneq t0, t2, .opOverridesHasInstanceNotDefaultSymbol
1191 get(m_constructor, t1)
1192 loadConstantOrVariable(size, t1, t0)
# tbz: test the flag byte and produce the (inverted) boolean in t1.
1193 tbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, t1
1195 storeq t1, [cfr, t3, 8]
1198 .opOverridesHasInstanceNotDefaultSymbol:
1199 storeq ValueTrue, [cfr, t3, 8]
# op_is_empty: compare against the ValueEmpty sentinel (TDZ marker).
1204 llintOpWithReturn(op_is_empty, OpIsEmpty, macro (size, get, dispatch, return)
1206 loadConstantOrVariable(size, t1, t0)
1207 cqeq t0, ValueEmpty, t3
# op_is_undefined: immediates compare against ValueUndefined; cells are
# only "undefined" when they masquerade as undefined from the current
# global object.
1213 llintOpWithReturn(op_is_undefined, OpIsUndefined, macro (size, get, dispatch, return)
1215 loadConstantOrVariable(size, t1, t0)
1216 btqz t0, tagMask, .opIsUndefinedCell
1217 cqeq t0, ValueUndefined, t3
1221 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
1224 .masqueradesAsUndefined:
1225 loadStructureWithScratch(t0, t3, t1, t2)
1226 loadp CodeBlock[cfr], t1
1227 loadp CodeBlock::m_globalObject[t1], t1
1228 cpeq Structure::m_globalObject[t3], t1, t0
# op_is_boolean: booleans differ from ValueFalse only in the low bit
# (comparison lines elided in this extract).
1234 llintOpWithReturn(op_is_boolean, OpIsBoolean, macro (size, get, dispatch, return)
1236 loadConstantOrVariable(size, t1, t0)
# op_is_number: any value with tagTypeNumber bits set is a boxed number.
1244 llintOpWithReturn(op_is_number, OpIsNumber, macro (size, get, dispatch, return)
1246 loadConstantOrVariable(size, t1, t0)
1247 tqnz t0, tagTypeNumber, t1
# op_is_cell_with_type: true iff the value is a cell whose JSType equals
# the operand type byte.
1253 llintOpWithReturn(op_is_cell_with_type, OpIsCellWithType, macro (size, get, dispatch, return)
1254 getu(size, OpIsCellWithType, m_type, t0)
1256 loadConstantOrVariable(size, t1, t3)
1257 btqnz t3, tagMask, .notCellCase
1258 cbeq JSCell::m_type[t3], t0, t1
# op_is_object: true iff the value is a cell with type >= ObjectType.
1266 llintOpWithReturn(op_is_object, OpIsObject, macro (size, get, dispatch, return)
1268 loadConstantOrVariable(size, t1, t0)
1269 btqnz t0, tagMask, .opIsObjectNotCell
1270 cbaeq JSCell::m_type[t0], ObjectType, t1
# Property access at a variable offset: inline properties live inside the
# JSObject; out-of-line properties live in the butterfly, indexed by the
# negated offset (rebased by firstOutOfLineOffset - 2).
1278 macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
1279 bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
1280 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
1281 negi propertyOffsetAsInt
1282 sxi2q propertyOffsetAsInt, propertyOffsetAsInt
# Inline case: rebase storage to the inline-property area of the object.
1285 addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
1287 loadq (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8], value
# Store counterpart of the above; identical addressing.
1291 macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
1292 bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
1293 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
1294 negi propertyOffsetAsInt
1295 sxi2q propertyOffsetAsInt, propertyOffsetAsInt
1298 addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
1300 storeq value, (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
# op_get_by_id_direct: single-structure inline cache — if the base cell's
# StructureID matches the cached one, load the property at the cached
# offset and value-profile it; otherwise take the C++ slow path.
1304 llintOpWithMetadata(op_get_by_id_direct, OpGetByIdDirect, macro (size, get, dispatch, metadata, return)
1307 loadConstantOrVariableCell(size, t0, t3, .opGetByIdDirectSlow)
1308 loadi JSCell::m_structureID[t3], t1
1309 loadi OpGetByIdDirect::Metadata::m_structureID[t2], t0
1310 bineq t0, t1, .opGetByIdDirectSlow
1311 loadi OpGetByIdDirect::Metadata::m_offset[t2], t1
1312 loadPropertyAtVariableOffset(t1, t3, t0)
1313 valueProfile(OpGetByIdDirect, t2, t0)
1316 .opGetByIdDirectSlow:
1317 callSlowPath(_llint_slow_path_get_by_id_direct)
# op_get_by_id: four cached modes, dispatched on the metadata mode byte:
# Default (own property), ProtoLoad (property on a cached prototype),
# ArrayLength (fast array length read), and Unset (cached miss ->
# undefined). Any cache mismatch falls through to the slow path.
1322 llintOpWithMetadata(op_get_by_id, OpGetById, macro (size, get, dispatch, metadata, return)
1324 loadb OpGetById::Metadata::m_modeMetadata.mode[t2], t1
1326 loadConstantOrVariableCell(size, t0, t3, .opGetByIdSlow)
1329 bbneq t1, constexpr GetByIdMode::Default, .opGetByIdProtoLoad
1330 loadi JSCell::m_structureID[t3], t1
1331 loadi OpGetById::Metadata::m_modeMetadata.defaultMode.structureID[t2], t0
1332 bineq t0, t1, .opGetByIdSlow
1333 loadis OpGetById::Metadata::m_modeMetadata.defaultMode.cachedOffset[t2], t1
1334 loadPropertyAtVariableOffset(t1, t3, t0)
1335 valueProfile(OpGetById, t2, t0)
1338 .opGetByIdProtoLoad:
1339 bbneq t1, constexpr GetByIdMode::ProtoLoad, .opGetByIdArrayLength
1340 loadi JSCell::m_structureID[t3], t1
1341 loadi OpGetById::Metadata::m_modeMetadata.protoLoadMode.structureID[t2], t3
1342 bineq t3, t1, .opGetByIdSlow
1343 loadis OpGetById::Metadata::m_modeMetadata.protoLoadMode.cachedOffset[t2], t1
# Load from the cached prototype cell rather than the base.
1344 loadp OpGetById::Metadata::m_modeMetadata.protoLoadMode.cachedSlot[t2], t3
1345 loadPropertyAtVariableOffset(t1, t3, t0)
1346 valueProfile(OpGetById, t2, t0)
1349 .opGetByIdArrayLength:
1350 bbneq t1, constexpr GetByIdMode::ArrayLength, .opGetByIdUnset
1352 arrayProfile(OpGetById::Metadata::m_modeMetadata.arrayLengthMode.arrayProfile, t0, t2, t5)
1353 btiz t0, IsArray, .opGetByIdSlow
1354 btiz t0, IndexingShapeMask, .opGetByIdSlow
# Length lives in the IndexingHeader just before the (caged) butterfly.
1355 loadCagedJSValue(JSObject::m_butterfly[t3], t0, t1)
1356 loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
# Lengths >= 2^31 cannot be boxed as int32 — go slow.
1357 bilt t0, 0, .opGetByIdSlow
1358 orq tagTypeNumber, t0
1359 valueProfile(OpGetById, t2, t0)
# Unset mode: structure match proves the property is absent.
1363 loadi JSCell::m_structureID[t3], t1
1364 loadi OpGetById::Metadata::m_modeMetadata.unsetMode.structureID[t2], t0
1365 bineq t0, t1, .opGetByIdSlow
1366 valueProfile(OpGetById, t2, ValueUndefined)
1367 return(ValueUndefined)
1370 callSlowPath(_llint_slow_path_get_by_id)
# op_put_by_id: inline-cached `base.name = value` store. Two fast paths:
# a structure transition (possibly with a prototype-chain check) and a
# plain replace at a cached offset. t0 = base cell, t5 = metadata.
1375 llintOpWithMetadata(op_put_by_id, OpPutById, macro (size, get, dispatch, metadata, return)
1377 loadConstantOrVariableCell(size, t3, t0, .opPutByIdSlow)
1379 loadi OpPutById::Metadata::m_oldStructureID[t5], t2
1380 bineq t2, JSCell::m_structureID[t0], .opPutByIdSlow
1382 # At this point, we have:
1384 # t2 -> current structure ID
# A zero new-structure ID means this cache is a replace, not a transition.
1387 loadi OpPutById::Metadata::m_newStructureID[t5], t1
1388 btiz t1, .opPutByIdNotTransition
1390 # This is the transition case. t1 holds the new structureID. t2 holds the old structure ID.
1391 # If we have a chain, we need to check it. t0 is the base. We may clobber t1 to use it as
1393 loadp OpPutById::Metadata::m_structureChain[t5], t3
1394 btpz t3, .opPutByIdTransitionDirect
1396 structureIDToStructureWithScratch(t2, t1, t3)
1398 # reload the StructureChain since we used t3 as a scratch above
1399 loadp OpPutById::Metadata::m_structureChain[t5], t3
1401 loadp StructureChain::m_vector[t3], t3
1402 assert(macro (ok) btpnz t3, ok end)
1404 loadq Structure::m_prototype[t2], t2
1405 bqeq t2, ValueNull, .opPutByIdTransitionChainDone
1406 .opPutByIdTransitionChainLoop:
1407 # At this point, t2 contains a prototype, and [t3] contains the Structure* that we want that
1408 # prototype to have. We don't want to have to load the Structure* for t2. Instead, we load
1409 # the Structure* from [t3], and then we compare its id to the id in the header of t2.
1411 loadi JSCell::m_structureID[t2], t2
1412 # Now, t1 has the Structure* and t2 has the StructureID that we want that Structure* to have.
1413 bineq t2, Structure::m_blob + StructureIDBlob::u.fields.structureID[t1], .opPutByIdSlow
# Walk to the next prototype; loop until we hit null at the end of the chain.
1415 loadq Structure::m_prototype[t1], t2
1416 bqneq t2, ValueNull, .opPutByIdTransitionChainLoop
1418 .opPutByIdTransitionChainDone:
1419 # Reload the new structure, since we clobbered it above.
1420 loadi OpPutById::Metadata::m_newStructureID[t5], t1
# Commit the transition by writing the new structure ID into the cell header.
1422 .opPutByIdTransitionDirect:
1423 storei t1, JSCell::m_structureID[t0]
1424 writeBarrierOnOperandWithReload(size, get, m_base, macro ()
1425 # Reload metadata into t5
1427 # Reload base into t0
1429 loadConstantOrVariable(size, t1, t0)
# Replace path: store the value at the cached offset; no structure change.
1432 .opPutByIdNotTransition:
1433 # The only thing live right now is t0, which holds the base.
1435 loadConstantOrVariable(size, t1, t2)
1436 loadi OpPutById::Metadata::m_offset[t5], t1
1437 storePropertyAtVariableOffset(t1, t0, t2)
1438 writeBarrierOnOperands(size, get, m_base, m_value)
1442 callSlowPath(_llint_slow_path_put_by_id)
# op_get_by_val: `base[index]` load. Dispatches on the base's indexing
# shape (Int32/Contiguous/Double/ArrayStorage), then falls through to a
# typed-array fast path that bisects on the typed-array type.
1447 llintOpWithMetadata(op_get_by_val, OpGetByVal, macro (size, get, dispatch, metadata, return)
# Store the result into the destination slot and record it in the value profile.
1448 macro finishGetByVal(result, scratch)
1450 storeq result, [cfr, scratch, 8]
1451 valueProfile(OpGetByVal, t5, result)
# Box an int32 result (tag with tagTypeNumber) before finishing.
1455 macro finishIntGetByVal(result, scratch)
1456 orq tagTypeNumber, result
1457 finishGetByVal(result, scratch)
# Box a double result: move to GPR, then subtract the numeric tag bias.
1460 macro finishDoubleGetByVal(result, scratch1, scratch2)
1461 fd2q result, scratch1
1462 subq tagTypeNumber, scratch1
1463 finishGetByVal(scratch1, scratch2)
1469 loadConstantOrVariableCell(size, t2, t0, .opGetByValSlow)
1472 arrayProfile(OpGetByVal::Metadata::m_arrayProfile, t2, t5, t1)
1475 loadConstantOrVariableInt32(size, t3, t1, .opGetByValSlow)
# tagTypeNumber is clobbered as a scratch by loadCagedJSValue and restored below.
1478 loadCagedJSValue(JSObject::m_butterfly[t0], t3, tagTypeNumber)
1479 move TagTypeNumber, tagTypeNumber
1481 andi IndexingShapeMask, t2
1482 bieq t2, Int32Shape, .opGetByValIsContiguous
1483 bineq t2, ContiguousShape, .opGetByValNotContiguous
# Int32/Contiguous: bounds-check against publicLength, load; a zero (hole)
# means an empty slot, so take the slow path.
1485 .opGetByValIsContiguous:
1486 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValSlow
1488 loadq [t3, t1, 8], t2
1489 btqz t2, .opGetByValSlow
# Double shape: NaN means a hole (holes are stored as impure NaN).
1492 .opGetByValNotContiguous:
1493 bineq t2, DoubleShape, .opGetByValNotDouble
1494 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValSlow
1496 loadd [t3, t1, 8], ft0
1497 bdnequn ft0, ft0, .opGetByValSlow
1499 subq tagTypeNumber, t2
# ArrayStorage / SlowPutArrayStorage: bounds-check against vectorLength.
1502 .opGetByValNotDouble:
1503 subi ArrayStorageShape, t2
1504 bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValNotIndexedStorage
1505 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValSlow
1507 loadq ArrayStorage::m_vector[t3, t1, 8], t2
1508 btqz t2, .opGetByValSlow
1511 storeq t2, [cfr, t0, 8]
1512 valueProfile(OpGetByVal, t5, t2)
1515 .opGetByValNotIndexedStorage:
1516 # First lets check if we even have a typed array. This lets us do some boilerplate up front.
1517 loadb JSCell::m_type[t0], t2
# t2 becomes the type relative to FirstTypedArrayType; unsigned compare
# rejects both non-typed-array types and DataView in one branch.
1518 subi FirstTypedArrayType, t2
1519 biaeq t2, NumberOfTypedArrayTypesExcludingDataView, .opGetByValSlow
1521 # Sweet, now we know that we have a typed array. Do some basic things now.
1526 loadi JSArrayBufferView::m_length[t0], length
1527 biaeq t1, length, .opGetByValSlow
1529 # length and scratch are intentionally undefined on this branch because they are not used on other platforms.
1530 biaeq t1, JSArrayBufferView::m_length[t0], .opGetByValSlow
1533 loadp JSArrayBufferView::m_vector[t0], t3
1534 cagedPrimitive(t3, length, t0, scratch)
1536 # Now bisect through the various types:
1539 # Uint8ClampedArrayType,
1547 bia t2, Uint16ArrayType - FirstTypedArrayType, .opGetByValAboveUint16Array
1549 # We have one of Int8ArrayType .. Uint16ArrayType.
1550 bia t2, Uint8ClampedArrayType - FirstTypedArrayType, .opGetByValInt16ArrayOrUint16Array
1552 # We have one of Int8ArrayType ... Uint8ClampedArrayType
1553 bia t2, Int8ArrayType - FirstTypedArrayType, .opGetByValUint8ArrayOrUint8ClampedArray
1555 # We have Int8ArrayType.
1556 loadbsi [t3, t1], t0
1557 finishIntGetByVal(t0, t1)
1559 .opGetByValUint8ArrayOrUint8ClampedArray:
1560 bia t2, Uint8ArrayType - FirstTypedArrayType, .opGetByValUint8ClampedArray
1562 # We have Uint8ArrayType.
1564 finishIntGetByVal(t0, t1)
1566 .opGetByValUint8ClampedArray:
1567 # We have Uint8ClampedArrayType.
1569 finishIntGetByVal(t0, t1)
1571 .opGetByValInt16ArrayOrUint16Array:
1572 # We have either Int16ArrayType or Uint16ArrayType.
1573 bia t2, Int16ArrayType - FirstTypedArrayType, .opGetByValUint16Array
1575 # We have Int16ArrayType.
1576 loadhsi [t3, t1, 2], t0
1577 finishIntGetByVal(t0, t1)
1579 .opGetByValUint16Array:
1580 # We have Uint16ArrayType.
1581 loadh [t3, t1, 2], t0
1582 finishIntGetByVal(t0, t1)
1584 .opGetByValAboveUint16Array:
1585 # We have one of Int32ArrayType .. Float64ArrayType.
1586 bia t2, Uint32ArrayType - FirstTypedArrayType, .opGetByValFloat32ArrayOrFloat64Array
1588 # We have either Int32ArrayType or Uint32ArrayType
1589 bia t2, Int32ArrayType - FirstTypedArrayType, .opGetByValUint32Array
1591 # We have Int32ArrayType.
1592 loadi [t3, t1, 4], t0
1593 finishIntGetByVal(t0, t1)
1595 .opGetByValUint32Array:
1596 # We have Uint32ArrayType.
1597 # This is the hardest part because of large unsigned values.
# Values >= 2^31 don't fit in an int32 JSValue; defer to the slow path.
1598 loadi [t3, t1, 4], t0
1599 bilt t0, 0, .opGetByValSlow # This case is still awkward to implement in LLInt.
1600 finishIntGetByVal(t0, t1)
1602 .opGetByValFloat32ArrayOrFloat64Array:
1603 # We have one of Float32ArrayType or Float64ArrayType. Sadly, we cannot handle Float32Array
1604 # inline yet. That would require some offlineasm changes.
1605 bieq t2, Float32ArrayType - FirstTypedArrayType, .opGetByValSlow
1607 # We have Float64ArrayType.
# NaN must be purified before boxing; punt NaN reads to the slow path.
1608 loadd [t3, t1, 8], ft0
1609 bdnequn ft0, ft0, .opGetByValSlow
1610 finishDoubleGetByVal(ft0, t0, t1)
1613 callSlowPath(_llint_slow_path_get_by_val)
# putByValOp: shared implementation of `base[index] = value`, instantiated
# below for op_put_by_val and op_put_by_val_direct. Dispatches on the base's
# indexing shape; t1 = base cell, t3 = int index, t5 = metadata.
1618 macro putByValOp(opcodeName, opcodeStruct)
1619 llintOpWithMetadata(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, metadata, return)
# Shared store path for Int32/Double/Contiguous shapes. In-bounds stores go
# straight through; writes past publicLength but inside vectorLength grow
# the length and record a may-store-to-hole in the array profile.
1620 macro contiguousPutByVal(storeCallback)
1621 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
1624 storeCallback(t2, t1, [t0, t3, 8])
1628 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
1629 storeb 1, %opcodeStruct%::Metadata::m_arrayProfile.m_mayStoreToHole[t5]
1631 storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
1636 loadConstantOrVariableCell(size, t0, t1, .opPutByValSlow)
1639 arrayProfile(%opcodeStruct%::Metadata::m_arrayProfile, t2, t5, t0)
1641 loadConstantOrVariableInt32(size, t0, t3, .opPutByValSlow)
# tagTypeNumber is borrowed as a scratch by loadCagedJSValue and restored below.
1643 loadCagedJSValue(JSObject::m_butterfly[t1], t0, tagTypeNumber)
1644 move TagTypeNumber, tagTypeNumber
# Copy-on-write butterflies must be copied first; that happens in the slow path.
1645 btinz t2, CopyOnWrite, .opPutByValSlow
1646 andi IndexingShapeMask, t2
# Int32 shape: the stored value must itself be an int32 JSValue.
1647 bineq t2, Int32Shape, .opPutByValNotInt32
1649 macro (operand, scratch, address)
1650 loadConstantOrVariable(size, operand, scratch)
1651 bqb scratch, tagTypeNumber, .opPutByValSlow
1652 storeq scratch, address
1653 writeBarrierOnOperands(size, get, m_base, m_value)
# Double shape: int32 values are converted; boxed doubles are unboxed;
# NaN (hole sentinel) is rejected to the slow path.
1656 .opPutByValNotInt32:
1657 bineq t2, DoubleShape, .opPutByValNotDouble
1659 macro (operand, scratch, address)
1660 loadConstantOrVariable(size, operand, scratch)
1661 bqb scratch, tagTypeNumber, .notInt
1665 addq tagTypeNumber, scratch
1667 bdnequn ft0, ft0, .opPutByValSlow
1670 writeBarrierOnOperands(size, get, m_base, m_value)
# Contiguous shape: any JSValue may be stored directly.
1673 .opPutByValNotDouble:
1674 bineq t2, ContiguousShape, .opPutByValNotContiguous
1676 macro (operand, scratch, address)
1677 loadConstantOrVariable(size, operand, scratch)
1678 storeq scratch, address
1679 writeBarrierOnOperands(size, get, m_base, m_value)
# ArrayStorage shape: bounds-check against vectorLength; a zero slot means
# we are filling a hole and must bump m_numValuesInVector (and maybe length).
1682 .opPutByValNotContiguous:
1683 bineq t2, ArrayStorageShape, .opPutByValSlow
1684 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
1685 btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty
1686 .opPutByValArrayStorageStoreResult:
1688 loadConstantOrVariable(size, t2, t1)
1689 storeq t1, ArrayStorage::m_vector[t0, t3, 8]
1690 writeBarrierOnOperands(size, get, m_base, m_value)
1693 .opPutByValArrayStorageEmpty:
1694 storeb 1, %opcodeStruct%::Metadata::m_arrayProfile.m_mayStoreToHole[t5]
1695 addi 1, ArrayStorage::m_numValuesInVector[t0]
1696 bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
1698 storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
1699 jmp .opPutByValArrayStorageStoreResult
# Out-of-bounds writes are recorded in the profile, then handled slowly.
1701 .opPutByValOutOfBounds:
1702 storeb 1, %opcodeStruct%::Metadata::m_arrayProfile.m_outOfBounds[t5]
1704 callSlowPath(_llint_slow_path_%opcodeName%)
# Instantiate the template for the normal and the "direct" (own-property) put.
1709 putByValOp(put_by_val, OpPutByVal)
1711 putByValOp(put_by_val_direct, OpPutByValDirect)
# llintJumpTrueOrFalseOp: template for jtrue/jfalse-style conditional jumps.
# Fast path only when the condition value has no bits outside the low nibble
# (i.e. it is one of the small immediate encodings — presumably boolean-like;
# other values go to the slow path for full ToBoolean). conditionOp decides
# whether the low bits mean "take the jump".
1714 macro llintJumpTrueOrFalseOp(opcodeName, opcodeStruct, conditionOp)
1715 llintOpWithJump(op_%opcodeName%, opcodeStruct, macro (size, get, jump, dispatch)
1716 get(m_condition, t1)
1717 loadConstantOrVariable(size, t1, t0)
1718 btqnz t0, ~0xf, .slow
1719 conditionOp(t0, .target)
1726 callSlowPath(_llint_slow_path_%opcodeName%)
# equalNullJumpOp: template for jeq_null / jneq_null. Cells take the
# cellHandler path (which must handle MasqueradesAsUndefined objects);
# non-cells are normalized (undefined -> null bit pattern) and compared
# against ValueNull by the immediateHandler.
1732 macro equalNullJumpOp(opcodeName, opcodeStruct, cellHandler, immediateHandler)
1733 llintOpWithJump(op_%opcodeName%, opcodeStruct, macro (size, get, jump, dispatch)
1735 assertNotConstant(size, t0)
1736 loadq [cfr, t0, 8], t0
1737 btqnz t0, tagMask, .immediate
1738 loadStructureWithScratch(t0, t2, t1, t3)
1739 cellHandler(t2, JSCell::m_flags[t0], .target)
# Clearing TagBitUndefined maps undefined onto null so one compare covers both.
1746 andq ~TagBitUndefined, t0
1747 immediateHandler(t0, .target)
# jeq_null: a cell equals null only if it masquerades as undefined AND its
# structure's global object matches the current one.
1752 equalNullJumpOp(jeq_null, OpJeqNull,
1753 macro (structure, value, target)
1754 btbz value, MasqueradesAsUndefined, .notMasqueradesAsUndefined
1755 loadp CodeBlock[cfr], t0
1756 loadp CodeBlock::m_globalObject[t0], t0
1757 bpeq Structure::m_globalObject[structure], t0, target
1758 .notMasqueradesAsUndefined:
1760 macro (value, target) bqeq value, ValueNull, target end)
# jneq_null: the inverse — jump unless the cell masquerades as undefined
# in the current global object.
1763 equalNullJumpOp(jneq_null, OpJneqNull,
1764 macro (structure, value, target)
1765 btbz value, MasqueradesAsUndefined, target
1766 loadp CodeBlock[cfr], t0
1767 loadp CodeBlock::m_globalObject[t0], t0
1768 bpneq Structure::m_globalObject[structure], t0, target
1770 macro (value, target) bqneq value, ValueNull, target end)
# op_jneq_ptr: jump if the operand is not the cached "special pointer"
# (indexed into JSGlobalObject::m_specialPointers). When the jump is taken,
# m_hasJumped is recorded in metadata so the JIT knows the check failed.
1773 llintOpWithMetadata(op_jneq_ptr, OpJneqPtr, macro (size, get, dispatch, metadata, return)
1775 getu(size, OpJneqPtr, m_specialPointer, t1)
1776 loadp CodeBlock[cfr], t2
1777 loadp CodeBlock::m_globalObject[t2], t2
1778 loadp JSGlobalObject::m_specialPointers[t2, t1, PtrSize], t1
1779 bpneq t1, [cfr, t0, 8], .opJneqPtrTarget
1784 storeb 1, OpJneqPtr::Metadata::m_hasJumped[t5]
1785 get(m_targetLabel, t0)
# compareJumpOp: template for jless/jlesseq/jgreater/... Fast-paths the
# int32/int32 case with integerCompare; mixed or double operands are
# unboxed (addq tagTypeNumber re-biases a boxed double) and compared with
# doubleCompare. Non-numbers bail to the slow path.
1790 macro compareJumpOp(opcodeName, opcodeStruct, integerCompare, doubleCompare)
1791 llintOpWithJump(op_%opcodeName%, opcodeStruct, macro (size, get, jump, dispatch)
1794 loadConstantOrVariable(size, t2, t0)
1795 loadConstantOrVariable(size, t3, t1)
# Both boxed int32s (>= tagTypeNumber)? Then compare as integers.
1796 bqb t0, tagTypeNumber, .op1NotInt
1797 bqb t1, tagTypeNumber, .op2NotInt
1798 integerCompare(t0, t1, .jumpTarget)
# op1 is not an int32: it must at least be a number (some numeric tag bit set).
1802 btqz t0, tagTypeNumber, .slow
1803 bqb t1, tagTypeNumber, .op1NotIntOp2NotInt
1806 .op1NotIntOp2NotInt:
1807 btqz t1, tagTypeNumber, .slow
1808 addq tagTypeNumber, t1
1811 addq tagTypeNumber, t0
1813 doubleCompare(ft0, ft1, .jumpTarget)
# op2 not an int32 (op1 was): unbox op2 and compare as doubles.
1818 btqz t1, tagTypeNumber, .slow
1819 addq tagTypeNumber, t1
1821 doubleCompare(ft0, ft1, .jumpTarget)
# equalityJumpOp: template for jeq/jneq-style jumps. Only the int32/int32
# case is handled inline; anything else falls to the opcode's slow path.
1834 macro equalityJumpOp(opcodeName, opcodeStruct, integerComparison)
1835 llintOpWithJump(op_%opcodeName%, opcodeStruct, macro (size, get, jump, dispatch)
1838 loadConstantOrVariableInt32(size, t2, t0, .slow)
1839 loadConstantOrVariableInt32(size, t3, t1, .slow)
1840 integerComparison(t0, t1, .jumpTarget)
1847 callSlowPath(_llint_slow_path_%opcodeName%)
# compareUnsignedJumpOp: template for the below/belowEq jump opcodes.
# Operands are known unsigned int32s (no type checks, no slow path needed).
1853 macro compareUnsignedJumpOp(opcodeName, opcodeStruct, integerCompareMacro)
1854 llintOpWithJump(op_%opcodeName%, opcodeStruct, macro (size, get, jump, dispatch)
1857 loadConstantOrVariable(size, t2, t0)
1858 loadConstantOrVariable(size, t3, t1)
1859 integerCompareMacro(t0, t1, .jumpTarget)
# compareUnsignedOp: template for value-producing unsigned comparisons.
# Loads both operands, then integerCompareAndSet writes the boolean into t0.
1868 macro compareUnsignedOp(opcodeName, opcodeStruct, integerCompareAndSet)
1869 llintOpWithReturn(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, return)
1872 loadConstantOrVariable(size, t0, t1)
1873 loadConstantOrVariable(size, t2, t0)
1874 integerCompareAndSet(t0, t1, t0)
# op_switch_imm: switch over an int32 scrutinee via the CodeBlock's
# SimpleJumpTable. In-range entries dispatch to the stored offset; zero
# entries and out-of-range values fall through to the default offset.
# Doubles are routed to the slow path; other non-ints fall through.
1881 llintOpWithJump(op_switch_imm, OpSwitchImm, macro (size, get, jump, dispatch)
1882 get(m_scrutinee, t2)
1883 getu(size, OpSwitchImm, m_tableIndex, t3)
1884 loadConstantOrVariable(size, t2, t1)
1885 loadp CodeBlock[cfr], t2
1886 loadp CodeBlock::m_rareData[t2], t2
# Index into the jump-table vector by byte offset (tableIndex * sizeof entry).
1887 muli sizeof SimpleJumpTable, t3
1888 loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
1890 bqb t1, tagTypeNumber, .opSwitchImmNotInt
# Rebase the scrutinee by the table's minimum, then bounds-check it.
1891 subi SimpleJumpTable::min[t2], t1
1892 biaeq t1, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
1893 loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
1894 loadis [t3, t1, 4], t1
# A zero branch offset marks "no case here".
1895 btiz t1, .opSwitchImmFallThrough
1896 dispatchIndirect(t1)
1899 btqnz t1, tagTypeNumber, .opSwitchImmSlow # Go slow if it's a double.
1900 .opSwitchImmFallThrough:
1901 jump(m_defaultOffset)
1904 callSlowPath(_llint_slow_path_switch_imm)
# op_switch_char: switch over a single-character string scrutinee.
# Only non-rope, length-1 strings are handled inline (8-bit or 16-bit
# buffers); ropes and anything else fall through or go to the slow path.
1909 llintOpWithJump(op_switch_char, OpSwitchChar, macro (size, get, jump, dispatch)
1910 get(m_scrutinee, t2)
1911 getu(size, OpSwitchChar, m_tableIndex, t3)
1912 loadConstantOrVariable(size, t2, t1)
1913 loadp CodeBlock[cfr], t2
1914 loadp CodeBlock::m_rareData[t2], t2
1915 muli sizeof SimpleJumpTable, t3
1916 loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
# Must be a cell of StringType; anything else takes the default edge.
1918 btqnz t1, tagMask, .opSwitchCharFallThrough
1919 bbneq JSCell::m_type[t1], StringType, .opSwitchCharFallThrough
1920 loadp JSString::m_fiber[t1], t0
# Ropes have no flat buffer; resolve them via the slow path.
1921 btpnz t0, isRopeInPointer, .opSwitchOnRope
1922 bineq StringImpl::m_length[t0], 1, .opSwitchCharFallThrough
1923 loadp StringImpl::m_data8[t0], t1
1924 btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
1926 jmp .opSwitchCharReady
# Rebase the character code by the table minimum and bounds-check.
1930 subi SimpleJumpTable::min[t2], t0
1931 biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
1932 loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
1933 loadis [t2, t0, 4], t1
1934 btiz t1, .opSwitchCharFallThrough
1935 dispatchIndirect(t1)
1937 .opSwitchCharFallThrough:
1938 jump(m_defaultOffset)
# Rope with length != 1 can never match a single-char case.
1941 bineq JSRopeString::m_compactFibers + JSRopeString::CompactFibers::m_length[t1], 1, .opSwitchCharFallThrough
1943 .opSwitchOnRopeChar:
1944 callSlowPath(_llint_slow_path_switch_char)
# arrayProfileForCall: record the structure ID of the `this` argument into
# the call's array profile, so the JIT can specialize later. Non-cell
# `this` values are skipped.
1949 # we assume t5 contains the metadata, and we should not scratch that
1950 macro arrayProfileForCall(opcodeStruct, getu)
1953 loadq ThisArgumentOffset[cfr, t3, 8], t0
1954 btqnz t0, tagMask, .done
1955 loadi JSCell::m_structureID[t0], t3
1956 storei t3, %opcodeStruct%::Metadata::m_callLinkInfo.m_arrayProfile.m_lastSeenStructureID[t5]
# commonCallOp: template shared by the call-family opcodes. Fast path:
# the callee matches the CallLinkInfo's cached callee, so we build the
# callee frame (Callee + ArgumentCount) and jump to the cached machine
# code target. A callee mismatch takes the generic slow call path.
1960 macro commonCallOp(opcodeName, slowPath, opcodeStruct, prepareCall, prologue)
1961 llintOpWithMetadata(opcodeName, opcodeStruct, macro (size, get, dispatch, metadata, return)
1964 prologue(macro (fieldName, dst)
1965 getu(size, opcodeStruct, fieldName, dst)
# Compare the actual callee against the linked (last seen) callee.
1969 loadp %opcodeStruct%::Metadata::m_callLinkInfo.m_calleeOrLastSeenCalleeWithLinkBit[t5], t2
1970 loadConstantOrVariable(size, t0, t3)
1971 bqneq t3, t2, .opCallSlow
# t3 now points at the new (callee) frame: store Callee and ArgumentCount.
1972 getu(size, opcodeStruct, m_argv, t3)
1976 storeq t2, Callee[t3]
1977 getu(size, opcodeStruct, m_argc, t2)
# Save the bytecode PC in the caller frame's tag slot before transferring control.
1978 storei PC, ArgumentCount + TagOffset[cfr]
1979 storei t2, ArgumentCount + PayloadOffset[t3]
1981 prepareCall(%opcodeStruct%::Metadata::m_callLinkInfo.m_machineCodeTarget[t5], t2, t3, t4, JSEntryPtrTag)
1982 callTargetFunction(size, opcodeStruct, dispatch, %opcodeStruct%::Metadata::m_callLinkInfo.m_machineCodeTarget[t5], JSEntryPtrTag)
1985 slowPathForCall(size, opcodeStruct, dispatch, slowPath, prepareCall)
# op_ret: return from the current JS frame; the return value operand is
# materialized into the return register r0.
1989 llintOp(op_ret, OpRet, macro (size, get, dispatch)
1990 checkSwitchToJITForEpilogue()
1992 loadConstantOrVariable(size, t2, r0)
# op_to_primitive: non-object values (immediates, and cells below
# ObjectType such as strings/symbols) are already primitive and returned
# as-is; objects need the full ToPrimitive slow path.
1997 llintOpWithReturn(op_to_primitive, OpToPrimitive, macro (size, get, dispatch, return)
1999 loadConstantOrVariable(size, t2, t0)
2000 btqnz t0, tagMask, .opToPrimitiveIsImm
2001 bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
2002 .opToPrimitiveIsImm:
2005 .opToPrimitiveSlowCase:
2006 callSlowPath(_slow_path_to_primitive)
# llint_op_catch: landing pad for exceptions unwinding into interpreted
# code. Restores the catching frame and interpreter registers from the VM,
# stores the Exception and its thrown value into the catch operands, then
# re-dispatches at VM::targetInterpreterPCForThrow.
2011 commonOp(llint_op_catch, macro() end, macro (size)
2012 # This is where we end up from the JIT's throw trampoline (because the
2013 # machine code return address will be set to _llint_op_catch), and from
2014 # the interpreter's throw trampoline (see _llint_throw_trampoline).
2015 # The throwing code must have known that we were throwing to the interpreter,
2016 # and have set VM::targetInterpreterPCForThrow.
# Recover the VM pointer from the callee cell via its MarkedBlock footer.
2017 loadp Callee[cfr], t3
2018 andp MarkedBlockMask, t3
2019 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t3], t3
2020 restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
2021 loadp VM::callFrameForCatch[t3], cfr
2022 storep 0, VM::callFrameForCatch[t3]
2023 restoreStackPointerAfterCall()
# Re-establish the interpreter's PB/PC/metadata registers for this frame.
2025 loadp CodeBlock[cfr], PB
2026 loadp CodeBlock::m_metadata[PB], metadataTable
2027 loadp CodeBlock::m_instructionsRawPointer[PB], PB
2028 loadp VM::targetInterpreterPCForThrow[t3], PC
# Uncatchable exceptions (e.g. termination) are rethrown immediately.
2031 callSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
2032 bpeq r1, 0, .isCatchableException
2033 jmp _llint_throw_from_slow_path_trampoline
2035 .isCatchableException:
2036 loadp Callee[cfr], t3
2037 andp MarkedBlockMask, t3
2038 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t3], t3
# Consume the pending exception and hand it to the bytecode's operands.
2040 loadp VM::m_exception[t3], t0
2041 storep 0, VM::m_exception[t3]
2042 get(size, OpCatch, m_exception, t2)
2043 storeq t0, [cfr, t2, 8]
2045 loadq Exception::m_value[t0], t3
2046 get(size, OpCatch, m_thrownValue, t2)
2047 storeq t3, [cfr, t2, 8]
2051 callSlowPath(_llint_slow_path_profile_catch)
2053 dispatchOp(size, op_catch)
# op_end: terminate execution of a program/eval body, returning the value
# of the (always non-constant) operand in r0.
2057 llintOp(op_end, OpEnd, macro (size, get, dispatch)
2058 checkSwitchToJITForEpilogue()
2060 assertNotConstant(size, t0)
2061 loadq [cfr, t0, 8], r0
# llint_throw_from_slow_path_trampoline: entered when a slow path throws.
# Saves callee-saves into the VM entry frame buffer, lets the C++ exception
# handler compute the target, then jumps to VM::targetMachinePCForThrow
# (which may be JIT code — hence emulating the JIT throwing protocol).
2066 op(llint_throw_from_slow_path_trampoline, macro ()
2067 loadp Callee[cfr], t1
2068 andp MarkedBlockMask, t1
2069 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1
2070 copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(t1, t2)
2072 callSlowPath(_llint_slow_path_handle_exception)
2074 # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
2075 # the throw target is not necessarily interpreted code, we come to here.
2076 # This essentially emulates the JIT's throwing protocol.
2077 loadp Callee[cfr], t1
2078 andp MarkedBlockMask, t1
2079 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1
2080 jmp VM::targetMachinePCForThrow[t1], ExceptionHandlerPtrTag
# llint_throw_during_call_trampoline: throw raised while making a call;
# capture the return address, then funnel into the common throw trampoline.
2084 op(llint_throw_during_call_trampoline, macro ()
2085 preserveReturnAddressAfterCall(t2)
2086 jmp _llint_throw_from_slow_path_trampoline
# nativeCallTrampoline: calls a host (C++) function stored at the given
# offset within the callee's executable. Publishes the frame to
# VM::topCallFrame, makes the call (C_LOOP vs real-machine variants),
# then checks VM::m_exception and reroutes to the throw trampoline if set.
2090 macro nativeCallTrampoline(executableOffsetToFunction)
# Native frames have no CodeBlock.
2093 storep 0, CodeBlock[cfr]
2094 loadp Callee[cfr], t0
# Derive the VM from the callee cell's MarkedBlock footer.
2095 andp MarkedBlockMask, t0, t1
2096 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1
2097 storep cfr, VM::topCallFrame[t1]
# Link-register architectures must spill lr into the frame's ReturnPC slot.
2098 if ARM64 or ARM64E or C_LOOP or C_LOOP_WIN
2099 storep lr, ReturnPC[cfr]
2102 loadp Callee[cfr], t1
2103 loadp JSFunction::m_executable[t1], t1
2104 checkStackPointerAlignment(t3, 0xdead0001)
2105 if C_LOOP or C_LOOP_WIN
2106 cloopCallNative executableOffsetToFunction[t1]
2110 call executableOffsetToFunction[t1], JSEntryPtrTag
2113 call executableOffsetToFunction[t1], JSEntryPtrTag
# After the native call: reload the VM and test for a pending exception.
2117 loadp Callee[cfr], t3
2118 andp MarkedBlockMask, t3
2119 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t3], t3
2121 btpnz VM::m_exception[t3], .handleException
2127 storep cfr, VM::topCallFrame[t3]
2128 jmp _llint_throw_from_slow_path_trampoline
# internalFunctionCallTrampoline: like nativeCallTrampoline, but for
# InternalFunction callees — the native entry point lives at offsetOfFunction
# directly on the callee object (no JSFunction::m_executable indirection).
2131 macro internalFunctionCallTrampoline(offsetOfFunction)
2133 storep 0, CodeBlock[cfr]
2134 loadp Callee[cfr], t0
2135 andp MarkedBlockMask, t0, t1
2136 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1
2137 storep cfr, VM::topCallFrame[t1]
2138 if ARM64 or ARM64E or C_LOOP or C_LOOP_WIN
2139 storep lr, ReturnPC[cfr]
2142 loadp Callee[cfr], t1
2143 checkStackPointerAlignment(t3, 0xdead0001)
2144 if C_LOOP or C_LOOP_WIN
2145 cloopCallNative offsetOfFunction[t1]
2149 call offsetOfFunction[t1], JSEntryPtrTag
2152 call offsetOfFunction[t1], JSEntryPtrTag
# Post-call: reload the VM and divert to the throw path on pending exception.
2156 loadp Callee[cfr], t3
2157 andp MarkedBlockMask, t3
2158 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t3], t3
2160 btpnz VM::m_exception[t3], .handleException
2166 storep cfr, VM::topCallFrame[t3]
2167 jmp _llint_throw_from_slow_path_trampoline
# varInjectionCheck: bail to slowPath if the global object's var-injection
# watchpoint has fired (i.e. someone may have injected variables into a
# scope we assumed was static). Clobbers scratch.
2170 macro varInjectionCheck(slowPath, scratch)
2171 loadp CodeBlock[cfr], scratch
2172 loadp CodeBlock::m_globalObject[scratch], scratch
2173 loadp JSGlobalObject::m_varInjectionWatchpoint[scratch], scratch
2174 bbeq WatchpointSet::m_state[scratch], IsInvalidated, slowPath
# op_resolve_scope: resolve the scope object for a variable, dispatching
# on the cached ResolveType in metadata. Global cases return the cached
# constant scope (with epoch / var-injection guards as required);
# ClosureVar walks m_localScopeDepth links up the scope chain.
2177 llintOpWithMetadata(op_resolve_scope, OpResolveScope, macro (size, get, dispatch, metadata, return)
# Fetch the scope cached in metadata at link time.
2180 macro getConstantScope(dst)
2181 loadp OpResolveScope::Metadata::m_constantScope[t5], dst
2184 macro returnConstantScope()
2185 getConstantScope(t0)
# GlobalProperty resolution is invalidated when the global lexical binding
# epoch changes; compare the cached epoch against the global object's.
2189 macro globalLexicalBindingEpochCheck(slowPath, globalObject, scratch)
2190 loadi OpResolveScope::Metadata::m_globalLexicalBindingEpoch[t5], scratch
2191 bineq JSGlobalObject::m_globalLexicalBindingEpoch[globalObject], scratch, slowPath
# Walk m_localScopeDepth JSScope::m_next links from the starting scope.
2194 macro resolveScope()
2195 loadi OpResolveScope::Metadata::m_localScopeDepth[t5], t2
2197 loadq [cfr, t0, 8], t0
2198 btiz t2, .resolveScopeLoopEnd
2201 loadp JSScope::m_next[t0], t0
2203 btinz t2, .resolveScopeLoop
2205 .resolveScopeLoopEnd:
# Dispatch over the cached resolve type, cheapest checks first.
2209 loadi OpResolveScope::Metadata::m_resolveType[t5], t0
2212 bineq t0, GlobalProperty, .rGlobalVar
2213 getConstantScope(t0)
2214 globalLexicalBindingEpochCheck(.rDynamic, t0, t2)
2218 bineq t0, GlobalVar, .rGlobalLexicalVar
2219 returnConstantScope()
2222 bineq t0, GlobalLexicalVar, .rClosureVar
2223 returnConstantScope()
2226 bineq t0, ClosureVar, .rModuleVar
2230 bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks
2231 returnConstantScope()
# The *WithVarInjectionChecks variants add a watchpoint guard before
# trusting the cached answer.
2233 .rGlobalPropertyWithVarInjectionChecks:
2234 bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
2235 varInjectionCheck(.rDynamic, t2)
2236 getConstantScope(t0)
2237 globalLexicalBindingEpochCheck(.rDynamic, t0, t2)
2240 .rGlobalVarWithVarInjectionChecks:
2241 bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks
2242 varInjectionCheck(.rDynamic, t2)
2243 returnConstantScope()
2245 .rGlobalLexicalVarWithVarInjectionChecks:
2246 bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
2247 varInjectionCheck(.rDynamic, t2)
2248 returnConstantScope()
2250 .rClosureVarWithVarInjectionChecks:
2251 bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
2252 varInjectionCheck(.rDynamic, t2)
# Dynamic / unresolved cases are handled in C++.
2256 callSlowPath(_slow_path_resolve_scope)
# loadWithStructureCheck: load a scope cell into t0 and verify its
# Structure* matches the one cached in metadata; mismatch goes to slowPath.
# Used by get_from_scope / put_to_scope for GlobalProperty resolution.
2261 macro loadWithStructureCheck(opcodeStruct, get, slowPath)
2263 loadq [cfr, t0, 8], t0
2264 loadStructureWithScratch(t0, t2, t1, t3)
2265 loadp %opcodeStruct%::Metadata::m_structure[t5], t1
2266 bpneq t2, t1, slowPath
# op_get_from_scope: read a variable through a resolved scope, dispatching
# on the cached ResolveType. Three load styles: property-at-offset (global
# property), direct variable pointer (global var / lexical var, the latter
# with a TDZ empty-value check), and closure-environment slot.
2269 llintOpWithMetadata(op_get_from_scope, OpGetFromScope, macro (size, get, dispatch, metadata, return)
# GlobalProperty: m_operand is a property offset into the scope object.
2273 loadp OpGetFromScope::Metadata::m_operand[t5], t1
2274 loadPropertyAtVariableOffset(t1, t0, t2)
2275 valueProfile(OpGetFromScope, t5, t2)
# Global var: m_operand is a direct pointer to the variable's slot.
2279 macro getGlobalVar(tdzCheckIfNecessary)
2280 loadp OpGetFromScope::Metadata::m_operand[t5], t0
2282 tdzCheckIfNecessary(t0)
2283 valueProfile(OpGetFromScope, t5, t0)
# Closure var: m_operand indexes the lexical environment's variable array.
2287 macro getClosureVar()
2288 loadp OpGetFromScope::Metadata::m_operand[t5], t1
2289 loadq JSLexicalEnvironment_variables[t0, t1, 8], t0
2290 valueProfile(OpGetFromScope, t5, t0)
# Extract the ResolveType from getPutInfo and dispatch.
2294 loadi OpGetFromScope::Metadata::m_getPutInfo + GetPutInfo::m_operand[t5], t0
2295 andi ResolveTypeMask, t0
2298 bineq t0, GlobalProperty, .gGlobalVar
2299 loadWithStructureCheck(OpGetFromScope, get, .gDynamic) # This structure check includes lexical binding epoch check since when the epoch is changed, scope will be changed too.
2303 bineq t0, GlobalVar, .gGlobalLexicalVar
2304 getGlobalVar(macro(v) end)
# GlobalLexicalVar: an empty value means the binding is still in its TDZ.
2307 bineq t0, GlobalLexicalVar, .gClosureVar
2310 bqeq value, ValueEmpty, .gDynamic
2314 bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
2315 loadVariable(get, m_scope, t0)
# Var-injection-guarded variants of the above.
2318 .gGlobalPropertyWithVarInjectionChecks:
2319 bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
2320 loadWithStructureCheck(OpGetFromScope, get, .gDynamic) # This structure check includes lexical binding epoch check since when the epoch is changed, scope will be changed too.
2323 .gGlobalVarWithVarInjectionChecks:
2324 bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks
2325 varInjectionCheck(.gDynamic, t2)
2326 getGlobalVar(macro(v) end)
2328 .gGlobalLexicalVarWithVarInjectionChecks:
2329 bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
2330 varInjectionCheck(.gDynamic, t2)
2333 bqeq value, ValueEmpty, .gDynamic
2336 .gClosureVarWithVarInjectionChecks:
2337 bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
2338 varInjectionCheck(.gDynamic, t2)
2339 loadVariable(get, m_scope, t0)
2343 callSlowPath(_llint_slow_path_get_from_scope)
# op_put_to_scope: write a variable through a resolved scope, dispatching
# on the cached ResolveType. Mirrors op_get_from_scope's cases, adding
# variable-watchpoint notification, TDZ checks for lexical bindings, and
# the appropriate write barrier per target.
2348 llintOpWithMetadata(op_put_to_scope, OpPutToScope, macro (size, get, dispatch, metadata, return)
# GlobalProperty: store at a cached property offset on the scope object.
2351 loadConstantOrVariable(size, t1, t2)
2352 loadp OpPutToScope::Metadata::m_operand[t5], t1
2353 storePropertyAtVariableOffset(t1, t0, t2)
# Global var: fire the variable watchpoint (if any), then store through
# the direct slot pointer in m_operand.
2356 macro putGlobalVariable()
2358 loadConstantOrVariable(size, t0, t1)
2359 loadp OpPutToScope::Metadata::m_watchpointSet[t5], t2
2360 btpz t2, .noVariableWatchpointSet
2361 notifyWrite(t2, .pDynamic)
2362 .noVariableWatchpointSet:
2363 loadp OpPutToScope::Metadata::m_operand[t5], t0
# Closure var: store into the lexical environment's variable array.
2367 macro putClosureVar()
2369 loadConstantOrVariable(size, t1, t2)
2370 loadp OpPutToScope::Metadata::m_operand[t5], t1
2371 storeq t2, JSLexicalEnvironment_variables[t0, t1, 8]
# LocalClosureVar: closure store that also notifies a per-variable watchpoint.
2374 macro putLocalClosureVar()
2376 loadConstantOrVariable(size, t1, t2)
2377 loadp OpPutToScope::Metadata::m_watchpointSet[t5], t3
2378 btpz t3, .noVariableWatchpointSet
2379 notifyWrite(t3, .pDynamic)
2380 .noVariableWatchpointSet:
2381 loadp OpPutToScope::Metadata::m_operand[t5], t1
2382 storeq t2, JSLexicalEnvironment_variables[t0, t1, 8]
# For non-initializing stores to a global lexical binding, an empty current
# value means the binding is in its TDZ: throw via the slow path.
2385 macro checkTDZInGlobalPutToScopeIfNecessary()
2386 loadi OpPutToScope::Metadata::m_getPutInfo + GetPutInfo::m_operand[t5], t0
2387 andi InitializationModeMask, t0
2388 rshifti InitializationModeShift, t0
2389 bineq t0, NotInitialization, .noNeedForTDZCheck
2390 loadp OpPutToScope::Metadata::m_operand[t5], t0
2392 bqeq t0, ValueEmpty, .pDynamic
# Extract the ResolveType and dispatch.
2397 loadi OpPutToScope::Metadata::m_getPutInfo + GetPutInfo::m_operand[t5], t0
2398 andi ResolveTypeMask, t0
2401 bineq t0, LocalClosureVar, .pGlobalProperty
2402 loadVariable(get, m_scope, t0)
2403 putLocalClosureVar()
2404 writeBarrierOnOperands(size, get, m_scope, m_value)
2408 bineq t0, GlobalProperty, .pGlobalVar
2409 loadWithStructureCheck(OpPutToScope, get, .pDynamic) # This structure check includes lexical binding epoch check since when the epoch is changed, scope will be changed too.
2411 writeBarrierOnOperands(size, get, m_scope, m_value)
2415 bineq t0, GlobalVar, .pGlobalLexicalVar
2417 writeBarrierOnGlobalObject(size, get, m_value)
2421 bineq t0, GlobalLexicalVar, .pClosureVar
2422 checkTDZInGlobalPutToScopeIfNecessary()
2424 writeBarrierOnGlobalLexicalEnvironment(size, get, m_value)
2428 bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
2429 loadVariable(get, m_scope, t0)
2431 writeBarrierOnOperands(size, get, m_scope, m_value)
# Var-injection-guarded variants of the above.
2434 .pGlobalPropertyWithVarInjectionChecks:
2435 bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
2436 loadWithStructureCheck(OpPutToScope, get, .pDynamic) # This structure check includes lexical binding epoch check since when the epoch is changed, scope will be changed too.
2438 writeBarrierOnOperands(size, get, m_scope, m_value)
2441 .pGlobalVarWithVarInjectionChecks:
2442 bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks
2443 varInjectionCheck(.pDynamic, t2)
2445 writeBarrierOnGlobalObject(size, get, m_value)
2448 .pGlobalLexicalVarWithVarInjectionChecks:
2449 bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
2450 varInjectionCheck(.pDynamic, t2)
2451 checkTDZInGlobalPutToScopeIfNecessary()
2453 writeBarrierOnGlobalLexicalEnvironment(size, get, m_value)
2456 .pClosureVarWithVarInjectionChecks:
2457 bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar
2458 varInjectionCheck(.pDynamic, t2)
2459 loadVariable(get, m_scope, t0)
2461 writeBarrierOnOperands(size, get, m_scope, m_value)
# Writing to a ModuleVar import is always an error in strict mode.
2465 bineq t0, ModuleVar, .pDynamic
2466 callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error)
2470 callSlowPath(_llint_slow_path_put_to_scope)
# op_get_from_arguments: load slot m_index out of a DirectArguments
# object's storage.
2475 llintOpWithProfile(op_get_from_arguments, OpGetFromArguments, macro (size, get, dispatch, return)
2476 loadVariable(get, m_arguments, t0)
2477 getu(size, OpGetFromArguments, m_index, t1)
2478 loadq DirectArguments_storage[t0, t1, 8], t0
# op_put_to_arguments: store a value into slot m_index of a DirectArguments
# object's storage, with a write barrier on the arguments object.
2483 llintOp(op_put_to_arguments, OpPutToArguments, macro (size, get, dispatch)
2484 loadVariable(get, m_arguments, t0)
2485 getu(size, OpPutToArguments, m_index, t1)
2487 loadConstantOrVariable(size, t3, t2)
2488 storeq t2, DirectArguments_storage[t0, t1, 8]
2489 writeBarrierOnOperands(size, get, m_arguments, m_value)
# op_get_parent_scope: return the next scope up the chain (JSScope::m_next).
2494 llintOpWithReturn(op_get_parent_scope, OpGetParentScope, macro (size, get, dispatch, return)
2495 loadVariable(get, m_scope, t0)
2496 loadp JSScope::m_next[t0], t0
# op_profile_type: append (value, TypeLocation, structureID-or-0) to the
# VM's TypeProfilerLog; when the log fills up, flush it via the slow path.
2501 llintOpWithMetadata(op_profile_type, OpProfileType, macro (size, get, dispatch, metadata, return)
2502 loadp CodeBlock[cfr], t1
2503 loadp CodeBlock::m_vm[t1], t1
2504 # t1 is holding the pointer to the typeProfilerLog.
2505 loadp VM::m_typeProfilerLog[t1], t1
2506 # t2 is holding the pointer to the current log entry.
2507 loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2
2509 # t0 is holding the JSValue argument.
2510 get(m_targetVirtualRegister, t3)
2511 loadConstantOrVariable(size, t3, t0)
# Empty values (e.g. TDZ slots) are not profiled.
2513 bqeq t0, ValueEmpty, .opProfileTypeDone
2514 # Store the JSValue onto the log entry.
2515 storeq t0, TypeProfilerLog::LogEntry::value[t2]
2517 # Store the TypeLocation onto the log entry.
2519 loadp OpProfileType::Metadata::m_typeLocation[t5], t3
2520 storep t3, TypeProfilerLog::LogEntry::location[t2]
# Record the structure ID for cells, zero for immediates.
2522 btqz t0, tagMask, .opProfileTypeIsCell
2523 storei 0, TypeProfilerLog::LogEntry::structureID[t2]
2524 jmp .opProfileTypeSkipIsCell
2525 .opProfileTypeIsCell:
2526 loadi JSCell::m_structureID[t0], t3
2527 storei t3, TypeProfilerLog::LogEntry::structureID[t2]
2528 .opProfileTypeSkipIsCell:
2530 # Increment the current log entry.
2531 addp sizeof TypeProfilerLog::LogEntry, t2
2532 storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]
# Flush the log when the cursor reaches the end pointer.
2534 loadp TypeProfilerLog::m_logEndPtr[t1], t1
2535 bpneq t2, t1, .opProfileTypeDone
2536 callSlowPath(_slow_path_profile_type_clear_log)
# op_profile_control_flow: bump the execution counter of this opcode's
# basic block (control-flow profiling).
2543 llintOpWithMetadata(op_profile_control_flow, OpProfileControlFlow, macro (size, get, dispatch, metadata, return)
# NOTE(review): t5 appears to hold the metadata pointer, presumably set by
# metadata(...) on a line elided from this view — verify.
2545 loadp OpProfileControlFlow::Metadata::m_basicBlockLocation[t5], t0
# 64-bit increment of BasicBlockLocation::m_executionCount.
2546 addq 1, BasicBlockLocation::m_executionCount[t0]
# op_get_rest_length: compute the length of a rest parameter from the
# call-frame argument count minus m_numParametersToSkip, clamped at zero,
# and box the result as an int32 JSValue.
2551 llintOpWithReturn(op_get_rest_length, OpGetRestLength, macro (size, get, dispatch, return)
# t0 <- the raw argument count stored in the call frame header.
2552 loadi PayloadOffset + ArgumentCount[cfr], t0
# t1 <- the unsigned m_numParametersToSkip immediate operand.
2554 getu(size, OpGetRestLength, m_numParametersToSkip, t1)
# Nothing left over: take the zero path (the .storeZero label, the
# subtraction on the non-zero path, and the return are elided from this view).
2555 bilteq t0, t1, .storeZero
# Box t0 as an int32 JSValue by or-ing in the number tag.
2561 orq tagTypeNumber, t0
# op_log_shadow_chicken_prologue: record a prologue packet (frame, caller
# frame, callee, scope) in the ShadowChicken shadow-stack log. If no packet
# slot is available, fall back to the slow path.
2566 llintOp(op_log_shadow_chicken_prologue, OpLogShadowChickenPrologue, macro (size, get, dispatch)
# On success t0 points at a free ShadowChicken::Packet; on failure control
# transfers to the slow-path label below.
2567 acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow)
2568 storep cfr, ShadowChicken::Packet::frame[t0]
2569 loadp CallerFrame[cfr], t1
2570 storep t1, ShadowChicken::Packet::callerFrame[t0]
2571 loadp Callee[cfr], t1
2572 storep t1, ShadowChicken::Packet::callee[t0]
2573 loadVariable(get, m_scope, t1)
2574 storep t1, ShadowChicken::Packet::scope[t0]
# NOTE(review): the fast path presumably dispatches on a line elided from
# this view before falling through to the slow label — verify.
2576 .opLogShadowChickenPrologueSlow:
2577 callSlowPath(_llint_slow_path_log_shadow_chicken_prologue)
2582 llintOp(op_log_shadow_chicken_tail, OpLogShadowChickenTail, macro (size, get, dispatch)
2583 acquireShadowChickenPacket(.opLogShadowChickenTailSlow)
2584 storep cfr, ShadowChicken::Packet::frame[t0]
2585 storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0]
2586 loadVariable(get, m_thisValue, t1)
2587 storep t1, ShadowChicken::Packet::thisValue[t0]
2588 loadVariable(get, m_scope, t1)
2589 storep t1, ShadowChicken::Packet::scope[t0]
2590 loadp CodeBlock[cfr], t1
2591 storep t1, ShadowChicken::Packet::codeBlock[t0]
2592 storei PC, ShadowChicken::Packet::callSiteIndex[t0]
2594 .opLogShadowChickenTailSlow:
2595 callSlowPath(_llint_slow_path_log_shadow_chicken_tail)