ee98f42f4c9462b1da8bf802a0bafea0af2a6884
[WebKit-https.git] / Source / JavaScriptCore / llint / LowLevelInterpreter64.asm
1 # Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
2 #
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
5 # are met:
6 # 1. Redistributions of source code must retain the above copyright
7 #    notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 #    notice, this list of conditions and the following disclaimer in the
10 #    documentation and/or other materials provided with the distribution.
11 #
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
23
24
25 # Utilities.
# Jump to the current opcode's handler: PB holds the base of the instruction
# stream and PC the current instruction index, so the handler pointer lives
# at PB[PC * 8].
26 macro jumpToInstruction()
27     jmp [PB, PC, 8]
28 end
29
# Advance PC by `advance` instruction slots (pointer-width add) and jump to
# the next opcode's handler.
30 macro dispatch(advance)
31     addp advance, PC
32     jumpToInstruction()
33 end
34
# Same as dispatch(), but with a 32-bit add — used when the advance value
# comes from a 32-bit instruction operand.
35 macro dispatchInt(advance)
36     addi advance, PC
37     jumpToInstruction()
38 end
</dispatchInt macro end comment>
39
# Dispatch by a distance stored in the instruction stream itself: the advance
# is loaded from operand slot `offset` of the current instruction.
40 macro dispatchIntIndirect(offset)
41     dispatchInt(offset * 8[PB, PC, 8])
42 end
43
# Resume interpretation after a call opcode returns. The call clobbered
# PC/PB, so: PC is recovered from the tag half of the ArgumentCount slot
# (where call opcodes stash it), PB is reloaded from the frame's CodeBlock,
# the call result (t0) is stored into the dst operand (operand 1), profiled,
# and execution continues past the call instruction.
44 macro dispatchAfterCall()
45     loadi ArgumentCount + TagOffset[cfr], PC
46     loadp CodeBlock[cfr], PB
47     loadp CodeBlock::m_instructions[PB], PB
48     loadisFromInstruction(1, t1)
49     storeq t0, [cfr, t1, 8]
50     valueProfile(t0, (CallOpCodeSize - 1), t2)
51     dispatch(CallOpCodeSize)
52 end
53
# Call a C function with two arguments, marshalling them into the
# platform-specific argument registers. The stack pointer must already be
# 16-byte aligned (checked below).
54 macro cCall2(function, arg1, arg2)
55     checkStackPointerAlignment(t4, 0xbad0c002)
56     if X86_64
         # SysV AMD64: t4/t5 map to the first two integer argument registers.
57         move arg1, t4
58         move arg2, t5
59         call function
60     elsif X86_64_WIN
61         # Note: this implementation is only correct if the return type size is > 8 bytes.
62         # See macro cCall2Void for an implementation when the return type <= 8 bytes.
63         # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
64         # On entry rcx (t2), should contain a pointer to this stack space. The other parameters are shifted to the right,
65         # rdx (t1) should contain the first argument, and r8 (t6) should contain the second argument.
66         # On return, rax contains a pointer to this stack value, and we then need to copy the 16 byte return value into rax (t0) and rdx (t1)
67         # since the return value is expected to be split between the two.
68         # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
69         move arg1, t1
70         move arg2, t6
         # 48 bytes = 32-byte shadow space + 16 bytes for the oversized return value.
71         subp 48, sp
72         move sp, t2
73         addp 32, t2
74         call function
75         addp 48, sp
76         move 8[t0], t1
77         move [t0], t0
78     elsif ARM64
79         move arg1, t0
80         move arg2, t1
81         call function
82     elsif C_LOOP
83         cloopCallSlowPath function, arg1, arg2
84     else
85         error
86     end
87 end
88
# Call a C function with two arguments when the return value is ignored.
# Win64 needs its own path here (see comment below); all other platforms
# just delegate to cCall2.
89 macro cCall2Void(function, arg1, arg2)
90     if C_LOOP
91         cloopCallSlowPathVoid function, arg1, arg2
92     elsif X86_64_WIN
93         # Note: we cannot use the cCall2 macro for Win64 in this case,
94         # as the Win64 cCall2 implementation is only correct when the return type size is > 8 bytes.
95         # On Win64, rcx and rdx are used for passing the first two parameters.
96         # We also need to make room on the stack for all four parameter registers.
97         # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
98         move arg2, t1
99         move arg1, t2
100         subp 32, sp 
101         call function
102         addp 32, sp 
103     else
104         cCall2(function, arg1, arg2)
105     end
106 end
107
108 # This barely works. arg3 and arg4 should probably be immediates.
# Call a C function with four arguments, marshalled into platform argument
# registers. Not supported under C_LOOP.
109 macro cCall4(function, arg1, arg2, arg3, arg4)
110     checkStackPointerAlignment(t4, 0xbad0c004)
111     if X86_64
112         move arg1, t4
113         move arg2, t5
114         move arg3, t1
115         move arg4, t2
116         call function
117     elsif X86_64_WIN
118         # On Win64, rcx, rdx, r8, and r9 are used for passing the first four parameters.
119         # We also need to make room on the stack for all four parameter registers.
120         # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
121         move arg1, t2
122         move arg2, t1
123         move arg3, t6
124         move arg4, t7
125         subp 32, sp 
126         call function
127         addp 32, sp 
128     elsif ARM64
129         move arg1, t0
130         move arg2, t1
131         move arg3, t2
132         move arg4, t3
133         call function
134     elsif C_LOOP
135         error
136     else
137         error
138     end
139 end
140
# Entry point from C++ into JS/host code. Sets up a VMEntryRecord on the
# stack (saving the VM's previous topCallFrame/topVMEntryFrame), grows the
# stack for the callee frame, copies the header and arguments out of the
# ProtoCallFrame, then invokes `makeCall` (either makeJavaScriptCall or
# makeHostFunctionCall). On return — or on stack overflow — it restores the
# VM's previous frame pointers and unwinds.
141 macro doVMEntry(makeCall)
     # Per-platform register assignments for the three incoming C arguments
     # (entry, vm, protoCallFrame) and the scratch temporaries. Note that on
     # x86-64, temp1 aliases previousCFR and temp3 aliases previousPC.
142     if X86_64
143         const entry = t4
144         const vm = t5
145         const protoCallFrame = t1
146
147         const previousCFR = t0
148         const previousPC = t6
149         const temp1 = t0
150         const temp2 = t3
151         const temp3 = t6
152     elsif X86_64_WIN
153         const entry = t2
154         const vm = t1
155         const protoCallFrame = t6
156
157         const previousCFR = t0
158         const previousPC = t4
159         const temp1 = t0
160         const temp2 = t3
161         const temp3 = t7
162     elsif ARM64 or C_LOOP
163         const entry = a0
164         const vm = a1
165         const protoCallFrame = a2
166
167         const previousCFR = t5
168         const previousPC = lr
169         const temp1 = t3
170         const temp2 = t4
171         const temp3 = t6
172     end
173
174     functionPrologue()
175     pushCalleeSaves()
176
     # sp now points at the VMEntryRecord living in this frame.
177     vmEntryRecord(cfr, sp)
178
179     checkStackPointerAlignment(temp2, 0xbad0dc01)
180
     # Record the VM and its previous top frames so they can be restored on exit.
181     storep vm, VMEntryRecord::m_vm[sp]
182     loadp VM::topCallFrame[vm], temp2
183     storep temp2, VMEntryRecord::m_prevTopCallFrame[sp]
184     loadp VM::topVMEntryFrame[vm], temp2
185     storep temp2, VMEntryRecord::m_prevTopVMEntryFrame[sp]
186
     # temp1 = sp - (header slots + padded arg count) * 8: the prospective
     # new stack pointer for the callee frame.
187     loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
188     addp CallFrameHeaderSlots, temp2, temp2
189     lshiftp 3, temp2
190     subp sp, temp2, temp1
191
192     # Ensure that we have enough additional stack capacity for the incoming args,
193     # and the frame for the JS code we're executing. We need to do this check
194     # before we start copying the args from the protoCallFrame below.
195     bpaeq temp1, VM::m_jsStackLimit[vm], .stackHeightOK
196
197     if C_LOOP
         # The C loop can attempt to grow its stack; entry/vm are saved around
         # the slow-path call because it may clobber them.
198         move entry, temp2
199         move vm, temp3
200         cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, temp1
201         bpeq t0, 0, .stackCheckFailed
202         move temp2, entry
203         move temp3, vm
204         jmp .stackHeightOK
205
206 .stackCheckFailed:
207         move temp2, entry
208         move temp3, vm
209     end
210
     # Stack overflow: throw, restore the VM's previous top frames, and unwind.
211     cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame)
212
213     vmEntryRecord(cfr, temp2)
214
215     loadp VMEntryRecord::m_vm[temp2], vm
216     loadp VMEntryRecord::m_prevTopCallFrame[temp2], temp3
217     storep temp3, VM::topCallFrame[vm]
218     loadp VMEntryRecord::m_prevTopVMEntryFrame[temp2], temp3
219     storep temp3, VM::topVMEntryFrame[vm]
220
221     subp cfr, CalleeRegisterSaveSize, sp
222
223     popCalleeSaves()
224     functionEpilogue()
225     ret
226
227 .stackHeightOK:
228     move temp1, sp
     # Copy the 4 header slots (see CallFrameHeaderSlots usage above) from the
     # ProtoCallFrame into the new frame, counting temp1 down from 4 to 0.
229     move 4, temp1
230
231 .copyHeaderLoop:
232     subi 1, temp1
233     loadq [protoCallFrame, temp1, 8], temp3
234     storeq temp3, CodeBlock[sp, temp1, 8]
235     btinz temp1, .copyHeaderLoop
236
     # temp2 = actual arg count - 1, temp3 = padded arg count - 1.
237     loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
238     subi 1, temp2
239     loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
240     subi 1, temp3
241
     # Fill any padding slots beyond the actual arguments with undefined.
242     bieq temp2, temp3, .copyArgs
243     move ValueUndefined, temp1
244 .fillExtraArgsLoop:
245     subi 1, temp3
246     storeq temp1, ThisArgumentOffset + 8[sp, temp3, 8]
247     bineq temp2, temp3, .fillExtraArgsLoop
248
249 .copyArgs:
250     loadp ProtoCallFrame::args[protoCallFrame], temp1
251
     # Copy the actual arguments (including `this` at index 0) into the frame.
252 .copyArgsLoop:
253     btiz temp2, .copyArgsDone
254     subi 1, temp2
255     loadq [temp1, temp2, 8], temp3
256     storeq temp3, ThisArgumentOffset + 8[sp, temp2, 8]
257     jmp .copyArgsLoop
258
259 .copyArgsDone:
260     if ARM64
         # ARM64 cannot store sp directly; bounce it through a temp register.
261         move sp, temp2
262         storep temp2, VM::topCallFrame[vm]
263     else
264         storep sp, VM::topCallFrame[vm]
265     end
266     storep cfr, VM::topVMEntryFrame[vm]
267
     # Materialize the value-tagging constants in callee-saved registers:
     # csr1 = 0xffff000000000000, csr2 = csr1 + 2. These match the
     # tagTypeNumber/tagMask usage elsewhere in this file — TODO confirm the
     # csr1/csr2 aliasing against the register definitions.
268     move 0xffff000000000000, csr1
269     addp 2, csr1, csr2
270
271     checkStackPointerAlignment(temp3, 0xbad0dc02)
272
273     makeCall(entry, temp1)
274
275     checkStackPointerAlignment(temp3, 0xbad0dc03)
276
     # Normal return path: restore the VM's previous top frames and unwind.
277     vmEntryRecord(cfr, temp2)
278
279     loadp VMEntryRecord::m_vm[temp2], vm
280     loadp VMEntryRecord::m_prevTopCallFrame[temp2], temp3
281     storep temp3, VM::topCallFrame[vm]
282     loadp VMEntryRecord::m_prevTopVMEntryFrame[temp2], temp3
283     storep temp3, VM::topVMEntryFrame[vm]
284
285     subp cfr, CalleeRegisterSaveSize, sp
286
287     popCalleeSaves()
288     functionEpilogue()
289
290     ret
291 end
292
293
# Invoke a JS function entrypoint from doVMEntry. The sp is adjusted by 16
# around the call — presumably to account for slots the callee expects below
# its frame; verify against the matching prologue. `temp` is unused here but
# kept for signature parity with makeHostFunctionCall.
294 macro makeJavaScriptCall(entry, temp)
295     addp 16, sp
296     if C_LOOP
297         cloopCallJSFunction entry
298     else
299         call entry
300     end
301     subp 16, sp
302 end
303
304
# Invoke a host (native C) function entrypoint from doVMEntry. Stores the
# caller frame pointer at [sp] and passes sp as the first C argument in the
# platform's first-argument register.
305 macro makeHostFunctionCall(entry, temp)
306     move entry, temp
307     storep cfr, [sp]
308     if X86_64
309         move sp, t4
310     elsif X86_64_WIN
311         move sp, t2
312     elsif ARM64 or C_LOOP
313         move sp, a0
314     end
315     if C_LOOP
         # The C loop needs the return address preserved explicitly.
316         storep lr, 8[sp]
317         cloopCallNative temp
318     elsif X86_64_WIN
319         # We need to allocate 32 bytes on the stack for the shadow space.
320         subp 32, sp
321         call temp
322         addp 32, sp
323     else
324         call temp
325     end
326 end
327
328
# Landing pad for an exception that propagates out of the outermost JS frame.
# Recovers the VM from the callee cell (cells are allocated in MarkedBlocks,
# so masking the callee pointer finds the block header and thence the VM),
# rolls back to the VM entry frame recorded in VM::callFrameForThrow, restores
# the previous topCallFrame/topVMEntryFrame, and returns to the C++ caller.
329 _handleUncaughtException:
330     loadp Callee[cfr], t3
331     andp MarkedBlockMask, t3
332     loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
333     loadp VM::callFrameForThrow[t3], cfr
334
335     loadp CallerFrame[cfr], cfr
336     vmEntryRecord(cfr, t2)
337
338     loadp VMEntryRecord::m_vm[t2], t3
339     loadp VMEntryRecord::m_prevTopCallFrame[t2], t5
340     storep t5, VM::topCallFrame[t3]
341     loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], t5
342     storep t5, VM::topVMEntryFrame[t3]
343
344     subp cfr, CalleeRegisterSaveSize, sp
345
346     popCalleeSaves()
347     functionEpilogue()
348     ret
349
350
# Before calling out to C: turn PC from an instruction index into an absolute
# instruction pointer (PB + PC*8), and stash PB in t3 so
# restoreStateAfterCCall() can undo the transformation.
351 macro prepareStateForCCall()
352     leap [PB, PC, 8], PC
353     move PB, t3
354 end
355
# After a C call: t0 holds the (possibly updated) absolute instruction
# pointer returned by the slow path; recover PB from t3 and convert PC back
# into an instruction index ((PC - PB) >> 3).
356 macro restoreStateAfterCCall()
357     move t0, PC
358     move t3, PB
359     subp PB, PC
360     rshiftp 3, PC
361 end
362
# Standard slow-path call: pass (cfr, PC-as-pointer) to the C slow path and
# restore interpreter state from its return value.
363 macro callSlowPath(slowPath)
364     prepareStateForCCall()
365     cCall2(slowPath, cfr, PC)
366     restoreStateAfterCCall()
367 end
368
# Debug-tracing helpers: report an operand (or the value it refers to) to the
# C tracing slow paths, preserving interpreter state around the call.
369 macro traceOperand(fromWhere, operand)
370     prepareStateForCCall()
371     cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
372     restoreStateAfterCCall()
373 end
374
375 macro traceValue(fromWhere, operand)
376     prepareStateForCCall()
377     cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
378     restoreStateAfterCCall()
379 end
380
381 # Call a slow path for call opcodes. PC is saved into the ArgumentCount tag
# slot (where dispatchAfterCall expects to find it); `action` receives the
# slow path's primary return value in t0.
382 macro callCallSlowPath(slowPath, action)
383     storei PC, ArgumentCount + TagOffset[cfr]
384     prepareStateForCCall()
385     cCall2(slowPath, cfr, PC)
386     action(t0)
387 end
388
# Service the watchdog timer. If the handler returns non-zero in t0, jump to
# `throwHandler`; otherwise restore PB from t3 and PC from the saved slot.
389 macro callWatchdogTimerHandler(throwHandler)
390     storei PC, ArgumentCount + TagOffset[cfr]
391     prepareStateForCCall()
392     cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
393     btpnz t0, throwHandler
394     move t3, PB
395     loadi ArgumentCount + TagOffset[cfr], PC
396 end
397
# Loop OSR hook: if the tier-up heuristic fires, call _llint_loop_osr. A
# non-zero t0 is the machine-code address to jump to (with t1 the new sp);
# zero means stay in the LLInt, so restore PB/PC and continue.
398 macro checkSwitchToJITForLoop()
399     checkSwitchToJIT(
400         1,
401         macro()
402             storei PC, ArgumentCount + TagOffset[cfr]
403             prepareStateForCCall()
404             cCall2(_llint_loop_osr, cfr, PC)
405             btpz t0, .recover
406             move t1, sp
407             jmp t0
408         .recover:
409             move t3, PB
410             loadi ArgumentCount + TagOffset[cfr], PC
411         end)
412 end
413
# Load the virtual-register operand at instruction slot `operand` into
# `value` (operand index is first loaded into `value` itself, then used to
# index the call frame).
414 macro loadVariable(operand, value)
415     loadisFromInstruction(operand, value)
416     loadq [cfr, value, 8], value
417 end
418
419 # Index and value must be different registers. Index may be clobbered.
# Resolve an operand index to a JSValue: indices below
# FirstConstantRegisterIndex are frame locals; larger indices address the
# CodeBlock's constant-register vector.
420 macro loadConstantOrVariable(index, value)
421     bpgteq index, FirstConstantRegisterIndex, .constant
422     loadq [cfr, index, 8], value
423     jmp .done
424 .constant:
425     loadp CodeBlock[cfr], value
426     loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value
427     subp FirstConstantRegisterIndex, index
428     loadq [value, index, 8], value
429 .done:
430 end
431
# Typed variants of loadConstantOrVariable: branch to `slow` unless the
# loaded JSValue is a boxed int32 (value >= tagTypeNumber) or, respectively,
# a cell (no tag bits set).
432 macro loadConstantOrVariableInt32(index, value, slow)
433     loadConstantOrVariable(index, value)
434     bqb value, tagTypeNumber, slow
435 end
436
437 macro loadConstantOrVariableCell(index, value, slow)
438     loadConstantOrVariable(index, value)
439     btqnz value, tagMask, slow
440 end
441
# GGC write barrier for the cell in instruction operand `cellOperand`. Skips
# the barrier if the operand is not a cell or the cell is already remembered
# or in Eden; otherwise calls the C slow path, preserving PB/PC around it.
442 macro writeBarrierOnOperand(cellOperand)
443     if GGC
444         loadisFromInstruction(cellOperand, t1)
445         loadConstantOrVariableCell(t1, t2, .writeBarrierDone)
446         skipIfIsRememberedOrInEden(t2, t1, t3, 
447             macro(gcData)
448                 btbnz gcData, .writeBarrierDone
449                 push PB, PC
450                 cCall2Void(_llint_write_barrier_slow, cfr, t2)
451                 pop PC, PB
452             end
453         )
454     .writeBarrierDone:
455     end
456 end
457
# Write barrier for a (cell, value) pair: only barrier the cell when the
# value being stored is itself a non-null cell.
458 macro writeBarrierOnOperands(cellOperand, valueOperand)
459     if GGC
460         loadisFromInstruction(valueOperand, t1)
461         loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
462         btpz t0, .writeBarrierDone
463     
464         writeBarrierOnOperand(cellOperand)
465     .writeBarrierDone:
466     end
467 end
468
# Write barrier for stores into the global object: if the stored value is a
# cell, barrier the CodeBlock's global object unless it is already
# remembered or in Eden. PB/PC are preserved around the C slow-path call.
469 macro writeBarrierOnGlobalObject(valueOperand)
470     if GGC
471         loadisFromInstruction(valueOperand, t1)
472         loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
473         btpz t0, .writeBarrierDone
474     
475         loadp CodeBlock[cfr], t3
476         loadp CodeBlock::m_globalObject[t3], t3
477         skipIfIsRememberedOrInEden(t3, t1, t2,
478             macro(gcData)
479                 btbnz gcData, .writeBarrierDone
480                 push PB, PC
481                 cCall2Void(_llint_write_barrier_slow, cfr, t3)
482                 pop PC, PB
483             end
484         )
485     .writeBarrierDone:
486     end
487 end
488
# Record `value` into the ValueProfile whose pointer is stored at
# instruction operand `operand`.
489 macro valueProfile(value, operand, scratch)
490     loadpFromInstruction(operand, scratch)
491     storeq value, ValueProfile::m_buckets[scratch]
492 end
493
# NOTE(review): this macro body is empty — callers get no code. Possibly
# intentional (a placeholder; structure loads in this file go through
# loadStructureWithScratch / loadStructureAndClobberFirstArg below), but it
# may also be content lost in extraction — verify against upstream.
494 macro loadStructure(cell, structure)
495 end
496
# Load `cell`'s Structure pointer into `structure`: read the 32-bit
# structure ID from the cell header and index the VM's StructureIDTable
# (reached via the frame's CodeBlock). Clobbers `scratch`.
497 macro loadStructureWithScratch(cell, structure, scratch)
498     loadp CodeBlock[cfr], scratch
499     loadp CodeBlock::m_vm[scratch], scratch
500     loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[scratch], scratch
501     loadi JSCell::m_structureID[cell], structure
502     loadp [scratch, structure, 8], structure
503 end
504
# Same as loadStructureWithScratch, but uses the `cell` register itself as
# the scratch — the cell pointer is destroyed in the process.
505 macro loadStructureAndClobberFirstArg(cell, structure)
506     loadi JSCell::m_structureID[cell], structure
507     loadp CodeBlock[cfr], cell
508     loadp CodeBlock::m_vm[cell], cell
509     loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[cell], cell
510     loadp [cell, structure, 8], structure
511 end
512
# Install a structure into a cell header: copies the structure's 64-bit
# ID-blob word (structure ID plus indexing/type info) into the cell in a
# single store. Clobbers `scratch`.
513 macro storeStructureWithTypeInfo(cell, structure, scratch)
514     loadq Structure::m_blob + StructureIDBlob::u.doubleWord[structure], scratch
515     storeq scratch, JSCell::m_structureID[cell]
516 end
517
518 # Entrypoints into the interpreter.
519
520 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Check that the caller passed at least CodeBlock::m_numParameters arguments;
# if not, call the arity-fixup slow path and either invoke a fixup thunk or
# slide the frame inline to make room for the missing (undefined) arguments.
521 macro functionArityCheck(doneLabel, slowPath)
522     loadi PayloadOffset + ArgumentCount[cfr], t0
523     biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
524     prepareStateForCCall()
525     cCall2(slowPath, cfr, PC)   # This slowPath has the protocol: t0 = 0 => no error, t0 != 0 => error
526     btiz t0, .noError
527     move t1, cfr   # t1 contains caller frame
528     jmp _llint_throw_from_slow_path_trampoline
529
530 .noError:
531     # t1 points to ArityCheckData.
532     loadp CommonSlowPaths::ArityCheckData::thunkToCall[t1], t2
533     btpz t2, .proceedInline
534     
     # A thunk was provided: call it with the return PC and padded stack space.
535     loadp CommonSlowPaths::ArityCheckData::returnPC[t1], t7
536     loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t0
537     call t2
538     if ASSERT_ENABLED
539         loadp ReturnPC[cfr], t0
540         loadp [t0], t0
541     end
542     jmp .continue
543
544 .proceedInline:
545     loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t1
546     btiz t1, .continue
547
548     // Move frame up "t1 * 2" slots
549     lshiftp 1, t1
550     negq t1
551     move cfr, t3
552     loadi PayloadOffset + ArgumentCount[cfr], t2
553     addi CallFrameHeaderSlots, t2
     # Copy every existing frame slot downward by t1 slots (t1 is negative).
554 .copyLoop:
555     loadq [t3], t0
556     storeq t0, [t3, t1, 8]
557     addp 8, t3
558     bsubinz 1, t2, .copyLoop
559
560     // Fill new slots with JSUndefined
561     move t1, t2
562     move ValueUndefined, t0
563 .fillLoop:
564     storeq t0, [t3, t1, 8]
565     addp 8, t3
566     baddinz 1, t2, .fillLoop
567
     # Rebase cfr and sp onto the moved frame (t1 slots -> bytes).
568     lshiftp 3, t1
569     addp t1, cfr
570     addp t1, sp
571
572 .continue:
573     # Reload CodeBlock and reset PC, since the slow_path clobbered them.
574     loadp CodeBlock[cfr], t1
575     loadp CodeBlock::m_instructions[t1], PB
576     move 0, PC
577     jmp doneLabel
578 end
579
# Jump to `label` if the VM has a pending exception. The VM is found from
# the callee cell via its MarkedBlock header (same trick as
# _handleUncaughtException). Clobbers t3.
580 macro branchIfException(label)
581     loadp Callee[cfr], t3
582     andp MarkedBlockMask, t3
583     loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
584     btqz VM::m_exception[t3], .noException
585     jmp label
586 .noException:
587 end
588
589
590 # Instruction implementations
591
# op_enter: clear all of the frame's local variables to undefined (looping
# from -numVars up to 0 relative to cfr), then run the enter slow path.
592 _llint_op_enter:
593     traceExecution()
594     checkStackPointerAlignment(t2, 0xdead00e1)
595     loadp CodeBlock[cfr], t2                // t2<CodeBlock> = cfr.CodeBlock
596     loadi CodeBlock::m_numVars[t2], t2      // t2<size_t> = t2<CodeBlock>.m_numVars
597     btiz t2, .opEnterDone
598     move ValueUndefined, t0
599     negi t2
600     sxi2q t2, t2
601 .opEnterLoop:
602     storeq t0, [cfr, t2, 8]
603     addq 1, t2
604     btqnz t2, .opEnterLoop
605 .opEnterDone:
606     callSlowPath(_slow_path_enter)
607     dispatch(1)
608
609
# op_create_lexical_environment: fully delegated to the C slow path.
610 _llint_op_create_lexical_environment:
611     traceExecution()
612     loadisFromInstruction(1, t0)
613     callSlowPath(_llint_slow_path_create_lexical_environment)
614     dispatch(3)
615
616
# op_get_scope: store the callee's scope chain head into the dst operand.
617 _llint_op_get_scope:
618     traceExecution()
619     loadp Callee[cfr], t0
620     loadp JSCallee::m_scope[t0], t0
621     loadisFromInstruction(1, t1)
622     storeq t0, [cfr, t1, 8]
623     dispatch(2)
624
625
# op_init_lazy_reg: mark a register as lazily initialized by storing the
# empty value into it.
626 _llint_op_init_lazy_reg:
627     traceExecution()
628     loadisFromInstruction(1, t0)
629     storeq ValueEmpty, [cfr, t0, 8]
630     dispatch(2)
631
632
# op_create_arguments: create the arguments object unless the dst register
# already holds one (i.e. is not still ValueEmpty from init_lazy_reg).
633 _llint_op_create_arguments:
634     traceExecution()
635     loadisFromInstruction(1, t0)
636     bqneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone
637     callSlowPath(_slow_path_create_arguments)
638 .opCreateArgumentsDone:
639     dispatch(3)
640
641
# op_create_this: fast-path allocate the `this` object using the callee
# function's ObjectAllocationProfile (allocator + structure). Falls back to
# the slow path if no allocator is cached or inline allocation fails.
642 _llint_op_create_this:
643     traceExecution()
644     loadisFromInstruction(2, t0)
645     loadp [cfr, t0, 8], t0
646     loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
647     loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
648     btpz t1, .opCreateThisSlow
649     allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
650     loadisFromInstruction(1, t1)
651     storeq t0, [cfr, t1, 8]
652     dispatch(4)
653
654 .opCreateThisSlow:
655     callSlowPath(_slow_path_create_this)
656     dispatch(4)
657
658
# op_get_callee: store the callee into dst if it matches the cached callee
# in operand 2; otherwise take the slow path (which handles re-caching).
659 _llint_op_get_callee:
660     traceExecution()
661     loadisFromInstruction(1, t0)
662     loadp Callee[cfr], t1
663     loadpFromInstruction(2, t2)
664     bpneq t1, t2, .opGetCalleeSlow
665     storep t1, [cfr, t0, 8]
666     dispatch(3)
667
668 .opGetCalleeSlow:
669     callSlowPath(_slow_path_get_callee)
670     dispatch(3)
671
# op_to_this: fast path succeeds when `this` is already a final object whose
# structure matches the cached structure in operand 2; anything else
# (non-cell, wrong type, structure mismatch) goes to the slow path.
672 _llint_op_to_this:
673     traceExecution()
674     loadisFromInstruction(1, t0)
675     loadq [cfr, t0, 8], t0
676     btqnz t0, tagMask, .opToThisSlow
677     bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
678     loadStructureWithScratch(t0, t1, t2)
679     loadpFromInstruction(2, t2)
680     bpneq t1, t2, .opToThisSlow
681     dispatch(4)
682
683 .opToThisSlow:
684     callSlowPath(_slow_path_to_this)
685     dispatch(4)
686
687
# op_new_object: inline-allocate an empty object from the allocation profile
# cached in operand 3; fall back to the slow path on allocation failure.
688 _llint_op_new_object:
689     traceExecution()
690     loadpFromInstruction(3, t0)
691     loadp ObjectAllocationProfile::m_allocator[t0], t1
692     loadp ObjectAllocationProfile::m_structure[t0], t2
693     allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
694     loadisFromInstruction(1, t1)
695     storeq t0, [cfr, t1, 8]
696     dispatch(4)
697
698 .opNewObjectSlow:
699     callSlowPath(_llint_slow_path_new_object)
700     dispatch(4)
701
702
# op_mov: dst = src, where src may be a constant or a frame local.
703 _llint_op_mov:
704     traceExecution()
705     loadisFromInstruction(2, t1)
706     loadisFromInstruction(1, t0)
707     loadConstantOrVariable(t1, t2)
708     storeq t2, [cfr, t0, 8]
709     dispatch(3)
710
711
# Variable-watchpoint write check: no-op if the set is already invalidated;
# otherwise branch to `slow` when the written value differs from the set's
# inferred value. `scratch` receives the watchpoint state byte.
712 macro notifyWrite(set, value, scratch, slow)
713     loadb VariableWatchpointSet::m_state[set], scratch
714     bieq scratch, IsInvalidated, .done
715     bqneq value, VariableWatchpointSet::m_inferredValue[set], slow
716 .done:
717 end
718
# op_not: boolean negation. XOR with ValueFalse maps true/false onto 1/0;
# any remaining bits outside bit 0 mean the operand was not a boolean, so
# take the slow path. XOR with ValueTrue then flips the boolean and re-tags.
719 _llint_op_not:
720     traceExecution()
721     loadisFromInstruction(2, t0)
722     loadisFromInstruction(1, t1)
723     loadConstantOrVariable(t0, t2)
724     xorq ValueFalse, t2
725     btqnz t2, ~1, .opNotSlow
726     xorq ValueTrue, t2
727     storeq t2, [cfr, t1, 8]
728     dispatch(3)
729
730 .opNotSlow:
731     callSlowPath(_slow_path_not)
732     dispatch(3)
733
734
# Shared fast path for op_eq/op_neq: both operands must be boxed int32s,
# compare with `integerComparison`, box the 0/1 result as a JS boolean by
# OR-ing in ValueFalse. Non-int32 operands go to `slowPath`.
735 macro equalityComparison(integerComparison, slowPath)
736     traceExecution()
737     loadisFromInstruction(3, t0)
738     loadisFromInstruction(2, t2)
739     loadisFromInstruction(1, t3)
740     loadConstantOrVariableInt32(t0, t1, .slow)
741     loadConstantOrVariableInt32(t2, t0, .slow)
742     integerComparison(t0, t1, t0)
743     orq ValueFalse, t0
744     storeq t0, [cfr, t3, 8]
745     dispatch(4)
746
747 .slow:
748     callSlowPath(slowPath)
749     dispatch(4)
750 end
751
# op_eq / op_neq: instantiate equalityComparison with == / != int32 compares.
752 _llint_op_eq:
753     equalityComparison(
754         macro (left, right, result) cieq left, right, result end,
755         _slow_path_eq)
756
757
758 _llint_op_neq:
759     equalityComparison(
760         macro (left, right, result) cineq left, right, result end,
761         _slow_path_neq)
762
763
# Shared helper for op_eq_null/op_neq_null: leaves 1 in t0 if operand 2
# compares equal to null/undefined, else 0. Cells answer true only if they
# masquerade as undefined AND belong to this frame's global object;
# non-cells are normalized (undefined -> null) and compared against null.
764 macro equalNullComparison()
765     loadisFromInstruction(2, t0)
766     loadq [cfr, t0, 8], t0
767     btqnz t0, tagMask, .immediate
768     btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
769     move 0, t0
770     jmp .done
771 .masqueradesAsUndefined:
772     loadStructureWithScratch(t0, t2, t1)
773     loadp CodeBlock[cfr], t0
774     loadp CodeBlock::m_globalObject[t0], t0
775     cpeq Structure::m_globalObject[t2], t0, t0
776     jmp .done
777 .immediate:
778     andq ~TagBitUndefined, t0
779     cqeq t0, ValueNull, t0
780 .done:
781 end
782
# op_eq_null: box the 0/1 comparison result as a JS boolean.
# op_neq_null: XOR with ValueTrue both boxes and inverts the 0/1 result.
783 _llint_op_eq_null:
784     traceExecution()
785     equalNullComparison()
786     loadisFromInstruction(1, t1)
787     orq ValueFalse, t0
788     storeq t0, [cfr, t1, 8]
789     dispatch(3)
790
791
792 _llint_op_neq_null:
793     traceExecution()
794     equalNullComparison()
795     loadisFromInstruction(1, t1)
796     xorq ValueTrue, t0
797     storeq t0, [cfr, t1, 8]
798     dispatch(3)
799
800
# Shared fast path for op_stricteq/op_nstricteq. The fast path only handles
# operand pairs where neither is a cell (two cells, or cell vs. non-cell,
# need the slow path) and each is either a full number or has no number
# bits at all — double/int comparisons mix representations, so mixed cases
# also defer to the slow path.
801 macro strictEq(equalityOperation, slowPath)
802     traceExecution()
803     loadisFromInstruction(3, t0)
804     loadisFromInstruction(2, t2)
805     loadConstantOrVariable(t0, t1)
806     loadConstantOrVariable(t2, t0)
807     move t0, t2
808     orq t1, t2
809     btqz t2, tagMask, .slow
810     bqaeq t0, tagTypeNumber, .leftOK
811     btqnz t0, tagTypeNumber, .slow
812 .leftOK:
813     bqaeq t1, tagTypeNumber, .rightOK
814     btqnz t1, tagTypeNumber, .slow
815 .rightOK:
816     equalityOperation(t0, t1, t0)
817     loadisFromInstruction(1, t1)
818     orq ValueFalse, t0
819     storeq t0, [cfr, t1, 8]
820     dispatch(4)
821
822 .slow:
823     callSlowPath(slowPath)
824     dispatch(4)
825 end
826
# op_stricteq / op_nstricteq: instantiate strictEq with full 64-bit
# equality / inequality of the boxed values.
827 _llint_op_stricteq:
828     strictEq(
829         macro (left, right, result) cqeq left, right, result end,
830         _slow_path_stricteq)
831
832
833 _llint_op_nstricteq:
834     strictEq(
835         macro (left, right, result) cqneq left, right, result end,
836         _slow_path_nstricteq)
837
838
# Shared fast path for op_inc/op_dec: operand must be a boxed int32; apply
# the arithmetic (branching to slow on overflow), re-tag, and store back
# in place.
839 macro preOp(arithmeticOperation, slowPath)
840     traceExecution()
841     loadisFromInstruction(1, t0)
842     loadq [cfr, t0, 8], t1
843     bqb t1, tagTypeNumber, .slow
844     arithmeticOperation(t1, .slow)
845     orq tagTypeNumber, t1
846     storeq t1, [cfr, t0, 8]
847     dispatch(2)
848
849 .slow:
850     callSlowPath(slowPath)
851     dispatch(2)
852 end
853
# op_inc / op_dec: preOp with overflow-checked add/subtract of 1.
854 _llint_op_inc:
855     preOp(
856         macro (value, slow) baddio 1, value, slow end,
857         _slow_path_inc)
858
859
860 _llint_op_dec:
861     preOp(
862         macro (value, slow) bsubio 1, value, slow end,
863         _slow_path_dec)
864
865
# op_to_number: if the operand is already a number (int32 or double), store
# it through unchanged; otherwise convert via the slow path.
866 _llint_op_to_number:
867     traceExecution()
868     loadisFromInstruction(2, t0)
869     loadisFromInstruction(1, t1)
870     loadConstantOrVariable(t0, t2)
871     bqaeq t2, tagTypeNumber, .opToNumberIsImmediate
872     btqz t2, tagTypeNumber, .opToNumberSlow
873 .opToNumberIsImmediate:
874     storeq t2, [cfr, t1, 8]
875     dispatch(3)
876
877 .opToNumberSlow:
878     callSlowPath(_slow_path_to_number)
879     dispatch(3)
880
881
# op_negate: int32 path negates and re-tags, but defers INT32_MIN (whose
# negation overflows) and zero (whose negation is -0.0, a double) to the
# slow path via the 0x7fffffff mask test. Double path flips the sign bit.
882 _llint_op_negate:
883     traceExecution()
884     loadisFromInstruction(2, t0)
885     loadisFromInstruction(1, t1)
886     loadConstantOrVariable(t0, t2)
887     bqb t2, tagTypeNumber, .opNegateNotInt
888     btiz t2, 0x7fffffff, .opNegateSlow
889     negi t2
890     orq tagTypeNumber, t2
891     storeq t2, [cfr, t1, 8]
892     dispatch(3)
893 .opNegateNotInt:
894     btqz t2, tagTypeNumber, .opNegateSlow
895     xorq 0x8000000000000000, t2
896     storeq t2, [cfr, t1, 8]
897     dispatch(3)
898
899 .opNegateSlow:
900     callSlowPath(_slow_path_negate)
901     dispatch(3)
902
903
# Shared body for binary arithmetic ops. Tries the int32/int32 fast path via
# `integerOperationAndStore`; otherwise unboxes whichever operands are
# doubles (adding tagTypeNumber reverses the double encoding offset), runs
# `doubleOperation`, re-boxes, and stores. Any non-number operand goes to
# `slowPath`. Note t1 holds operand 3 (rhs) and t0 holds operand 2 (lhs).
904 macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
905     loadisFromInstruction(3, t0)
906     loadisFromInstruction(2, t2)
907     loadConstantOrVariable(t0, t1)
908     loadConstantOrVariable(t2, t0)
909     bqb t0, tagTypeNumber, .op1NotInt
910     bqb t1, tagTypeNumber, .op2NotInt
911     loadisFromInstruction(1, t2)
912     integerOperationAndStore(t1, t0, .slow, t2)
913     dispatch(5)
914
915 .op1NotInt:
916     # First operand is definitely not an int, the second operand could be anything.
917     btqz t0, tagTypeNumber, .slow
918     bqaeq t1, tagTypeNumber, .op1NotIntOp2Int
919     btqz t1, tagTypeNumber, .slow
920     addq tagTypeNumber, t1
921     fq2d t1, ft1
922     jmp .op1NotIntReady
923 .op1NotIntOp2Int:
924     ci2d t1, ft1
925 .op1NotIntReady:
926     loadisFromInstruction(1, t2)
927     addq tagTypeNumber, t0
928     fq2d t0, ft0
929     doubleOperation(ft1, ft0)
930     fd2q ft0, t0
931     subq tagTypeNumber, t0
932     storeq t0, [cfr, t2, 8]
933     dispatch(5)
934
935 .op2NotInt:
936     # First operand is definitely an int, the second is definitely not.
937     loadisFromInstruction(1, t2)
938     btqz t1, tagTypeNumber, .slow
939     ci2d t0, ft0
940     addq tagTypeNumber, t1
941     fq2d t1, ft1
942     doubleOperation(ft1, ft0)
943     fd2q ft0, t0
944     subq tagTypeNumber, t0
945     storeq t0, [cfr, t2, 8]
946     dispatch(5)
947
948 .slow:
949     callSlowPath(slowPath)
950     dispatch(5)
951 end
952
# Convenience wrapper: a binaryOpCustomStore whose integer path just runs
# the overflow-checked operation, re-tags the int32 result, and stores it.
953 macro binaryOp(integerOperation, doubleOperation, slowPath)
954     binaryOpCustomStore(
955         macro (left, right, slow, index)
956             integerOperation(left, right, slow)
957             orq tagTypeNumber, right
958             storeq right, [cfr, index, 8]
959         end,
960         doubleOperation, slowPath)
961 end
962
# op_add: overflow-checked int32 add / double add.
963 _llint_op_add:
964     traceExecution()
965     binaryOp(
966         macro (left, right, slow) baddio left, right, slow end,
967         macro (left, right) addd left, right end,
968         _slow_path_add)
969
970
# op_mul: overflow-checked int32 multiply. A zero result with either operand
# negative means -0, which cannot be an int32 — defer to the slow path.
971 _llint_op_mul:
972     traceExecution()
973     binaryOpCustomStore(
974         macro (left, right, slow, index)
975             # Assume t3 is scratchable.
976             move right, t3
977             bmulio left, t3, slow
978             btinz t3, .done
979             bilt left, 0, slow
980             bilt right, 0, slow
981         .done:
982             orq tagTypeNumber, t3
983             storeq t3, [cfr, index, 8]
984         end,
985         macro (left, right) muld left, right end,
986         _slow_path_mul)
987
988
# op_sub: overflow-checked int32 subtract / double subtract.
989 _llint_op_sub:
990     traceExecution()
991     binaryOp(
992         macro (left, right, slow) bsubio left, right, slow end,
993         macro (left, right) subd left, right end,
994         _slow_path_sub)
995
996
# op_div: x86-only int32 fast path (uses cdqi/idivi). Defers to the slow
# path on: zero numerator, INT32_MIN / -1 (overflow), negative-numerator /
# zero (negative zero / infinity), and any non-zero remainder (t1) since the
# result would not be an int32. Other platforms always use the slow path.
997 _llint_op_div:
998     traceExecution()
999     if X86_64 or X86_64_WIN
1000         binaryOpCustomStore(
1001             macro (left, right, slow, index)
1002                 # Assume t3 is scratchable.
1003                 btiz left, slow
1004                 bineq left, -1, .notNeg2TwoThe31DivByNeg1
1005                 bieq right, -2147483648, .slow
1006             .notNeg2TwoThe31DivByNeg1:
1007                 btinz right, .intOK
1008                 bilt left, 0, slow
1009             .intOK:
1010                 move left, t3
1011                 move right, t0
1012                 cdqi
1013                 idivi t3
1014                 btinz t1, slow
1015                 orq tagTypeNumber, t0
1016                 storeq t0, [cfr, index, 8]
1017             end,
1018             macro (left, right) divd left, right end,
1019             _slow_path_div)
1020     else
1021         callSlowPath(_slow_path_div)
1022         dispatch(5)
1023     end
1024
1025
# Shared body for bitwise/shift ops: both operands must be boxed int32s;
# apply `operation`, re-tag the int32 result, and store to operand 1.
# `advance` is the opcode length (shifts are 4, and/or/xor are 5).
1026 macro bitOp(operation, slowPath, advance)
1027     loadisFromInstruction(3, t0)
1028     loadisFromInstruction(2, t2)
1029     loadisFromInstruction(1, t3)
1030     loadConstantOrVariable(t0, t1)
1031     loadConstantOrVariable(t2, t0)
1032     bqb t0, tagTypeNumber, .slow
1033     bqb t1, tagTypeNumber, .slow
1034     operation(t1, t0)
1035     orq tagTypeNumber, t0
1036     storeq t0, [cfr, t3, 8]
1037     dispatch(advance)
1038
1039 .slow:
1040     callSlowPath(slowPath)
1041     dispatch(advance)
1042 end
1043
1044 _llint_op_lshift:
1045     traceExecution()
1046     bitOp(
1047         macro (left, right) lshifti left, right end,
1048         _slow_path_lshift,
1049         4)
1050
1051
# op_rshift: int32 arithmetic (sign-propagating) right shift via bitOp.
_llint_op_rshift:
    traceExecution()
    bitOp(
        macro (left, right) rshifti left, right end,
        _slow_path_rshift,
        4)
1058
1059
# op_urshift: logical right shift via bitOp. The result is reboxed as a signed
# int32; a negative result is presumably fixed up later by op_unsigned (below)
# — TODO(review): confirm against the bytecode generator.
_llint_op_urshift:
    traceExecution()
    bitOp(
        macro (left, right) urshifti left, right end,
        _slow_path_urshift,
        4)
1066
1067
# op_unsigned: reinterpret an int32 as unsigned. Non-negative values are
# already correct as-is; negative ones represent values >= 2^31, which need a
# double, so defer to the slow path.
_llint_op_unsigned:
    traceExecution()
    loadisFromInstruction(1, t0)        # destination virtual register index
    loadisFromInstruction(2, t1)        # source operand
    loadConstantOrVariable(t1, t2)
    bilt t2, 0, .opUnsignedSlow         # negative int32 => not representable, go slow
    storeq t2, [cfr, t0, 8]
    dispatch(3)
.opUnsignedSlow:
    callSlowPath(_slow_path_unsigned)
    dispatch(3)
1079
1080
# op_bitand: int32 bitwise AND via bitOp (5-slot opcode).
_llint_op_bitand:
    traceExecution()
    bitOp(
        macro (left, right) andi left, right end,
        _slow_path_bitand,
        5)
1087
1088
# op_bitxor: int32 bitwise XOR via bitOp (5-slot opcode).
_llint_op_bitxor:
    traceExecution()
    bitOp(
        macro (left, right) xori left, right end,
        _slow_path_bitxor,
        5)
1095
1096
# op_bitor: int32 bitwise OR via bitOp (5-slot opcode).
_llint_op_bitor:
    traceExecution()
    bitOp(
        macro (left, right) ori left, right end,
        _slow_path_bitor,
        5)
1103
1104
# op_check_has_instance: fast path succeeds only when the base (operand 3) is a
# cell with the ImplementsDefaultHasInstance flag; anything else goes slow.
_llint_op_check_has_instance:
    traceExecution()
    loadisFromInstruction(3, t1)
    loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow)
    btbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
    dispatch(5)

.opCheckHasInstanceSlow:
    callSlowPath(_llint_slow_path_check_has_instance)
    # dispatch(0): the slow path may branch, so it sets PC itself.
    dispatch(0)
1115
1116
# op_instanceof: walk the value's prototype chain looking for `prototype`.
# Fast path requires the prototype (operand 3) to be an object cell and the
# value (operand 2) to be a cell.
_llint_op_instanceof:
    traceExecution()
    # Actually do the work.
    loadisFromInstruction(3, t0)
    loadConstantOrVariableCell(t0, t1, .opInstanceofSlow)
    bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow
    loadisFromInstruction(2, t0)
    loadConstantOrVariableCell(t0, t2, .opInstanceofSlow)

    # Register state: t1 = prototype, t2 = value
    move 1, t0                          # optimistically assume true
.opInstanceofLoop:
    loadStructureAndClobberFirstArg(t2, t3)
    loadq Structure::m_prototype[t3], t2
    bqeq t2, t1, .opInstanceofDone      # found it on the chain => true
    btqz t2, tagMask, .opInstanceofLoop # still a cell => keep walking

    move 0, t0                          # ran off the chain => false
.opInstanceofDone:
    orq ValueFalse, t0                  # box the boolean
    loadisFromInstruction(1, t3)
    storeq t0, [cfr, t3, 8]
    dispatch(4)

.opInstanceofSlow:
    callSlowPath(_llint_slow_path_instanceof)
    dispatch(4)
1144
1145
# op_is_undefined: dst = (src === undefined). Non-cells compare directly
# against ValueUndefined. Cells are only "undefined" when they masquerade as
# undefined, and then only within their originating global object.
_llint_op_is_undefined:
    traceExecution()
    loadisFromInstruction(2, t1)        # source operand
    loadisFromInstruction(1, t2)        # destination virtual register index
    loadConstantOrVariable(t1, t0)
    btqz t0, tagMask, .opIsUndefinedCell
    cqeq t0, ValueUndefined, t3
    orq ValueFalse, t3                  # box the boolean
    storeq t3, [cfr, t2, 8]
    dispatch(3)
.opIsUndefinedCell:
    btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
    move ValueFalse, t1                 # ordinary cell => false
    storeq t1, [cfr, t2, 8]
    dispatch(3)
.masqueradesAsUndefined:
    # Masquerading cells read as undefined only inside their own global object.
    loadStructureWithScratch(t0, t3, t1)
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_globalObject[t1], t1
    cpeq Structure::m_globalObject[t3], t1, t0
    orq ValueFalse, t0                  # box the boolean
    storeq t0, [cfr, t2, 8]
    dispatch(3)
1169
1170
# op_is_boolean: dst = (src is true or false).
_llint_op_is_boolean:
    traceExecution()
    loadisFromInstruction(2, t1)
    loadisFromInstruction(1, t2)
    loadConstantOrVariable(t1, t0)
    xorq ValueFalse, t0                 # maps false/true to 0/1
    tqz t0, ~1, t0                      # boolean iff only bit 0 may remain set
    orq ValueFalse, t0                  # box the boolean
    storeq t0, [cfr, t2, 8]
    dispatch(3)
1181
1182
# op_is_number: dst = (src is any number). In the 64-bit value encoding every
# number (int32 or double) has at least one tagTypeNumber bit set.
_llint_op_is_number:
    traceExecution()
    loadisFromInstruction(2, t1)
    loadisFromInstruction(1, t2)
    loadConstantOrVariable(t1, t0)
    tqnz t0, tagTypeNumber, t1
    orq ValueFalse, t1                  # box the boolean
    storeq t1, [cfr, t2, 8]
    dispatch(3)
1192
1193
# op_is_string: dst = (src is a cell of StringType).
_llint_op_is_string:
    traceExecution()
    loadisFromInstruction(2, t1)
    loadisFromInstruction(1, t2)
    loadConstantOrVariable(t1, t0)
    btqnz t0, tagMask, .opIsStringNotCell   # non-cells are never strings
    cbeq JSCell::m_type[t0], StringType, t1
    orq ValueFalse, t1                  # box the boolean
    storeq t1, [cfr, t2, 8]
    dispatch(3)
.opIsStringNotCell:
    storeq ValueFalse, [cfr, t2, 8]
    dispatch(3)
1207
1208
# Load the property at a dynamic PropertyOffset. Inline offsets
# (< firstOutOfLineOffset) live directly after the JSObject header;
# out-of-line offsets index backwards (negated) from the butterfly.
# Clobbers objectAndStorage.
macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
    bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
    negi propertyOffsetAsInt            # out-of-line slots grow downwards from the butterfly
    sxi2q propertyOffsetAsInt, propertyOffsetAsInt
    jmp .ready
.isInline:
    # Rebase so the shared load expression below addresses inline storage.
    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
.ready:
    loadq (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8], value
end
1220
1221
# Store a property at a dynamic PropertyOffset; mirror of
# loadPropertyAtVariableOffset above. Clobbers objectAndStorage.
macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
    bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
    negi propertyOffsetAsInt            # out-of-line slots grow downwards from the butterfly
    sxi2q propertyOffsetAsInt, propertyOffsetAsInt
    jmp .ready
.isInline:
    # Rebase so the shared store expression below addresses inline storage.
    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
.ready:
    storeq value, (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
end
1233
# op_init_global_const: store a value through the raw variable-slot pointer
# embedded in the instruction stream (operand 1), with a write barrier on the
# global object.
_llint_op_init_global_const:
    traceExecution()
    writeBarrierOnGlobalObject(2)
    loadisFromInstruction(2, t1)        # source operand
    loadpFromInstruction(1, t0)         # pointer to the global variable slot
    loadConstantOrVariable(t1, t2)
    storeq t2, [t0]
    dispatch(5)
1242
1243
# Shared body of op_get_by_id and its out-of-line variant; the parameter
# selects inline vs. out-of-line property storage. Operand 4 caches the
# expected structure, operand 5 the property offset, operand 8 the value
# profile slot; the opcode is 9 slots long.
macro getById(getPropertyStorage)
    traceExecution()
    # We only do monomorphic get_by_id caching for now, and we do not modify the
    # opcode. We do, however, allow for the cache to change anytime if fails, since
    # ping-ponging is free. At best we get lucky and the get_by_id will continue
    # to take fast path on the new cache. At worst we take slow path, which is what
    # we would have been doing anyway.
    loadisFromInstruction(2, t0)
    loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
    loadStructureWithScratch(t3, t2, t1)
    loadpFromInstruction(4, t1)         # cached structure
    bpneq t2, t1, .opGetByIdSlow        # structure check: cache miss => slow
    getPropertyStorage(
        t3,
        t0,
        macro (propertyStorage, scratch)
            loadisFromInstruction(5, t2)    # cached byte offset into the storage
            loadisFromInstruction(1, t1)    # destination virtual register index
            loadq [propertyStorage, t2], scratch
            storeq scratch, [cfr, t1, 8]
            valueProfile(scratch, 8, t1)
            dispatch(9)
        end)

    .opGetByIdSlow:
        callSlowPath(_llint_slow_path_get_by_id)
        dispatch(9)
end
1272
# op_get_by_id: monomorphic cached property load from inline storage.
_llint_op_get_by_id:
    getById(withInlineStorage)
1275
1276
# op_get_by_id_out_of_line: same as op_get_by_id, but reads the butterfly.
_llint_op_get_by_id_out_of_line:
    getById(withOutOfLineStorage)
1279
1280
# op_get_array_length: specialized get_by_id for "length" on arrays. Bails to
# the generic get_by_id slow path unless the array profile says IsArray with a
# known indexing shape and the length fits in an int32.
_llint_op_get_array_length:
    traceExecution()
    loadisFromInstruction(2, t0)
    loadpFromInstruction(4, t1)         # ArrayProfile*
    loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow)
    move t3, t2
    arrayProfile(t2, t1, t0)            # t2 now holds the indexing type bits
    btiz t2, IsArray, .opGetArrayLengthSlow
    btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
    loadisFromInstruction(1, t1)        # destination virtual register index
    loadp JSObject::m_butterfly[t3], t0
    loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
    bilt t0, 0, .opGetArrayLengthSlow   # length > INT_MAX can't be boxed as int32
    orq tagTypeNumber, t0               # box the int32 length
    valueProfile(t0, 8, t2)
    storeq t0, [cfr, t1, 8]
    dispatch(9)

.opGetArrayLengthSlow:
    callSlowPath(_llint_slow_path_get_by_id)
    dispatch(9)
1302
1303
# op_get_arguments_length: fast path when the arguments object has not been
# created (its register is still empty/zero): the length is the frame's
# ArgumentCount minus one (the `this` slot).
_llint_op_get_arguments_length:
    traceExecution()
    loadisFromInstruction(2, t0)        # arguments-object virtual register index
    loadisFromInstruction(1, t1)        # destination virtual register index
    btqnz [cfr, t0, 8], .opGetArgumentsLengthSlow   # already created => slow
    loadi ArgumentCount + PayloadOffset[cfr], t2
    subi 1, t2                          # exclude `this`
    orq tagTypeNumber, t2               # box the int32 length
    storeq t2, [cfr, t1, 8]
    dispatch(4)

.opGetArgumentsLengthSlow:
    callSlowPath(_llint_slow_path_get_arguments_length)
    dispatch(4)
1318
1319
# Shared body of op_put_by_id (non-transition flavors); the parameter selects
# inline vs. out-of-line storage. Operand 4 caches the expected structure,
# operand 5 the byte offset. Bails to the shared .opPutByIdSlow label (defined
# after _llint_op_put_by_id below).
macro putById(getPropertyStorage)
    traceExecution()
    writeBarrierOnOperands(1, 3)
    loadisFromInstruction(1, t3)        # base object operand
    loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
    loadStructureWithScratch(t0, t2, t1)
    loadpFromInstruction(4, t1)         # cached structure
    bpneq t2, t1, .opPutByIdSlow        # structure check: cache miss => slow
    getPropertyStorage(
        t0,
        t3,
        macro (propertyStorage, scratch)
            loadisFromInstruction(5, t1)    # cached byte offset into the storage
            loadisFromInstruction(3, t2)    # value operand
            loadConstantOrVariable(t2, scratch)
            storeq scratch, [propertyStorage, t1]
            dispatch(9)
        end)
end
1339
_llint_op_put_by_id:
    putById(withInlineStorage)

# Shared slow path, referenced as .opPutByIdSlow by the putById and
# putByIdTransition macro expansions.
.opPutByIdSlow:
    callSlowPath(_llint_slow_path_put_by_id)
    dispatch(9)
1346
1347
# op_put_by_id_out_of_line: cached put to butterfly (out-of-line) storage.
_llint_op_put_by_id_out_of_line:
    putById(withOutOfLineStorage)
1350
1351
# Shared body of the transitioning put_by_id variants: stores the value, then
# switches the object to the new structure cached in operand 6.
# `additionalChecks` optionally validates the prototype chain (see
# structureChainChecks); `getPropertyStorage` selects inline vs. out-of-line.
macro putByIdTransition(additionalChecks, getPropertyStorage)
    traceExecution()
    writeBarrierOnOperand(1)
    loadisFromInstruction(1, t3)        # base object operand
    loadpFromInstruction(4, t1)         # cached old structure
    loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
    loadStructureWithScratch(t0, t2, t3)
    bpneq t2, t1, .opPutByIdSlow        # old-structure check: miss => slow
    additionalChecks(t1, t3, t2)
    loadisFromInstruction(3, t2)        # value operand
    loadisFromInstruction(5, t1)        # cached byte offset into the storage
    getPropertyStorage(
        t0,
        t3,
        macro (propertyStorage, scratch)
            addp t1, propertyStorage, t3
            loadConstantOrVariable(t2, t1)
            storeq t1, [t3]             # store the value first...
            loadpFromInstruction(6, t1) # ...then install the new structure
            loadi Structure::m_blob + StructureIDBlob::u.words.word1[t1], t1
            storei t1, JSCell::m_structureID[t0]
            dispatch(9)
        end)
end
1376
# Transition-check hook for the "direct" put_by_id transitions: nothing extra
# to verify.
macro noAdditionalChecks(oldStructure, scratch, scratch2)
end
1379
# Transition-check hook for "normal" put_by_id transitions: walk the base's
# prototype chain and require each prototype's structure to match the
# StructureChain cached in operand 7; any mismatch bails to .opPutByIdSlow.
# Clobbers oldStructure.
macro structureChainChecks(oldStructure, scratch, scratch2)
    const protoCell = oldStructure    # Reusing the oldStructure register for the proto
    loadpFromInstruction(7, scratch)    # cached StructureChain*
    assert(macro (ok) btpnz scratch, ok end)
    loadp StructureChain::m_vector[scratch], scratch
    assert(macro (ok) btpnz scratch, ok end)
    bqeq Structure::m_prototype[oldStructure], ValueNull, .done
.loop:
    loadq Structure::m_prototype[oldStructure], protoCell
    loadStructureAndClobberFirstArg(protoCell, scratch2)
    move scratch2, oldStructure
    bpneq oldStructure, [scratch], .opPutByIdSlow   # chain entry mismatch => slow
    addp 8, scratch                     # next cached structure in the vector
    bqneq Structure::m_prototype[oldStructure], ValueNull, .loop
.done:
end
1396
# Direct transition (no prototype-chain check), inline storage.
_llint_op_put_by_id_transition_direct:
    putByIdTransition(noAdditionalChecks, withInlineStorage)
1399
1400
# Direct transition (no prototype-chain check), out-of-line storage.
_llint_op_put_by_id_transition_direct_out_of_line:
    putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
1403
1404
# Normal transition (prototype chain validated), inline storage.
_llint_op_put_by_id_transition_normal:
    putByIdTransition(structureChainChecks, withInlineStorage)
1407
1408
# Normal transition (prototype chain validated), out-of-line storage.
_llint_op_put_by_id_transition_normal_out_of_line:
    putByIdTransition(structureChainChecks, withOutOfLineStorage)
1411
1412
# op_get_by_val: indexed load. Fast paths for Int32/Contiguous, Double, and
# ArrayStorage shapes; holes, NaN-boxed-unsafe doubles, out-of-bounds, and
# other shapes fall through to the slow path. Operand 4 is the ArrayProfile,
# operand 5 the value profile, operand 1 the destination.
_llint_op_get_by_val:
    traceExecution()
    loadisFromInstruction(2, t2)        # base operand
    loadConstantOrVariableCell(t2, t0, .opGetByValSlow)
    loadpFromInstruction(4, t3)         # ArrayProfile*
    move t0, t2
    arrayProfile(t2, t3, t1)            # t2 now holds the indexing type bits
    loadisFromInstruction(3, t3)        # subscript operand
    loadConstantOrVariableInt32(t3, t1, .opGetByValSlow)
    sxi2q t1, t1
    loadp JSObject::m_butterfly[t0], t3
    andi IndexingShapeMask, t2
    bieq t2, Int32Shape, .opGetByValIsContiguous
    bineq t2, ContiguousShape, .opGetByValNotContiguous
.opGetByValIsContiguous:

    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
    loadisFromInstruction(1, t0)        # destination virtual register index
    loadq [t3, t1, 8], t2
    btqz t2, .opGetByValOutOfBounds     # empty slot (hole) => treat as OOB
    jmp .opGetByValDone

.opGetByValNotContiguous:
    bineq t2, DoubleShape, .opGetByValNotDouble
    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
    # Was a hand-inlined `loadis 8[PB, PC, 8], t0`; use the helper macro like
    # the sibling branches for consistency (1 * 8 == 8).
    loadisFromInstruction(1, t0)
    loadd [t3, t1, 8], ft0
    bdnequn ft0, ft0, .opGetByValOutOfBounds    # NaN marks a hole in DoubleShape
    fd2q ft0, t2
    subq tagTypeNumber, t2              # box the double
    jmp .opGetByValDone

.opGetByValNotDouble:
    subi ArrayStorageShape, t2
    bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
    loadisFromInstruction(1, t0)        # destination virtual register index
    loadq ArrayStorage::m_vector[t3, t1, 8], t2
    btqz t2, .opGetByValOutOfBounds     # empty slot (hole) => treat as OOB

.opGetByValDone:
    storeq t2, [cfr, t0, 8]
    valueProfile(t2, 5, t0)
    dispatch(6)

.opGetByValOutOfBounds:
    loadpFromInstruction(4, t0)
    storeb 1, ArrayProfile::m_outOfBounds[t0]   # tell the JIT this access goes OOB
.opGetByValSlow:
    callSlowPath(_llint_slow_path_get_by_val)
    dispatch(6)
1464
1465
_llint_op_get_argument_by_val:
    # FIXME: At some point we should array profile this. Right now it isn't necessary
    # since the DFG will never turn a get_argument_by_val into a GetByVal.
    traceExecution()
    loadisFromInstruction(2, t0)        # arguments-object virtual register index
    loadisFromInstruction(3, t1)        # subscript operand
    # Fast path only if the arguments object was never materialized.
    btqnz [cfr, t0, 8], .opGetArgumentByValSlow
    loadConstantOrVariableInt32(t1, t2, .opGetArgumentByValSlow)
    addi 1, t2                          # skip the `this` slot
    loadi ArgumentCount + PayloadOffset[cfr], t1
    biaeq t2, t1, .opGetArgumentByValSlow   # index out of range => slow
    loadisFromInstruction(1, t3)        # destination virtual register index
    loadpFromInstruction(6, t1)         # value profile
    loadq ThisArgumentOffset[cfr, t2, 8], t0
    storeq t0, [cfr, t3, 8]
    valueProfile(t0, 6, t1)
    dispatch(7)

.opGetArgumentByValSlow:
    callSlowPath(_llint_slow_path_get_argument_by_val)
    dispatch(7)
1487
1488
# Store helper for the contiguous-shape put_by_val fast paths.
# Expects: t0 = butterfly, t3 = index. In-bounds stores go straight through;
# stores just past publicLength (but within vectorLength) grow the array and
# record a may-store-to-hole hint; anything past vectorLength is out of bounds.
macro contiguousPutByVal(storeCallback)
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
    loadisFromInstruction(3, t2)        # value operand
    storeCallback(t2, t1, [t0, t3, 8])
    dispatch(5)

.outOfBounds:
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
    # Was a hand-inlined `loadp 32[PB, PC, 8], t2`; use the helper macro like
    # the rest of the file for consistency (4 * 8 == 32; operand 4 is the
    # ArrayProfile).
    loadpFromInstruction(4, t2)
    storeb 1, ArrayProfile::m_mayStoreToHole[t2]
    addi 1, t3, t2                      # grow publicLength to index + 1
    storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
    jmp .storeResult
end
1504
# Shared body of op_put_by_val and op_put_by_val_direct. Fast paths for
# Int32/Double/Contiguous shapes (via contiguousPutByVal) and ArrayStorage;
# everything else, and all out-of-bounds stores, take `slowPath`.
macro putByVal(slowPath)
    traceExecution()
    writeBarrierOnOperands(1, 3)
    loadisFromInstruction(1, t0)        # base operand
    loadConstantOrVariableCell(t0, t1, .opPutByValSlow)
    loadpFromInstruction(4, t3)         # ArrayProfile*
    move t1, t2
    arrayProfile(t2, t3, t0)            # t2 now holds the indexing type bits
    loadisFromInstruction(2, t0)        # subscript operand
    loadConstantOrVariableInt32(t0, t3, .opPutByValSlow)
    sxi2q t3, t3
    loadp JSObject::m_butterfly[t1], t0
    andi IndexingShapeMask, t2
    bineq t2, Int32Shape, .opPutByValNotInt32
    contiguousPutByVal(
        macro (operand, scratch, address)
            loadConstantOrVariable(operand, scratch)
            bpb scratch, tagTypeNumber, .opPutByValSlow # must store an int32 here
            storep scratch, address
        end)

.opPutByValNotInt32:
    bineq t2, DoubleShape, .opPutByValNotDouble
    contiguousPutByVal(
        macro (operand, scratch, address)
            loadConstantOrVariable(operand, scratch)
            bqb scratch, tagTypeNumber, .notInt
            ci2d scratch, ft0           # int32 => convert to double for storage
            jmp .ready
        .notInt:
            addp tagTypeNumber, scratch # unbox the double
            fq2d scratch, ft0
            bdnequn ft0, ft0, .opPutByValSlow   # NaN would read back as a hole
        .ready:
            stored ft0, address
        end)

.opPutByValNotDouble:
    bineq t2, ContiguousShape, .opPutByValNotContiguous
    contiguousPutByVal(
        macro (operand, scratch, address)
            loadConstantOrVariable(operand, scratch)
            storep scratch, address     # contiguous holds any JSValue
        end)

.opPutByValNotContiguous:
    bineq t2, ArrayStorageShape, .opPutByValSlow
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
    btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
    loadisFromInstruction(3, t2)        # value operand
    loadConstantOrVariable(t2, t1)
    storeq t1, ArrayStorage::m_vector[t0, t3, 8]
    dispatch(5)

.opPutByValArrayStorageEmpty:
    # Filling a hole: bump m_numValuesInVector, record the hint, and extend
    # publicLength if the index is past it.
    loadpFromInstruction(4, t1)
    storeb 1, ArrayProfile::m_mayStoreToHole[t1]
    addi 1, ArrayStorage::m_numValuesInVector[t0]
    bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
    addi 1, t3, t1
    storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
    jmp .opPutByValArrayStorageStoreResult

.opPutByValOutOfBounds:
    loadpFromInstruction(4, t0)
    storeb 1, ArrayProfile::m_outOfBounds[t0]   # tell the JIT this access goes OOB
.opPutByValSlow:
    callSlowPath(slowPath)
    dispatch(5)
end
1576
# op_put_by_val: ordinary indexed store (may consult the prototype chain).
_llint_op_put_by_val:
    putByVal(_llint_slow_path_put_by_val)
1579
# op_put_by_val_direct: indexed store that defines directly on the base.
_llint_op_put_by_val_direct:
    putByVal(_llint_slow_path_put_by_val_direct)
1582
1583
# op_jmp: unconditional jump by the (signed) instruction offset in operand 1.
_llint_op_jmp:
    traceExecution()
    dispatchIntIndirect(1)
1587
1588
# Shared body of the boolean conditional jumps (jtrue/jfalse). The fast path
# handles only true/false values; anything else (needing ToBoolean) goes slow.
macro jumpTrueOrFalse(conditionOp, slow)
    loadisFromInstruction(1, t1)        # condition operand
    loadConstantOrVariable(t1, t0)
    xorq ValueFalse, t0                 # false => 0, true => 1
    btqnz t0, -1, .slow                 # any other bit set => not a boolean
    conditionOp(t0, .target)
    dispatch(3)

.target:
    dispatchIntIndirect(2)              # taken: jump by offset in operand 2

.slow:
    callSlowPath(slow)
    dispatch(0)                         # slow path set PC itself
end
1604
1605
# Shared body of jeq_null/jneq_null. Cells are dispatched to `cellHandler`
# (which must deal with MasqueradesAsUndefined); immediates have the undefined
# tag bit masked off so null and undefined compare alike in `immediateHandler`.
macro equalNull(cellHandler, immediateHandler)
    loadisFromInstruction(1, t0)
    assertNotConstant(t0)
    loadq [cfr, t0, 8], t0
    btqnz t0, tagMask, .immediate
    loadStructureWithScratch(t0, t2, t1)
    cellHandler(t2, JSCell::m_flags[t0], .target)
    dispatch(3)

.target:
    dispatchIntIndirect(2)              # taken: jump by offset in operand 2

.immediate:
    andq ~TagBitUndefined, t0           # fold undefined into null for the compare
    immediateHandler(t0, .target)
    dispatch(3)
end
1623
# op_jeq_null: jump if the operand == null/undefined. A cell compares equal
# only if it masquerades as undefined within this code block's global object.
_llint_op_jeq_null:
    traceExecution()
    equalNull(
        macro (structure, value, target) 
            btbz value, MasqueradesAsUndefined, .notMasqueradesAsUndefined
            loadp CodeBlock[cfr], t0
            loadp CodeBlock::m_globalObject[t0], t0
            bpeq Structure::m_globalObject[structure], t0, target
.notMasqueradesAsUndefined:
        end,
        macro (value, target) bqeq value, ValueNull, target end)
1635
1636
# op_jneq_null: inverse of op_jeq_null.
_llint_op_jneq_null:
    traceExecution()
    equalNull(
        macro (structure, value, target) 
            btbz value, MasqueradesAsUndefined, target  # plain cell => not null
            loadp CodeBlock[cfr], t0
            loadp CodeBlock::m_globalObject[t0], t0
            bpneq Structure::m_globalObject[structure], t0, target
        end,
        macro (value, target) bqneq value, ValueNull, target end)
1647
1648
# op_jneq_ptr: jump (offset in operand 3) if the register in operand 1 does
# not hold the global object's special pointer indexed by operand 2.
_llint_op_jneq_ptr:
    traceExecution()
    loadisFromInstruction(1, t0)
    loadisFromInstruction(2, t1)        # index into m_specialPointers
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_globalObject[t2], t2
    loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1
    bpneq t1, [cfr, t0, 8], .opJneqPtrTarget
    dispatch(4)

.opJneqPtrTarget:
    dispatchIntIndirect(3)
1661
1662
# Shared body of the numeric comparison jumps (jless etc.). Fast path compares
# two int32s; mixed/double cases convert both sides to doubles; non-numbers
# take the slow path. Taken branches jump by the offset in operand 3.
macro compare(integerCompare, doubleCompare, slowPath)
    loadisFromInstruction(1, t2)
    loadisFromInstruction(2, t3)
    loadConstantOrVariable(t2, t0)      # t0 = op1
    loadConstantOrVariable(t3, t1)      # t1 = op2
    bqb t0, tagTypeNumber, .op1NotInt
    bqb t1, tagTypeNumber, .op2NotInt
    integerCompare(t0, t1, .jumpTarget) # both int32
    dispatch(4)

.op1NotInt:
    btqz t0, tagTypeNumber, .slow       # op1 not a number at all
    bqb t1, tagTypeNumber, .op1NotIntOp2NotInt
    ci2d t1, ft1                        # op2 is int32 => promote to double
    jmp .op1NotIntReady
.op1NotIntOp2NotInt:
    btqz t1, tagTypeNumber, .slow       # op2 not a number at all
    addq tagTypeNumber, t1              # unbox op2 double
    fq2d t1, ft1
.op1NotIntReady:
    addq tagTypeNumber, t0              # unbox op1 double
    fq2d t0, ft0
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.op2NotInt:
    ci2d t0, ft0                        # op1 is int32 => promote to double
    btqz t1, tagTypeNumber, .slow       # op2 not a number at all
    addq tagTypeNumber, t1              # unbox op2 double
    fq2d t1, ft1
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.jumpTarget:
    dispatchIntIndirect(3)

.slow:
    callSlowPath(slowPath)
    dispatch(0)                         # slow path set PC itself
end
1703
1704
# op_switch_imm: jump-table switch on an int32 scrutinee. Operand 1 selects
# the SimpleJumpTable in the code block's rare data; operand 2 is the default
# (fall-through) offset; operand 3 is the scrutinee. Doubles go slow.
_llint_op_switch_imm:
    traceExecution()
    loadisFromInstruction(3, t2)        # scrutinee operand
    loadisFromInstruction(1, t3)        # jump table index
    loadConstantOrVariable(t2, t1)
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3    # FIXME: would be nice to peephole this!
    loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2                         # t2 = &table
    bqb t1, tagTypeNumber, .opSwitchImmNotInt
    subi SimpleJumpTable::min[t2], t1   # rebase scrutinee to table start
    biaeq t1, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
    loadis [t3, t1, 4], t1
    btiz t1, .opSwitchImmFallThrough    # zero offset = no case for this value
    dispatch(t1)

.opSwitchImmNotInt:
    btqnz t1, tagTypeNumber, .opSwitchImmSlow   # Go slow if it's a double.
.opSwitchImmFallThrough:
    dispatchIntIndirect(2)              # default target

.opSwitchImmSlow:
    callSlowPath(_llint_slow_path_switch_imm)
    dispatch(0)                         # slow path set PC itself
1731
1732
# op_switch_char: jump-table switch on a single-character string. Only
# resolved (non-rope) one-character strings take the fast path; ropes go to
# the slow path, everything else falls through to the default target.
_llint_op_switch_char:
    traceExecution()
    loadisFromInstruction(3, t2)        # scrutinee operand
    loadisFromInstruction(1, t3)        # jump table index
    loadConstantOrVariable(t2, t1)
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3
    loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2                         # t2 = &table
    btqnz t1, tagMask, .opSwitchCharFallThrough     # not a cell
    bbneq JSCell::m_type[t1], StringType, .opSwitchCharFallThrough
    bineq JSString::m_length[t1], 1, .opSwitchCharFallThrough
    loadp JSString::m_value[t1], t0
    btpz  t0, .opSwitchOnRope           # null StringImpl => rope, must resolve
    loadp StringImpl::m_data8[t0], t1
    btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
    loadh [t1], t0                      # 16-bit character
    jmp .opSwitchCharReady
.opSwitchChar8Bit:
    loadb [t1], t0                      # 8-bit character
.opSwitchCharReady:
    subi SimpleJumpTable::min[t2], t0   # rebase char code to table start
    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
    loadis [t2, t0, 4], t1
    btiz t1, .opSwitchCharFallThrough   # zero offset = no case for this char
    dispatch(t1)

.opSwitchCharFallThrough:
    dispatchIntIndirect(2)              # default target

.opSwitchOnRope:
    callSlowPath(_llint_slow_path_switch_char)
    dispatch(0)                         # slow path set PC itself
1768
1769
# op_new_func: create a function object. If the flag in operand 4 is set and
# the destination register is already populated, skip creation (conditional
# form); otherwise call the slow path to allocate.
_llint_op_new_func:
    traceExecution()
    loadisFromInstruction(4, t2)        # "should check" flag
    btiz t2, .opNewFuncUnchecked        # flag clear => always create
    loadisFromInstruction(1, t1)
    btqnz [cfr, t1, 8], .opNewFuncDone  # already created => nothing to do
.opNewFuncUnchecked:
    callSlowPath(_llint_slow_path_new_func)
.opNewFuncDone:
    dispatch(5)
1780
# Record the structure of the `this` argument of a call into the call's
# ArrayProfile (skipped when `this` is not a cell). Used by the call opcodes.
macro arrayProfileForCall()
    loadisFromInstruction(4, t3)        # callee frame offset (in registers)
    negp t3
    loadq ThisArgumentOffset[cfr, t3, 8], t0
    btqnz t0, tagMask, .done            # non-cell `this` => nothing to profile
    loadpFromInstruction((CallOpCodeSize - 2), t1)  # ArrayProfile* slot
    loadi JSCell::m_structureID[t0], t3
    storei t3, ArrayProfile::m_lastSeenStructureID[t1]
.done:
end
1791
# Shared body of the call opcodes. Fast path: the callee matches the one
# cached in the LLIntCallLinkInfo (operand 5), so build the callee frame
# (operand 4 = frame offset, operand 3 = argument count) and jump straight to
# the linked machine code. Any mismatch takes the slow (linking) path.
macro doCall(slowPath)
    loadisFromInstruction(2, t0)        # callee operand
    loadpFromInstruction(5, t1)         # LLIntCallLinkInfo*
    loadp LLIntCallLinkInfo::callee[t1], t2
    loadConstantOrVariable(t0, t3)
    bqneq t3, t2, .opCallSlow           # cached callee mismatch => link slowly
    loadisFromInstruction(4, t3)        # new frame offset, in registers
    lshifti 3, t3                       # registers -> bytes
    negp t3
    addp cfr, t3                        # t3 = new frame base
    storeq t2, Callee[t3]
    loadisFromInstruction(3, t2)        # argument count
    storei PC, ArgumentCount + TagOffset[cfr]   # save our bytecode PC for return
    storei t2, ArgumentCount + PayloadOffset[t3]
    addp CallerFrameAndPCSize, t3
    callTargetFunction(t1, t3)

.opCallSlow:
    slowPathForCall(slowPath)
end
1812
# op_tear_off_arguments: if the arguments object (operand 1) was materialized,
# detach it from the (about to die) activation via the slow path; otherwise
# this is a no-op.
_llint_op_tear_off_arguments:
    traceExecution()
    loadisFromInstruction(1, t0)
    btqz [cfr, t0, 8], .opTearOffArgumentsNotCreated    # never created => skip
    callSlowPath(_llint_slow_path_tear_off_arguments)
.opTearOffArgumentsNotCreated:
    dispatch(3)
1820
1821
# op_ret: return the value in operand 1. doReturn expects the return value in
# t0 — TODO(review): confirm against doReturn's definition elsewhere.
_llint_op_ret:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadisFromInstruction(1, t2)
    loadConstantOrVariable(t2, t0)
    doReturn()
1828
1829
# op_to_primitive: primitives and strings pass through unchanged; any other
# cell (i.e. an object) needs the full ToPrimitive algorithm on the slow path.
_llint_op_to_primitive:
    traceExecution()
    loadisFromInstruction(2, t2)        # source operand
    loadisFromInstruction(1, t3)        # destination virtual register index
    loadConstantOrVariable(t2, t0)
    btqnz t0, tagMask, .opToPrimitiveIsImm  # non-cells are already primitive
    bbneq JSCell::m_type[t0], StringType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
    storeq t0, [cfr, t3, 8]
    dispatch(3)

.opToPrimitiveSlowCase:
    callSlowPath(_slow_path_to_primitive)
    dispatch(3)
1844
1845
_llint_op_catch:
    # This is where we end up from the JIT's throw trampoline (because the
    # machine code return address will be set to _llint_op_catch), and from
    # the interpreter's throw trampoline (see _llint_throw_trampoline).
    # The throwing code must have known that we were throwing to the interpreter,
    # and have set VM::targetInterpreterPCForThrow.
    loadp Callee[cfr], t3
    andp MarkedBlockMask, t3            # cells are block-aligned: mask down to the MarkedBlock
    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3    # t3 = VM*
    loadp VM::callFrameForThrow[t3], cfr
    loadp VM::vmEntryFrameForThrow[t3], t0
    storep t0, VM::topVMEntryFrame[t3]
    restoreStackPointerAfterCall()

    # Recompute PB/PC for the handler's code block; PC is converted from a raw
    # instruction pointer to an instruction index (byte offset / 8).
    loadp CodeBlock[cfr], PB
    loadp CodeBlock::m_instructions[PB], PB
    loadp VM::targetInterpreterPCForThrow[t3], PC
    subp PB, PC
    rshiftp 3, PC
    loadq VM::m_exception[t3], t0
    storeq 0, VM::m_exception[t3]       # clear the pending exception
    loadisFromInstruction(1, t2)
    storeq t0, [cfr, t2, 8]             # expose the exception to the catch block
    traceExecution()
    dispatch(2)
1871
1872
# op_end: terminate the current code block, returning the value in operand 1
# (which must be a real register, not a constant).
_llint_op_end:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadisFromInstruction(1, t0)
    assertNotConstant(t0)
    loadq [cfr, t0, 8], t0              # doReturn expects the result in t0
    doReturn()
1880
1881
# Entry point for exceptions raised by LLInt slow paths: let the C++ handler
# unwind, then jump to whatever machine-code throw target it selected.
_llint_throw_from_slow_path_trampoline:
    callSlowPath(_llint_slow_path_handle_exception)

    # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
    # the throw target is not necessarily interpreted code, we come to here.
    # This essentially emulates the JIT's throwing protocol.
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    jmp VM::targetMachinePCForThrow[t1]
1891
1892
# Variant used when the throw happens during a call: pop the return address
# into t2 first so the frame looks like a normal slow-path throw.
_llint_throw_during_call_trampoline:
    preserveReturnAddressAfterCall(t2)
    jmp _llint_throw_from_slow_path_trampoline
1896
1897
# Trampoline for calling a host (C/C++) function from JS. Marks the frame as
# native (null CodeBlock), publishes it as VM::topCallFrame, calls the native
# entry point with (ExecState*, JSFunction callee) per the platform ABI, and
# routes to the throw trampoline if the call left a pending exception.
macro nativeCallTrampoline(executableOffsetToFunction)

    functionPrologue()
    storep 0, CodeBlock[cfr]            # native frames have no CodeBlock
    if X86_64 or X86_64_WIN
        if X86_64
            const arg1 = t4  # t4 = rdi
            const arg2 = t5  # t5 = rsi
            const temp = t1
        elsif X86_64_WIN
            const arg1 = t2  # t2 = rcx
            const arg2 = t1  # t1 = rdx
            const temp = t0
        end
        loadp Callee[cfr], t0
        andp MarkedBlockMask, t0, t1    # cells are block-aligned: recover the MarkedBlock
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1    # t1 = VM*
        storep cfr, VM::topCallFrame[t1]
        move cfr, arg1                  # arg1 = exec
        loadp Callee[cfr], arg2         # arg2 = callee JSFunction
        loadp JSFunction::m_executable[arg2], temp
        checkStackPointerAlignment(t3, 0xdead0001)
        if X86_64_WIN
            subp 32, sp                 # Windows x64 shadow space
        end
        call executableOffsetToFunction[temp]
        if X86_64_WIN
            addp 32, sp
        end
        loadp Callee[cfr], t3           # recover VM* after the call
        andp MarkedBlockMask, t3
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
    elsif ARM64 or C_LOOP
        loadp Callee[cfr], t0
        andp MarkedBlockMask, t0, t1
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1    # t1 = VM*
        storep cfr, VM::topCallFrame[t1]
        preserveReturnAddressAfterCall(t3)
        storep t3, ReturnPC[cfr]
        move cfr, t0                    # arg1 = exec
        loadp Callee[cfr], t1           # arg2 = callee JSFunction
        loadp JSFunction::m_executable[t1], t1
        if C_LOOP
            cloopCallNative executableOffsetToFunction[t1]
        else
            call executableOffsetToFunction[t1]
        end
        restoreReturnAddressBeforeReturn(t3)
        loadp Callee[cfr], t3           # recover VM* after the call
        andp MarkedBlockMask, t3
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
    else
        error
    end

    functionEpilogue()

    btqnz VM::m_exception[t3], .handleException
    ret

.handleException:
    storep cfr, VM::topCallFrame[t3]
    restoreStackPointerAfterCall()
    jmp _llint_throw_from_slow_path_trampoline
end
1963
1964
# Store this code block's global object into the virtual register named by
# instruction operand `dst`. Clobbers t0, t1.
macro getGlobalObject(dst)
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadisFromInstruction(dst, t1)
    storeq t0, [cfr, t1, 8]
end
1971
# Bail to `slowPath` if the global object's var-injection watchpoint has
# fired (i.e. an eval/with may have injected variables). Clobbers t0.
macro varInjectionCheck(slowPath)
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0
    bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath
end
1978
# Walk `m_next` from the starting scope (operand 2) the number of times given
# by operand 5, and store the resulting scope into operand 1. Clobbers t0-t2.
macro resolveScope()
    loadisFromInstruction(5, t2)        # number of scopes to skip
    loadisFromInstruction(2, t0)        # starting scope virtual register
    loadp [cfr, t0, 8], t0
    btiz t2, .resolveScopeLoopEnd

.resolveScopeLoop:
    loadp JSScope::m_next[t0], t0
    subi 1, t2
    btinz t2, .resolveScopeLoop

.resolveScopeLoopEnd:
    loadisFromInstruction(1, t1)        # destination virtual register index
    storeq t0, [cfr, t1, 8]
end
1994
1995
# op_resolve_scope: dispatch on the ResolveType in operand 4. Global cases
# resolve to the global object; closure cases walk the scope chain; the
# *WithVarInjectionChecks cases additionally verify the var-injection
# watchpoint; anything unknown resolves dynamically via the slow path.
_llint_op_resolve_scope:
    traceExecution()
    loadisFromInstruction(4, t0)        # ResolveType

#rGlobalProperty:
    bineq t0, GlobalProperty, .rGlobalVar
    getGlobalObject(1)
    dispatch(7)

.rGlobalVar:
    bineq t0, GlobalVar, .rClosureVar
    getGlobalObject(1)
    dispatch(7)

.rClosureVar:
    bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
    resolveScope()
    dispatch(7)

.rGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
    varInjectionCheck(.rDynamic)
    getGlobalObject(1)
    dispatch(7)

.rGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
    varInjectionCheck(.rDynamic)
    getGlobalObject(1)
    dispatch(7)

.rClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
    varInjectionCheck(.rDynamic)
    resolveScope()
    dispatch(7)

.rDynamic:
    callSlowPath(_llint_slow_path_resolve_scope)
    dispatch(7)
2036
2037
# Load the cell from the virtual register named by `operand` into t0 and
# branch to `slowPath` unless its Structure matches the Structure cached in
# instruction slot 5. Clobbers t1 (scratch) and t2 (loaded structure).
macro loadWithStructureCheck(operand, slowPath)
    loadisFromInstruction(operand, t0)  # t0 = virtual register index
    loadq [cfr, t0, 8], t0              # t0 = the cell (e.g. the scope object)
    loadStructureWithScratch(t0, t2, t1) # t2 = t0's Structure (t1 is scratch)
    loadpFromInstruction(5, t1)         # t1 = cached Structure from the instruction
    bpneq t2, t1, slowPath              # structure mismatch => slow path
end
2045
# Read a property from the object in t0 at the variable offset given by
# instruction operand 6, value-profile it (slot 7), and store it into the
# operand-1 result register. Expects t0 = object (set up by caller).
macro getProperty()
    loadisFromInstruction(6, t1)        # t1 = property offset
    loadPropertyAtVariableOffset(t1, t0, t2)  # t2 = t0's property at offset t1
    valueProfile(t2, 7, t0)             # record the loaded value in the profile slot
    loadisFromInstruction(1, t0)        # t0 = destination virtual register index
    storeq t2, [cfr, t0, 8]             # frame[dst] = value
end
2053
# Load a global variable through the direct slot pointer cached in
# instruction slot 6, value-profile it (slot 7), and store it into the
# operand-1 result register.
macro getGlobalVar()
    loadpFromInstruction(6, t0)         # t0 = pointer to the variable's storage slot
    loadq [t0], t0                      # t0 = *slot (the variable's value)
    valueProfile(t0, 7, t1)             # record the loaded value in the profile slot
    loadisFromInstruction(1, t1)        # t1 = destination virtual register index
    storeq t0, [cfr, t1, 8]             # frame[dst] = value
end
2061
# Read a closure variable from the environment record in t0, indexed by
# instruction operand 6, value-profile it (slot 7), and store it into the
# operand-1 result register. Expects t0 = JSEnvironmentRecord (set by caller).
macro getClosureVar()
    loadp JSEnvironmentRecord::m_registers[t0], t0  # t0 = record's register storage
    loadisFromInstruction(6, t1)        # t1 = register index within the record
    loadq [t0, t1, 8], t0               # t0 = registers[index]
    valueProfile(t0, 7, t1)             # record the loaded value in the profile slot
    loadisFromInstruction(1, t1)        # t1 = destination virtual register index
    storeq t0, [cfr, t1, 8]             # frame[dst] = value
end
2070
# op_get_from_scope: load a variable out of a previously-resolved scope,
# dispatching on the ResolveMode bits of operand 4. GlobalProperty cases go
# through an inline-cached structure check; GlobalVar cases read through a
# cached slot pointer; ClosureVar cases index the environment record. The
# *WithVarInjectionChecks variants verify the var-injection watchpoint first.
# Instruction length: 8 slots (see dispatch(8)).
_llint_op_get_from_scope:
    traceExecution()
    loadisFromInstruction(4, t0)        # t0 = operand 4 (resolve type + mode)
    andi ResolveModeMask, t0            # keep just the resolve-mode bits

#gGlobalProperty:
    bineq t0, GlobalProperty, .gGlobalVar
    loadWithStructureCheck(2, .gDynamic) # t0 = scope object; bail on structure mismatch
    getProperty()
    dispatch(8)

.gGlobalVar:
    bineq t0, GlobalVar, .gClosureVar
    getGlobalVar()                      # read through the cached slot pointer
    dispatch(8)

.gClosureVar:
    bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
    loadVariable(2, t0)                 # t0 = the resolved scope (environment record)
    getClosureVar()
    dispatch(8)

.gGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
    loadWithStructureCheck(2, .gDynamic) # structure check subsumes the injection check here
    getProperty()
    dispatch(8)

.gGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
    varInjectionCheck(.gDynamic)        # injected var => must resolve dynamically
    loadVariable(2, t0)
    getGlobalVar()
    dispatch(8)

.gClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
    varInjectionCheck(.gDynamic)
    loadVariable(2, t0)
    getClosureVar()
    dispatch(8)

.gDynamic:
    # Unknown/dynamic mode, or an inline-cache/injection check failed.
    callSlowPath(_llint_slow_path_get_from_scope)
    dispatch(8)
2116
2117
# Store the value named by instruction operand 3 into the object in t0 at
# the variable offset given by operand 6. Expects t0 = object (set by caller).
macro putProperty()
    loadisFromInstruction(3, t1)        # t1 = value operand (register or constant)
    loadConstantOrVariable(t1, t2)      # t2 = the value to store
    loadisFromInstruction(6, t1)        # t1 = property offset
    storePropertyAtVariableOffset(t1, t0, t2)  # t0's property at offset t1 = t2
end
2124
# Store the value named by instruction operand 3 into a global variable
# through the slot pointer cached in slot 6, after notifying the variable's
# WatchpointSet (slot 5) of the write. notifyWrite bails to .pDynamic (a
# label at this macro's use site inside op_put_to_scope) when the write
# must be handled by the slow path.
macro putGlobalVar()
    loadisFromInstruction(3, t0)        # t0 = value operand (register or constant)
    loadConstantOrVariable(t0, t1)      # t1 = the value to store
    loadpFromInstruction(5, t2)         # t2 = the variable's WatchpointSet*
    notifyWrite(t2, t1, t0, .pDynamic)  # fire/validate watchpoint; slow path if needed
    loadpFromInstruction(6, t0)         # t0 = pointer to the variable's storage slot
    storeq t1, [t0]                     # *slot = value
end
2133
# Store the value named by instruction operand 3 into the environment record
# in t0 at the register index given by operand 6. Expects t0 =
# JSEnvironmentRecord (set by caller).
macro putClosureVar()
    loadisFromInstruction(3, t1)        # t1 = value operand (register or constant)
    loadConstantOrVariable(t1, t2)      # t2 = the value to store
    loadp JSEnvironmentRecord::m_registers[t0], t0  # t0 = record's register storage
    loadisFromInstruction(6, t1)        # t1 = register index within the record
    storeq t2, [t0, t1, 8]              # registers[index] = value
end
2141
# Like putClosureVar, but for LocalClosureVar: instruction slot 5 may hold a
# per-variable WatchpointSet that must be notified before the store (null
# means no watchpoint exists for this variable). notifyWrite bails to
# .pDynamic (a label at this macro's use site inside op_put_to_scope).
# Expects t0 = JSEnvironmentRecord (set by caller).
macro putLocalClosureVar()
    loadisFromInstruction(3, t1)        # t1 = value operand (register or constant)
    loadConstantOrVariable(t1, t2)      # t2 = the value to store
    loadpFromInstruction(5, t3)         # t3 = VariableWatchpointSet* (may be null)
    btpz t3, .noVariableWatchpointSet   # no watchpoint set => store directly
    notifyWrite(t3, t2, t1, .pDynamic)  # fire/validate watchpoint; slow path if needed
.noVariableWatchpointSet:
    loadp JSEnvironmentRecord::m_registers[t0], t0  # t0 = record's register storage
    loadisFromInstruction(6, t1)        # t1 = register index within the record
    storeq t2, [t0, t1, 8]              # registers[index] = value
end
2153
2154
# op_put_to_scope: store a variable into a previously-resolved scope,
# dispatching on the ResolveMode bits of operand 4. Each case executes the
# GC write barrier first, then performs the type-specific store; the
# *WithVarInjectionChecks variants additionally verify the var-injection
# watchpoint. Any failed check or unknown mode falls back to the slow path.
# Instruction length: 7 slots (see dispatch(7)).
_llint_op_put_to_scope:
    traceExecution()
    loadisFromInstruction(4, t0)        # t0 = operand 4 (resolve type + mode)
    andi ResolveModeMask, t0            # keep just the resolve-mode bits

#pLocalClosureVar:
    bineq t0, LocalClosureVar, .pGlobalProperty
    writeBarrierOnOperands(1, 3)        # barrier: storing operand 3 into operand 1's cell
    loadVariable(1, t0)                 # t0 = the scope (environment record)
    putLocalClosureVar()
    dispatch(7)

.pGlobalProperty:
    bineq t0, GlobalProperty, .pGlobalVar
    writeBarrierOnOperands(1, 3)
    loadWithStructureCheck(1, .pDynamic) # t0 = scope object; bail on structure mismatch
    putProperty()
    dispatch(7)

.pGlobalVar:
    bineq t0, GlobalVar, .pClosureVar
    writeBarrierOnGlobalObject(3)       # barrier: storing operand 3 into the global object
    putGlobalVar()
    dispatch(7)

.pClosureVar:
    bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
    writeBarrierOnOperands(1, 3)
    loadVariable(1, t0)                 # t0 = the scope (environment record)
    putClosureVar()
    dispatch(7)

.pGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
    writeBarrierOnOperands(1, 3)
    loadWithStructureCheck(1, .pDynamic) # structure check subsumes the injection check here
    putProperty()
    dispatch(7)

.pGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
    writeBarrierOnGlobalObject(3)
    varInjectionCheck(.pDynamic)        # injected var => must store via slow path
    putGlobalVar()
    dispatch(7)

.pClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
    writeBarrierOnOperands(1, 3)
    varInjectionCheck(.pDynamic)
    loadVariable(1, t0)
    putClosureVar()
    dispatch(7)

.pDynamic:
    # Unknown/dynamic mode, or an inline-cache/injection/watchpoint check failed.
    callSlowPath(_llint_slow_path_put_to_scope)
    dispatch(7)
2212
# op_profile_type: append an entry to the VM's TypeProfilerLog recording the
# value of operand 1, its TypeLocation (operand 2), and — when the value is
# a cell — its StructureID (0 for non-cells). When the bump of the current
# log-entry pointer reaches the end of the log buffer, the slow path is
# called to flush/clear the log. Instruction length: 6 slots.
_llint_op_profile_type:
    traceExecution()
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    # t1 is holding the pointer to the typeProfilerLog.
    loadp VM::m_typeProfilerLog[t1], t1
    # t2 is holding the pointer to the current log entry.
    loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2

    # t0 is holding the JSValue argument.
    loadisFromInstruction(1, t3)
    loadConstantOrVariable(t3, t0)

    # Store the JSValue onto the log entry.
    storeq t0, TypeProfilerLog::LogEntry::value[t2]
    
    # Store the TypeLocation onto the log entry.
    loadpFromInstruction(2, t3)
    storep t3, TypeProfilerLog::LogEntry::location[t2]

    # Cells have no tag bits set; record their StructureID, else record 0.
    btqz t0, tagMask, .opProfileTypeIsCell
    storei 0, TypeProfilerLog::LogEntry::structureID[t2]
    jmp .opProfileTypeSkipIsCell
.opProfileTypeIsCell:
    loadi JSCell::m_structureID[t0], t3
    storei t3, TypeProfilerLog::LogEntry::structureID[t2]
.opProfileTypeSkipIsCell:
    
    # Increment the current log entry.
    addp sizeof TypeProfilerLog::LogEntry, t2
    storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]

    # If the log is now full, have the slow path process and clear it.
    loadp TypeProfilerLog::m_logEndPtr[t1], t1
    bpneq t2, t1, .opProfileTypeDone
    callSlowPath(_slow_path_profile_type_clear_log)

.opProfileTypeDone:
    dispatch(6)