arguments[-1] should have well-defined behavior
[WebKit-https.git] / Source / JavaScriptCore / llint / LowLevelInterpreter64.asm
1 # Copyright (C) 2011-2015 Apple Inc. All rights reserved.
2 #
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
5 # are met:
6 # 1. Redistributions of source code must retain the above copyright
7 #    notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 #    notice, this list of conditions and the following disclaimer in the
10 #    documentation and/or other materials provided with the distribution.
11 #
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
23
24
25 # Utilities.
# Jump to the handler for the current bytecode instruction. PB holds the base
# of the instruction stream and PC is a word index into it (hence the *8
# scale); the slot at [PB, PC, 8] holds the opcode handler address.
26 macro jumpToInstruction()
27     jmp [PB, PC, 8]
28 end
29
# Advance PC by `advance` instruction-stream slots (pointer-width add) and
# dispatch to the next opcode.
30 macro dispatch(advance)
31     addp advance, PC
32     jumpToInstruction()
33 end
34
# Same as dispatch(), but `advance` is a 32-bit quantity (e.g. a jump offset
# loaded from the instruction stream), so use a 32-bit add.
35 macro dispatchInt(advance)
36     addi advance, PC
37     jumpToInstruction()
38 end
39
# Dispatch using an advance count read from operand `offset` of the current
# instruction (offset is in slots, hence the *8 displacement).
40 macro dispatchIntIndirect(offset)
41     dispatchInt(offset * 8[PB, PC, 8])
42 end
43
# Resume interpretation after a call opcode returned. The callee may have
# clobbered PB/PC, so reload them: PC was stashed in the ArgumentCount tag
# slot, PB comes from the CodeBlock. Then store the call's result (in t0)
# into the destination operand, value-profile it, and skip past the call op.
44 macro dispatchAfterCall()
45     loadi ArgumentCount + TagOffset[cfr], PC
46     loadp CodeBlock[cfr], PB
47     loadp CodeBlock::m_instructions[PB], PB
48     loadisFromInstruction(1, t1)
49     storeq t0, [cfr, t1, 8]
50     valueProfile(t0, (CallOpCodeSize - 1), t2)
51     dispatch(CallOpCodeSize)
52 end
53
# Call a C function with two arguments, marshalling them into the first two
# argument registers of the target ABI (SysV: rdi/rsi via t4/t5; ARM64:
# x0/x1 via t0/t1). Win64 is special-cased below because of its aggregate
# return convention. Clobbers the C-volatile registers of the platform.
54 macro cCall2(function, arg1, arg2)
55     checkStackPointerAlignment(t4, 0xbad0c002)
56     if X86_64
57         move arg1, t4
58         move arg2, t5
59         call function
60     elsif X86_64_WIN
61         # Note: this implementation is only correct if the return type size is > 8 bytes.
62         # See macro cCall2Void for an implementation when the return type <= 8 bytes.
63         # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
64         # On entry rcx (t2), should contain a pointer to this stack space. The other parameters are shifted to the right,
65         # rdx (t1) should contain the first argument, and r8 (t6) should contain the second argument.
66         # On return, rax contains a pointer to this stack value, and we then need to copy the 16 byte return value into rax (t0) and rdx (t1)
67         # since the return value is expected to be split between the two.
68         # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
69         move arg1, t1
70         move arg2, t6
71         subp 48, sp
72         move sp, t2
73         addp 32, t2
74         call function
75         addp 48, sp
76         move 8[t0], t1
77         move [t0], t0
78     elsif ARM64
79         move arg1, t0
80         move arg2, t1
81         call function
82     elsif C_LOOP
83         cloopCallSlowPath function, arg1, arg2
84     else
85         error
86     end
87 end
88
# Call a two-argument C function whose return value is ignored. Only Win64
# and C_LOOP need their own code paths; all other platforms just reuse
# cCall2 (the unused return value is harmless there).
89 macro cCall2Void(function, arg1, arg2)
90     if C_LOOP
91         cloopCallSlowPathVoid function, arg1, arg2
92     elsif X86_64_WIN
93         # Note: we cannot use the cCall2 macro for Win64 in this case,
94         # as the Win64 cCall2 implemenation is only correct when the return type size is > 8 bytes.
95         # On Win64, rcx and rdx are used for passing the first two parameters.
96         # We also need to make room on the stack for all four parameter registers.
97         # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
98         move arg2, t1
99         move arg1, t2
100         subp 32, sp 
101         call function
102         addp 32, sp 
103     else
104         cCall2(function, arg1, arg2)
105     end
106 end
107
108 # This barely works. arg3 and arg4 should probably be immediates.
# Call a C function with four arguments, marshalled into the platform's
# first four argument registers. Not supported under C_LOOP.
109 macro cCall4(function, arg1, arg2, arg3, arg4)
110     checkStackPointerAlignment(t4, 0xbad0c004)
111     if X86_64
112         move arg1, t4
113         move arg2, t5
114         move arg3, t1
115         move arg4, t2
116         call function
117     elsif X86_64_WIN
118         # On Win64, rcx, rdx, r8, and r9 are used for passing the first four parameters.
119         # We also need to make room on the stack for all four parameter registers.
120         # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
121         move arg1, t2
122         move arg2, t1
123         move arg3, t6
124         move arg4, t7
125         subp 32, sp 
126         call function
127         addp 32, sp 
128     elsif ARM64
129         move arg1, t0
130         move arg2, t1
131         move arg3, t2
132         move arg4, t3
133         call function
134     elsif C_LOOP
135         error
136     else
137         error
138     end
139 end
140
# Common body for the C++ -> JS entry trampolines. Builds a VMEntryRecord on
# the stack (saving the VM's previous topCallFrame/topVMEntryFrame), checks
# stack capacity, copies the header slots and arguments out of the
# ProtoCallFrame, then invokes `makeCall` (either a JS call or a host
# function call). On return it restores the saved VM state and tears the
# entry frame down. The platform-specific const aliases below name the
# incoming argument registers and the scratch registers used throughout.
141 macro doVMEntry(makeCall)
142     if X86_64
143         const entry = t4
144         const vm = t5
145         const protoCallFrame = t1
146
147         const previousCFR = t0
148         const previousPC = t6
149         const temp1 = t0
150         const temp2 = t3
151         const temp3 = t6
152     elsif X86_64_WIN
153         const entry = t2
154         const vm = t1
155         const protoCallFrame = t6
156
157         const previousCFR = t0
158         const previousPC = t4
159         const temp1 = t0
160         const temp2 = t3
161         const temp3 = t7
162     elsif ARM64 or C_LOOP
163         const entry = a0
164         const vm = a1
165         const protoCallFrame = a2
166
167         const previousCFR = t5
168         const previousPC = lr
169         const temp1 = t3
170         const temp2 = t4
171         const temp3 = t6
172     end
173
174     functionPrologue()
175     pushCalleeSaves()
176
177     vmEntryRecord(cfr, sp)
178
179     checkStackPointerAlignment(temp2, 0xbad0dc01)
180
    # Record the VM and its previous top frames in the VMEntryRecord so they
    # can be restored on exit (and by exception unwinding).
181     storep vm, VMEntryRecord::m_vm[sp]
182     loadp VM::topCallFrame[vm], temp2
183     storep temp2, VMEntryRecord::m_prevTopCallFrame[sp]
184     loadp VM::topVMEntryFrame[vm], temp2
185     storep temp2, VMEntryRecord::m_prevTopVMEntryFrame[sp]
186
    # temp1 = prospective new sp: current sp minus
    # (paddedArgCount + CallFrameHeaderSlots) * 8 bytes.
187     loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
188     addp CallFrameHeaderSlots, temp2, temp2
189     lshiftp 3, temp2
190     subp sp, temp2, temp1
191
192     # Ensure that we have enough additional stack capacity for the incoming args,
193     # and the frame for the JS code we're executing. We need to do this check
194     # before we start copying the args from the protoCallFrame below.
195     bpaeq temp1, VM::m_jsStackLimit[vm], .stackHeightOK
196
197     if C_LOOP
        # The C loop can grow its stack; entry/vm must be preserved across
        # the slow-path call because they alias temp registers.
198         move entry, temp2
199         move vm, temp3
200         cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, temp1
201         bpeq t0, 0, .stackCheckFailed
202         move temp2, entry
203         move temp3, vm
204         jmp .stackHeightOK
205
206 .stackCheckFailed:
207         move temp2, entry
208         move temp3, vm
209     end
210
    # Stack overflow: throw, then unwind this entry frame and return.
211     cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame)
212
213     vmEntryRecord(cfr, temp2)
214
215     loadp VMEntryRecord::m_vm[temp2], vm
216     loadp VMEntryRecord::m_prevTopCallFrame[temp2], temp3
217     storep temp3, VM::topCallFrame[vm]
218     loadp VMEntryRecord::m_prevTopVMEntryFrame[temp2], temp3
219     storep temp3, VM::topVMEntryFrame[vm]
220
221     subp cfr, CalleeRegisterSaveSize, sp
222
223     popCalleeSaves()
224     functionEpilogue()
225     ret
226
227 .stackHeightOK:
228     move temp1, sp
    # Copy the 4 call-frame header slots from the protoCallFrame.
229     move 4, temp1
230
231 .copyHeaderLoop:
232     subi 1, temp1
233     loadq [protoCallFrame, temp1, 8], temp3
234     storeq temp3, CodeBlock[sp, temp1, 8]
235     btinz temp1, .copyHeaderLoop
236
    # temp2 = argCount - 1 (actual), temp3 = paddedArgCount - 1; if they
    # differ, fill the padding slots with undefined before copying args.
237     loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
238     subi 1, temp2
239     loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
240     subi 1, temp3
241
242     bieq temp2, temp3, .copyArgs
243     move ValueUndefined, temp1
244 .fillExtraArgsLoop:
245     subi 1, temp3
246     storeq temp1, ThisArgumentOffset + 8[sp, temp3, 8]
247     bineq temp2, temp3, .fillExtraArgsLoop
248
249 .copyArgs:
250     loadp ProtoCallFrame::args[protoCallFrame], temp1
251
    # Copy the actual arguments (this + args) into the new frame.
252 .copyArgsLoop:
253     btiz temp2, .copyArgsDone
254     subi 1, temp2
255     loadq [temp1, temp2, 8], temp3
256     storeq temp3, ThisArgumentOffset + 8[sp, temp2, 8]
257     jmp .copyArgsLoop
258
259 .copyArgsDone:
260     if ARM64
        # ARM64 cannot store sp directly to memory; go through a scratch reg.
261         move sp, temp2
262         storep temp2, VM::topCallFrame[vm]
263     else
264         storep sp, VM::topCallFrame[vm]
265     end
266     storep cfr, VM::topVMEntryFrame[vm]
267
    # NOTE(review): presumably materializes the JSValue tag constants into
    # the callee-save tag registers (csr1 = 0xffff000000000000, csr2 =
    # csr1 + 2) used as tagTypeNumber/tagMask elsewhere — confirm against
    # the register aliases in LowLevelInterpreter.asm.
268     move 0xffff000000000000, csr1
269     addp 2, csr1, csr2
270
271     checkStackPointerAlignment(temp3, 0xbad0dc02)
272
273     makeCall(entry, temp1)
274
275     checkStackPointerAlignment(temp3, 0xbad0dc03)
276
    # Normal return: restore the VM's previous top frames and tear down.
277     vmEntryRecord(cfr, temp2)
278
279     loadp VMEntryRecord::m_vm[temp2], vm
280     loadp VMEntryRecord::m_prevTopCallFrame[temp2], temp3
281     storep temp3, VM::topCallFrame[vm]
282     loadp VMEntryRecord::m_prevTopVMEntryFrame[temp2], temp3
283     storep temp3, VM::topVMEntryFrame[vm]
284
285     subp cfr, CalleeRegisterSaveSize, sp
286
287     popCalleeSaves()
288     functionEpilogue()
289
290     ret
291 end
292
293
# makeCall implementation for calling JS code from doVMEntry. The sp is
# adjusted by 16 around the call — NOTE(review): presumably to skip the two
# slots reserved for the callee frame linkage; confirm against the entry
# trampoline's frame layout. `temp` is unused here.
294 macro makeJavaScriptCall(entry, temp)
295     addp 16, sp
296     if C_LOOP
297         cloopCallJSFunction entry
298     else
299         call entry
300     end
301     subp 16, sp
302 end
303
304
# makeCall implementation for calling a host (native C) function from
# doVMEntry. Stores cfr at [sp] and passes sp as the first C argument in the
# platform's first argument register; Win64 additionally reserves its
# 32-byte shadow space around the call.
305 macro makeHostFunctionCall(entry, temp)
306     move entry, temp
307     storep cfr, [sp]
308     if X86_64
309         move sp, t4
310     elsif X86_64_WIN
311         move sp, t2
312     elsif ARM64 or C_LOOP
313         move sp, a0
314     end
315     if C_LOOP
316         storep lr, 8[sp]
317         cloopCallNative temp
318     elsif X86_64_WIN
319         # We need to allocate 32 bytes on the stack for the shadow space.
320         subp 32, sp
321         call temp
322         addp 32, sp
323     else
324         call temp
325     end
326 end
327
328
# Landing pad for an exception that propagates all the way out of a VM entry
# frame. Recovers the VM pointer from the callee cell (via its MarkedBlock),
# switches to the frame recorded in VM::callFrameForThrow, restores the
# previous topCallFrame/topVMEntryFrame from the VMEntryRecord, and returns
# to the C++ caller of the entry trampoline.
329 _handleUncaughtException:
330     loadp Callee[cfr], t3
331     andp MarkedBlockMask, t3
332     loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
333     loadp VM::callFrameForThrow[t3], cfr
334
335     loadp CallerFrame[cfr], cfr
336     vmEntryRecord(cfr, t2)
337
338     loadp VMEntryRecord::m_vm[t2], t3
339     loadp VMEntryRecord::m_prevTopCallFrame[t2], t5
340     storep t5, VM::topCallFrame[t3]
341     loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], t5
342     storep t5, VM::topVMEntryFrame[t3]
343
344     subp cfr, CalleeRegisterSaveSize, sp
345
346     popCalleeSaves()
347     functionEpilogue()
348     ret
349
349
350
# Before calling into C: turn PC from an index into an absolute instruction
# pointer (PB + PC*8) and stash PB in t3 so restoreStateAfterCCall can undo
# the transformation.
351 macro prepareStateForCCall()
352     leap [PB, PC, 8], PC
353     move PB, t3
354 end
355
# After a C call: the slow path returns the (possibly updated) absolute
# instruction pointer in t0. Recover PB from t3 and convert the pointer back
# to an index: PC = (t0 - PB) / 8.
356 macro restoreStateAfterCCall()
357     move t0, PC
358     move t3, PB
359     subp PB, PC
360     rshiftp 3, PC
361 end
362
# Call a slow-path C function with (cfr, PC) and restore interpreter state
# from its return value.
363 macro callSlowPath(slowPath)
364     prepareStateForCCall()
365     cCall2(slowPath, cfr, PC)
366     restoreStateAfterCCall()
367 end
368
# Debug tracing helper: report an operand of the current instruction to the
# C tracing slow path.
369 macro traceOperand(fromWhere, operand)
370     prepareStateForCCall()
371     cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
372     restoreStateAfterCCall()
373 end
374
# Debug tracing helper: report a JSValue to the C tracing slow path.
375 macro traceValue(fromWhere, operand)
376     prepareStateForCCall()
377     cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
378     restoreStateAfterCCall()
379 end
380
381 # Call a slow path for call call opcodes.
# PC is saved in the ArgumentCount tag slot (so dispatchAfterCall can reload
# it); `action` receives the slow path's return value in t0.
382 macro callCallSlowPath(slowPath, action)
383     storei PC, ArgumentCount + TagOffset[cfr]
384     prepareStateForCCall()
385     cCall2(slowPath, cfr, PC)
386     action(t0)
387 end
388
# Invoke the watchdog-timer slow path. A non-zero return in t0 means a
# termination exception should be thrown (jump to throwHandler); otherwise
# restore PB from t3 and reload PC from the saved slot.
389 macro callWatchdogTimerHandler(throwHandler)
390     storei PC, ArgumentCount + TagOffset[cfr]
391     prepareStateForCCall()
392     cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
393     btpnz t0, throwHandler
394     move t3, PB
395     loadi ArgumentCount + TagOffset[cfr], PC
396 end
397
# At loop back-edges, consider tiering up via OSR. If _llint_loop_osr
# returns a non-zero entry point in t0 (with the new sp in t1), jump into
# the JIT-compiled code; otherwise recover PB/PC and keep interpreting.
398 macro checkSwitchToJITForLoop()
399     checkSwitchToJIT(
400         1,
401         macro()
402             storei PC, ArgumentCount + TagOffset[cfr]
403             prepareStateForCCall()
404             cCall2(_llint_loop_osr, cfr, PC)
405             btpz t0, .recover
406             move t1, sp
407             jmp t0
408         .recover:
409             move t3, PB
410             loadi ArgumentCount + TagOffset[cfr], PC
411         end)
412 end
413
# Load the JSValue stored in the virtual register named by instruction
# operand `operand` into `value`.
414 macro loadVariable(operand, value)
415     loadisFromInstruction(operand, value)
416     loadq [cfr, value, 8], value
417 end
418
419 # Index and value must be different registers. Index may be clobbered.
# Resolve an operand index to a JSValue: indices >= FirstConstantRegisterIndex
# refer to the CodeBlock's constant pool, everything else is a frame slot.
420 macro loadConstantOrVariable(index, value)
421     bpgteq index, FirstConstantRegisterIndex, .constant
422     loadq [cfr, index, 8], value
423     jmp .done
424 .constant:
425     loadp CodeBlock[cfr], value
426     loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value
427     subp FirstConstantRegisterIndex, index
428     loadq [value, index, 8], value
429 .done:
430 end
431
# Like loadConstantOrVariable, but branch to `slow` unless the value is a
# boxed int32 (i.e. unsigned-below tagTypeNumber means not an int).
432 macro loadConstantOrVariableInt32(index, value, slow)
433     loadConstantOrVariable(index, value)
434     bqb value, tagTypeNumber, slow
435 end
436
# Like loadConstantOrVariable, but branch to `slow` unless the value is a
# cell (no tag bits set).
437 macro loadConstantOrVariableCell(index, value, slow)
438     loadConstantOrVariable(index, value)
439     btqnz value, tagMask, slow
440 end
441
# Generational GC write barrier on the cell named by instruction operand
# `cellOperand`. No-op unless GGC is enabled, the operand is a cell, and the
# cell is not already remembered / in Eden. PB/PC are preserved across the
# C barrier call via push/pop.
442 macro writeBarrierOnOperand(cellOperand)
443     if GGC
444         loadisFromInstruction(cellOperand, t1)
445         loadConstantOrVariableCell(t1, t2, .writeBarrierDone)
446         skipIfIsRememberedOrInEden(t2, t1, t3, 
447             macro(gcData)
448                 btbnz gcData, .writeBarrierDone
449                 push PB, PC
450                 cCall2Void(_llint_write_barrier_slow, cfr, t2)
451                 pop PC, PB
452             end
453         )
454     .writeBarrierDone:
455     end
456 end
457
# Write barrier for a store of `valueOperand` into `cellOperand`: the
# barrier is only needed if the stored value is itself a non-null cell.
458 macro writeBarrierOnOperands(cellOperand, valueOperand)
459     if GGC
460         loadisFromInstruction(valueOperand, t1)
461         loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
462         btpz t0, .writeBarrierDone
463     
464         writeBarrierOnOperand(cellOperand)
465     .writeBarrierDone:
466     end
467 end
468
# Write barrier on the CodeBlock's global object for a store of
# `valueOperand`; skipped unless the value is a non-null cell and the global
# object is not already remembered / in Eden.
469 macro writeBarrierOnGlobalObject(valueOperand)
470     if GGC
471         loadisFromInstruction(valueOperand, t1)
472         loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
473         btpz t0, .writeBarrierDone
474     
475         loadp CodeBlock[cfr], t3
476         loadp CodeBlock::m_globalObject[t3], t3
477         skipIfIsRememberedOrInEden(t3, t1, t2,
478             macro(gcData)
479                 btbnz gcData, .writeBarrierDone
480                 push PB, PC
481                 cCall2Void(_llint_write_barrier_slow, cfr, t3)
482                 pop PC, PB
483             end
484         )
485     .writeBarrierDone:
486     end
487 end
488
# Record `value` into the ValueProfile whose pointer is instruction operand
# `operand` (feeds type information to the optimizing JITs).
489 macro valueProfile(value, operand, scratch)
490     loadpFromInstruction(operand, scratch)
491     storeq value, ValueProfile::m_buckets[scratch]
492 end
493
# NOTE(review): this macro has an empty body in this copy of the file —
# possibly truncated by the extraction, or intentionally a no-op here.
# Callers needing the structure use loadStructureWithScratch /
# loadStructureAndClobberFirstArg below. Confirm against upstream source.
494 macro loadStructure(cell, structure)
495 end
496
# Decompress a cell's 32-bit structure ID into a Structure* by indexing the
# VM's StructureIDTable. Clobbers `scratch`; `cell` is preserved.
497 macro loadStructureWithScratch(cell, structure, scratch)
498     loadp CodeBlock[cfr], scratch
499     loadp CodeBlock::m_vm[scratch], scratch
500     loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[scratch], scratch
501     loadi JSCell::m_structureID[cell], structure
502     loadp [scratch, structure, 8], structure
503 end
504
# Variant of loadStructureWithScratch that needs no extra scratch register:
# the structure ID is read first, then `cell` itself is reused to walk to
# the StructureIDTable (so `cell` is clobbered).
505 macro loadStructureAndClobberFirstArg(cell, structure)
506     loadi JSCell::m_structureID[cell], structure
507     loadp CodeBlock[cfr], cell
508     loadp CodeBlock::m_vm[cell], cell
509     loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[cell], cell
510     loadp [cell, structure, 8], structure
511 end
512
# Store a cell's structure ID together with its indexing type/type info in
# one 64-bit write of the Structure's ID blob.
513 macro storeStructureWithTypeInfo(cell, structure, scratch)
514     loadq Structure::m_blob + StructureIDBlob::u.doubleWord[structure], scratch
515     storeq scratch, JSCell::m_structureID[cell]
516 end
517
518 # Entrypoints into the interpreter.
519
520 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# If the caller passed fewer arguments than the function's declared
# parameters, call `slowPath` to compute the fixup, then either jump through
# an out-of-line thunk (thunkToCall) or slide the frame down inline to make
# room, filling the new slots with undefined. Falls through to `doneLabel`
# with PB/PC reset.
521 macro functionArityCheck(doneLabel, slowPath)
522     loadi PayloadOffset + ArgumentCount[cfr], t0
523     biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
524     prepareStateForCCall()
525     cCall2(slowPath, cfr, PC)   # This slowPath has the protocol: t0 = 0 => no error, t0 != 0 => error
526     btiz t0, .noError
527     move t1, cfr   # t1 contains caller frame
528     jmp _llint_throw_from_slow_path_trampoline
529
530 .noError:
531     # t1 points to ArityCheckData.
532     loadp CommonSlowPaths::ArityCheckData::thunkToCall[t1], t2
533     btpz t2, .proceedInline
534     
535     loadp CommonSlowPaths::ArityCheckData::returnPC[t1], t7
536     loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t0
537     call t2
538     if ASSERT_ENABLED
539         loadp ReturnPC[cfr], t0
540         loadp [t0], t0
541     end
542     jmp .continue

543
544 .proceedInline:
545     loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t1
546     btiz t1, .continue

547
548     // Move frame up "t1 * 2" slots
549     lshiftp 1, t1
550     negq t1
551     move cfr, t3
552     loadi PayloadOffset + ArgumentCount[cfr], t2
553     addi CallFrameHeaderSlots, t2
554 .copyLoop:
555     loadq [t3], t0
556     storeq t0, [t3, t1, 8]
557     addp 8, t3
558     bsubinz 1, t2, .copyLoop
559
560     // Fill new slots with JSUndefined
561     move t1, t2
562     move ValueUndefined, t0
563 .fillLoop:
564     storeq t0, [t3, t1, 8]
565     addp 8, t3
566     baddinz 1, t2, .fillLoop
567
    # Adjust cfr and sp down by the (negative) slot delta, in bytes.
568     lshiftp 3, t1
569     addp t1, cfr
570     addp t1, sp
571
572 .continue:
573     # Reload CodeBlock and reset PC, since the slow_path clobbered them.
574     loadp CodeBlock[cfr], t1
575     loadp CodeBlock::m_instructions[t1], PB
576     move 0, PC
577     jmp doneLabel
578 end
579
# Jump to `label` if the VM has a pending exception. The VM pointer is
# recovered from the callee cell via its MarkedBlock. Clobbers t3.
580 macro branchIfException(label)
581     loadp Callee[cfr], t3
582     andp MarkedBlockMask, t3
583     loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
584     btqz VM::m_exception[t3], .noException
585     jmp label
586 .noException:
587 end
588
589
590 # Instruction implementations
591
# op_enter: clear all of the frame's local variables (m_numVars slots below
# the header) to undefined by iterating a negative index up to zero, then
# run the slow path (e.g. for profiling/watchpoints) and dispatch.
592 _llint_op_enter:
593     traceExecution()
594     checkStackPointerAlignment(t2, 0xdead00e1)
595     loadp CodeBlock[cfr], t2                // t2<CodeBlock> = cfr.CodeBlock
596     loadi CodeBlock::m_numVars[t2], t2      // t2<size_t> = t2<CodeBlock>.m_numVars
597     btiz t2, .opEnterDone
598     move ValueUndefined, t0
599     negi t2
600     sxi2q t2, t2
601 .opEnterLoop:
602     storeq t0, [cfr, t2, 8]
603     addq 1, t2
604     btqnz t2, .opEnterLoop
605 .opEnterDone:
606     callSlowPath(_slow_path_enter)
607     dispatch(1)
608
609
# op_create_lexical_environment: fully handled by the C slow path.
610 _llint_op_create_lexical_environment:
611     traceExecution()
612     loadisFromInstruction(1, t0)
613     callSlowPath(_llint_slow_path_create_lexical_environment)
614     dispatch(3)
615
616
# op_get_scope: store the callee's scope chain into the destination operand.
617 _llint_op_get_scope:
618     traceExecution()
619     loadp Callee[cfr], t0
620     loadp JSCallee::m_scope[t0], t0
621     loadisFromInstruction(1, t1)
622     storeq t0, [cfr, t1, 8]
623     dispatch(2)
624
625
# op_init_lazy_reg: initialize the operand register to the empty value
# (marks it as not-yet-created for lazily materialized objects).
626 _llint_op_init_lazy_reg:
627     traceExecution()
628     loadisFromInstruction(1, t0)
629     storeq ValueEmpty, [cfr, t0, 8]
630     dispatch(2)
631
632
# op_create_arguments: materialize the arguments object via the slow path,
# but only if the operand register still holds the empty sentinel.
633 _llint_op_create_arguments:
634     traceExecution()
635     loadisFromInstruction(1, t0)
636     bqneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone
637     callSlowPath(_slow_path_create_arguments)
638 .opCreateArgumentsDone:
639     dispatch(3)
640
641
# op_create_this: fast-path allocate the |this| object using the callee
# function's ObjectAllocationProfile (allocator + structure). Falls back to
# the slow path if no allocator is cached or inline allocation fails.
642 _llint_op_create_this:
643     traceExecution()
644     loadisFromInstruction(2, t0)
645     loadp [cfr, t0, 8], t0
646     loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
647     loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
648     btpz t1, .opCreateThisSlow
649     allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
650     loadisFromInstruction(1, t1)
651     storeq t0, [cfr, t1, 8]
652     dispatch(4)
653
654 .opCreateThisSlow:
655     callSlowPath(_slow_path_create_this)
656     dispatch(4)
657
658
# op_get_callee: store the frame's callee into the destination, but only if
# it matches the cached callee in operand 2 (inline-cache check); otherwise
# let the slow path update the cache.
659 _llint_op_get_callee:
660     traceExecution()
661     loadisFromInstruction(1, t0)
662     loadp Callee[cfr], t1
663     loadpFromInstruction(2, t2)
664     bpneq t1, t2, .opGetCalleeSlow
665     storep t1, [cfr, t0, 8]
666     dispatch(3)
667
668 .opGetCalleeSlow:
669     callSlowPath(_slow_path_get_callee)
670     dispatch(3)
671
# op_to_this: fast path succeeds when |this| is already a final object whose
# structure matches the cached structure in operand 2; anything else
# (non-cell, non-final-object, structure mismatch) goes to the slow path.
672 _llint_op_to_this:
673     traceExecution()
674     loadisFromInstruction(1, t0)
675     loadq [cfr, t0, 8], t0
676     btqnz t0, tagMask, .opToThisSlow
677     bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
678     loadStructureWithScratch(t0, t1, t2)
679     loadpFromInstruction(2, t2)
680     bpneq t1, t2, .opToThisSlow
681     dispatch(4)
682
683 .opToThisSlow:
684     callSlowPath(_slow_path_to_this)
685     dispatch(4)
686
687
# op_new_object: inline-allocate an empty object from the instruction's
# ObjectAllocationProfile; slow path on allocation failure.
688 _llint_op_new_object:
689     traceExecution()
690     loadpFromInstruction(3, t0)
691     loadp ObjectAllocationProfile::m_allocator[t0], t1
692     loadp ObjectAllocationProfile::m_structure[t0], t2
693     allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
694     loadisFromInstruction(1, t1)
695     storeq t0, [cfr, t1, 8]
696     dispatch(4)
697
698 .opNewObjectSlow:
699     callSlowPath(_llint_slow_path_new_object)
700     dispatch(4)
701
702
# op_mov: dst = src, where src may be a constant or a frame slot.
703 _llint_op_mov:
704     traceExecution()
705     loadisFromInstruction(2, t1)
706     loadisFromInstruction(1, t0)
707     loadConstantOrVariable(t1, t2)
708     storeq t2, [cfr, t0, 8]
709     dispatch(3)
710
711
# Variable watchpoint notification: if the set is already invalidated,
# nothing to do; otherwise branch to `slow` when the written value differs
# from the set's inferred value (the slow path will fire the watchpoint).
712 macro notifyWrite(set, value, scratch, slow)
713     loadb VariableWatchpointSet::m_state[set], scratch
714     bieq scratch, IsInvalidated, .done
715     bqneq value, VariableWatchpointSet::m_inferredValue[set], slow
716 .done:
717 end
718
# op_not: boolean negation. XOR with ValueFalse maps true/false onto 1/0;
# if any other bits remain the operand was not a boolean -> slow path.
# XOR with ValueTrue then flips the boolean bit back into a boxed value.
719 _llint_op_not:
720     traceExecution()
721     loadisFromInstruction(2, t0)
722     loadisFromInstruction(1, t1)
723     loadConstantOrVariable(t0, t2)
724     xorq ValueFalse, t2
725     btqnz t2, ~1, .opNotSlow
726     xorq ValueTrue, t2
727     storeq t2, [cfr, t1, 8]
728     dispatch(3)
729
730 .opNotSlow:
731     callSlowPath(_slow_path_not)
732     dispatch(3)
733
734
# Shared body for op_eq/op_neq: compare two boxed int32s with the supplied
# comparison macro and box the boolean result (orq ValueFalse). Non-int32
# operands take the slow path.
735 macro equalityComparison(integerComparison, slowPath)
736     traceExecution()
737     loadisFromInstruction(3, t0)
738     loadisFromInstruction(2, t2)
739     loadisFromInstruction(1, t3)
740     loadConstantOrVariableInt32(t0, t1, .slow)
741     loadConstantOrVariableInt32(t2, t0, .slow)
742     integerComparison(t0, t1, t0)
743     orq ValueFalse, t0
744     storeq t0, [cfr, t3, 8]
745     dispatch(4)
746
747 .slow:
748     callSlowPath(slowPath)
749     dispatch(4)
end
751
# op_eq / op_neq: loose (in)equality, int32 fast path via equalityComparison.
752 _llint_op_eq:
753     equalityComparison(
754         macro (left, right, result) cieq left, right, result end,
755         _slow_path_eq)


756
757
758 _llint_op_neq:
759     equalityComparison(
760         macro (left, right, result) cineq left, right, result end,
761         _slow_path_neq)
762
763
# Shared body for op_eq_null/op_neq_null: leaves 1 in t0 if operand 2
# equals null/undefined, else 0. Cells are only "null-like" if they
# masquerade as undefined AND belong to this code block's global object;
# immediates are normalized (strip the undefined tag bit) and compared
# against ValueNull.
764 macro equalNullComparison()
765     loadisFromInstruction(2, t0)
766     loadq [cfr, t0, 8], t0
767     btqnz t0, tagMask, .immediate
768     btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
769     move 0, t0
770     jmp .done
771 .masqueradesAsUndefined:
772     loadStructureWithScratch(t0, t2, t1)
773     loadp CodeBlock[cfr], t0
774     loadp CodeBlock::m_globalObject[t0], t0
775     cpeq Structure::m_globalObject[t2], t0, t0
776     jmp .done
777 .immediate:
778     andq ~TagBitUndefined, t0
779     cqeq t0, ValueNull, t0
780 .done:
781 end
782
# op_eq_null: box the 0/1 comparison result as a JS boolean.
783 _llint_op_eq_null:
784     traceExecution()
785     equalNullComparison()
786     loadisFromInstruction(1, t1)
787     orq ValueFalse, t0
788     storeq t0, [cfr, t1, 8]
789     dispatch(3)
790
791
# op_neq_null: as op_eq_null but the xor with ValueTrue both inverts the
# 0/1 result and boxes it as a JS boolean in one step.
792 _llint_op_neq_null:
793     traceExecution()
794     equalNullComparison()
795     loadisFromInstruction(1, t1)
796     xorq ValueTrue, t0
797     storeq t0, [cfr, t1, 8]
798     dispatch(3)
799
800
# Shared body for op_stricteq/op_nstricteq. Fast path covers two operands
# that are both non-cells and, if numeric, both int32s (doubles and cells
# go to the slow path so NaN/value semantics and string equality are handled
# in C). The raw 64-bit comparison is then a correct strict equality.
801 macro strictEq(equalityOperation, slowPath)
802     traceExecution()
803     loadisFromInstruction(3, t0)
804     loadisFromInstruction(2, t2)
805     loadConstantOrVariable(t0, t1)
806     loadConstantOrVariable(t2, t0)
807     move t0, t2
808     orq t1, t2
809     btqz t2, tagMask, .slow
810     bqaeq t0, tagTypeNumber, .leftOK
811     btqnz t0, tagTypeNumber, .slow
812 .leftOK:
813     bqaeq t1, tagTypeNumber, .rightOK
814     btqnz t1, tagTypeNumber, .slow
815 .rightOK:
816     equalityOperation(t0, t1, t0)
817     loadisFromInstruction(1, t1)
818     orq ValueFalse, t0
819     storeq t0, [cfr, t1, 8]
820     dispatch(4)
821
822 .slow:
823     callSlowPath(slowPath)
824     dispatch(4)
825 end
826
# op_stricteq / op_nstricteq: strict (in)equality via the strictEq template.
827 _llint_op_stricteq:
828     strictEq(
829         macro (left, right, result) cqeq left, right, result end,
830         _slow_path_stricteq)


831
832
833 _llint_op_nstricteq:
834     strictEq(
835         macro (left, right, result) cqneq left, right, result end,
836         _slow_path_nstricteq)
837
838
# Shared body for op_inc/op_dec: in-place int32 arithmetic on a frame slot.
# Slow path on non-int32 operands or on overflow (signalled by the
# arithmeticOperation macro's branch).
839 macro preOp(arithmeticOperation, slowPath)
840     traceExecution()
841     loadisFromInstruction(1, t0)
842     loadq [cfr, t0, 8], t1
843     bqb t1, tagTypeNumber, .slow
844     arithmeticOperation(t1, .slow)
845     orq tagTypeNumber, t1
846     storeq t1, [cfr, t0, 8]
847     dispatch(2)
848
849 .slow:
850     callSlowPath(slowPath)
851     dispatch(2)
852 end
853
# op_inc / op_dec: overflow-checked int32 increment/decrement via preOp.
854 _llint_op_inc:
855     preOp(
856         macro (value, slow) baddio 1, value, slow end,
857         _slow_path_inc)


858
859
860 _llint_op_dec:
861     preOp(
862         macro (value, slow) bsubio 1, value, slow end,
863         _slow_path_dec)
864
865
# op_to_number: if the operand is already a number (int32 or double), just
# copy it to the destination; otherwise convert via the slow path.
866 _llint_op_to_number:
867     traceExecution()
868     loadisFromInstruction(2, t0)
869     loadisFromInstruction(1, t1)
870     loadConstantOrVariable(t0, t2)
871     bqaeq t2, tagTypeNumber, .opToNumberIsImmediate
872     btqz t2, tagTypeNumber, .opToNumberSlow
873 .opToNumberIsImmediate:
874     storeq t2, [cfr, t1, 8]
875     dispatch(3)
876
877 .opToNumberSlow:
878     callSlowPath(_slow_path_to_number)
879     dispatch(3)
880
881
# op_negate: int32 fast path negates the payload (0 and INT_MIN go slow —
# the btiz mask check rejects both 0, which must produce -0.0, and
# 0x80000000, which would overflow). Double fast path flips the sign bit.
882 _llint_op_negate:
883     traceExecution()
884     loadisFromInstruction(2, t0)
885     loadisFromInstruction(1, t1)
886     loadConstantOrVariable(t0, t2)
887     bqb t2, tagTypeNumber, .opNegateNotInt
888     btiz t2, 0x7fffffff, .opNegateSlow
889     negi t2
890     orq tagTypeNumber, t2
891     storeq t2, [cfr, t1, 8]
892     dispatch(3)
893 .opNegateNotInt:
894     btqz t2, tagTypeNumber, .opNegateSlow
895     xorq 0x8000000000000000, t2
896     storeq t2, [cfr, t1, 8]
897     dispatch(3)
898
899 .opNegateSlow:
900     callSlowPath(_slow_path_negate)
901     dispatch(3)
902
903
# Template for binary arithmetic ops. Loads both operands (t0 = op1,
# t1 = op2); if both are boxed int32s, run integerOperationAndStore (which
# stores the boxed result itself, so e.g. mul can use custom overflow
# handling). Otherwise unbox to doubles (addq tagTypeNumber undoes the
# double encoding offset), run doubleOperation, re-box and store. Any
# non-number operand takes the slow path.
904 macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
905     loadisFromInstruction(3, t0)
906     loadisFromInstruction(2, t2)
907     loadConstantOrVariable(t0, t1)
908     loadConstantOrVariable(t2, t0)
909     bqb t0, tagTypeNumber, .op1NotInt
910     bqb t1, tagTypeNumber, .op2NotInt
911     loadisFromInstruction(1, t2)
912     integerOperationAndStore(t1, t0, .slow, t2)
913     dispatch(5)
914
915 .op1NotInt:
916     # First operand is definitely not an int, the second operand could be anything.
917     btqz t0, tagTypeNumber, .slow
918     bqaeq t1, tagTypeNumber, .op1NotIntOp2Int
919     btqz t1, tagTypeNumber, .slow
920     addq tagTypeNumber, t1
921     fq2d t1, ft1
922     jmp .op1NotIntReady
923 .op1NotIntOp2Int:
924     ci2d t1, ft1
925 .op1NotIntReady:
926     loadisFromInstruction(1, t2)
927     addq tagTypeNumber, t0
928     fq2d t0, ft0
929     doubleOperation(ft1, ft0)
930     fd2q ft0, t0
931     subq tagTypeNumber, t0
932     storeq t0, [cfr, t2, 8]
933     dispatch(5)
934
935 .op2NotInt:
936     # First operand is definitely an int, the second is definitely not.
937     loadisFromInstruction(1, t2)
938     btqz t1, tagTypeNumber, .slow
939     ci2d t0, ft0
940     addq tagTypeNumber, t1
941     fq2d t1, ft1
942     doubleOperation(ft1, ft0)
943     fd2q ft0, t0
944     subq tagTypeNumber, t0
945     storeq t0, [cfr, t2, 8]
946     dispatch(5)
947
948 .slow:
949     callSlowPath(slowPath)
950     dispatch(5)
951 end
952
# Convenience wrapper over binaryOpCustomStore for ops whose int32 result is
# stored the standard way: run the checked integer op, box with
# tagTypeNumber, store to the destination slot.
953 macro binaryOp(integerOperation, doubleOperation, slowPath)
954     binaryOpCustomStore(
955         macro (left, right, slow, index)
956             integerOperation(left, right, slow)
957             orq tagTypeNumber, right
958             storeq right, [cfr, index, 8]
959         end,
960         doubleOperation, slowPath)
961 end
962
# op_add: overflow-checked int32 add / double add.
963 _llint_op_add:
964     traceExecution()
965     binaryOp(
966         macro (left, right, slow) baddio left, right, slow end,
967         macro (left, right) addd left, right end,
968         _slow_path_add)
969
970
# op_mul: overflow-checked int32 multiply. A zero result with either operand
# negative must produce -0.0, so that case also goes to the slow path.
971 _llint_op_mul:
972     traceExecution()
973     binaryOpCustomStore(
974         macro (left, right, slow, index)
975             # Assume t3 is scratchable.
976             move right, t3
977             bmulio left, t3, slow
978             btinz t3, .done
979             bilt left, 0, slow
980             bilt right, 0, slow
981         .done:
982             orq tagTypeNumber, t3
983             storeq t3, [cfr, index, 8]
984         end,
985         macro (left, right) muld left, right end,
986         _slow_path_mul)
987
988
# op_sub: overflow-checked int32 subtract / double subtract.
989 _llint_op_sub:
990     traceExecution()
991     binaryOp(
992         macro (left, right, slow) bsubio left, right, slow end,
993         macro (left, right) subd left, right end,
994         _slow_path_sub)
995
996
# op_div: x86-64 only int32 fast path using idiv. Guards the cases that must
# not produce an int32: divide by zero, INT_MIN / -1 (overflow), and a zero
# dividend with a negative divisor (-0.0). A non-zero remainder (t1 after
# idivi) also forces the slow path so the result can be a double. Other
# platforms always use the slow path.
997 _llint_op_div:
998     traceExecution()
999     if X86_64 or X86_64_WIN
1000         binaryOpCustomStore(
1001             macro (left, right, slow, index)
1002                 # Assume t3 is scratchable.
1003                 btiz left, slow
1004                 bineq left, -1, .notNeg2TwoThe31DivByNeg1
1005                 bieq right, -2147483648, .slow
1006             .notNeg2TwoThe31DivByNeg1:
1007                 btinz right, .intOK
1008                 bilt left, 0, slow
1009             .intOK:
1010                 move left, t3
1011                 move right, t0
1012                 cdqi
1013                 idivi t3
1014                 btinz t1, slow
1015                 orq tagTypeNumber, t0
1016                 storeq t0, [cfr, index, 8]
1017             end,
1018             macro (left, right) divd left, right end,
1019             _slow_path_div)
1020     else
1021         callSlowPath(_slow_path_div)
1022         dispatch(5)
1023     end
1024
1025
# Template for the bitwise/shift ops: both operands must be boxed int32s
# (else slow path); apply `operation`, re-box the int32 result, store, and
# dispatch by `advance` (shift ops are 4 slots, and/or/xor are 5).
1026 macro bitOp(operation, slowPath, advance)
1027     loadisFromInstruction(3, t0)
1028     loadisFromInstruction(2, t2)
1029     loadisFromInstruction(1, t3)
1030     loadConstantOrVariable(t0, t1)
1031     loadConstantOrVariable(t2, t0)
1032     bqb t0, tagTypeNumber, .slow
1033     bqb t1, tagTypeNumber, .slow
1034     operation(t1, t0)
1035     orq tagTypeNumber, t0
1036     storeq t0, [cfr, t3, 8]
1037     dispatch(advance)
1038
1039 .slow:
1040     callSlowPath(slowPath)
1041     dispatch(advance)
1042 end
1043
1044 _llint_op_lshift:
1045     traceExecution()
1046     bitOp(
1047         macro (left, right) lshifti left, right end,
1048         _slow_path_lshift,
1049         4)
1050
1051
# op_rshift: int32 arithmetic (sign-preserving) right shift via bitOp.
1052 _llint_op_rshift:
1053     traceExecution()
1054     bitOp(
1055         macro (left, right) rshifti left, right end,
1056         _slow_path_rshift,
1057         4)
1058
1059
# op_urshift: int32 logical (zero-fill) right shift via bitOp.
1060 _llint_op_urshift:
1061     traceExecution()
1062     bitOp(
1063         macro (left, right) urshifti left, right end,
1064         _slow_path_urshift,
1065         4)
1066
1067
# op_unsigned: reinterpret operand 2 as an unsigned 32-bit value.  If the
# low 32 bits are non-negative the boxed value can be stored unchanged;
# a negative int32 cannot be represented as a boxed int32 once treated as
# unsigned, so it goes to the slow path (which can make a double).
1068 _llint_op_unsigned:
1069     traceExecution()
1070     loadisFromInstruction(1, t0)        # t0 = destination virtual register
1071     loadisFromInstruction(2, t1)
1072     loadConstantOrVariable(t1, t2)
1073     bilt t2, 0, .opUnsignedSlow         # signed low-32 check
1074     storeq t2, [cfr, t0, 8]
1075     dispatch(3)
1076 .opUnsignedSlow:
1077     callSlowPath(_slow_path_unsigned)
1078     dispatch(3)
1079
1080
# op_bitand: int32 bitwise AND via the shared bitOp fast path.
1081 _llint_op_bitand:
1082     traceExecution()
1083     bitOp(
1084         macro (left, right) andi left, right end,
1085         _slow_path_bitand,
1086         5)
1087
1088
# op_bitxor: int32 bitwise XOR via the shared bitOp fast path.
1089 _llint_op_bitxor:
1090     traceExecution()
1091     bitOp(
1092         macro (left, right) xori left, right end,
1093         _slow_path_bitxor,
1094         5)
1095
1096
# op_bitor: int32 bitwise OR via the shared bitOp fast path.
1097 _llint_op_bitor:
1098     traceExecution()
1099     bitOp(
1100         macro (left, right) ori left, right end,
1101         _slow_path_bitor,
1102         5)
1103
1104
# op_check_has_instance: fast path only when operand 3 is a cell whose
# m_flags has ImplementsDefaultHasInstance set; otherwise the slow path runs
# and may redirect PC (hence dispatch(0): re-dispatch at whatever PC the
# slow path left behind).
1105 _llint_op_check_has_instance:
1106     traceExecution()
1107     loadisFromInstruction(3, t1)
1108     loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow)
1109     btbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
1110     dispatch(5)
1111
1112 .opCheckHasInstanceSlow:
1113     callSlowPath(_llint_slow_path_check_has_instance)
1114     dispatch(0)                          # slow path may have moved PC (it can branch)
1115
1116
# op_instanceof: walk the value's prototype chain looking for the prototype
# operand.  t0 starts at 1 ("true" payload); if the chain ends without a
# match it becomes 0, and orq ValueFalse turns 0/1 into the boxed
# false/true JSValue.
1117 _llint_op_instanceof:
1118     traceExecution()
1119     # Actually do the work.
1120     loadisFromInstruction(3, t0)
1121     loadConstantOrVariableCell(t0, t1, .opInstanceofSlow)
1122     bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow   # prototype must be an object
1123     loadisFromInstruction(2, t0)
1124     loadConstantOrVariableCell(t0, t2, .opInstanceofSlow)
1125     
1126     # Register state: t1 = prototype, t2 = value
1127     move 1, t0
1128 .opInstanceofLoop:
1129     loadStructureAndClobberFirstArg(t2, t3)
1130     loadq Structure::m_prototype[t3], t2     # step to the next prototype
1131     bqeq t2, t1, .opInstanceofDone           # found it: result stays true
1132     btqz t2, tagMask, .opInstanceofLoop      # still a cell: keep walking
1133
1134     move 0, t0                               # hit null/undefined: not an instance
1135 .opInstanceofDone:
1136     orq ValueFalse, t0                       # box 0/1 as false/true
1137     loadisFromInstruction(1, t3)
1138     storeq t0, [cfr, t3, 8]
1139     dispatch(4)
1140
1141 .opInstanceofSlow:
1142     callSlowPath(_llint_slow_path_instanceof)
1143     dispatch(4)
1144
1145
# op_is_undefined: non-cells compare directly against ValueUndefined.  Cells
# are only "undefined" if they masquerade as undefined AND belong to this
# code block's global object (the structure/global-object comparison below).
1146 _llint_op_is_undefined:
1147     traceExecution()
1148     loadisFromInstruction(2, t1)
1149     loadisFromInstruction(1, t2)             # t2 = destination virtual register
1150     loadConstantOrVariable(t1, t0)
1151     btqz t0, tagMask, .opIsUndefinedCell
1152     cqeq t0, ValueUndefined, t3              # t3 = 0/1
1153     orq ValueFalse, t3                       # box as false/true
1154     storeq t3, [cfr, t2, 8]
1155     dispatch(3)
1156 .opIsUndefinedCell:
1157     btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
1158     move ValueFalse, t1                      # ordinary cell: never undefined
1159     storeq t1, [cfr, t2, 8]
1160     dispatch(3)
1161 .masqueradesAsUndefined:
1162     loadStructureWithScratch(t0, t3, t1)
1163     loadp CodeBlock[cfr], t1
1164     loadp CodeBlock::m_globalObject[t1], t1
1165     cpeq Structure::m_globalObject[t3], t1, t0   # masquerades only within its own global object
1166     orq ValueFalse, t0
1167     storeq t0, [cfr, t2, 8]
1168     dispatch(3)
1169
1170
# op_is_boolean: after xor with ValueFalse, the two booleans map to 0 and 1,
# so "is boolean" is exactly "no bits outside bit 0 set" (the ~1 test).
1171 _llint_op_is_boolean:
1172     traceExecution()
1173     loadisFromInstruction(2, t1)
1174     loadisFromInstruction(1, t2)
1175     loadConstantOrVariable(t1, t0)
1176     xorq ValueFalse, t0                      # false -> 0, true -> 1
1177     tqz t0, ~1, t0                           # t0 = 1 iff value was a boolean
1178     orq ValueFalse, t0                       # box 0/1 as false/true
1179     storeq t0, [cfr, t2, 8]
1180     dispatch(3)
1181
1182
# op_is_number: in the 64-bit value encoding, any number (int32 or double)
# has at least one tagTypeNumber bit set; tqnz captures that as 0/1.
1183 _llint_op_is_number:
1184     traceExecution()
1185     loadisFromInstruction(2, t1)
1186     loadisFromInstruction(1, t2)
1187     loadConstantOrVariable(t1, t0)
1188     tqnz t0, tagTypeNumber, t1               # t1 = 1 iff value is a number
1189     orq ValueFalse, t1                       # box as false/true
1190     storeq t1, [cfr, t2, 8]
1191     dispatch(3)
1192
1193
# op_is_string: non-cells are trivially not strings; cells compare their
# JSType against StringType.
1194 _llint_op_is_string:
1195     traceExecution()
1196     loadisFromInstruction(2, t1)
1197     loadisFromInstruction(1, t2)
1198     loadConstantOrVariable(t1, t0)
1199     btqnz t0, tagMask, .opIsStringNotCell
1200     cbeq JSCell::m_type[t0], StringType, t1  # t1 = 0/1
1201     orq ValueFalse, t1                       # box as false/true
1202     storeq t1, [cfr, t2, 8]
1203     dispatch(3)
1204 .opIsStringNotCell:
1205     storeq ValueFalse, [cfr, t2, 8]
1206     dispatch(3)
1207
1208
# loadPropertyAtVariableOffset: read a property given its abstract offset.
# Offsets below firstOutOfLineOffset live inline in the object; larger ones
# live in the butterfly, indexed by the negated offset (out-of-line slots
# grow downward from the butterfly pointer).  Clobbers objectAndStorage.
1209 macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
1210     bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
1211     loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
1212     negi propertyOffsetAsInt                 # out-of-line slots are at negative indices
1213     sxi2q propertyOffsetAsInt, propertyOffsetAsInt
1214     jmp .ready
1215 .isInline:
1216     addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
1217 .ready:
1218     loadq (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8], value
1219 end
1220
1221
# storePropertyAtVariableOffset: write counterpart of
# loadPropertyAtVariableOffset; identical inline/out-of-line addressing.
# Clobbers objectAndStorage.
1222 macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
1223     bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
1224     loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
1225     negi propertyOffsetAsInt                 # out-of-line slots are at negative indices
1226     sxi2q propertyOffsetAsInt, propertyOffsetAsInt
1227     jmp .ready
1228 .isInline:
1229     addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
1230 .ready:
1231     storeq value, (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
1232 end
1233
# op_init_global_const: store operand 2's value through the raw slot pointer
# in operand 1, with a write barrier on the global object first.
1234 _llint_op_init_global_const:
1235     traceExecution()
1236     writeBarrierOnGlobalObject(2)
1237     loadisFromInstruction(2, t1)
1238     loadpFromInstruction(1, t0)              # t0 = pointer to the global's register slot
1239     loadConstantOrVariable(t1, t2)
1240     storeq t2, [t0]
1241     dispatch(5)
1242
1243
# getById: monomorphic inline cache for get_by_id.  Operand 4 holds the
# cached Structure, operand 5 the cached byte offset into the property
# storage; getPropertyStorage abstracts inline vs. out-of-line storage.
1244 macro getById(getPropertyStorage)
1245     traceExecution()
1246     # We only do monomorphic get_by_id caching for now, and we do not modify the
1247     # opcode. We do, however, allow for the cache to change anytime if fails, since
1248     # ping-ponging is free. At best we get lucky and the get_by_id will continue
1249     # to take fast path on the new cache. At worst we take slow path, which is what
1250     # we would have been doing anyway.
1251     loadisFromInstruction(2, t0)
1252     loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
1253     loadStructureWithScratch(t3, t2, t1)
1254     loadpFromInstruction(4, t1)              # t1 = cached structure
1255     bpneq t2, t1, .opGetByIdSlow             # cache miss
1256     getPropertyStorage(
1257         t3,
1258         t0,
1259         macro (propertyStorage, scratch)
1260             loadisFromInstruction(5, t2)     # t2 = cached byte offset
1261             loadisFromInstruction(1, t1)     # t1 = destination virtual register
1262             loadq [propertyStorage, t2], scratch
1263             storeq scratch, [cfr, t1, 8]
1264             valueProfile(scratch, 8, t1)     # operand 8 = value profile
1265             dispatch(9)
1266         end)
1267             
1268     .opGetByIdSlow:
1269         callSlowPath(_llint_slow_path_get_by_id)
1270         dispatch(9)
1271 end
1272
# get_by_id with the cached property stored inline in the object.
1273 _llint_op_get_by_id:
1274     getById(withInlineStorage)
1275
1276
# get_by_id with the cached property stored out-of-line (in the butterfly).
1277 _llint_op_get_by_id_out_of_line:
1278     getById(withOutOfLineStorage)
1279
1280
# op_get_array_length: specialized get_by_id for "length" on arrays.  The
# array profile (operand 4) must report a real array with an indexed shape;
# the length comes from the butterfly's IndexingHeader.  A length that does
# not fit a non-negative int32 goes to the generic slow path.
1281 _llint_op_get_array_length:
1282     traceExecution()
1283     loadisFromInstruction(2, t0)
1284     loadpFromInstruction(4, t1)              # t1 = ArrayProfile
1285     loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow)
1286     move t3, t2
1287     arrayProfile(t2, t1, t0)                 # t2 becomes the indexing type
1288     btiz t2, IsArray, .opGetArrayLengthSlow
1289     btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
1290     loadisFromInstruction(1, t1)             # t1 = destination virtual register
1291     loadp JSObject::m_butterfly[t3], t0
1292     loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
1293     bilt t0, 0, .opGetArrayLengthSlow        # length > INT32_MAX can't be boxed as int32
1294     orq tagTypeNumber, t0                    # box the int32 length
1295     valueProfile(t0, 8, t2)
1296     storeq t0, [cfr, t1, 8]
1297     dispatch(9)
1298
1299 .opGetArrayLengthSlow:
1300     callSlowPath(_llint_slow_path_get_by_id)
1301     dispatch(9)
1302
1303
# op_get_arguments_length: if the arguments object has not been materialized
# (its register is still empty), the length is just ArgumentCount - 1
# (excluding 'this').  A materialized arguments object needs the slow path.
1304 _llint_op_get_arguments_length:
1305     traceExecution()
1306     loadisFromInstruction(2, t0)             # t0 = arguments-object register
1307     loadisFromInstruction(1, t1)             # t1 = destination virtual register
1308     btqnz [cfr, t0, 8], .opGetArgumentsLengthSlow    # already created -> slow
1309     loadi ArgumentCount + PayloadOffset[cfr], t2
1310     subi 1, t2                               # don't count 'this'
1311     orq tagTypeNumber, t2                    # box the int32 length
1312     storeq t2, [cfr, t1, 8]
1313     dispatch(4)
1314
1315 .opGetArgumentsLengthSlow:
1316     callSlowPath(_llint_slow_path_get_arguments_length)
1317     dispatch(4)
1318
1319
# putById: monomorphic non-transitioning put_by_id cache.  Operand 4 is the
# cached Structure, operand 5 the cached byte offset.  Cache misses jump to
# .opPutByIdSlow, which is defined after _llint_op_put_by_id below and is
# shared by all putById/putByIdTransition expansions.
1320 macro putById(getPropertyStorage)
1321     traceExecution()
1322     writeBarrierOnOperands(1, 3)
1323     loadisFromInstruction(1, t3)
1324     loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
1325     loadStructureWithScratch(t0, t2, t1)
1326     loadpFromInstruction(4, t1)              # t1 = cached structure
1327     bpneq t2, t1, .opPutByIdSlow             # cache miss
1328     getPropertyStorage(
1329         t0,
1330         t3,
1331         macro (propertyStorage, scratch)
1332             loadisFromInstruction(5, t1)     # t1 = cached byte offset
1333             loadisFromInstruction(3, t2)     # t2 = value operand
1334             loadConstantOrVariable(t2, scratch)
1335             storeq scratch, [propertyStorage, t1]
1336             dispatch(9)
1337         end)
1338 end
1339
# put_by_id with inline storage.  .opPutByIdSlow here is the shared slow
# label targeted by every putById/putByIdTransition expansion above/below.
1340 _llint_op_put_by_id:
1341     putById(withInlineStorage)
1342
1343 .opPutByIdSlow:
1344     callSlowPath(_llint_slow_path_put_by_id)
1345     dispatch(9)
1346
1347
# put_by_id with out-of-line (butterfly) storage.
1348 _llint_op_put_by_id_out_of_line:
1349     putById(withOutOfLineStorage)
1350
1351
# putByIdTransition: cached put_by_id that also transitions the object's
# structure.  Operand 4 = expected old structure, operand 5 = cached offset,
# operand 6 = new structure (its ID is written into the cell last, after the
# value store, so the object never has the new shape without the property).
# additionalChecks optionally validates the prototype chain (operand 7).
1352 macro putByIdTransition(additionalChecks, getPropertyStorage)
1353     traceExecution()
1354     writeBarrierOnOperand(1)
1355     loadisFromInstruction(1, t3)
1356     loadpFromInstruction(4, t1)              # t1 = expected old structure
1357     loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
1358     loadStructureWithScratch(t0, t2, t3)
1359     bpneq t2, t1, .opPutByIdSlow             # cache miss
1360     additionalChecks(t1, t3, t2)
1361     loadisFromInstruction(3, t2)             # t2 = value operand
1362     loadisFromInstruction(5, t1)             # t1 = cached byte offset
1363     getPropertyStorage(
1364         t0,
1365         t3,
1366         macro (propertyStorage, scratch)
1367             addp t1, propertyStorage, t3     # t3 = address of the property slot
1368             loadConstantOrVariable(t2, t1)
1369             storeq t1, [t3]                  # store the value first...
1370             loadpFromInstruction(6, t1)      # ...then switch to the new structure
1371             loadi Structure::m_blob + StructureIDBlob::u.words.word1[t1], t1
1372             storei t1, JSCell::m_structureID[t0]
1373             dispatch(9)
1374         end)
1375 end
1376
# noAdditionalChecks: empty hook for direct (non-chain-checked) transitions.
1377 macro noAdditionalChecks(oldStructure, scratch, scratch2)
1378 end
1379
# structureChainChecks: walk the old structure's prototype chain and verify
# each prototype's structure matches the cached StructureChain in operand 7.
# Any mismatch (a prototype changed shape) bails to .opPutByIdSlow.
# Clobbers oldStructure, scratch, scratch2.
1380 macro structureChainChecks(oldStructure, scratch, scratch2)
1381     const protoCell = oldStructure    # Reusing the oldStructure register for the proto
1382     loadpFromInstruction(7, scratch)         # scratch = cached StructureChain
1383     assert(macro (ok) btpnz scratch, ok end)
1384     loadp StructureChain::m_vector[scratch], scratch
1385     assert(macro (ok) btpnz scratch, ok end)
1386     bqeq Structure::m_prototype[oldStructure], ValueNull, .done
1387 .loop:
1388     loadq Structure::m_prototype[oldStructure], protoCell
1389     loadStructureAndClobberFirstArg(protoCell, scratch2)
1390     move scratch2, oldStructure
1391     bpneq oldStructure, [scratch], .opPutByIdSlow    # chain entry mismatch
1392     addp 8, scratch                          # next chain entry
1393     bqneq Structure::m_prototype[oldStructure], ValueNull, .loop
1394 .done:
1395 end
1396
# Direct transition (no prototype-chain check), inline storage.
1397 _llint_op_put_by_id_transition_direct:
1398     putByIdTransition(noAdditionalChecks, withInlineStorage)
1399
1400
# Direct transition (no prototype-chain check), out-of-line storage.
1401 _llint_op_put_by_id_transition_direct_out_of_line:
1402     putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
1403
1404
# Normal transition (prototype chain must match the cached chain), inline.
1405 _llint_op_put_by_id_transition_normal:
1406     putByIdTransition(structureChainChecks, withInlineStorage)
1407
1408
# Normal transition (prototype chain checked), out-of-line storage.
1409 _llint_op_put_by_id_transition_normal_out_of_line:
1410     putByIdTransition(structureChainChecks, withOutOfLineStorage)
1411
1412
# op_get_by_val: indexed load with fast paths for Int32/Contiguous, Double,
# and ArrayStorage indexing shapes.  A zero (empty) slot or NaN double means
# a hole; holes and out-of-bounds indices record m_outOfBounds in the array
# profile before taking the slow path.
1413 _llint_op_get_by_val:
1414     traceExecution()
1415     loadisFromInstruction(2, t2)
1416     loadConstantOrVariableCell(t2, t0, .opGetByValSlow)
1417     loadpFromInstruction(4, t3)              # t3 = ArrayProfile
1418     move t0, t2
1419     arrayProfile(t2, t3, t1)                 # t2 becomes the indexing type
1420     loadisFromInstruction(3, t3)
1421     loadConstantOrVariableInt32(t3, t1, .opGetByValSlow)    # t1 = index (must be int32)
1422     sxi2q t1, t1
1423     loadp JSObject::m_butterfly[t0], t3
1424     andi IndexingShapeMask, t2
1425     bieq t2, Int32Shape, .opGetByValIsContiguous
1426     bineq t2, ContiguousShape, .opGetByValNotContiguous
1427 .opGetByValIsContiguous:
1428
1429     biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
1430     loadisFromInstruction(1, t0)             # t0 = destination virtual register
1431     loadq [t3, t1, 8], t2
1432     btqz t2, .opGetByValOutOfBounds          # zero slot = hole
1433     jmp .opGetByValDone
1434
1435 .opGetByValNotContiguous:
1436     bineq t2, DoubleShape, .opGetByValNotDouble
1437     biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
1438     loadis 8[PB, PC, 8], t0                  # operand 1 (destination), loaded directly
1439     loadd [t3, t1, 8], ft0
1440     bdnequn ft0, ft0, .opGetByValOutOfBounds # NaN = hole in a double array
1441     fd2q ft0, t2
1442     subq tagTypeNumber, t2                   # box the double (offset encoding)
1443     jmp .opGetByValDone
1444     
1445 .opGetByValNotDouble:
1446     subi ArrayStorageShape, t2
1447     bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
1448     biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
1449     loadisFromInstruction(1, t0)
1450     loadq ArrayStorage::m_vector[t3, t1, 8], t2
1451     btqz t2, .opGetByValOutOfBounds          # zero slot = hole
1452
1453 .opGetByValDone:
1454     storeq t2, [cfr, t0, 8]
1455     valueProfile(t2, 5, t0)                  # operand 5 = value profile
1456     dispatch(6)
1457
1458 .opGetByValOutOfBounds:
1459     loadpFromInstruction(4, t0)
1460     storeb 1, ArrayProfile::m_outOfBounds[t0]
1461 .opGetByValSlow:
1462     callSlowPath(_llint_slow_path_get_by_val)
1463     dispatch(6)
1464
1465
# op_get_argument_by_val: read arguments[i] straight off the call frame, but
# only while no arguments object has been materialized.  The unsigned biaeq
# against (ArgumentCount - 1) also routes negative indices to the slow path.
1466 _llint_op_get_argument_by_val:
1467     # FIXME: At some point we should array profile this. Right now it isn't necessary
1468     # since the DFG will never turn a get_argument_by_val into a GetByVal.
1469     traceExecution()
1470     loadisFromInstruction(2, t0)             # t0 = arguments-object register
1471     loadisFromInstruction(3, t1)
1472     btqnz [cfr, t0, 8], .opGetArgumentByValSlow      # arguments already created -> slow
1473     loadConstantOrVariableInt32(t1, t2, .opGetArgumentByValSlow)
1474     loadi ArgumentCount + PayloadOffset[cfr], t1
1475     sxi2q t2, t2
1476     subi 1, t1                               # exclude 'this'
1477     biaeq t2, t1, .opGetArgumentByValSlow    # unsigned: catches index < 0 too
1478     loadisFromInstruction(1, t3)
1479     loadpFromInstruction(6, t1)              # t1 = value profile
1480     loadq FirstArgumentOffset[cfr, t2, 8], t0
1481     storeq t0, [cfr, t3, 8]
1482     valueProfile(t0, 6, t1)
1483     dispatch(7)
1484
1485 .opGetArgumentByValSlow:
1486     callSlowPath(_llint_slow_path_get_argument_by_val)
1487     dispatch(7)
1488
1489
# contiguousPutByVal: store helper for the contiguous-family shapes.  In
# bounds: store via storeCallback.  Past publicLength but within
# vectorLength: record mayStoreToHole in the array profile (operand 4, read
# as 32[PB, PC, 8]), grow publicLength, then store.  Past vectorLength:
# .opPutByValOutOfBounds (defined in the enclosing putByVal expansion).
# Expects t0 = butterfly, t1 = base cell, t3 = index (set up by putByVal).
1490 macro contiguousPutByVal(storeCallback)
1491     biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
1492 .storeResult:
1493     loadisFromInstruction(3, t2)             # t2 = value operand
1494     storeCallback(t2, t1, [t0, t3, 8])
1495     dispatch(5)
1496
1497 .outOfBounds:
1498     biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
1499     loadp 32[PB, PC, 8], t2                  # operand 4 = ArrayProfile
1500     storeb 1, ArrayProfile::m_mayStoreToHole[t2]
1501     addi 1, t3, t2
1502     storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]   # length = index + 1
1503     jmp .storeResult
1504 end
1505
# putByVal: indexed store with per-shape fast paths.  Int32Shape requires an
# int32 value; DoubleShape converts int32 or unboxes a double (NaN rejected
# since NaN marks holes); ContiguousShape stores any JSValue; ArrayStorage
# additionally maintains m_numValuesInVector/publicLength when filling a
# hole.  Everything else, or an out-of-vector index, takes slowPath.
1506 macro putByVal(slowPath)
1507     traceExecution()
1508     writeBarrierOnOperands(1, 3)
1509     loadisFromInstruction(1, t0)
1510     loadConstantOrVariableCell(t0, t1, .opPutByValSlow)
1511     loadpFromInstruction(4, t3)              # t3 = ArrayProfile
1512     move t1, t2
1513     arrayProfile(t2, t3, t0)                 # t2 becomes the indexing type
1514     loadisFromInstruction(2, t0)
1515     loadConstantOrVariableInt32(t0, t3, .opPutByValSlow)    # t3 = index (must be int32)
1516     sxi2q t3, t3
1517     loadp JSObject::m_butterfly[t1], t0      # t0 = butterfly
1518     andi IndexingShapeMask, t2
1519     bineq t2, Int32Shape, .opPutByValNotInt32
1520     contiguousPutByVal(
1521         macro (operand, scratch, address)
1522             loadConstantOrVariable(operand, scratch)
1523             bpb scratch, tagTypeNumber, .opPutByValSlow      # value must be a boxed int32
1524             storep scratch, address
1525         end)
1526
1527 .opPutByValNotInt32:
1528     bineq t2, DoubleShape, .opPutByValNotDouble
1529     contiguousPutByVal(
1530         macro (operand, scratch, address)
1531             loadConstantOrVariable(operand, scratch)
1532             bqb scratch, tagTypeNumber, .notInt
1533             ci2d scratch, ft0                # int32 value: convert to double
1534             jmp .ready
1535         .notInt:
1536             addp tagTypeNumber, scratch      # unbox the double (offset encoding)
1537             fq2d scratch, ft0
1538             bdnequn ft0, ft0, .opPutByValSlow    # NaN would look like a hole
1539         .ready:
1540             stored ft0, address
1541         end)
1542
1543 .opPutByValNotDouble:
1544     bineq t2, ContiguousShape, .opPutByValNotContiguous
1545     contiguousPutByVal(
1546         macro (operand, scratch, address)
1547             loadConstantOrVariable(operand, scratch)
1548             storep scratch, address
1549         end)
1550
1551 .opPutByValNotContiguous:
1552     bineq t2, ArrayStorageShape, .opPutByValSlow
1553     biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
1554     btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty    # filling a hole?
1555 .opPutByValArrayStorageStoreResult:
1556     loadisFromInstruction(3, t2)
1557     loadConstantOrVariable(t2, t1)
1558     storeq t1, ArrayStorage::m_vector[t0, t3, 8]
1559     dispatch(5)
1560
1561 .opPutByValArrayStorageEmpty:
1562     loadpFromInstruction(4, t1)
1563     storeb 1, ArrayProfile::m_mayStoreToHole[t1]
1564     addi 1, ArrayStorage::m_numValuesInVector[t0]
1565     bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
1566     addi 1, t3, t1                           # extend publicLength to index + 1
1567     storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
1568     jmp .opPutByValArrayStorageStoreResult
1569
1570 .opPutByValOutOfBounds:
1571     loadpFromInstruction(4, t0)
1572     storeb 1, ArrayProfile::m_outOfBounds[t0]
1573 .opPutByValSlow:
1574     callSlowPath(slowPath)
1575     dispatch(5)
1576 end
1577
# op_put_by_val: ordinary indexed store.
1578 _llint_op_put_by_val:
1579     putByVal(_llint_slow_path_put_by_val)
1580
# op_put_by_val_direct: same fast path, different slow path (direct put).
1581 _llint_op_put_by_val_direct:
1582     putByVal(_llint_slow_path_put_by_val_direct)
1583
1584
# op_jmp: unconditional jump by the (possibly negative) offset in operand 1.
1585 _llint_op_jmp:
1586     traceExecution()
1587     dispatchIntIndirect(1)
1588
1589
# jumpTrueOrFalse: shared fast path for op_jtrue/op_jfalse.  The operand
# must be a boolean; after xor with ValueFalse it is 0 (false) or 1 (true),
# so any bit outside bit 0 means "not a boolean" and we take the slow path
# (which evaluates toBoolean and may move PC, hence dispatch(0)).
# conditionOp branches to .target (taken jump, offset in operand 2).
1590 macro jumpTrueOrFalse(conditionOp, slow)
1591     loadisFromInstruction(1, t1)
1592     loadConstantOrVariable(t1, t0)
1593     xorq ValueFalse, t0                      # false -> 0, true -> 1
1594     btqnz t0, ~1, .slow                      # was -1, which wrongly sent every true value to the slow path; ~1 matches op_is_boolean's boolean test
1595     conditionOp(t0, .target)
1596     dispatch(3)
1597
1598 .target:
1599     dispatchIntIndirect(2)
1600
1601 .slow:
1602     callSlowPath(slow)
1603     dispatch(0)                              # slow path updates PC itself
1604 end
1605
1606
# equalNull: shared body of op_jeq_null/op_jneq_null.  Operand 1 must be a
# real register (assertNotConstant).  Cells go through cellHandler with the
# structure and flags (for MasqueradesAsUndefined handling); immediates mask
# off TagBitUndefined so null and undefined both compare equal to ValueNull.
# Taken jumps go to .target (offset in operand 2).
1607 macro equalNull(cellHandler, immediateHandler)
1608     loadisFromInstruction(1, t0)
1609     assertNotConstant(t0)
1610     loadq [cfr, t0, 8], t0
1611     btqnz t0, tagMask, .immediate
1612     loadStructureWithScratch(t0, t2, t1)
1613     cellHandler(t2, JSCell::m_flags[t0], .target)
1614     dispatch(3)
1615
1616 .target:
1617     dispatchIntIndirect(2)
1618
1619 .immediate:
1620     andq ~TagBitUndefined, t0                # folds undefined onto null
1621     immediateHandler(t0, .target)
1622     dispatch(3)
1623 end
1624
# op_jeq_null: jump if null/undefined, or if the cell masquerades as
# undefined within this code block's global object.
1625 _llint_op_jeq_null:
1626     traceExecution()
1627     equalNull(
1628         macro (structure, value, target) 
1629             btbz value, MasqueradesAsUndefined, .notMasqueradesAsUndefined
1630             loadp CodeBlock[cfr], t0
1631             loadp CodeBlock::m_globalObject[t0], t0
1632             bpeq Structure::m_globalObject[structure], t0, target
1633 .notMasqueradesAsUndefined:
1634         end,
1635         macro (value, target) bqeq value, ValueNull, target end)
1636
1637
# op_jneq_null: inverse of op_jeq_null; non-masquerading cells always jump.
1638 _llint_op_jneq_null:
1639     traceExecution()
1640     equalNull(
1641         macro (structure, value, target) 
1642             btbz value, MasqueradesAsUndefined, target
1643             loadp CodeBlock[cfr], t0
1644             loadp CodeBlock::m_globalObject[t0], t0
1645             bpneq Structure::m_globalObject[structure], t0, target
1646         end,
1647         macro (value, target) bqneq value, ValueNull, target end)
1648
1649
# op_jneq_ptr: jump (offset in operand 3) unless the register in operand 1
# holds the global object's special pointer selected by operand 2.
1650 _llint_op_jneq_ptr:
1651     traceExecution()
1652     loadisFromInstruction(1, t0)
1653     loadisFromInstruction(2, t1)             # t1 = special-pointer index
1654     loadp CodeBlock[cfr], t2
1655     loadp CodeBlock::m_globalObject[t2], t2
1656     loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1
1657     bpneq t1, [cfr, t0, 8], .opJneqPtrTarget
1658     dispatch(4)
1659
1660 .opJneqPtrTarget:
1661     dispatchIntIndirect(3)
1662
1663
# compare: shared body of the relational jump opcodes.  Both operands
# numbers: int/int uses integerCompare directly; any int operand is
# converted to double and doubleCompare runs.  A non-number operand takes
# slowPath (which may move PC, hence dispatch(0)).  Doubles are unboxed by
# adding tagTypeNumber back (inverse of the boxing subtraction).
1664 macro compare(integerCompare, doubleCompare, slowPath)
1665     loadisFromInstruction(1, t2)
1666     loadisFromInstruction(2, t3)
1667     loadConstantOrVariable(t2, t0)
1668     loadConstantOrVariable(t3, t1)
1669     bqb t0, tagTypeNumber, .op1NotInt        # below tagTypeNumber: not a boxed int32
1670     bqb t1, tagTypeNumber, .op2NotInt
1671     integerCompare(t0, t1, .jumpTarget)
1672     dispatch(4)
1673
1674 .op1NotInt:
1675     btqz t0, tagTypeNumber, .slow            # no number bits at all: not a number
1676     bqb t1, tagTypeNumber, .op1NotIntOp2NotInt
1677     ci2d t1, ft1                             # op2 is int32: convert to double
1678     jmp .op1NotIntReady
1679 .op1NotIntOp2NotInt:
1680     btqz t1, tagTypeNumber, .slow
1681     addq tagTypeNumber, t1                   # unbox op2's double
1682     fq2d t1, ft1
1683 .op1NotIntReady:
1684     addq tagTypeNumber, t0                   # unbox op1's double
1685     fq2d t0, ft0
1686     doubleCompare(ft0, ft1, .jumpTarget)
1687     dispatch(4)
1688
1689 .op2NotInt:
1690     ci2d t0, ft0                             # op1 is int32: convert to double
1691     btqz t1, tagTypeNumber, .slow
1692     addq tagTypeNumber, t1
1693     fq2d t1, ft1
1694     doubleCompare(ft0, ft1, .jumpTarget)
1695     dispatch(4)
1696
1697 .jumpTarget:
1698     dispatchIntIndirect(3)                   # taken-branch offset in operand 3
1699
1700 .slow:
1701     callSlowPath(slowPath)
1702     dispatch(0)                              # slow path updates PC itself
1703 end
1704
1705
# op_switch_imm: table switch on an int32 scrutinee.  Operand 1 selects the
# SimpleJumpTable in the code block's rare data; an in-range, nonzero table
# entry is the relative jump, zero or out-of-range falls through (operand 2).
# Doubles go to the slow path; everything else falls through.
1706 _llint_op_switch_imm:
1707     traceExecution()
1708     loadisFromInstruction(3, t2)
1709     loadisFromInstruction(1, t3)             # t3 = jump-table index
1710     loadConstantOrVariable(t2, t1)
1711     loadp CodeBlock[cfr], t2
1712     loadp CodeBlock::m_rareData[t2], t2
1713     muli sizeof SimpleJumpTable, t3    # FIXME: would be nice to peephole this!
1714     loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
1715     addp t3, t2                              # t2 = &table
1716     bqb t1, tagTypeNumber, .opSwitchImmNotInt
1717     subi SimpleJumpTable::min[t2], t1        # rebase to table origin
1718     biaeq t1, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
1719     loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
1720     loadis [t3, t1, 4], t1
1721     btiz t1, .opSwitchImmFallThrough         # zero entry = no case for this value
1722     dispatch(t1)
1723
1724 .opSwitchImmNotInt:
1725     btqnz t1, tagTypeNumber, .opSwitchImmSlow   # Go slow if it's a double.
1726 .opSwitchImmFallThrough:
1727     dispatchIntIndirect(2)
1728
1729 .opSwitchImmSlow:
1730     callSlowPath(_llint_slow_path_switch_imm)
1731     dispatch(0)
1732
1733
# op_switch_char: table switch on a single-character string.  Requires a
# non-rope JSString of length 1; reads its sole character (8- or 16-bit
# buffer) and indexes the SimpleJumpTable like op_switch_imm.  Ropes must be
# resolved by the slow path; any mismatch falls through (operand 2).
1734 _llint_op_switch_char:
1735     traceExecution()
1736     loadisFromInstruction(3, t2)
1737     loadisFromInstruction(1, t3)             # t3 = jump-table index
1738     loadConstantOrVariable(t2, t1)
1739     loadp CodeBlock[cfr], t2
1740     loadp CodeBlock::m_rareData[t2], t2
1741     muli sizeof SimpleJumpTable, t3
1742     loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
1743     addp t3, t2                              # t2 = &table
1744     btqnz t1, tagMask, .opSwitchCharFallThrough      # not a cell
1745     bbneq JSCell::m_type[t1], StringType, .opSwitchCharFallThrough
1746     bineq JSString::m_length[t1], 1, .opSwitchCharFallThrough
1747     loadp JSString::m_value[t1], t0
1748     btpz  t0, .opSwitchOnRope                # null impl = rope; must resolve first
1749     loadp StringImpl::m_data8[t0], t1
1750     btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
1751     loadh [t1], t0                           # 16-bit character
1752     jmp .opSwitchCharReady
1753 .opSwitchChar8Bit:
1754     loadb [t1], t0                           # 8-bit character
1755 .opSwitchCharReady:
1756     subi SimpleJumpTable::min[t2], t0        # rebase to table origin
1757     biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
1758     loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
1759     loadis [t2, t0, 4], t1
1760     btiz t1, .opSwitchCharFallThrough        # zero entry = no case
1761     dispatch(t1)
1762
1763 .opSwitchCharFallThrough:
1764     dispatchIntIndirect(2)
1765
1766 .opSwitchOnRope:
1767     callSlowPath(_llint_slow_path_switch_char)
1768     dispatch(0)
1769
1770
# op_new_func: create a function object via the slow path.  When operand 4
# is nonzero ("check" form) and the destination register is already
# populated, creation is skipped.
1771 _llint_op_new_func:
1772     traceExecution()
1773     loadisFromInstruction(4, t2)             # t2 = "check first" flag
1774     btiz t2, .opNewFuncUnchecked
1775     loadisFromInstruction(1, t1)
1776     btqnz [cfr, t1, 8], .opNewFuncDone       # already created: reuse
1777 .opNewFuncUnchecked:
1778     callSlowPath(_llint_slow_path_new_func)
1779 .opNewFuncDone:
1780     dispatch(5)
1781
# arrayProfileForCall: record the structure of the 'this' argument in the
# call's ArrayProfile (operand CallOpCodeSize - 2) when it is a cell.
# Operand 4 is the callee-frame register offset; negating it addresses the
# argument area.  Clobbers t0, t1, t3.
1782 macro arrayProfileForCall()
1783     loadisFromInstruction(4, t3)
1784     negp t3
1785     loadq ThisArgumentOffset[cfr, t3, 8], t0     # t0 = 'this' for the call
1786     btqnz t0, tagMask, .done                 # only profile cells
1787     loadpFromInstruction((CallOpCodeSize - 2), t1)
1788     loadi JSCell::m_structureID[t0], t3
1789     storei t3, ArrayProfile::m_lastSeenStructureID[t1]
1790 .done:
1791 end
1792
# doCall: fast path for call opcodes.  If the callee (operand 2) matches the
# callee cached in the LLIntCallLinkInfo (operand 5), build the callee frame
# in place — operand 4 is the new frame's register offset, operand 3 the
# argument count — save PC in this frame's ArgumentCount tag slot for
# dispatchAfterCall, and jump to the linked target.  Otherwise, link/call
# via slowPath.
1793 macro doCall(slowPath)
1794     loadisFromInstruction(2, t0)
1795     loadpFromInstruction(5, t1)              # t1 = LLIntCallLinkInfo
1796     loadp LLIntCallLinkInfo::callee[t1], t2
1797     loadConstantOrVariable(t0, t3)
1798     bqneq t3, t2, .opCallSlow                # callee not linked -> slow
1799     loadisFromInstruction(4, t3)             # t3 = callee-frame register offset
1800     lshifti 3, t3
1801     negp t3
1802     addp cfr, t3                             # t3 = new callee frame
1803     storeq t2, Callee[t3]
1804     loadisFromInstruction(3, t2)             # t2 = argument count
1805     storei PC, ArgumentCount + TagOffset[cfr]    # so dispatchAfterCall can restore PC
1806     storei t2, ArgumentCount + PayloadOffset[t3]
1807     addp CallerFrameAndPCSize, t3
1808     callTargetFunction(t1, t3)
1809
1810 .opCallSlow:
1811     slowPathForCall(slowPath)
1812 end
1813
# op_tear_off_arguments: if an arguments object was materialized (register
# in operand 1 is nonzero), detach it from the frame via the slow path.
1814 _llint_op_tear_off_arguments:
1815     traceExecution()
1816     loadisFromInstruction(1, t0)
1817     btqz [cfr, t0, 8], .opTearOffArgumentsNotCreated     # nothing to tear off
1818     callSlowPath(_llint_slow_path_tear_off_arguments)
1819 .opTearOffArgumentsNotCreated:
1820     dispatch(3)
1821
1822
# op_ret: return operand 1's value (in t0, as doReturn expects).
1823 _llint_op_ret:
1824     traceExecution()
1825     checkSwitchToJITForEpilogue()
1826     loadisFromInstruction(1, t2)
1827     loadConstantOrVariable(t2, t0)           # return value in t0
1828     doReturn()
1829
1830
# op_to_primitive: immediates and strings are already primitive and pass
# through unchanged; other cells need the full ToPrimitive slow path.
1831 _llint_op_to_primitive:
1832     traceExecution()
1833     loadisFromInstruction(2, t2)
1834     loadisFromInstruction(1, t3)             # t3 = destination virtual register
1835     loadConstantOrVariable(t2, t0)
1836     btqnz t0, tagMask, .opToPrimitiveIsImm
1837     bbneq JSCell::m_type[t0], StringType, .opToPrimitiveSlowCase
1838 .opToPrimitiveIsImm:
1839     storeq t0, [cfr, t3, 8]
1840     dispatch(3)
1841
1842 .opToPrimitiveSlowCase:
1843     callSlowPath(_slow_path_to_primitive)
1844     dispatch(3)
1845
1846
# op_catch: landing pad for throws.  Recovers the VM from the callee slot,
# restores cfr/sp and the PB/PC interpretation state from the VM's
# *ForThrow fields, moves the pending exception into operand 1's register,
# and clears it from the VM.
1847 _llint_op_catch:
1848     # This is where we end up from the JIT's throw trampoline (because the
1849     # machine code return address will be set to _llint_op_catch), and from
1850     # the interpreter's throw trampoline (see _llint_throw_trampoline).
1851     # The throwing code must have known that we were throwing to the interpreter,
1852     # and have set VM::targetInterpreterPCForThrow.
1853     loadp Callee[cfr], t3
1854     andp MarkedBlockMask, t3                 # cell -> its MarkedBlock
1855     loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3     # t3 = VM
1856     loadp VM::callFrameForThrow[t3], cfr
1857     loadp VM::vmEntryFrameForThrow[t3], t0
1858     storep t0, VM::topVMEntryFrame[t3]
1859     restoreStackPointerAfterCall()
1860
1861     loadp CodeBlock[cfr], PB
1862     loadp CodeBlock::m_instructions[PB], PB
1863     loadp VM::targetInterpreterPCForThrow[t3], PC
1864     subp PB, PC                              # convert raw pointer to instruction index
1865     rshiftp 3, PC
1866     loadq VM::m_exception[t3], t0
1867     storeq 0, VM::m_exception[t3]            # exception is now consumed
1868     loadisFromInstruction(1, t2)
1869     storeq t0, [cfr, t2, 8]                  # expose the exception to the catch block
1870     traceExecution()
1871     dispatch(2)
1872
1873
# op_end: terminate execution of this code block, returning operand 1's
# value (which must be a real register, per the assert).
1874 _llint_op_end:
1875     traceExecution()
1876     checkSwitchToJITForEpilogue()
1877     loadisFromInstruction(1, t0)
1878     assertNotConstant(t0)
1879     loadq [cfr, t0, 8], t0                   # return value in t0 for doReturn
1880     doReturn()
1881
1882
# Throw trampoline for exceptions raised in LLInt slow paths: let the C++
# handler pick the target, then jump to the machine-code throw target the
# VM recorded (which may be JIT code, not just the interpreter).
1883 _llint_throw_from_slow_path_trampoline:
1884     callSlowPath(_llint_slow_path_handle_exception)
1885
1886     # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
1887     # the throw target is not necessarily interpreted code, we come to here.
1888     # This essentially emulates the JIT's throwing protocol.
1889     loadp CodeBlock[cfr], t1
1890     loadp CodeBlock::m_vm[t1], t1
1891     jmp VM::targetMachinePCForThrow[t1]
1892
1893
# Throw trampoline used mid-call: pop the return address into t2 first so
# the stack matches what the slow-path throw trampoline expects.
1894 _llint_throw_during_call_trampoline:
1895     preserveReturnAddressAfterCall(t2)
1896     jmp _llint_throw_from_slow_path_trampoline
1897
1898
# nativeCallTrampoline: thunk for calling a host (C) function.  Marks the
# frame as native (null CodeBlock), publishes cfr as topCallFrame, calls
# executable->function(callFrame) per the platform ABI, then re-derives the
# VM from the callee cell and checks for a pending exception.
1899 macro nativeCallTrampoline(executableOffsetToFunction)
1900
1901     functionPrologue()
1902     storep 0, CodeBlock[cfr]                 # native frames have no CodeBlock
1903     if X86_64 or X86_64_WIN
1904         if X86_64
1905             const arg1 = t4  # t4 = rdi
1906             const arg2 = t5  # t5 = rsi
1907             const temp = t1
1908         elsif X86_64_WIN
1909             const arg1 = t2  # t2 = rcx
1910             const arg2 = t1  # t1 = rdx
1911             const temp = t0
1912         end
1913         loadp Callee[cfr], t0
1914         andp MarkedBlockMask, t0, t1         # callee cell -> its MarkedBlock
1915         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1     # t1 = VM
1916         storep cfr, VM::topCallFrame[t1]
1917         move cfr, arg1                       # arg1 = ExecState*
1918         loadp Callee[cfr], arg2
1919         loadp JSFunction::m_executable[arg2], temp
1920         checkStackPointerAlignment(t3, 0xdead0001)
1921         if X86_64_WIN
1922             subp 32, sp                      # Win64 ABI: 32-byte shadow space
1923         end
1924         call executableOffsetToFunction[temp]
1925         if X86_64_WIN
1926             addp 32, sp
1927         end
1928         loadp Callee[cfr], t3                # re-derive the VM after the call
1929         andp MarkedBlockMask, t3
1930         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
1931     elsif ARM64 or C_LOOP
1932         loadp Callee[cfr], t0
1933         andp MarkedBlockMask, t0, t1
1934         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1     # t1 = VM
1935         storep cfr, VM::topCallFrame[t1]
1936         preserveReturnAddressAfterCall(t3)   # link register must survive the C call
1937         storep t3, ReturnPC[cfr]
1938         move cfr, t0                         # first argument = ExecState*
1939         loadp Callee[cfr], t1
1940         loadp JSFunction::m_executable[t1], t1
1941         if C_LOOP
1942             cloopCallNative executableOffsetToFunction[t1]
1943         else
1944             call executableOffsetToFunction[t1]
1945         end
1946         restoreReturnAddressBeforeReturn(t3)
1947         loadp Callee[cfr], t3                # re-derive the VM after the call
1948         andp MarkedBlockMask, t3
1949         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
1950     else
1951         error
1952     end
1953
1954     functionEpilogue()
1955
1956     btqnz VM::m_exception[t3], .handleException
1957     ret
1958
1959 .handleException:
1960     storep cfr, VM::topCallFrame[t3]
1961     restoreStackPointerAfterCall()
1962     jmp _llint_throw_from_slow_path_trampoline
1963 end
1964
1965
# getGlobalObject: store this code block's global object into the register
# named by instruction operand dst.  Clobbers t0, t1.
1966 macro getGlobalObject(dst)
1967     loadp CodeBlock[cfr], t0
1968     loadp CodeBlock::m_globalObject[t0], t0
1969     loadisFromInstruction(dst, t1)
1970     storeq t0, [cfr, t1, 8]
1971 end
1972
# varInjectionCheck: bail to slowPath if the global object's var-injection
# watchpoint has fired (i.e. eval/with may have injected variables).
# Clobbers t0.
1973 macro varInjectionCheck(slowPath)
1974     loadp CodeBlock[cfr], t0
1975     loadp CodeBlock::m_globalObject[t0], t0
1976     loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0
1977     bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath
1978 end
1979
# resolveScope: starting from the scope in operand 2's register, follow
# JSScope::m_next operand-5 times and store the resulting scope into
# operand 1's register.  Clobbers t0, t1, t2.
1980 macro resolveScope()
1981     loadisFromInstruction(5, t2)             # t2 = scope-chain depth to skip
1982     loadisFromInstruction(2, t0)
1983     loadp [cfr, t0, 8], t0                   # t0 = starting scope
1984     btiz t2, .resolveScopeLoopEnd
1985
1986 .resolveScopeLoop:
1987     loadp JSScope::m_next[t0], t0
1988     subi 1, t2
1989     btinz t2, .resolveScopeLoop
1990
1991 .resolveScopeLoopEnd:
1992     loadisFromInstruction(1, t1)             # t1 = destination virtual register
1993     storeq t0, [cfr, t1, 8]
1994 end
1995
1996
# op_resolve_scope: dispatch on the resolve type in operand 4.  Global
# variants resolve to the global object; closure variants walk the scope
# chain; *WithVarInjectionChecks variants first verify the var-injection
# watchpoint and otherwise fall back to the fully dynamic slow path.
1997 _llint_op_resolve_scope:
1998     traceExecution()
1999     loadisFromInstruction(4, t0)             # t0 = resolve type
2000
2001 #rGlobalProperty:
2002     bineq t0, GlobalProperty, .rGlobalVar
2003     getGlobalObject(1)
2004     dispatch(7)
2005
2006 .rGlobalVar:
2007     bineq t0, GlobalVar, .rClosureVar
2008     getGlobalObject(1)
2009     dispatch(7)
2010
2011 .rClosureVar:
2012     bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
2013     resolveScope()
2014     dispatch(7)
2015
2016 .rGlobalPropertyWithVarInjectionChecks:
2017     bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
2018     varInjectionCheck(.rDynamic)
2019     getGlobalObject(1)
2020     dispatch(7)
2021
2022 .rGlobalVarWithVarInjectionChecks:
2023     bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
2024     varInjectionCheck(.rDynamic)
2025     getGlobalObject(1)
2026     dispatch(7)
2027
2028 .rClosureVarWithVarInjectionChecks:
2029     bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
2030     varInjectionCheck(.rDynamic)
2031     resolveScope()
2032     dispatch(7)
2033
2034 .rDynamic:
2035     callSlowPath(_llint_slow_path_resolve_scope)
2036     dispatch(7)
2037
2038
# Loads the JSValue from the virtual register named by instruction
# `operand` into t0, then verifies its Structure matches the Structure
# cached in instruction slot 5; branches to `slowPath` on mismatch.
# On success: t0 = the cell, t2 = its Structure. Clobbers: t1.
macro loadWithStructureCheck(operand, slowPath)
    loadisFromInstruction(operand, t0)  # t0 = virtual register index
    loadq [cfr, t0, 8], t0              # t0 = the cell (assumed; slow path covers mismatch)
    loadStructureWithScratch(t0, t2, t1) # t2 = t0's Structure (t1 is scratch)
    loadpFromInstruction(5, t1)         # t1 = cached Structure from the instruction
    bpneq t2, t1, slowPath              # structure changed -> take slow path
end
2046
# Reads a property from the object in t0 at the offset cached in
# instruction slot 6, value-profiles it into slot 7, and stores the
# result into the register named by operand 1.
# Expects: t0 = base object. Clobbers: t0, t1, t2.
macro getProperty()
    loadisFromInstruction(6, t1)        # t1 = cached property offset
    loadPropertyAtVariableOffset(t1, t0, t2) # t2 = base[offset]
    valueProfile(t2, 7, t0)             # record the value in the profile at slot 7
    loadisFromInstruction(1, t0)        # t0 = destination virtual register index
    storeq t2, [cfr, t0, 8]
end
2054
# Reads a global variable through the storage pointer cached in
# instruction slot 6, value-profiles it into slot 7, and stores the
# result into the register named by operand 1.
# Clobbers: t0, t1.
macro getGlobalVar()
    loadpFromInstruction(6, t0)         # t0 = pointer to the variable's storage slot
    loadq [t0], t0                      # t0 = current value
    valueProfile(t0, 7, t1)
    loadisFromInstruction(1, t1)        # t1 = destination virtual register index
    storeq t0, [cfr, t1, 8]
end
2062
# Reads a closure variable from the environment record in t0 at the
# register index cached in instruction slot 6, value-profiles it into
# slot 7, and stores the result into the register named by operand 1.
# Expects: t0 = JSEnvironmentRecord*. Clobbers: t0, t1.
macro getClosureVar()
    loadp JSEnvironmentRecord::m_registers[t0], t0 # t0 = environment's register array
    loadisFromInstruction(6, t1)        # t1 = index within the environment record
    loadq [t0, t1, 8], t0               # t0 = registers[index]
    valueProfile(t0, 7, t1)
    loadisFromInstruction(1, t1)        # t1 = destination virtual register index
    storeq t0, [cfr, t1, 8]
end
2071
# op_get_from_scope: loads a variable from a previously resolved scope
# (operand 2), dispatching on the ResolveType in the low bits of operand 4.
# GlobalProperty paths do a structure check; *WithVarInjectionChecks paths
# additionally verify the var-injection watchpoint. Any check failure or
# dynamic type falls through to the slow path. Instruction is 8 slots wide.
_llint_op_get_from_scope:
    traceExecution()
    loadisFromInstruction(4, t0)
    andi ResolveModeMask, t0            # t0 = ResolveType (mask off the ResolveMode bits)

#gGlobalProperty:
    bineq t0, GlobalProperty, .gGlobalVar
    loadWithStructureCheck(2, .gDynamic) # t0 = global object, structure-checked
    getProperty()
    dispatch(8)

.gGlobalVar:
    bineq t0, GlobalVar, .gClosureVar
    getGlobalVar()                      # reads via the pointer cached in the instruction
    dispatch(8)

.gClosureVar:
    bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
    loadVariable(2, t0)                 # t0 = the scope (environment record)
    getClosureVar()
    dispatch(8)

.gGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
    loadWithStructureCheck(2, .gDynamic)
    getProperty()
    dispatch(8)

.gGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
    varInjectionCheck(.gDynamic)
    # NOTE(review): getGlobalVar() does not read t0, so this loadVariable
    # appears redundant (harmless) — confirm before removing.
    loadVariable(2, t0)
    getGlobalVar()
    dispatch(8)

.gClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
    varInjectionCheck(.gDynamic)
    loadVariable(2, t0)                 # t0 = the scope (environment record)
    getClosureVar()
    dispatch(8)

.gDynamic:
    # Unknown/dynamic resolve type, failed structure check, or injected vars.
    callSlowPath(_llint_slow_path_get_from_scope)
    dispatch(8)
2117
2118
# Writes the value in operand 3 into the object in t0 at the property
# offset cached in instruction slot 6.
# Expects: t0 = base object. Clobbers: t1, t2.
macro putProperty()
    loadisFromInstruction(3, t1)        # t1 = value operand (constant or register)
    loadConstantOrVariable(t1, t2)      # t2 = the value to store
    loadisFromInstruction(6, t1)        # t1 = cached property offset
    storePropertyAtVariableOffset(t1, t0, t2) # base[offset] = value
end
2125
# Writes the value in operand 3 to the global-variable storage pointer
# cached in instruction slot 6, after notifying the variable's
# WatchpointSet (cached in slot 5). notifyWrite jumps to .pDynamic
# on the slow case, so this macro is only usable inside op_put_to_scope.
# Clobbers: t0, t1, t2.
macro putGlobalVar()
    loadisFromInstruction(3, t0)        # t0 = value operand (constant or register)
    loadConstantOrVariable(t0, t1)      # t1 = the value to store
    loadpFromInstruction(5, t2)         # t2 = the variable's WatchpointSet
    notifyWrite(t2, t1, t0, .pDynamic)  # fire/transition watchpoints; bail if needed
    loadpFromInstruction(6, t0)         # t0 = pointer to the variable's storage slot
    storeq t1, [t0]
end
2134
# Writes the value in operand 3 into the environment record in t0 at the
# register index cached in instruction slot 6.
# Expects: t0 = JSEnvironmentRecord*. Clobbers: t0, t1, t2.
macro putClosureVar()
    loadisFromInstruction(3, t1)        # t1 = value operand (constant or register)
    loadConstantOrVariable(t1, t2)      # t2 = the value to store
    loadp JSEnvironmentRecord::m_registers[t0], t0 # t0 = environment's register array
    loadisFromInstruction(6, t1)        # t1 = index within the environment record
    storeq t2, [t0, t1, 8]              # registers[index] = value
end
2142
# Like putClosureVar, but for a variable created in the current function's
# own environment: instruction slot 5 may hold a watchpoint set for the
# variable (null if none), which must be notified before the store.
# notifyWrite jumps to .pDynamic on the slow case, so this macro is only
# usable inside op_put_to_scope.
# Expects: t0 = JSEnvironmentRecord*. Clobbers: t0, t1, t2, t3.
macro putLocalClosureVar()
    loadisFromInstruction(3, t1)        # t1 = value operand (constant or register)
    loadConstantOrVariable(t1, t2)      # t2 = the value to store
    loadpFromInstruction(5, t3)         # t3 = watchpoint set for this variable (or null)
    btpz t3, .noVariableWatchpointSet   # no set -> nothing to notify
    notifyWrite(t3, t2, t1, .pDynamic)  # fire/transition watchpoints; bail if needed
.noVariableWatchpointSet:
    loadp JSEnvironmentRecord::m_registers[t0], t0 # t0 = environment's register array
    loadisFromInstruction(6, t1)        # t1 = index within the environment record
    storeq t2, [t0, t1, 8]              # registers[index] = value
end
2154
2155
# op_put_to_scope: stores a value (operand 3) into a variable in a resolved
# scope (operand 1), dispatching on the ResolveType in the low bits of
# operand 4. Every fast path performs its GC write barrier before the store;
# GlobalProperty paths structure-check the scope; *WithVarInjectionChecks
# paths verify the var-injection watchpoint. Any check failure or dynamic
# type falls through to the slow path. Instruction is 7 slots wide.
_llint_op_put_to_scope:
    traceExecution()
    loadisFromInstruction(4, t0)
    andi ResolveModeMask, t0            # t0 = ResolveType (mask off the ResolveMode bits)

#pLocalClosureVar:
    bineq t0, LocalClosureVar, .pGlobalProperty
    writeBarrierOnOperands(1, 3)        # barrier: storing operand 3 into cell operand 1
    loadVariable(1, t0)                 # t0 = the scope (environment record)
    putLocalClosureVar()
    dispatch(7)

.pGlobalProperty:
    bineq t0, GlobalProperty, .pGlobalVar
    writeBarrierOnOperands(1, 3)
    loadWithStructureCheck(1, .pDynamic) # t0 = global object, structure-checked
    putProperty()
    dispatch(7)

.pGlobalVar:
    bineq t0, GlobalVar, .pClosureVar
    writeBarrierOnGlobalObject(3)       # global var storage is owned by the global object
    putGlobalVar()
    dispatch(7)

.pClosureVar:
    bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
    writeBarrierOnOperands(1, 3)
    loadVariable(1, t0)                 # t0 = the scope (environment record)
    putClosureVar()
    dispatch(7)

.pGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
    writeBarrierOnOperands(1, 3)
    loadWithStructureCheck(1, .pDynamic)
    putProperty()
    dispatch(7)

.pGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
    writeBarrierOnGlobalObject(3)
    varInjectionCheck(.pDynamic)
    putGlobalVar()
    dispatch(7)

.pClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
    writeBarrierOnOperands(1, 3)
    varInjectionCheck(.pDynamic)
    loadVariable(1, t0)                 # t0 = the scope (environment record)
    putClosureVar()
    dispatch(7)

.pDynamic:
    # Unknown/dynamic resolve type, failed structure check, injected vars,
    # or a notifyWrite slow case from the put* macros above.
    callSlowPath(_llint_slow_path_put_to_scope)
    dispatch(7)
2213
# op_profile_type: appends an entry (value, TypeLocation, structureID) to the
# VM's TypeProfilerLog for the value in operand 1, then calls the slow path
# to flush the log if the entry pointer has reached the end of the buffer.
# Instruction is 6 slots wide.
_llint_op_profile_type:
    traceExecution()
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    # t1 is holding the pointer to the typeProfilerLog.
    loadp VM::m_typeProfilerLog[t1], t1
    # t2 is holding the pointer to the current log entry.
    loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2

    # t0 is holding the JSValue argument.
    loadisFromInstruction(1, t3)
    loadConstantOrVariable(t3, t0)

    # Store the JSValue onto the log entry.
    storeq t0, TypeProfilerLog::LogEntry::value[t2]
    
    # Store the TypeLocation onto the log entry.
    loadpFromInstruction(2, t3)         # t3 = TypeLocation* cached in the instruction
    storep t3, TypeProfilerLog::LogEntry::location[t2]

    # A JSValue is a cell iff none of the tag bits are set.
    btqz t0, tagMask, .opProfileTypeIsCell
    storei 0, TypeProfilerLog::LogEntry::structureID[t2] # non-cell: no structure
    jmp .opProfileTypeSkipIsCell
.opProfileTypeIsCell:
    loadi JSCell::m_structureID[t0], t3
    storei t3, TypeProfilerLog::LogEntry::structureID[t2]
.opProfileTypeSkipIsCell:
    
    # Increment the current log entry.
    addp sizeof TypeProfilerLog::LogEntry, t2
    storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]

    # If the log is full, have the slow path process (and clear) it.
    loadp TypeProfilerLog::m_logEndPtr[t1], t1
    bpneq t2, t1, .opProfileTypeDone
    callSlowPath(_slow_path_profile_type_clear_log)

.opProfileTypeDone:
    dispatch(6)