[JSC] Shrink size of VM by lazily allocating IsoSubspaces for non-common types
[WebKit-https.git] / Source / JavaScriptCore / jit / JITOpcodes.cpp
1 /*
2  * Copyright (C) 2009-2019 Apple Inc. All rights reserved.
3  * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
18  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25  */
26
27 #include "config.h"
28 #if ENABLE(JIT)
29 #include "JIT.h"
30
31 #include "BasicBlockLocation.h"
32 #include "BytecodeStructs.h"
33 #include "Exception.h"
34 #include "Heap.h"
35 #include "InterpreterInlines.h"
36 #include "JITInlines.h"
37 #include "JSArray.h"
38 #include "JSCast.h"
39 #include "JSFunction.h"
40 #include "JSPropertyNameEnumerator.h"
41 #include "LinkBuffer.h"
42 #include "MaxFrameExtentForSlowPathCall.h"
43 #include "OpcodeInlines.h"
44 #include "SlowPathCall.h"
45 #include "SuperSampler.h"
46 #include "ThunkGenerators.h"
47 #include "TypeLocation.h"
48 #include "TypeProfilerLog.h"
49 #include "VirtualRegister.h"
50 #include "Watchdog.h"
51
52 namespace JSC {
53
54 #if USE(JSVALUE64)
55
56 void JIT::emit_op_mov(const Instruction* currentInstruction)
57 {
58     auto bytecode = currentInstruction->as<OpMov>();
59     int dst = bytecode.m_dst.offset();
60     int src = bytecode.m_src.offset();
61
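    // Editor's note: constants are baked straight into the generated code. Numbers are emitted through
    // Imm64 rather than TrustedImm64, presumably so they can participate in JIT constant blinding;
    // everything else is emitted verbatim as a trusted immediate.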
62     if (m_codeBlock->isConstantRegisterIndex(src)) {
63         JSValue value = m_codeBlock->getConstant(src);
64         if (!value.isNumber())
65             store64(TrustedImm64(JSValue::encode(value)), addressFor(dst));
66         else
67             store64(Imm64(JSValue::encode(value)), addressFor(dst));
68         return;
69     }
70
71     load64(addressFor(src), regT0);
72     store64(regT0, addressFor(dst));
73 }
74
75
76 void JIT::emit_op_end(const Instruction* currentInstruction)
77 {
78     auto bytecode = currentInstruction->as<OpEnd>();
79     RELEASE_ASSERT(returnValueGPR != callFrameRegister);
80     emitGetVirtualRegister(bytecode.m_value.offset(), returnValueGPR);
81     emitRestoreCalleeSaves();
82     emitFunctionEpilogue();
83     ret();
84 }
85
86 void JIT::emit_op_jmp(const Instruction* currentInstruction)
87 {
88     auto bytecode = currentInstruction->as<OpJmp>();
89     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
90     addJump(jump(), target);
91 }
92
93 void JIT::emit_op_new_object(const Instruction* currentInstruction)
94 {
95     auto bytecode = currentInstruction->as<OpNewObject>();
96     auto& metadata = bytecode.metadata(m_codeBlock);
97     Structure* structure = metadata.m_objectAllocationProfile.structure();
98     size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
99     Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(*m_vm, allocationSize, AllocatorForMode::AllocatorIfExists);
100
101     RegisterID resultReg = regT0;
102     RegisterID allocatorReg = regT1;
103     RegisterID scratchReg = regT2;
104
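    // Editor's note: with AllocatorForMode::AllocatorIfExists the lookup above returns a null Allocator
    // when no allocator has been set up for this size class yet. In that case the inline allocation fast
    // path is skipped entirely and op_new_object always calls through to operationNewObject.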
105     if (!allocator)
106         addSlowCase(jump());
107     else {
108         JumpList slowCases;
109         auto butterfly = TrustedImmPtr(nullptr);
110         emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, scratchReg, slowCases);
111         emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
112         addSlowCase(slowCases);
113         emitPutVirtualRegister(bytecode.m_dst.offset());
114     }
115 }
116
117 void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
118 {
119     linkAllSlowCases(iter);
120
121     auto bytecode = currentInstruction->as<OpNewObject>();
122     auto& metadata = bytecode.metadata(m_codeBlock);
123     int dst = bytecode.m_dst.offset();
124     Structure* structure = metadata.m_objectAllocationProfile.structure();
125     callOperation(operationNewObject, structure);
126     emitStoreCell(dst, returnValueGPR);
127 }
128
129 void JIT::emit_op_overrides_has_instance(const Instruction* currentInstruction)
130 {
131     auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
132     int dst = bytecode.m_dst.offset();
133     int constructor = bytecode.m_constructor.offset();
134     int hasInstanceValue = bytecode.m_hasInstanceValue.offset();
135
136     emitGetVirtualRegister(hasInstanceValue, regT0);
137
138     // We don't take the custom path if the hasInstance value is the built-in Symbol.hasInstance function, since we know what it would do.
139     Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
140
141     emitGetVirtualRegister(constructor, regT0);
142
143     // Check the constructor's 'ImplementsDefaultHasInstance' type-info flag, which is set when the object is neither a C-API user nor a bound function.
144     test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
145     boxBoolean(regT0, JSValueRegs { regT0 });
146     Jump done = jump();
147
148     customhasInstanceValue.link(this);
149     move(TrustedImm32(ValueTrue), regT0);
150
151     done.link(this);
152     emitPutVirtualRegister(dst);
153 }
154
155 void JIT::emit_op_instanceof(const Instruction* currentInstruction)
156 {
157     auto bytecode = currentInstruction->as<OpInstanceof>();
158     int dst = bytecode.m_dst.offset();
159     int value = bytecode.m_value.offset();
160     int proto = bytecode.m_prototype.offset();
161
162     // Load the operands value and proto into registers: regT2 holds value and regT1 holds proto.
163     // regT0 is reserved for the result of the inline cache below.
164     emitGetVirtualRegister(value, regT2);
165     emitGetVirtualRegister(proto, regT1);
166     
167     // Check that value and proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
168     emitJumpSlowCaseIfNotJSCell(regT2, value);
169     emitJumpSlowCaseIfNotJSCell(regT1, proto);
170
171     JITInstanceOfGenerator gen(
172         m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset),
173         RegisterSet::stubUnavailableRegisters(),
174         regT0, // result
175         regT2, // value
176         regT1, // proto
177         regT3, regT4); // scratch
178     gen.generateFastPath(*this);
179     m_instanceOfs.append(gen);
180     
181     emitPutVirtualRegister(dst);
182 }
183
184 void JIT::emitSlow_op_instanceof(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
185 {
186     linkAllSlowCases(iter);
187     
188     auto bytecode = currentInstruction->as<OpInstanceof>();
189     int resultVReg = bytecode.m_dst.offset();
190     
191     JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++];
192     
193     Label coldPathBegin = label();
194     Call call = callOperation(operationInstanceOfOptimize, resultVReg, gen.stubInfo(), regT2, regT1);
195     gen.reportSlowPathCall(coldPathBegin, call);
196 }
197
198 void JIT::emit_op_instanceof_custom(const Instruction*)
199 {
200     // This always goes to slow path since we expect it to be rare.
201     addSlowCase(jump());
202 }
203     
204 void JIT::emit_op_is_empty(const Instruction* currentInstruction)
205 {
206     auto bytecode = currentInstruction->as<OpIsEmpty>();
207     int dst = bytecode.m_dst.offset();
208     int value = bytecode.m_operand.offset();
209
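    // Editor's note: JSValue() is the empty value, which encodes to 0 in the 64-bit representation,
    // so the comparison below is effectively a test of the register against zero.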
210     emitGetVirtualRegister(value, regT0);
211     compare64(Equal, regT0, TrustedImm32(JSValue::encode(JSValue())), regT0);
212
213     boxBoolean(regT0, JSValueRegs { regT0 });
214     emitPutVirtualRegister(dst);
215 }
216
217 void JIT::emit_op_is_undefined(const Instruction* currentInstruction)
218 {
219     auto bytecode = currentInstruction->as<OpIsUndefined>();
220     int dst = bytecode.m_dst.offset();
221     int value = bytecode.m_operand.offset();
222     
223     emitGetVirtualRegister(value, regT0);
224     Jump isCell = branchIfCell(regT0);
225
226     compare64(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
227     Jump done = jump();
228     
229     isCell.link(this);
230     Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
231     move(TrustedImm32(0), regT0);
232     Jump notMasqueradesAsUndefined = jump();
233
234     isMasqueradesAsUndefined.link(this);
235     emitLoadStructure(*vm(), regT0, regT1, regT2);
236     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
237     loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
238     comparePtr(Equal, regT0, regT1, regT0);
239
240     notMasqueradesAsUndefined.link(this);
241     done.link(this);
242     boxBoolean(regT0, JSValueRegs { regT0 });
243     emitPutVirtualRegister(dst);
244 }
245
246 void JIT::emit_op_is_undefined_or_null(const Instruction* currentInstruction)
247 {
248     auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
249     int dst = bytecode.m_dst.offset();
250     int value = bytecode.m_operand.offset();
251
252     emitGetVirtualRegister(value, regT0);
253
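    // Editor's note (assumed JSVALUE64 encoding): undefined is encoded as ValueNull | TagBitUndefined,
    // so clearing TagBitUndefined maps both undefined and null onto ValueNull and a single comparison
    // covers them.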
254     and64(TrustedImm32(~TagBitUndefined), regT0);
255     compare64(Equal, regT0, TrustedImm32(ValueNull), regT0);
256
257     boxBoolean(regT0, JSValueRegs { regT0 });
258     emitPutVirtualRegister(dst);
259 }
260
261 void JIT::emit_op_is_boolean(const Instruction* currentInstruction)
262 {
263     auto bytecode = currentInstruction->as<OpIsBoolean>();
264     int dst = bytecode.m_dst.offset();
265     int value = bytecode.m_operand.offset();
266     
267     emitGetVirtualRegister(value, regT0);
268     xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
269     test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
270     boxBoolean(regT0, JSValueRegs { regT0 });
271     emitPutVirtualRegister(dst);
272 }
273
274 void JIT::emit_op_is_number(const Instruction* currentInstruction)
275 {
276     auto bytecode = currentInstruction->as<OpIsNumber>();
277     int dst = bytecode.m_dst.offset();
278     int value = bytecode.m_operand.offset();
279     
280     emitGetVirtualRegister(value, regT0);
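    // Editor's note (assumed encoding): tagTypeNumberRegister holds the TagTypeNumber mask. Under the
    // JSVALUE64 NaN-boxing scheme int32s carry all of those high bits and doubles are offset by 2^48 so
    // they carry at least one of them, hence "any of those bits set" means the value is a number.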
281     test64(NonZero, regT0, tagTypeNumberRegister, regT0);
282     boxBoolean(regT0, JSValueRegs { regT0 });
283     emitPutVirtualRegister(dst);
284 }
285
286 void JIT::emit_op_is_cell_with_type(const Instruction* currentInstruction)
287 {
288     auto bytecode = currentInstruction->as<OpIsCellWithType>();
289     int dst = bytecode.m_dst.offset();
290     int value = bytecode.m_operand.offset();
291     int type = bytecode.m_type;
292
293     emitGetVirtualRegister(value, regT0);
294     Jump isNotCell = branchIfNotCell(regT0);
295
296     compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0);
297     boxBoolean(regT0, JSValueRegs { regT0 });
298     Jump done = jump();
299
300     isNotCell.link(this);
301     move(TrustedImm32(ValueFalse), regT0);
302
303     done.link(this);
304     emitPutVirtualRegister(dst);
305 }
306
307 void JIT::emit_op_is_object(const Instruction* currentInstruction)
308 {
309     auto bytecode = currentInstruction->as<OpIsObject>();
310     int dst = bytecode.m_dst.offset();
311     int value = bytecode.m_operand.offset();
312
313     emitGetVirtualRegister(value, regT0);
314     Jump isNotCell = branchIfNotCell(regT0);
315
316     compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
317     boxBoolean(regT0, JSValueRegs { regT0 });
318     Jump done = jump();
319
320     isNotCell.link(this);
321     move(TrustedImm32(ValueFalse), regT0);
322
323     done.link(this);
324     emitPutVirtualRegister(dst);
325 }
326
327 void JIT::emit_op_ret(const Instruction* currentInstruction)
328 {
329     ASSERT(callFrameRegister != regT1);
330     ASSERT(regT1 != returnValueGPR);
331     ASSERT(returnValueGPR != callFrameRegister);
332
333     // Return the result in the return value register.
334     auto bytecode = currentInstruction->as<OpRet>();
335     emitGetVirtualRegister(bytecode.m_value.offset(), returnValueGPR);
336
337     checkStackPointerAlignment();
338     emitRestoreCalleeSaves();
339     emitFunctionEpilogue();
340     ret();
341 }
342
343 void JIT::emit_op_to_primitive(const Instruction* currentInstruction)
344 {
345     auto bytecode = currentInstruction->as<OpToPrimitive>();
346     int dst = bytecode.m_dst.offset();
347     int src = bytecode.m_src.offset();
348
349     emitGetVirtualRegister(src, regT0);
350     
351     Jump isImm = branchIfNotCell(regT0);
352     addSlowCase(branchIfObject(regT0));
353     isImm.link(this);
354
355     if (dst != src)
356         emitPutVirtualRegister(dst);
357
358 }
359
360 void JIT::emit_op_set_function_name(const Instruction* currentInstruction)
361 {
362     auto bytecode = currentInstruction->as<OpSetFunctionName>();
363     emitGetVirtualRegister(bytecode.m_function.offset(), regT0);
364     emitGetVirtualRegister(bytecode.m_name.offset(), regT1);
365     callOperation(operationSetFunctionName, regT0, regT1);
366 }
367
368 void JIT::emit_op_not(const Instruction* currentInstruction)
369 {
370     auto bytecode = currentInstruction->as<OpNot>();
371     emitGetVirtualRegister(bytecode.m_operand.offset(), regT0);
372
373     // Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be
374     // clear other than the low bit (which will be 0 or 1 for false or true inputs respectively).
375     // Then invert against JSValue(true), which will add the tag back in, and flip the low bit.
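    // Editor's note (worked example, assuming the usual JSVALUE64 tags ValueFalse = 0x06, ValueTrue = 0x07):
    //   false: 0x06 ^ 0x06 = 0x00; true: 0x07 ^ 0x06 = 0x01. Anything with bits set outside the low bit
    //   after the first xor was not a boolean and goes to the slow case. The second xor with 0x07 turns
    //   0x00 into 0x07 (true) and 0x01 into 0x06 (false), i.e. the boxed logical negation.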
376     xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
377     addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1))));
378     xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
379
380     emitPutVirtualRegister(bytecode.m_dst.offset());
381 }
382
383 void JIT::emit_op_jfalse(const Instruction* currentInstruction)
384 {
385     auto bytecode = currentInstruction->as<OpJfalse>();
386     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
387
388     GPRReg value = regT0;
389     GPRReg scratch1 = regT1;
390     GPRReg scratch2 = regT2;
391     bool shouldCheckMasqueradesAsUndefined = true;
392
393     emitGetVirtualRegister(bytecode.m_condition.offset(), value);
394     addJump(branchIfFalsey(*vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
395 }
396
397 void JIT::emit_op_jeq_null(const Instruction* currentInstruction)
398 {
399     auto bytecode = currentInstruction->as<OpJeqNull>();
400     int src = bytecode.m_value.offset();
401     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
402
403     emitGetVirtualRegister(src, regT0);
404     Jump isImmediate = branchIfNotCell(regT0);
405
406     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
407     Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
408     emitLoadStructure(*vm(), regT0, regT2, regT1);
409     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
410     addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
411     Jump masqueradesGlobalObjectIsForeign = jump();
412
413     // Now handle the immediate cases - undefined & null
414     isImmediate.link(this);
415     and64(TrustedImm32(~TagBitUndefined), regT0);
416     addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);            
417
418     isNotMasqueradesAsUndefined.link(this);
419     masqueradesGlobalObjectIsForeign.link(this);
420 }
421 void JIT::emit_op_jneq_null(const Instruction* currentInstruction)
422 {
423     auto bytecode = currentInstruction->as<OpJneqNull>();
424     int src = bytecode.m_value.offset();
425     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
426
427     emitGetVirtualRegister(src, regT0);
428     Jump isImmediate = branchIfNotCell(regT0);
429
430     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
431     addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
432     emitLoadStructure(*vm(), regT0, regT2, regT1);
433     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
434     addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
435     Jump wasNotImmediate = jump();
436
437     // Now handle the immediate cases - undefined & null
438     isImmediate.link(this);
439     and64(TrustedImm32(~TagBitUndefined), regT0);
440     addJump(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);            
441
442     wasNotImmediate.link(this);
443 }
444
445 void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction)
446 {
447     auto bytecode = currentInstruction->as<OpJneqPtr>();
448     auto& metadata = bytecode.metadata(m_codeBlock);
449     int src = bytecode.m_value.offset();
450     Special::Pointer ptr = bytecode.m_specialPointer;
451     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
452     
453     emitGetVirtualRegister(src, regT0);
454     CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr)));
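    // Editor's note: record that this pointer check failed at least once; the optimizing tiers presumably
    // consult m_hasJumped before assuming the operand is always the special pointer.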
455     store8(TrustedImm32(1), &metadata.m_hasJumped);
456     addJump(jump(), target);
457     equal.link(this);
458 }
459
460 void JIT::emit_op_eq(const Instruction* currentInstruction)
461 {
462     auto bytecode = currentInstruction->as<OpEq>();
463     emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1);
464     emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
465     compare32(Equal, regT1, regT0, regT0);
466     boxBoolean(regT0, JSValueRegs { regT0 });
467     emitPutVirtualRegister(bytecode.m_dst.offset());
468 }
469
470 void JIT::emit_op_jeq(const Instruction* currentInstruction)
471 {
472     auto bytecode = currentInstruction->as<OpJeq>();
473     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
474     emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1);
475     emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
476     addJump(branch32(Equal, regT0, regT1), target);
477 }
478
479 void JIT::emit_op_jtrue(const Instruction* currentInstruction)
480 {
481     auto bytecode = currentInstruction->as<OpJtrue>();
482     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
483
484     GPRReg value = regT0;
485     GPRReg scratch1 = regT1;
486     GPRReg scratch2 = regT2;
487     bool shouldCheckMasqueradesAsUndefined = true;
488     emitGetVirtualRegister(bytecode.m_condition.offset(), value);
489     addJump(branchIfTruthy(*vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
490 }
491
492 void JIT::emit_op_neq(const Instruction* currentInstruction)
493 {
494     auto bytecode = currentInstruction->as<OpNeq>();
495     emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1);
496     emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
497     compare32(NotEqual, regT1, regT0, regT0);
498     boxBoolean(regT0, JSValueRegs { regT0 });
499
500     emitPutVirtualRegister(bytecode.m_dst.offset());
501 }
502
503 void JIT::emit_op_jneq(const Instruction* currentInstruction)
504 {
505     auto bytecode = currentInstruction->as<OpJneq>();
506     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
507     emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1);
508     emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
509     addJump(branch32(NotEqual, regT0, regT1), target);
510 }
511
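// Editor's note: op_throw spills the callee-saved registers into the entry frame buffer, hands the thrown
// value to operationThrow (which records the exception on the VM), and then jumps straight to the exception
// handler machinery; control never falls through to the next bytecode.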
512 void JIT::emit_op_throw(const Instruction* currentInstruction)
513 {
514     auto bytecode = currentInstruction->as<OpThrow>();
515     ASSERT(regT0 == returnValueGPR);
516     copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
517     emitGetVirtualRegister(bytecode.m_value.offset(), regT0);
518     callOperationNoExceptionCheck(operationThrow, regT0);
519     jumpToExceptionHandler(*vm());
520 }
521
522 template<typename Op>
523 void JIT::compileOpStrictEq(const Instruction* currentInstruction, CompileOpStrictEqType type)
524 {
525     auto bytecode = currentInstruction->as<Op>();
526     int dst = bytecode.m_dst.offset();
527     int src1 = bytecode.m_lhs.offset();
528     int src2 = bytecode.m_rhs.offset();
529
530     emitGetVirtualRegisters(src1, regT0, src2, regT1);
531     
532     // Jump slow if both are cells (to cover strings).
533     move(regT0, regT2);
534     or64(regT1, regT2);
535     addSlowCase(branchIfCell(regT2));
536     
537     // Jump slow if either is a double. First test if it's an integer, which is fine, and then test
538     // if it's a double.
539     Jump leftOK = branchIfInt32(regT0);
540     addSlowCase(branchIfNumber(regT0));
541     leftOK.link(this);
542     Jump rightOK = branchIfInt32(regT1);
543     addSlowCase(branchIfNumber(regT1));
544     rightOK.link(this);
545
546     if (type == CompileOpStrictEqType::StrictEq)
547         compare64(Equal, regT1, regT0, regT0);
548     else
549         compare64(NotEqual, regT1, regT0, regT0);
550     boxBoolean(regT0, JSValueRegs { regT0 });
551
552     emitPutVirtualRegister(dst);
553 }
554
555 void JIT::emit_op_stricteq(const Instruction* currentInstruction)
556 {
557     compileOpStrictEq<OpStricteq>(currentInstruction, CompileOpStrictEqType::StrictEq);
558 }
559
560 void JIT::emit_op_nstricteq(const Instruction* currentInstruction)
561 {
562     compileOpStrictEq<OpNstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq);
563 }
564
565 template<typename Op>
566 void JIT::compileOpStrictEqJump(const Instruction* currentInstruction, CompileOpStrictEqType type)
567 {
568     auto bytecode = currentInstruction->as<Op>();
569     int target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
570     int src1 = bytecode.m_lhs.offset();
571     int src2 = bytecode.m_rhs.offset();
572
573     emitGetVirtualRegisters(src1, regT0, src2, regT1);
574
575     // Jump slow if both are cells (to cover strings).
576     move(regT0, regT2);
577     or64(regT1, regT2);
578     addSlowCase(branchIfCell(regT2));
579
580     // Jump slow if either is a double. First test if it's an integer, which is fine, and then test
581     // if it's a double.
582     Jump leftOK = branchIfInt32(regT0);
583     addSlowCase(branchIfNumber(regT0));
584     leftOK.link(this);
585     Jump rightOK = branchIfInt32(regT1);
586     addSlowCase(branchIfNumber(regT1));
587     rightOK.link(this);
588
589     if (type == CompileOpStrictEqType::StrictEq)
590         addJump(branch64(Equal, regT1, regT0), target);
591     else
592         addJump(branch64(NotEqual, regT1, regT0), target);
593 }
594
595 void JIT::emit_op_jstricteq(const Instruction* currentInstruction)
596 {
597     compileOpStrictEqJump<OpJstricteq>(currentInstruction, CompileOpStrictEqType::StrictEq);
598 }
599
600 void JIT::emit_op_jnstricteq(const Instruction* currentInstruction)
601 {
602     compileOpStrictEqJump<OpJnstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq);
603 }
604
605 void JIT::emitSlow_op_jstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
606 {
607     linkAllSlowCases(iter);
608
609     auto bytecode = currentInstruction->as<OpJstricteq>();
610     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
611     callOperation(operationCompareStrictEq, regT0, regT1);
612     emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
613 }
614
615 void JIT::emitSlow_op_jnstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
616 {
617     linkAllSlowCases(iter);
618
619     auto bytecode = currentInstruction->as<OpJnstricteq>();
620     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
621     callOperation(operationCompareStrictEq, regT0, regT1);
622     emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
623 }
624
625 void JIT::emit_op_to_number(const Instruction* currentInstruction)
626 {
627     auto bytecode = currentInstruction->as<OpToNumber>();
628     int dstVReg = bytecode.m_dst.offset();
629     int srcVReg = bytecode.m_operand.offset();
630     emitGetVirtualRegister(srcVReg, regT0);
631     
632     addSlowCase(branchIfNotNumber(regT0));
633
634     emitValueProfilingSite(bytecode.metadata(m_codeBlock));
635     if (srcVReg != dstVReg)
636         emitPutVirtualRegister(dstVReg);
637 }
638
639 void JIT::emit_op_to_string(const Instruction* currentInstruction)
640 {
641     auto bytecode = currentInstruction->as<OpToString>();
642     int srcVReg = bytecode.m_operand.offset();
643     emitGetVirtualRegister(srcVReg, regT0);
644
645     addSlowCase(branchIfNotCell(regT0));
646     addSlowCase(branchIfNotString(regT0));
647
648     emitPutVirtualRegister(bytecode.m_dst.offset());
649 }
650
651 void JIT::emit_op_to_object(const Instruction* currentInstruction)
652 {
653     auto bytecode = currentInstruction->as<OpToObject>();
654     int dstVReg = bytecode.m_dst.offset();
655     int srcVReg = bytecode.m_operand.offset();
656     emitGetVirtualRegister(srcVReg, regT0);
657
658     addSlowCase(branchIfNotCell(regT0));
659     addSlowCase(branchIfNotObject(regT0));
660
661     emitValueProfilingSite(bytecode.metadata(m_codeBlock));
662     if (srcVReg != dstVReg)
663         emitPutVirtualRegister(dstVReg);
664 }
665
666 void JIT::emit_op_catch(const Instruction* currentInstruction)
667 {
668     auto bytecode = currentInstruction->as<OpCatch>();
669
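    // Editor's note, summarizing the steps below: restore the callee saves that the throw path buffered,
    // reload the call frame pointer that the unwinder stashed in VM::callFrameForCatch, re-establish the
    // stack pointer for this code block, bail back out to the exception handler if
    // operationCheckIfExceptionIsUncatchableAndNotifyProfiler says the exception is uncatchable, then move
    // the Exception object and its thrown value into the catch's operands. With the DFG enabled, it finally
    // asks the runtime whether an optimized catch entrypoint is available and OSR-enters it.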
670     restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
671
672     move(TrustedImmPtr(m_vm), regT3);
673     load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
674     storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
675
676     addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
677
678     callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
679     Jump isCatchableException = branchTest32(Zero, returnValueGPR);
680     jumpToExceptionHandler(*vm());
681     isCatchableException.link(this);
682
683     move(TrustedImmPtr(m_vm), regT3);
684     load64(Address(regT3, VM::exceptionOffset()), regT0);
685     store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset()));
686     emitPutVirtualRegister(bytecode.m_exception.offset());
687
688     load64(Address(regT0, Exception::valueOffset()), regT0);
689     emitPutVirtualRegister(bytecode.m_thrownValue.offset());
690
691 #if ENABLE(DFG_JIT)
692     // FIXME: consider inline caching the process of doing OSR entry, including
693     // argument type proofs, storing locals to the buffer, etc
694     // https://bugs.webkit.org/show_bug.cgi?id=175598
695
696     auto& metadata = bytecode.metadata(m_codeBlock);
697     ValueProfileAndOperandBuffer* buffer = metadata.m_buffer;
698     if (buffer || !shouldEmitProfiling())
699         callOperation(operationTryOSREnterAtCatch, m_bytecodeOffset);
700     else
701         callOperation(operationTryOSREnterAtCatchAndValueProfile, m_bytecodeOffset);
702     auto skipOSREntry = branchTestPtr(Zero, returnValueGPR);
703     emitRestoreCalleeSaves();
704     jump(returnValueGPR, ExceptionHandlerPtrTag);
705     skipOSREntry.link(this);
706     if (buffer && shouldEmitProfiling()) {
707         buffer->forEach([&] (ValueProfileAndOperand& profile) {
708             JSValueRegs regs(regT0);
709             emitGetVirtualRegister(profile.m_operand, regs);
710             emitValueProfilingSite(profile.m_profile);
711         });
712     }
713 #endif // ENABLE(DFG_JIT)
714 }
715
716 void JIT::emit_op_identity_with_profile(const Instruction*)
717 {
718     // We don't need to do anything here...
719 }
720
721 void JIT::emit_op_get_parent_scope(const Instruction* currentInstruction)
722 {
723     auto bytecode = currentInstruction->as<OpGetParentScope>();
724     int currentScope = bytecode.m_scope.offset();
725     emitGetVirtualRegister(currentScope, regT0);
726     loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
727     emitStoreCell(bytecode.m_dst.offset(), regT0);
728 }
729
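// Editor's note: the baseline JIT does not inline any of the switch dispatch. Each op_switch_* records a
// SwitchRecord (so the jump table can be linked later), then calls the *WithUnknownKeyType operation, which
// returns the machine-code address of the matching case (or the default), and the code jumps there.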
730 void JIT::emit_op_switch_imm(const Instruction* currentInstruction)
731 {
732     auto bytecode = currentInstruction->as<OpSwitchImm>();
733     size_t tableIndex = bytecode.m_tableIndex;
734     unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
735     unsigned scrutinee = bytecode.m_scrutinee.offset();
736
737     // create jump table for switch destinations, track this switch statement.
738     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
739     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
740     jumpTable->ensureCTITable();
741
742     emitGetVirtualRegister(scrutinee, regT0);
743     callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
744     jump(returnValueGPR, JSSwitchPtrTag);
745 }
746
747 void JIT::emit_op_switch_char(const Instruction* currentInstruction)
748 {
749     auto bytecode = currentInstruction->as<OpSwitchChar>();
750     size_t tableIndex = bytecode.m_tableIndex;
751     unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
752     unsigned scrutinee = bytecode.m_scrutinee.offset();
753
754     // create jump table for switch destinations, track this switch statement.
755     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
756     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
757     jumpTable->ensureCTITable();
758
759     emitGetVirtualRegister(scrutinee, regT0);
760     callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
761     jump(returnValueGPR, JSSwitchPtrTag);
762 }
763
764 void JIT::emit_op_switch_string(const Instruction* currentInstruction)
765 {
766     auto bytecode = currentInstruction->as<OpSwitchString>();
767     size_t tableIndex = bytecode.m_tableIndex;
768     unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
769     unsigned scrutinee = bytecode.m_scrutinee.offset();
770
771     // create jump table for switch destinations, track this switch statement.
772     StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
773     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
774
775     emitGetVirtualRegister(scrutinee, regT0);
776     callOperation(operationSwitchStringWithUnknownKeyType, regT0, tableIndex);
777     jump(returnValueGPR, JSSwitchPtrTag);
778 }
779
780 void JIT::emit_op_debug(const Instruction* currentInstruction)
781 {
782     auto bytecode = currentInstruction->as<OpDebug>();
783     load32(codeBlock()->debuggerRequestsAddress(), regT0);
784     Jump noDebuggerRequests = branchTest32(Zero, regT0);
785     callOperation(operationDebug, static_cast<int>(bytecode.m_debugHookType));
786     noDebuggerRequests.link(this);
787 }
788
789 void JIT::emit_op_eq_null(const Instruction* currentInstruction)
790 {
791     auto bytecode = currentInstruction->as<OpEqNull>();
792     int dst = bytecode.m_dst.offset();
793     int src1 = bytecode.m_operand.offset();
794
795     emitGetVirtualRegister(src1, regT0);
796     Jump isImmediate = branchIfNotCell(regT0);
797
798     Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
799     move(TrustedImm32(0), regT0);
800     Jump wasNotMasqueradesAsUndefined = jump();
801
802     isMasqueradesAsUndefined.link(this);
803     emitLoadStructure(*vm(), regT0, regT2, regT1);
804     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
805     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
806     comparePtr(Equal, regT0, regT2, regT0);
807     Jump wasNotImmediate = jump();
808
809     isImmediate.link(this);
810
811     and64(TrustedImm32(~TagBitUndefined), regT0);
812     compare64(Equal, regT0, TrustedImm32(ValueNull), regT0);
813
814     wasNotImmediate.link(this);
815     wasNotMasqueradesAsUndefined.link(this);
816
817     boxBoolean(regT0, JSValueRegs { regT0 });
818     emitPutVirtualRegister(dst);
819
820 }
821
822 void JIT::emit_op_neq_null(const Instruction* currentInstruction)
823 {
824     auto bytecode = currentInstruction->as<OpNeqNull>();
825     int dst = bytecode.m_dst.offset();
826     int src1 = bytecode.m_operand.offset();
827
828     emitGetVirtualRegister(src1, regT0);
829     Jump isImmediate = branchIfNotCell(regT0);
830
831     Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
832     move(TrustedImm32(1), regT0);
833     Jump wasNotMasqueradesAsUndefined = jump();
834
835     isMasqueradesAsUndefined.link(this);
836     emitLoadStructure(*vm(), regT0, regT2, regT1);
837     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
838     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
839     comparePtr(NotEqual, regT0, regT2, regT0);
840     Jump wasNotImmediate = jump();
841
842     isImmediate.link(this);
843
844     and64(TrustedImm32(~TagBitUndefined), regT0);
845     compare64(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
846
847     wasNotImmediate.link(this);
848     wasNotMasqueradesAsUndefined.link(this);
849
850     boxBoolean(regT0, JSValueRegs { regT0 });
851     emitPutVirtualRegister(dst);
852 }
853
854 void JIT::emit_op_enter(const Instruction*)
855 {
856     // Even though CTI doesn't use them, we initialize our constant
857     // registers to zap stale pointers, to avoid unnecessarily prolonging
858     // object lifetime and increasing GC pressure.
859     size_t count = m_codeBlock->numVars();
860     for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j)
861         emitInitRegister(virtualRegisterForLocal(j).offset());
862
863     emitWriteBarrier(m_codeBlock);
864
865     emitEnterOptimizationCheck();
866 }
867
868 void JIT::emit_op_get_scope(const Instruction* currentInstruction)
869 {
870     auto bytecode = currentInstruction->as<OpGetScope>();
871     int dst = bytecode.m_dst.offset();
872     emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0);
873     loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
874     emitStoreCell(dst, regT0);
875 }
876
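// Editor's note: the fast path for op_to_this only covers the common case where |this| is already a final
// object whose StructureID matches the structure cached in the metadata; every other case (non-cell,
// non-final-object, empty cache, or cache miss) is punted to the slow path.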
877 void JIT::emit_op_to_this(const Instruction* currentInstruction)
878 {
879     auto bytecode = currentInstruction->as<OpToThis>();
880     auto& metadata = bytecode.metadata(m_codeBlock);
881     WriteBarrierBase<Structure>* cachedStructure = &metadata.m_cachedStructure;
882     emitGetVirtualRegister(bytecode.m_srcDst.offset(), regT1);
883
884     emitJumpSlowCaseIfNotJSCell(regT1);
885
886     addSlowCase(branchIfNotType(regT1, FinalObjectType));
887     loadPtr(cachedStructure, regT2);
888     addSlowCase(branchTestPtr(Zero, regT2));
889     load32(Address(regT2, Structure::structureIDOffset()), regT2);
890     addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
891 }
892
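// Editor's note: op_create_this tries to allocate the new object inline using the callee's
// ObjectAllocationProfile (the allocator and structure are fetched from the function's rare data, which is
// poisoned and must be unpoisoned with the JSFunctionPoison key). A mismatch against the metadata's cached
// callee forces the slow path unless the cache already records seenMultipleCalleeObjects(), presumably so
// the profiling tiers can tell whether this site is monomorphic in its callee.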
893 void JIT::emit_op_create_this(const Instruction* currentInstruction)
894 {
895     auto bytecode = currentInstruction->as<OpCreateThis>();
896     auto& metadata = bytecode.metadata(m_codeBlock);
897     int callee = bytecode.m_callee.offset();
898     WriteBarrierBase<JSCell>* cachedFunction = &metadata.m_cachedCallee;
899     RegisterID calleeReg = regT0;
900     RegisterID rareDataReg = regT4;
901     RegisterID resultReg = regT0;
902     RegisterID allocatorReg = regT1;
903     RegisterID structureReg = regT2;
904     RegisterID cachedFunctionReg = regT4;
905     RegisterID scratchReg = regT3;
906
907     emitGetVirtualRegister(callee, calleeReg);
908     addSlowCase(branchIfNotFunction(calleeReg));
909     loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
910     addSlowCase(branchTestPtr(Zero, rareDataReg));
911     xorPtr(TrustedImmPtr(JSFunctionPoison::key()), rareDataReg);
912     loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
913     loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
914
915     loadPtr(cachedFunction, cachedFunctionReg);
916     Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
917     addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
918     hasSeenMultipleCallees.link(this);
919
920     JumpList slowCases;
921     auto butterfly = TrustedImmPtr(nullptr);
922     emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, scratchReg, slowCases);
923     emitGetVirtualRegister(callee, scratchReg);
924     loadPtr(Address(scratchReg, JSFunction::offsetOfRareData()), scratchReg);
925     xorPtr(TrustedImmPtr(JSFunctionPoison::key()), scratchReg);
926     load32(Address(scratchReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfInlineCapacity()), scratchReg);
927     emitInitializeInlineStorage(resultReg, scratchReg);
928     addSlowCase(slowCases);
929     emitPutVirtualRegister(bytecode.m_dst.offset());
930 }
931
932 void JIT::emit_op_check_tdz(const Instruction* currentInstruction)
933 {
934     auto bytecode = currentInstruction->as<OpCheckTdz>();
935     emitGetVirtualRegister(bytecode.m_targetVirtualRegister.offset(), regT0);
936     addSlowCase(branchIfEmpty(regT0));
937 }
938
939
940 // Slow cases
941
942 void JIT::emitSlow_op_eq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
943 {
944     linkAllSlowCases(iter);
945
946     auto bytecode = currentInstruction->as<OpEq>();
947     callOperation(operationCompareEq, regT0, regT1);
948     boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
949     emitPutVirtualRegister(bytecode.m_dst.offset(), returnValueGPR);
950 }
951
952 void JIT::emitSlow_op_neq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
953 {
954     linkAllSlowCases(iter);
955
956     auto bytecode = currentInstruction->as<OpNeq>();
957     callOperation(operationCompareEq, regT0, regT1);
958     xor32(TrustedImm32(0x1), regT0);
959     boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
960     emitPutVirtualRegister(bytecode.m_dst.offset(), returnValueGPR);
961 }
962
963 void JIT::emitSlow_op_jeq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
964 {
965     linkAllSlowCases(iter);
966
967     auto bytecode = currentInstruction->as<OpJeq>();
968     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
969     callOperation(operationCompareEq, regT0, regT1);
970     emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
971 }
972
973 void JIT::emitSlow_op_jneq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
974 {
975     linkAllSlowCases(iter);
976
977     auto bytecode = currentInstruction->as<OpJneq>();
978     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
979     callOperation(operationCompareEq, regT0, regT1);
980     emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
981 }
982
983 void JIT::emitSlow_op_instanceof_custom(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
984 {
985     linkAllSlowCases(iter);
986
987     auto bytecode = currentInstruction->as<OpInstanceofCustom>();
988     int dst = bytecode.m_dst.offset();
989     int value = bytecode.m_value.offset();
990     int constructor = bytecode.m_constructor.offset();
991     int hasInstanceValue = bytecode.m_hasInstanceValue.offset();
992
993     emitGetVirtualRegister(value, regT0);
994     emitGetVirtualRegister(constructor, regT1);
995     emitGetVirtualRegister(hasInstanceValue, regT2);
996     callOperation(operationInstanceOfCustom, regT0, regT1, regT2);
997     boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
998     emitPutVirtualRegister(dst, returnValueGPR);
999 }
1000
1001 #endif // USE(JSVALUE64)
1002
1003 void JIT::emit_op_loop_hint(const Instruction*)
1004 {
1005     // Emit the JIT optimization check: 
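    // Editor's note: the execution counter starts out negative and is bumped by
    // Options::executionCounterIncrementForLoop() at every loop head; when the add makes it reach zero or
    // go positive, the slow case fires and asks operationOptimize for a DFG entrypoint
    // (see emitSlow_op_loop_hint below).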
1006     if (canBeOptimized()) {
1007         addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
1008             AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
1009     }
1010 }
1011
1012 void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1013 {
1014 #if ENABLE(DFG_JIT)
1015     // Emit the slow path for the JIT optimization check:
1016     if (canBeOptimized()) {
1017         linkAllSlowCases(iter);
1018
1019         copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
1020
1021         callOperation(operationOptimize, m_bytecodeOffset);
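        // Editor's note: operationOptimize returns either null (keep running baseline code) or the address
        // of an OSR entry point; the assertion path below checks that a non-null result is not a tiny bogus
        // integer before jumping to it.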
1022         Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
1023         if (!ASSERT_DISABLED) {
1024             Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
1025             abortWithReason(JITUnreasonableLoopHintJumpTarget);
1026             ok.link(this);
1027         }
1028         jump(returnValueGPR, GPRInfo::callFrameRegister);
1029         noOptimizedEntry.link(this);
1030
1031         emitJumpSlowToHot(jump(), currentInstruction->size());
1032     }
1033 #else
1034     UNUSED_PARAM(currentInstruction);
1035     UNUSED_PARAM(iter);
1036 #endif
1037 }
1038
1039 void JIT::emit_op_check_traps(const Instruction*)
1040 {
1041     addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->needTrapHandlingAddress())));
1042 }
1043
1044 void JIT::emit_op_nop(const Instruction*)
1045 {
1046 }
1047
1048 void JIT::emit_op_super_sampler_begin(const Instruction*)
1049 {
1050     add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount)));
1051 }
1052
1053 void JIT::emit_op_super_sampler_end(const Instruction*)
1054 {
1055     sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount)));
1056 }
1057
1058 void JIT::emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator& iter)
1059 {
1060     linkAllSlowCases(iter);
1061
1062     callOperation(operationHandleTraps);
1063 }
1064
1065 void JIT::emit_op_new_regexp(const Instruction* currentInstruction)
1066 {
1067     auto bytecode = currentInstruction->as<OpNewRegexp>();
1068     int dst = bytecode.m_dst.offset();
1069     int regexp = bytecode.m_regexp.offset();
1070     callOperation(operationNewRegexp, jsCast<RegExp*>(m_codeBlock->getConstant(regexp)));
1071     emitStoreCell(dst, returnValueGPR);
1072 }
1073
1074 template<typename Op>
1075 void JIT::emitNewFuncCommon(const Instruction* currentInstruction)
1076 {
1077     Jump lazyJump;
1078     auto bytecode = currentInstruction->as<Op>();
1079     int dst = bytecode.m_dst.offset();
1080
1081 #if USE(JSVALUE64)
1082     emitGetVirtualRegister(bytecode.m_scope.offset(), regT0);
1083 #else
1084     emitLoadPayload(bytecode.m_scope.offset(), regT0);
1085 #endif
1086     FunctionExecutable* funcExec = m_codeBlock->functionDecl(bytecode.m_functionDecl);
1087
1088     OpcodeID opcodeID = Op::opcodeID;
1089     if (opcodeID == op_new_func)
1090         callOperation(operationNewFunction, dst, regT0, funcExec);
1091     else if (opcodeID == op_new_generator_func)
1092         callOperation(operationNewGeneratorFunction, dst, regT0, funcExec);
1093     else if (opcodeID == op_new_async_func)
1094         callOperation(operationNewAsyncFunction, dst, regT0, funcExec);
1095     else {
1096         ASSERT(opcodeID == op_new_async_generator_func);
1097         callOperation(operationNewAsyncGeneratorFunction, dst, regT0, funcExec);
1098     }
1099 }
1100
1101 void JIT::emit_op_new_func(const Instruction* currentInstruction)
1102 {
1103     emitNewFuncCommon<OpNewFunc>(currentInstruction);
1104 }
1105
1106 void JIT::emit_op_new_generator_func(const Instruction* currentInstruction)
1107 {
1108     emitNewFuncCommon<OpNewGeneratorFunc>(currentInstruction);
1109 }
1110
1111 void JIT::emit_op_new_async_generator_func(const Instruction* currentInstruction)
1112 {
1113     emitNewFuncCommon<OpNewAsyncGeneratorFunc>(currentInstruction);
1114 }
1115
1116 void JIT::emit_op_new_async_func(const Instruction* currentInstruction)
1117 {
1118     emitNewFuncCommon<OpNewAsyncFunc>(currentInstruction);
1119 }
1120     
1121 template<typename Op>
1122 void JIT::emitNewFuncExprCommon(const Instruction* currentInstruction)
1123 {
1124     auto bytecode = currentInstruction->as<Op>();
1125     int dst = bytecode.m_dst.offset();
1126 #if USE(JSVALUE64)
1127     emitGetVirtualRegister(bytecode.m_scope.offset(), regT0);
1128 #else
1129     emitLoadPayload(bytecode.m_scope.offset(), regT0);
1130 #endif
1131
1132     FunctionExecutable* function = m_codeBlock->functionExpr(bytecode.m_functionDecl);
1133     OpcodeID opcodeID = Op::opcodeID;
1134
1135     if (opcodeID == op_new_func_exp)
1136         callOperation(operationNewFunction, dst, regT0, function);
1137     else if (opcodeID == op_new_generator_func_exp)
1138         callOperation(operationNewGeneratorFunction, dst, regT0, function);
1139     else if (opcodeID == op_new_async_func_exp)
1140         callOperation(operationNewAsyncFunction, dst, regT0, function);
1141     else {
1142         ASSERT(opcodeID == op_new_async_generator_func_exp);
1143         callOperation(operationNewAsyncGeneratorFunction, dst, regT0, function);
1144     }
1145 }
1146
1147 void JIT::emit_op_new_func_exp(const Instruction* currentInstruction)
1148 {
1149     emitNewFuncExprCommon<OpNewFuncExp>(currentInstruction);
1150 }
1151
1152 void JIT::emit_op_new_generator_func_exp(const Instruction* currentInstruction)
1153 {
1154     emitNewFuncExprCommon<OpNewGeneratorFuncExp>(currentInstruction);
1155 }
1156
1157 void JIT::emit_op_new_async_func_exp(const Instruction* currentInstruction)
1158 {
1159     emitNewFuncExprCommon<OpNewAsyncFuncExp>(currentInstruction);
1160 }
1161     
1162 void JIT::emit_op_new_async_generator_func_exp(const Instruction* currentInstruction)
1163 {
1164     emitNewFuncExprCommon<OpNewAsyncGeneratorFuncExp>(currentInstruction);
1165 }
1166     
1167 void JIT::emit_op_new_array(const Instruction* currentInstruction)
1168 {
1169     auto bytecode = currentInstruction->as<OpNewArray>();
1170     auto& metadata = bytecode.metadata(m_codeBlock);
1171     int dst = bytecode.m_dst.offset();
1172     int valuesIndex = bytecode.m_argv.offset();
1173     int size = bytecode.m_argc;
1174     addPtr(TrustedImm32(valuesIndex * sizeof(Register)), callFrameRegister, regT0);
1175     callOperation(operationNewArrayWithProfile, dst,
1176         &metadata.m_arrayAllocationProfile, regT0, size);
1177 }
1178
1179 void JIT::emit_op_new_array_with_size(const Instruction* currentInstruction)
1180 {
1181     auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
1182     auto& metadata = bytecode.metadata(m_codeBlock);
1183     int dst = bytecode.m_dst.offset();
1184     int sizeIndex = bytecode.m_length.offset();
1185 #if USE(JSVALUE64)
1186     emitGetVirtualRegister(sizeIndex, regT0);
1187     callOperation(operationNewArrayWithSizeAndProfile, dst,
1188         &metadata.m_arrayAllocationProfile, regT0);
1189 #else
1190     emitLoad(sizeIndex, regT1, regT0);
1191     callOperation(operationNewArrayWithSizeAndProfile, dst,
1192         &metadata.m_arrayAllocationProfile, JSValueRegs(regT1, regT0));
1193 #endif
1194 }
1195
1196 #if USE(JSVALUE64)
1197 void JIT::emit_op_has_structure_property(const Instruction* currentInstruction)
1198 {
1199     auto bytecode = currentInstruction->as<OpHasStructureProperty>();
1200     int dst = bytecode.m_dst.offset();
1201     int base = bytecode.m_base.offset();
1202     int enumerator = bytecode.m_enumerator.offset();
1203
1204     emitGetVirtualRegister(base, regT0);
1205     emitGetVirtualRegister(enumerator, regT1);
1206     emitJumpSlowCaseIfNotJSCell(regT0, base);
1207
1208     load32(Address(regT0, JSCell::structureIDOffset()), regT0);
1209     addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
1210     
1211     move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
1212     emitPutVirtualRegister(dst);
1213 }
1214
1215 void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
1216 {
1217     const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
1218     
1219     PatchableJump badType;
1220     
1221     // FIXME: Add support for other types like TypedArrays and Arguments.
1222     // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
1223     JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
1224     move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
1225     Jump done = jump();
1226
1227     LinkBuffer patchBuffer(*this, m_codeBlock);
1228     
1229     patchBuffer.link(badType, byValInfo->slowPathTarget);
1230     patchBuffer.link(slowCases, byValInfo->slowPathTarget);
1231
1232     patchBuffer.link(done, byValInfo->badTypeDoneTarget);
1233
1234     byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
1235         m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
1236         "Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
1237     
1238     MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code()));
1239     MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationHasIndexedPropertyGeneric));
1240 }
1241
1242 void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction)
1243 {
1244     auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
1245     auto& metadata = bytecode.metadata(m_codeBlock);
1246     int dst = bytecode.m_dst.offset();
1247     int base = bytecode.m_base.offset();
1248     int property = bytecode.m_property.offset();
1249     ArrayProfile* profile = &metadata.m_arrayProfile;
1250     ByValInfo* byValInfo = m_codeBlock->addByValInfo();
1251     
1252     emitGetVirtualRegisters(base, regT0, property, regT1);
1253
1254     // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
1255     // We check the value as if it was a uint32 against m_vectorLength - which will always fail if the
1256     // number was negative, since m_vectorLength is always less than intmax (the total allocation
1257     // size is always less than 4Gb). As such, zero extending will have been correct (and extending the value
1258     // to 64 bits is necessary since it's used in the address calculation). We zero extend rather than sign
1259     // extend since that makes it easier to re-tag the value in the slow case.
1260     zeroExtend32ToPtr(regT1, regT1);
1261
1262     emitJumpSlowCaseIfNotJSCell(regT0, base);
1263     emitArrayProfilingSiteWithCell(regT0, regT2, profile);
1264     and32(TrustedImm32(IndexingShapeMask), regT2);
1265
1266     JITArrayMode mode = chooseArrayMode(profile);
1267     PatchableJump badType;
1268
1269     // FIXME: Add support for other types like TypedArrays and Arguments.
1270     // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
1271     JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
1272     
1273     move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
1274
1275     addSlowCase(badType);
1276     addSlowCase(slowCases);
1277     
1278     Label done = label();
1279     
1280     emitPutVirtualRegister(dst);
1281
1282     Label nextHotPath = label();
1283     
1284     m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
1285 }
1286
1287 void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1288 {
1289     linkAllSlowCases(iter);
1290
1291     auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
1292     int dst = bytecode.m_dst.offset();
1293     int base = bytecode.m_base.offset();
1294     int property = bytecode.m_property.offset();
1295     ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
1296
1297     Label slowPath = label();
1298     
1299     emitGetVirtualRegister(base, regT0);
1300     emitGetVirtualRegister(property, regT1);
1301     Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo);
1302
1303     m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
1304     m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
1305     m_byValInstructionIndex++;
1306 }
1307
1308 void JIT::emit_op_get_direct_pname(const Instruction* currentInstruction)
1309 {
1310     auto bytecode = currentInstruction->as<OpGetDirectPname>();
1311     int dst = bytecode.m_dst.offset();
1312     int base = bytecode.m_base.offset();
1313     int index = bytecode.m_index.offset();
1314     int enumerator = bytecode.m_enumerator.offset();
1315
1316     // Check that base is a cell
1317     emitGetVirtualRegister(base, regT0);
1318     emitJumpSlowCaseIfNotJSCell(regT0, base);
1319
1320     // Check the structure
1321     emitGetVirtualRegister(enumerator, regT2);
1322     load32(Address(regT0, JSCell::structureIDOffset()), regT1);
1323     addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset())));
1324
1325     // Compute the offset
1326     emitGetVirtualRegister(index, regT1);
1327     // If index is less than the enumerator's cached inline storage, then it's an inline access
1328     Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
1329     addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
1330     signExtend32ToPtr(regT1, regT1);
1331     load64(BaseIndex(regT0, regT1, TimesEight), regT0);
1332     
1333     Jump done = jump();
1334
1335     // Otherwise it's out of line
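    // Editor's note: out-of-line properties are stored in the butterfly at negative offsets from the
    // butterfly pointer. The code below subtracts the inline capacity from the index, negates it, and then
    // indexes from offsetOfFirstProperty (a byte offset derived from firstOutOfLineOffset), so larger
    // property indices land further below the butterfly pointer.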
1336     outOfLineAccess.link(this);
1337     loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
1338     sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1);
1339     neg32(regT1);
1340     signExtend32ToPtr(regT1, regT1);
1341     int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
1342     load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0);
1343     
1344     done.link(this);
1345     emitValueProfilingSite(bytecode.metadata(m_codeBlock));
1346     emitPutVirtualRegister(dst, regT0);
1347 }
1348
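// Editorial sketch (assumption): op_enumerator_structure_pname loads
//
//     dst = index < enumerator->endStructurePropertyIndex()
//         ? enumerator->cachedPropertyNamesVector()[index]
//         : jsNull();
//
// op_enumerator_generic_pname further below is identical except that it bounds-checks against
// endGenericPropertyIndex() instead.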
1349 void JIT::emit_op_enumerator_structure_pname(const Instruction* currentInstruction)
1350 {
1351     auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
1352     int dst = bytecode.m_dst.offset();
1353     int enumerator = bytecode.m_enumerator.offset();
1354     int index = bytecode.m_index.offset();
1355
1356     emitGetVirtualRegister(index, regT0);
1357     emitGetVirtualRegister(enumerator, regT1);
1358     Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
1359
1360     move(TrustedImm64(JSValue::encode(jsNull())), regT0);
1361
1362     Jump done = jump();
1363     inBounds.link(this);
1364
1365     loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
1366     signExtend32ToPtr(regT0, regT0);
1367     load64(BaseIndex(regT1, regT0, TimesEight), regT0);
1368
1369     done.link(this);
1370     emitPutVirtualRegister(dst);
1371 }
1372
1373 void JIT::emit_op_enumerator_generic_pname(const Instruction* currentInstruction)
1374 {
1375     auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
1376     int dst = bytecode.m_dst.offset();
1377     int enumerator = bytecode.m_enumerator.offset();
1378     int index = bytecode.m_index.offset();
1379
1380     emitGetVirtualRegister(index, regT0);
1381     emitGetVirtualRegister(enumerator, regT1);
1382     Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
1383
1384     move(TrustedImm64(JSValue::encode(jsNull())), regT0);
1385
1386     Jump done = jump();
1387     inBounds.link(this);
1388
1389     loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
1390     signExtend32ToPtr(regT0, regT0);
1391     load64(BaseIndex(regT1, regT0, TimesEight), regT0);
1392     
1393     done.link(this);
1394     emitPutVirtualRegister(dst);
1395 }
1396
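// Editorial sketch (assumption): the inlined type-profiling path below appends an entry to the
// VM's TypeProfilerLog without calling into C++ unless the log is full. Empty values are skipped
// outright, and a predictive check against the last seen type lets common cases skip logging:
//
//     if (value is empty || value matches cachedTypeLocation->m_lastSeenType) return;
//     entry = log->currentLogEntry();
//     entry->value = value;
//     entry->structureID = value.isCell() ? value.asCell()->structureID() : 0;
//     entry->location = cachedTypeLocation;
//     if (++log->currentLogEntry() == log->logEndPtr())
//         operationProcessTypeProfilerLog();   // flush and reset when the log fills up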
1397 void JIT::emit_op_profile_type(const Instruction* currentInstruction)
1398 {
1399     auto bytecode = currentInstruction->as<OpProfileType>();
1400     auto& metadata = bytecode.metadata(m_codeBlock);
1401     TypeLocation* cachedTypeLocation = metadata.m_typeLocation;
1402     int valueToProfile = bytecode.m_targetVirtualRegister.offset();
1403
1404     emitGetVirtualRegister(valueToProfile, regT0);
1405
1406     JumpList jumpToEnd;
1407
1408     jumpToEnd.append(branchIfEmpty(regT0));
1409
1410     // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
1411     // These type checks are inlined to mirror the 64-bit JSValue type checks.
1412     if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
1413         jumpToEnd.append(branchIfUndefined(regT0));
1414     else if (cachedTypeLocation->m_lastSeenType == TypeNull)
1415         jumpToEnd.append(branchIfNull(regT0));
1416     else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
1417         jumpToEnd.append(branchIfBoolean(regT0, regT1));
1418     else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt)
1419         jumpToEnd.append(branchIfInt32(regT0));
1420     else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
1421         jumpToEnd.append(branchIfNumber(regT0));
1422     else if (cachedTypeLocation->m_lastSeenType == TypeString) {
1423         Jump isNotCell = branchIfNotCell(regT0);
1424         jumpToEnd.append(branchIfString(regT0));
1425         isNotCell.link(this);
1426     }
1427
1428     // Load the type profiling log into T2.
1429     TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
1430     move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
1431     // Load the next log entry into T1.
1432     loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
1433
1434     // Store the JSValue into the log entry.
1435     store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));
1436
1437     // Store the cell's structureID if T0 is a cell; otherwise store 0 in the log entry.
1438     Jump notCell = branchIfNotCell(regT0);
1439     load32(Address(regT0, JSCell::structureIDOffset()), regT0);
1440     store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
1441     Jump skipIsCell = jump();
1442     notCell.link(this);
1443     store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
1444     skipIsCell.link(this);
1445
1446     // Store the typeLocation on the log entry.
1447     move(TrustedImmPtr(cachedTypeLocation), regT0);
1448     store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
1449
1450     // Increment the current log entry.
1451     addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
1452     store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
1453     Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
1454     // Clear the log if we're at the end of the log.
1455     callOperation(operationProcessTypeProfilerLog);
1456     skipClearLog.link(this);
1457
1458     jumpToEnd.link(this);
1459 }
1460
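// Editorial note (assumption): the shadow chicken opcodes record one "packet" per prologue and
// per tail call in a side log, so the debugger can reconstruct frames that tail calls have
// already overwritten. ensureShadowChickenPacket() hands back a pointer to the next free packet,
// flushing the log through a C++ call when it is full.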
1461 void JIT::emit_op_log_shadow_chicken_prologue(const Instruction* currentInstruction)
1462 {
1463     RELEASE_ASSERT(vm()->shadowChicken());
1464     updateTopCallFrame();
1465     static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "nonArgGPR0 must not alias regT0 or regT2, which are used as scratch registers here.");
1466     auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
1467     GPRReg shadowPacketReg = regT0;
1468     GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
1469     GPRReg scratch2Reg = regT2;
1470     ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
1471     emitGetVirtualRegister(bytecode.m_scope.offset(), regT3);
1472     logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
1473 }
1474
1475 void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction)
1476 {
1477     RELEASE_ASSERT(vm()->shadowChicken());
1478     updateTopCallFrame();
1479     static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "nonArgGPR0 must not alias regT0 or regT2, which are used as scratch registers here.");
1480     auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
1481     GPRReg shadowPacketReg = regT0;
1482     GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
1483     GPRReg scratch2Reg = regT2;
1484     ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
1485     emitGetVirtualRegister(bytecode.m_thisValue.offset(), regT2);
1486     emitGetVirtualRegister(bytecode.m_scope.offset(), regT3);
1487     logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeOffset));
1488 }
1489
1490 #endif // USE(JSVALUE64)
1491
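// Editorial note (assumption): BasicBlockLocation::emitExecuteCode() plants a small stub that
// records that this basic block has been reached; the 32-bit variant needs a scratch GPR to
// perform the store, which is why regT0 is passed on !USE(JSVALUE64).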
1492 void JIT::emit_op_profile_control_flow(const Instruction* currentInstruction)
1493 {
1494     auto bytecode = currentInstruction->as<OpProfileControlFlow>();
1495     auto& metadata = bytecode.metadata(m_codeBlock);
1496     BasicBlockLocation* basicBlockLocation = metadata.m_basicBlockLocation;
1497 #if USE(JSVALUE64)
1498     basicBlockLocation->emitExecuteCode(*this);
1499 #else
1500     basicBlockLocation->emitExecuteCode(*this, regT0);
1501 #endif
1502 }
1503
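// Editorial sketch (assumption): op_argument_count materializes
//
//     dst = jsNumber(argumentCountIncludingThis - 1);
//
// The CallFrameSlot::argumentCount payload includes the `this` slot, hence the sub32 by 1
// before boxing the result.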
1504 void JIT::emit_op_argument_count(const Instruction* currentInstruction)
1505 {
1506     auto bytecode = currentInstruction->as<OpArgumentCount>();
1507     int dst = bytecode.m_dst.offset();
1508     load32(payloadFor(CallFrameSlot::argumentCount), regT0);
1509     sub32(TrustedImm32(1), regT0);
1510     JSValueRegs result = JSValueRegs::withTwoAvailableRegs(regT0, regT1);
1511     boxInt32(regT0, result);
1512     emitPutVirtualRegister(dst, result);
1513 }
1514
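// Editorial sketch (assumption): op_get_rest_length computes the length of a rest parameter
// without touching the arguments themselves:
//
//     restLength = max(0, (argumentCountIncludingThis - 1) - numParametersToSkip);
//     dst = jsNumber(restLength);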
1515 void JIT::emit_op_get_rest_length(const Instruction* currentInstruction)
1516 {
1517     auto bytecode = currentInstruction->as<OpGetRestLength>();
1518     int dst = bytecode.m_dst.offset();
1519     unsigned numParamsToSkip = bytecode.m_numParametersToSkip;
1520     load32(payloadFor(CallFrameSlot::argumentCount), regT0);
1521     sub32(TrustedImm32(1), regT0);
1522     Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip));
1523     sub32(Imm32(numParamsToSkip), regT0);
1524 #if USE(JSVALUE64)
1525     boxInt32(regT0, JSValueRegs(regT0));
1526 #endif
1527     Jump done = jump();
1528
1529     zeroLength.link(this);
1530 #if USE(JSVALUE64)
1531     move(TrustedImm64(JSValue::encode(jsNumber(0))), regT0);
1532 #else
1533     move(TrustedImm32(0), regT0);
1534 #endif
1535
1536     done.link(this);
1537 #if USE(JSVALUE64)
1538     emitPutVirtualRegister(dst, regT0);
1539 #else
1540     move(TrustedImm32(JSValue::Int32Tag), regT1);
1541     emitPutVirtualRegister(dst, JSValueRegs(regT1, regT0));
1542 #endif
1543 }
1544
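// Editorial sketch (assumption): op_get_argument reads a value straight off the call frame,
// counted from the `this` slot, substituting undefined when the caller did not pass it:
//
//     dst = index < argumentCountIncludingThis
//         ? callFrame[CallFrameSlot::thisArgument + index]
//         : jsUndefined();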
1545 void JIT::emit_op_get_argument(const Instruction* currentInstruction)
1546 {
1547     auto bytecode = currentInstruction->as<OpGetArgument>();
1548     int dst = bytecode.m_dst.offset();
1549     int index = bytecode.m_index;
1550 #if USE(JSVALUE64)
1551     JSValueRegs resultRegs(regT0);
1552 #else
1553     JSValueRegs resultRegs(regT1, regT0);
1554 #endif
1555
1556     load32(payloadFor(CallFrameSlot::argumentCount), regT2);
1557     Jump argumentOutOfBounds = branch32(LessThanOrEqual, regT2, TrustedImm32(index));
1558     loadValue(addressFor(CallFrameSlot::thisArgument + index), resultRegs);
1559     Jump done = jump();
1560
1561     argumentOutOfBounds.link(this);
1562     moveValue(jsUndefined(), resultRegs);
1563
1564     done.link(this);
1565     emitValueProfilingSite(bytecode.metadata(m_codeBlock));
1566     emitPutVirtualRegister(dst, resultRegs);
1567 }
1568
1569 } // namespace JSC
1570
1571 #endif // ENABLE(JIT)