AX: Progress: [Mac] Content in label element should be used as AXTitle or AXDescription
[WebKit-https.git] / Source / JavaScriptCore / jit / JITArithmetic.cpp
1 /*
2  * Copyright (C) 2008, 2015-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27
28 #if ENABLE(JIT)
29 #include "JIT.h"
30
31 #include "ArithProfile.h"
32 #include "CodeBlock.h"
33 #include "JITAddGenerator.h"
34 #include "JITBitAndGenerator.h"
35 #include "JITBitOrGenerator.h"
36 #include "JITBitXorGenerator.h"
37 #include "JITDivGenerator.h"
38 #include "JITInlines.h"
39 #include "JITLeftShiftGenerator.h"
40 #include "JITMathIC.h"
41 #include "JITMulGenerator.h"
42 #include "JITNegGenerator.h"
43 #include "JITOperations.h"
44 #include "JITRightShiftGenerator.h"
45 #include "JITSubGenerator.h"
46 #include "JSArray.h"
47 #include "JSFunction.h"
48 #include "Interpreter.h"
49 #include "JSCInlines.h"
50 #include "LinkBuffer.h"
51 #include "ResultType.h"
52 #include "SlowPathCall.h"
53
54 namespace JSC {
55
56 void JIT::emit_op_jless(Instruction* currentInstruction)
57 {
58     int op1 = currentInstruction[1].u.operand;
59     int op2 = currentInstruction[2].u.operand;
60     unsigned target = currentInstruction[3].u.operand;
61
62     emit_compareAndJump(op_jless, op1, op2, target, LessThan);
63 }
64
65 void JIT::emit_op_jlesseq(Instruction* currentInstruction)
66 {
67     int op1 = currentInstruction[1].u.operand;
68     int op2 = currentInstruction[2].u.operand;
69     unsigned target = currentInstruction[3].u.operand;
70
71     emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual);
72 }
73
74 void JIT::emit_op_jgreater(Instruction* currentInstruction)
75 {
76     int op1 = currentInstruction[1].u.operand;
77     int op2 = currentInstruction[2].u.operand;
78     unsigned target = currentInstruction[3].u.operand;
79
80     emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan);
81 }
82
83 void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
84 {
85     int op1 = currentInstruction[1].u.operand;
86     int op2 = currentInstruction[2].u.operand;
87     unsigned target = currentInstruction[3].u.operand;
88
89     emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual);
90 }
91
92 void JIT::emit_op_jnless(Instruction* currentInstruction)
93 {
94     int op1 = currentInstruction[1].u.operand;
95     int op2 = currentInstruction[2].u.operand;
96     unsigned target = currentInstruction[3].u.operand;
97
98     emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual);
99 }
100
101 void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
102 {
103     int op1 = currentInstruction[1].u.operand;
104     int op2 = currentInstruction[2].u.operand;
105     unsigned target = currentInstruction[3].u.operand;
106
107     emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan);
108 }
109
110 void JIT::emit_op_jngreater(Instruction* currentInstruction)
111 {
112     int op1 = currentInstruction[1].u.operand;
113     int op2 = currentInstruction[2].u.operand;
114     unsigned target = currentInstruction[3].u.operand;
115
116     emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual);
117 }
118
119 void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
120 {
121     int op1 = currentInstruction[1].u.operand;
122     int op2 = currentInstruction[2].u.operand;
123     unsigned target = currentInstruction[3].u.operand;
124
125     emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan);
126 }
127
128 void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
129 {
130     int op1 = currentInstruction[1].u.operand;
131     int op2 = currentInstruction[2].u.operand;
132     unsigned target = currentInstruction[3].u.operand;
133
134     emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, operationCompareLess, false, iter);
135 }
136
137 void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
138 {
139     int op1 = currentInstruction[1].u.operand;
140     int op2 = currentInstruction[2].u.operand;
141     unsigned target = currentInstruction[3].u.operand;
142
143     emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, operationCompareLessEq, false, iter);
144 }
145
146 void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
147 {
148     int op1 = currentInstruction[1].u.operand;
149     int op2 = currentInstruction[2].u.operand;
150     unsigned target = currentInstruction[3].u.operand;
151
152     emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, operationCompareGreater, false, iter);
153 }
154
155 void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
156 {
157     int op1 = currentInstruction[1].u.operand;
158     int op2 = currentInstruction[2].u.operand;
159     unsigned target = currentInstruction[3].u.operand;
160
161     emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter);
162 }
163
164 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
165 {
166     int op1 = currentInstruction[1].u.operand;
167     int op2 = currentInstruction[2].u.operand;
168     unsigned target = currentInstruction[3].u.operand;
169
170     emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter);
171 }
172
173 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
174 {
175     int op1 = currentInstruction[1].u.operand;
176     int op2 = currentInstruction[2].u.operand;
177     unsigned target = currentInstruction[3].u.operand;
178
179     emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter);
180 }
181
182 void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
183 {
184     int op1 = currentInstruction[1].u.operand;
185     int op2 = currentInstruction[2].u.operand;
186     unsigned target = currentInstruction[3].u.operand;
187
188     emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter);
189 }
190
191 void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
192 {
193     int op1 = currentInstruction[1].u.operand;
194     int op2 = currentInstruction[2].u.operand;
195     unsigned target = currentInstruction[3].u.operand;
196
197     emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter);
198 }
199
200 #if USE(JSVALUE64)
201
// op_unsigned: reinterpret an int32 as an unsigned 32-bit value. The fast path
// only handles non-negative int32s, whose unsigned value is bit-identical.
void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    
    emitGetVirtualRegister(op1, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    // A negative int32 reinterpreted as uint32 exceeds int32 range and would
    // need boxing as a double, so defer it to the slow path.
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitTagInt(regT0, regT0);
    emitPutVirtualRegister(result, regT0);
}
213
void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Two links: one for the not-int check, one for the negative-value check
    // added by emit_op_unsigned, in that order.
    linkSlowCase(iter);
    linkSlowCase(iter);
    
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
    slowPathCall.call();
}
222
// Shared fast-path generator for all relational jump opcodes. Emits an inline
// int32 (or single-character string constant) compare and a conditional jump
// to `target`; everything else is deferred to emit_compareAndJumpSlow via
// addSlowCase. The slow-case ordering here must match the linkSlowCase
// ordering in emit_compareAndJumpSlow.
void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition condition)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantChar(op1)) {
        // op1 is a constant single-character string: compare op2's first
        // character against it. commute() flips the condition because the
        // constant is on the left in the bytecode but the right in branch32.
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantChar(op2)) {
        // Mirror case: op2 is the constant single-character string.
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantInt(op2)) {
        // op1 in regT0, op2 folded into an immediate.
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotInt(regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantInt(op1)) {
        // op2 in regT1, op1 folded; condition commuted for the swapped operands.
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotInt(regT1);
        int32_t op1imm = getOperandConstantInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        // General case: both operands in registers (op1 -> regT0, op2 -> regT1).
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotInt(regT0);
        emitJumpSlowCaseIfNotInt(regT1);

        addJump(branch32(condition, regT0, regT1), target);
    }
}
266
// Shared slow-path generator for all relational jump opcodes. Handles
// double-typed operands inline where possible, and otherwise calls the given
// C++ `operation`. `condition` is the double-compare form of the branch;
// `invert` is true for the jn* opcodes, whose operation computes the
// non-negated relation so the call result's sense must be flipped.
// The linkSlowCase ordering here must mirror the addSlowCase ordering in
// emit_compareAndJump.
void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    // All these opcodes must be the same length: the "skip past the branch"
    // jumps below advance by OPCODE_LENGTH(op_jless) regardless of opcode.
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless);
    
    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
        // Four links: these must match the slow cases the constant-char fast
        // path in emit_compareAndJump registered (not-a-cell plus the
        // character-load failure jumps) — NOTE(review): count assumed to
        // correspond to emitLoadCharacterString's failure list; verify there.
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);

        // No inline double path for strings: go straight to the C call.
        emitGetVirtualRegister(op1, argumentGPR0);
        emitGetVirtualRegister(op2, argumentGPR1);
        callOperation(operation, argumentGPR0, argumentGPR1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    if (isOperandConstantInt(op2)) {
        // One link: op1's not-int check. op1's boxed value is still in regT0.
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotNumber(regT0);
            // Unbox the JSVALUE64 double: re-add the tag offset, then move to FPR.
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            // Compare failed: skip over the branch back in the main path.
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        // op1 was not a number (or no FPU): call out.
        emitGetVirtualRegister(op2, regT1);
        callOperation(operation, regT0, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    } else if (isOperandConstantInt(op1)) {
        // One link: op2's not-int check. op2's boxed value is in regT1.
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotNumber(regT1);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        // Load constant op1 into regT2 (regT1 still holds op2) and call out.
        emitGetVirtualRegister(op1, regT2);
        callOperation(operation, regT2, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    } else {
        // General case. First link: op1's not-int check (op1 in regT0,
        // op2 in regT1 from the fast path).
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotNumber(regT0);
            Jump fail2 = emitJumpIfNotNumber(regT1);
            // If regT1 is an int (and regT0 a double), the int case is handled
            // after fail3 links below via the C call path.
            Jump fail3 = emitJumpIfInt(regT1);
            add64(tagTypeNumberRegister, regT0);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT0, fpRegT0);
            move64ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        // Second link: op2's not-int check; then fall through to the C call.
        linkSlowCase(iter);
        callOperation(operation, regT0, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    }
}
366
// op_inc: increment a virtual register in place. Fast path handles int32
// without overflow; not-int and overflow fall to the slow path.
void JIT::emit_op_inc(Instruction* currentInstruction)
{
    int srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitTagInt(regT0, regT0);
    // emitPutVirtualRegister defaults to storing from regT0.
    emitPutVirtualRegister(srcDst);
}
377
void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Two links: not-int check, then the add-overflow check.
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
    slowPathCall.call();
}
385
// op_dec: decrement a virtual register in place. Mirrors emit_op_inc.
void JIT::emit_op_dec(Instruction* currentInstruction)
{
    int srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitTagInt(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
396
void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Two links: not-int check, then the subtract-overflow check.
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
    slowPathCall.call();
}
404
405 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
406
407 #if CPU(X86) || CPU(X86_64)
408
// op_mod fast path (x86/x86_64 only): integer remainder via the IDIV
// instruction, which requires the dividend in eax (sign-extended into edx)
// and leaves the remainder in edx.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    auto edx = X86Registers::edx;
    auto ecx = X86Registers::ecx;
    ASSERT(regT4 != edx);
    ASSERT(regT4 != ecx);

    // op1 is kept in regT4 so its sign can be checked after the divide
    // (IDIV clobbers eax/edx).
    emitGetVirtualRegisters(op1, regT4, op2, ecx);
    emitJumpSlowCaseIfNotInt(regT4);
    emitJumpSlowCaseIfNotInt(ecx);

    move(regT4, regT0);
    // x % 0 must produce NaN: slow path.
    addSlowCase(branchTest32(Zero, ecx));
    Jump denominatorNotNeg1 = branch32(NotEqual, ecx, TrustedImm32(-1));
    // INT_MIN / -1 overflows IDIV (hardware #DE): slow path.
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    x86ConvertToDoubleWord32(); // cdq: sign-extend eax into edx:eax.
    x86Div32(ecx);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT4, TrustedImm32(0));
    // A zero remainder with a negative numerator is -0, which is not
    // representable as an int32: slow path.
    addSlowCase(branchTest32(Zero, edx));
    numeratorPositive.link(this);
    emitTagInt(edx, regT0);
    emitPutVirtualRegister(result);
}
439
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Five links, matching emit_op_mod's slow cases in order: op1 not int,
    // op2 not int, zero denominator, INT_MIN / -1 overflow, negative zero.
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}
450
451 #else // CPU(X86) || CPU(X86_64)
452
// Non-x86 targets have no inline mod fast path: always call the slow path,
// so no slow cases are ever registered for op_mod.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}
458
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    // emit_op_mod above registers no slow cases on these platforms, so this
    // must never be reached.
    UNREACHABLE_FOR_PLATFORM();
}
463
464 #endif // CPU(X86) || CPU(X86_64)
465
466 /* ------------------------------ END: OP_MOD ------------------------------ */
467
468 #endif // USE(JSVALUE64)
469
// op_negate: arithmetic negation through a patchable math IC. The IC is
// recorded per-instruction so the slow path can retrieve it.
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    JITNegIC* negateIC = m_codeBlock->addJITNegIC();
    m_instructionToMathIC.add(currentInstruction, negateIC);
    emitMathICFast(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate);
}
476
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Math ICs can register a variable number of slow cases, so link
    // everything recorded for this bytecode offset rather than counting.
    linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);

    JITNegIC* negIC = bitwise_cast<JITNegIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize);
}
484
// Shared fast-path emitter for the bitwise binary ops (and/or/xor) and left
// shift. The SnippetGenerator template parameter supplies the op-specific
// code; this function handles operand loading, constant folding of one int
// operand, and result storage.
template<typename SnippetGenerator>
void JIT::emitBitBinaryOpFastPath(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    // 64-bit: one GPR per JSValue.
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
#else
    // 32-bit: tag/payload register pairs.
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
#endif

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    // Fold at most one constant int operand into the snippet.
    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    // Both-constant should have been folded by the bytecode generator.
    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);

    gen.generateFastPath(*this);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().link(this);
    emitPutVirtualRegister(result, resultRegs);

    addSlowCase(gen.slowPathJumpList());
}
529
// op_bitand fast path: delegates to the shared bit-binary-op emitter.
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<JITBitAndGenerator>(currentInstruction);
}
534
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // The snippet generator's slow-case count varies; link all of them.
    linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
    slowPathCall.call();
}
542
// op_bitor fast path: delegates to the shared bit-binary-op emitter.
void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<JITBitOrGenerator>(currentInstruction);
}
547
void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // The snippet generator's slow-case count varies; link all of them.
    linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
    slowPathCall.call();
}
555
// op_bitxor fast path: delegates to the shared bit-binary-op emitter.
void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<JITBitXorGenerator>(currentInstruction);
}
560
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // The snippet generator's slow-case count varies; link all of them.
    linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
    slowPathCall.call();
}
568
// op_lshift fast path: delegates to the shared bit-binary-op emitter.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<JITLeftShiftGenerator>(currentInstruction);
}
573
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // The snippet generator's slow-case count varies; link all of them.
    linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
    slowPathCall.call();
}
581
// Shared fast-path emitter for op_rshift (sign-propagating) and op_urshift
// (zero-filling). Unlike emitBitBinaryOpFastPath, the right-shift snippet
// also needs FP scratch registers (for shifting double-typed left operands).
void JIT::emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID opcodeID)
{
    ASSERT(opcodeID == op_rshift || opcodeID == op_urshift);

    JITRightShiftGenerator::ShiftType snippetShiftType = opcodeID == op_rshift ?
        JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;

    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
    FPRReg scratchFPR = InvalidFPRReg;
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
    FPRReg scratchFPR = fpRegT2;
#endif

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    // Fold at most one constant int operand into the snippet.
    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    // Both-constant should have been folded by the bytecode generator.
    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
        fpRegT0, scratchGPR, scratchFPR, snippetShiftType);

    gen.generateFastPath(*this);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().link(this);
    emitPutVirtualRegister(result, resultRegs);

    addSlowCase(gen.slowPathJumpList());
}
633
// op_rshift: sign-propagating right shift.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShiftFastPath(currentInstruction, op_rshift);
}
638
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // The snippet generator's slow-case count varies; link all of them.
    linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_rshift);
    slowPathCall.call();
}
646
// op_urshift: zero-filling (unsigned) right shift.
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShiftFastPath(currentInstruction, op_urshift);
}
651
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // The snippet generator's slow-case count varies; link all of them.
    linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_urshift);
    slowPathCall.call();
}
659
660 ALWAYS_INLINE static OperandTypes getOperandTypes(Instruction* instruction)
661 {
662     return OperandTypes(ArithProfile::fromInt(instruction[4].u.operand).lhsResultType(), ArithProfile::fromInt(instruction[4].u.operand).rhsResultType());
663 }
664
// op_add: value addition through a patchable math IC, recorded
// per-instruction so the slow path can retrieve it.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    JITAddIC* addIC = m_codeBlock->addJITAddIC();
    m_instructionToMathIC.add(currentInstruction, addIC);
    emitMathICFast(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd);
}
671
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Math ICs register a variable number of slow cases; link all of them.
    linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);

    JITAddIC* addIC = bitwise_cast<JITAddIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize);
}
679
// Fast-path emitter for unary math ICs (currently negate). Tries to generate
// patchable inline code; if the IC declines, emits a direct call to the
// profiled or non-profiled C operation instead.
template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
    int result = currentInstruction[1].u.operand;
    int operand = currentInstruction[2].u.operand;

#if USE(JSVALUE64)
    // ArithNegate benefits from using the same register as src and dst.
    // Since regT1==argumentGPR1, using regT1 avoid shuffling register to call the slow path.
    JSValueRegs srcRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = JSValueRegs(regT1);
    GPRReg scratchGPR = regT2;
#else
    JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
    JSValueRegs resultRegs = JSValueRegs(regT3, regT2);
    GPRReg scratchGPR = regT4;
#endif

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = label();
#endif

    // The ArithProfile for a unary op is embedded in operand 3 of the
    // instruction stream itself.
    ArithProfile& arithProfile = *bitwise_cast<ArithProfile*>(&currentInstruction[3].u.operand);
    mathIC->m_generator = Generator(resultRegs, srcRegs, scratchGPR, arithProfile);

    emitGetVirtualRegister(operand, srcRegs);

    // Per-instruction generation state, consumed later by emitMathICSlow.
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, MathICGenerationState()).iterator->value;

    bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
    if (!generatedInlineCode) {
        // No inline code: always call out; profile only if profiling is on.
        if (shouldEmitProfiling())
            callOperation(profiledFunction, resultRegs, srcRegs, &arithProfile);
        else
            callOperation(nonProfiledFunction, resultRegs, srcRegs);
    } else
        addSlowCase(mathICGenerationState.slowPathJumps);

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);
}
728
// Fast-path emitter for binary math ICs (add/sub/mul). Mirrors the unary
// overload but folds one constant int operand into the snippet and threads
// static operand-type information through to the generator.
template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    // On 64-bit the operand types are read from the CodeBlock's copy of the
    // instruction stream.
    OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction));
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = JSValueRegs(regT2);
    GPRReg scratchGPR = regT3;
    FPRReg scratchFPR = fpRegT2;
#else
    OperandTypes types = getOperandTypes(currentInstruction);
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
    FPRReg scratchFPR = fpRegT2;
#endif

    // Null profile means the non-profiled operation is used below.
    ArithProfile* arithProfile = nullptr;
    if (shouldEmitProfiling())
        arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);

    SnippetOperand leftOperand(types.first());
    SnippetOperand rightOperand(types.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, fpRegT0, fpRegT1, scratchGPR, scratchFPR, arithProfile);
    
    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
    
    // Only load operands the generator cannot treat as constants.
    if (!Generator::isLeftOperandValidConstant(leftOperand))
        emitGetVirtualRegister(op1, leftRegs);
    if (!Generator::isRightOperandValidConstant(rightOperand))
        emitGetVirtualRegister(op2, rightRegs);

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = label();
#endif

    // Per-instruction generation state, consumed later by emitMathICSlow.
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, MathICGenerationState()).iterator->value;

    bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
    if (!generatedInlineCode) {
        // The C operation needs both operands materialized, including any
        // that were folded as constants above.
        if (leftOperand.isConst())
            emitGetVirtualRegister(op1, leftRegs);
        else if (rightOperand.isConst())
            emitGetVirtualRegister(op2, rightRegs);
        if (arithProfile)
            callOperation(profiledFunction, resultRegs, leftRegs, rightRegs, arithProfile);
        else
            callOperation(nonProfiledFunction, resultRegs, leftRegs, rightRegs);
    } else
        addSlowCase(mathICGenerationState.slowPathJumps);

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);
}
804
// Emits the out-of-line slow path for a unary math IC.
// Must run after the corresponding emitMathICFast, which registered the
// MathICGenerationState for this instruction; at link time the inline IC
// code is finalized against the final executable addresses.
template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
    // State recorded by the fast-path emitter; it must already exist for this instruction.
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
    mathICGenerationState.slowPathStart = label();

    int result = currentInstruction[1].u.operand;

#if USE(JSVALUE64)
    JSValueRegs srcRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = JSValueRegs(regT0);
#else
    // 32-bit value representation: each JSValue occupies a tag/payload register pair.
    JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
    JSValueRegs resultRegs = JSValueRegs(regT3, regT2);
#endif

#if ENABLE(MATH_IC_STATS)
    auto slowPathStart = label();
#endif

    if (shouldEmitProfiling()) {
        // NOTE(review): reinterprets the address of the third operand slot as an inline
        // ArithProfile — assumes unary ops embed their profile directly in the bytecode
        // stream (unlike the binary path, which uses arithProfileForPC); confirm against
        // the bytecode layout.
        ArithProfile* arithProfile = bitwise_cast<ArithProfile*>(&currentInstruction[3].u.operand);
        if (mathICGenerationState.shouldSlowPathRepatch)
            // Repatching variant: pass the IC itself so the operation can regenerate code.
            mathICGenerationState.slowPathCall = callOperation(reinterpret_cast<J_JITOperation_EJMic>(profiledRepatchFunction), resultRegs, srcRegs, TrustedImmPtr(mathIC));
        else
            mathICGenerationState.slowPathCall = callOperation(profiledFunction, resultRegs, srcRegs, arithProfile);
    } else
        mathICGenerationState.slowPathCall = callOperation(reinterpret_cast<J_JITOperation_EJMic>(repatchFunction), resultRegs, srcRegs, TrustedImmPtr(mathIC));

#if ENABLE(MATH_IC_STATS)
    auto slowPathEnd = label();
    // Account the slow-path code size in the IC statistics once addresses are known.
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);

    // Once everything is emitted, patch the inline IC using the final addresses.
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
        mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
    });
}
849
// Emits the out-of-line slow path for a binary math IC (add/mul/sub).
// Mirrors the fast-path register assignments made by emitMathICFast and,
// at link time, finalizes the inline IC code for this instruction.
template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
    // State recorded by the fast-path emitter; it must already exist for this instruction.
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
    mathICGenerationState.slowPathStart = label();

    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    // NOTE(review): 64-bit builds read operand types from the copied instruction stream,
    // presumably to see the unmodified bytecode — confirm why the 32-bit path differs.
    OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction));
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = JSValueRegs(regT2);
#else
    OperandTypes types = getOperandTypes(currentInstruction);
    // 32-bit value representation: each JSValue occupies a tag/payload register pair.
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
#endif
    
    SnippetOperand leftOperand(types.first());
    SnippetOperand rightOperand(types.second());

    // Fold at most one operand into the snippet as an int32 constant.
    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));

    // Inverse of the fast path: operands the generator treated as valid constants were
    // never loaded into registers there, so materialize them now for the C++ call.
    if (Generator::isLeftOperandValidConstant(leftOperand))
        emitGetVirtualRegister(op1, leftRegs);
    else if (Generator::isRightOperandValidConstant(rightOperand))
        emitGetVirtualRegister(op2, rightRegs);

#if ENABLE(MATH_IC_STATS)
    auto slowPathStart = label();
#endif

    if (shouldEmitProfiling()) {
        ArithProfile& arithProfile = *m_codeBlock->arithProfileForPC(currentInstruction);
        if (mathICGenerationState.shouldSlowPathRepatch)
            // Repatching variant: pass the IC itself so the operation can regenerate code.
            mathICGenerationState.slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJArpMic>(profiledRepatchFunction), resultRegs, leftRegs, rightRegs, &arithProfile, TrustedImmPtr(mathIC));
        else
            mathICGenerationState.slowPathCall = callOperation(profiledFunction, resultRegs, leftRegs, rightRegs, &arithProfile);
    } else
        mathICGenerationState.slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchFunction), resultRegs, leftRegs, rightRegs, TrustedImmPtr(mathIC));

#if ENABLE(MATH_IC_STATS)
    auto slowPathEnd = label();
    // Account the slow-path code size in the IC statistics once addresses are known.
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);

    // Once everything is emitted, patch the inline IC using the final addresses.
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
        mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
    });
}
915
// Emits op_div. Division does not use a math IC: it emits a JITDivGenerator
// fast path directly and falls back to slow_path_div when the generator
// cannot produce one (or when the fast path's guards fail at runtime).
void JIT::emit_op_div(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    // NOTE(review): 64-bit builds read operand types from the copied instruction stream,
    // presumably to see the unmodified bytecode — confirm why the 32-bit path differs.
    OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction));
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
#else
    OperandTypes types = getOperandTypes(currentInstruction);
    // 32-bit value representation: each JSValue occupies a tag/payload register pair.
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
#endif
    FPRReg scratchFPR = fpRegT2;

    // Only gather type feedback when profiling is on; the generator accepts null.
    ArithProfile* arithProfile = nullptr;
    if (shouldEmitProfiling())
        arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);

    SnippetOperand leftOperand(types.first());
    SnippetOperand rightOperand(types.second());

    // Fold a constant operand into the snippet. Double constants are only
    // exploited on 64-bit, where a JSValue fits in one register.
    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
#if USE(JSVALUE64)
    else if (isOperandConstantDouble(op1))
        leftOperand.setConstDouble(getOperandConstantDouble(op1));
#endif
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));
#if USE(JSVALUE64)
    else if (isOperandConstantDouble(op2))
        rightOperand.setConstDouble(getOperandConstantDouble(op2));
#endif

    // The else-if chain above guarantees at most one operand became a constant.
    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    // Load only the non-constant operand(s) into registers.
    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
        fpRegT0, fpRegT1, scratchGPR, scratchFPR, arithProfile);

    gen.generateFastPath(*this);

    if (gen.didEmitFastPath()) {
        gen.endJumpList().link(this);
        emitPutVirtualRegister(result, resultRegs);

        // Runtime bail-outs from the fast path go to emitSlow_op_div.
        addSlowCase(gen.slowPathJumpList());
    } else {
        // No fast path was emitted, so there can be no jumps to link; call the
        // generic slow path inline instead.
        ASSERT(gen.endJumpList().empty());
        ASSERT(gen.slowPathJumpList().empty());
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
        slowPathCall.call();
    }
}
981
982 void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
983 {
984     linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
985
986     JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
987     slowPathCall.call();
988 }
989
990 void JIT::emit_op_mul(Instruction* currentInstruction)
991 {
992     JITMulIC* mulIC = m_codeBlock->addJITMulIC();
993     m_instructionToMathIC.add(currentInstruction, mulIC);
994     emitMathICFast(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul);
995 }
996
997 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
998 {
999     linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
1000
1001     JITMulIC* mulIC = bitwise_cast<JITMulIC*>(m_instructionToMathIC.get(currentInstruction));
1002     emitMathICSlow(mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize);
1003 }
1004
1005 void JIT::emit_op_sub(Instruction* currentInstruction)
1006 {
1007     JITSubIC* subIC = m_codeBlock->addJITSubIC();
1008     m_instructionToMathIC.add(currentInstruction, subIC);
1009     emitMathICFast(subIC, currentInstruction, operationValueSubProfiled, operationValueSub);
1010 }
1011
1012 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1013 {
1014     linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
1015     JITSubIC* subIC = bitwise_cast<JITSubIC*>(m_instructionToMathIC.get(currentInstruction));
1016     emitMathICSlow(subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize);
1017 }
1018
1019 void JIT::emit_op_pow(Instruction* currentInstruction)
1020 {
1021     JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_pow);
1022     slowPathCall.call();
1023 }
1024
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_POW ------------------------------ */
1026
1027 } // namespace JSC
1028
1029 #endif // ENABLE(JIT)