/*
 * Copyright (C) 2008, 2009, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "CodeBlock.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include "DFGNode.h" // for DFG_SUCCESS_STATS
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

using namespace std;

namespace JSC {

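// Helpers for patching already-generated call sites. Each one locates the call by
// its return address and relinks it to a new trampoline or C function; they are
// used when inline caches and call links are repatched at runtime.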
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_propertyAccessInstructionIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_globalResolveInfoIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex(JSStack::ReturnPC)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
#if USE(OS_RANDOMNESS)
    , m_randomGenerator(cryptographicallyRandomNumber())
#else
    , m_randomGenerator(static_cast<unsigned>(randomNumber() * 0xFFFFFFF))
#endif
#if ENABLE(VALUE_PROFILER)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
#endif
{
}

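// Emits the counter-based check that drives tiering up to the DFG: the CodeBlock's
// execution counter is bumped by a kind-specific increment, and once it crosses zero
// the cti_optimize stub is called to consider optimizing this CodeBlock.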
#if ENABLE(DFG_JIT)
void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
{
    if (!canBeOptimized())
        return;

    Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop() : Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
    JITStubCall stubCall(this, cti_optimize);
    stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
    if (kind == EnterOptimizationCheck)
        ASSERT(!m_bytecodeOffset);
    stubCall.call();
    skipOptimize.link(this);
}
#endif

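// Decrements the timeout counter and calls the timeout-check stub when it reaches
// zero, so long-running scripts can be interrupted. On X86 the counter lives in
// JSGlobalData rather than in a register, presumably because no register can be
// spared on that target.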
#if CPU(X86)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), AbsoluteAddress(&m_globalData->m_timeoutCount));
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(regT0);
    store32(regT0, &m_globalData->m_timeoutCount);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#elif USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(timeoutCheckRegister);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#else
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
    skipTimeout.link(this);

    killLastResultRegister();
}
#endif

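// The macros below drive the per-opcode dispatch in the code-generation passes.
// DEFINE_OP calls the fast-path emitter for an opcode, DEFINE_SLOWCASE_OP calls the
// slow-path emitter, and DEFINE_BINARY_OP/DEFINE_UNARY_OP simply call through to the
// corresponding cti stub. For example, DEFINE_OP(op_add) expands (roughly) to:
//
//     case op_add: {
//         emit_op_add(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_add);
//         break;
//     }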
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

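// First code-generation pass: walk the bytecode in order and emit the fast path for
// every instruction, recording a label for each bytecode offset so that jumps and the
// JIT code map can be resolved once the code is linked.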
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if USE(JSVALUE64)
        if (atJumpTarget())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLog("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_BINARY_OP(op_del_by_val)
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_BINARY_OP(op_greater)
        DEFINE_BINARY_OP(op_greatereq)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jmp_scopes)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_loop_if_less)
        DEFINE_OP(op_loop_if_lesseq)
        DEFINE_OP(op_loop_if_greater)
        DEFINE_OP(op_loop_if_greatereq)
        DEFINE_OP(op_loop_if_true)
        DEFINE_OP(op_loop_if_false)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_post_dec)
        DEFINE_OP(op_post_inc)
        DEFINE_OP(op_pre_dec)
        DEFINE_OP(op_pre_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)
        DEFINE_OP(op_init_global_const_check)

        case op_resolve_global_property:
        case op_resolve_global_var:
        case op_resolve_scoped_var:
        case op_resolve_scoped_var_on_top_scope:
        case op_resolve_scoped_var_with_top_scope_check:
        DEFINE_OP(op_resolve)

        case op_resolve_base_to_global:
        case op_resolve_base_to_global_dynamic:
        case op_resolve_base_to_scope:
        case op_resolve_base_to_scope_with_top_scope_check:
        DEFINE_OP(op_resolve_base)

        case op_put_to_base_variable:
        DEFINE_OP(op_put_to_base)

        DEFINE_OP(op_ensure_property_exists)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_resolve_with_this)
        DEFINE_OP(op_ret)
        DEFINE_OP(op_call_put_result)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_jsnumber)
        DEFINE_OP(op_to_primitive)

        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            ASSERT_NOT_REACHED();
        }
    }

    ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

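// Second pass: with a label now recorded for every bytecode offset, resolve the
// intra-block jumps that were collected during the main pass.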
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

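// Third pass: emit the out-of-line slow cases. The slow-case jumps recorded for each
// instruction are linked here, and each slow path jumps back to the fast path via
// emitJumpSlowToHot().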
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_byValInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

#if !ASSERT_DISABLED && ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;
#ifndef NDEBUG
        unsigned firstTo = m_bytecodeOffset;
#endif
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

#if ENABLE(VALUE_PROFILER)
        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif

#if ENABLE(JIT_VERBOSE)
        dataLog("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_convert_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_if_less)
        DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
        DEFINE_SLOWCASE_OP(op_loop_if_greater)
        DEFINE_SLOWCASE_OP(op_loop_if_greatereq)
        DEFINE_SLOWCASE_OP(op_loop_if_true)
        DEFINE_SLOWCASE_OP(op_loop_if_false)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_post_dec)
        DEFINE_SLOWCASE_OP(op_post_inc)
        DEFINE_SLOWCASE_OP(op_pre_dec)
        DEFINE_SLOWCASE_OP(op_pre_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_init_global_const_check)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_jsnumber)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        case op_resolve_global_property:
        case op_resolve_global_var:
        case op_resolve_scoped_var:
        case op_resolve_scoped_var_on_top_scope:
        case op_resolve_scoped_var_with_top_scope_check:
        DEFINE_SLOWCASE_OP(op_resolve)

        case op_resolve_base_to_global:
        case op_resolve_base_to_global_dynamic:
        case op_resolve_base_to_scope:
        case op_resolve_base_to_scope_with_top_scope_check:
        DEFINE_SLOWCASE_OP(op_resolve_base)
        DEFINE_SLOWCASE_OP(op_resolve_with_base)
        DEFINE_SLOWCASE_OP(op_resolve_with_this)

        case op_put_to_base_variable:
        DEFINE_SLOWCASE_OP(op_put_to_base)

        default:
            ASSERT_NOT_REACHED();
        }

        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif

        emitJumpSlowToHot(jump(), 0);
    }

    ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
    ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

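// Copies the patch offsets recorded while emitting a get_by_id / put_by_id fast path
// into the CodeBlock's StructureStubInfo. Offsets are stored as deltas from the start
// of the hot path so the repatching code can locate them after linking.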
ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer& linkBuffer)
{
    ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
    info.bytecodeIndex = bytecodeIndex;
    info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);
    info.hotPathBegin = linkBuffer.locationOf(hotPathBegin);

    switch (m_type) {
    case GetById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
        info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
        info.patch.baseline.u.get.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
        info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
#else
        info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1));
        info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2));
#endif
        info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult));
        info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation));
        break;
    }
    case PutById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
        info.patch.baseline.u.put.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
        info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
#else
        info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1));
        info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2));
#endif
        break;
    }
    }
}

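// Top-level baseline compilation: emit the prologue and stack/arity checks, run the
// three code-generation passes, link the resulting code, and copy the recorded
// patch and call-link metadata into the CodeBlock.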
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort)
{
#if ENABLE(JIT_VERBOSE_OSR)
    printf("Compiling JIT code!\n");
#endif

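    // Decide up front whether this CodeBlock may later be compiled by the DFG, and
    // therefore whether value-profiling sites need to be emitted at all.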
#if ENABLE(VALUE_PROFILER)
    DFG::CapabilityLevel level = m_codeBlock->canCompileWithDFG();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::ShouldProfile:
        m_canBeOptimized = false;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
        m_canBeOptimized = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        ASSERT_NOT_REACHED();
        break;
    }
#endif

    if (Options::showDisassembly())
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump stackCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if ENABLE(DFG_JIT)
#if DFG_ENABLE(SUCCESS_STATS)
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
#endif

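        // Reserve space for the callee registers and take the slow path if doing so
        // would run past the end of the JS stack.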
        addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        stackCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1);
    }

    Label functionBody = label();

#if ENABLE(VALUE_PROFILER)
    if (canBeOptimized())
        add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));
#endif

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

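    // Out-of-line prologue slow paths for functions: the stack-overflow check calls the
    // cti_stack_check stub and re-enters the function body, while the arity-check entry
    // point fixes up calls that arrived with the wrong argument count before jumping to
    // the normal entry.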
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_stack_check).call();
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(functionBody);

        arityCheck = label();
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;
        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return JITCode();

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
        m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
        info.calleeGPR = regT0;
    }

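    // Record a bytecode-offset-to-machine-code map for this block; it is consulted
    // when OSR transitions into or out of this baseline code.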
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    if (canBeOptimized()
#if ENABLE(LLINT)
        || true
#endif
        ) {
        CompactJITCodeMap::Encoder jitCodeMapEncoder;
        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
            if (m_labels[bytecodeOffset].isSet())
                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
        }
        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
    }
#endif

    if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
        *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);

    if (m_disassembler)
        m_disassembler->dump(patchBuffer);

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);

#if ENABLE(JIT_VERBOSE)
    dataLog("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return JITCode(result, JITCode::BaselineJIT);
}

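// Binds a linkable call site to a specific callee: the callee is recorded in the
// CallLinkInfo, the hot-path near call is repatched to jump straight to the callee's
// code, and the slow path is pointed at the generic virtual call/construct thunk so
// it stops trying to link.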
void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, JSGlobalData* globalData, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    ASSERT(!callLinkInfo->isLinked());
    callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
    callLinkInfo->lastSeenCallee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
    repatchBuffer.relink(callLinkInfo->hotPathOther, code);

    if (calleeCodeBlock)
        calleeCodeBlock->linkIncomingCall(callLinkInfo);

    // Patch the slow path so we do not continue to try to link.
    if (kind == CodeForCall) {
        repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
}

} // namespace JSC

#endif // ENABLE(JIT)