/*
 * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

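// These helpers locate an already-emitted call from its return address and re-point it
// at a new target, using a RepatchBuffer over the owning CodeBlock's code.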
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

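// Emitted at entry points that are eligible for tier-up: bump the CodeBlock's execution
// counter and, once it crosses its threshold, call operationOptimize. If that returns a
// non-null code pointer, switch to the returned stack pointer and jump into the
// optimized code; otherwise fall through and keep running baseline code.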
#if ENABLE(DFG_JIT)
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

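// The DEFINE_* macros below expand into the cases of the opcode switches in the main
// and slow-case passes. NEXT_OPCODE advances m_bytecodeOffset by the length of the
// opcode that was just handled.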
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

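// First pass: walk the bytecode stream and emit the fast-path code for every opcode,
// recording a label per bytecode offset so that jumps can be linked afterwards.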
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

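        // When a per-bytecode profiler is attached, count each execution of this bytecode.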
        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        switch (opcodeID) {
        DEFINE_SLOW_OP(del_by_val)
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_touch_entry)
        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_captured_mov)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_captured_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

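// Second pass: now that every bytecode offset has a label, resolve the intra-function
// jumps recorded during the main pass.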
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

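// Third pass: emit the out-of-line slow paths for every fast-path guard recorded in
// m_slowCases, then jump back to the corresponding fast-path continuation.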
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_captured_mov)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_callee)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

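// Top-level baseline compilation: decide the optimization/profiling policy from the DFG
// capability level, emit the prologue and entry checks, run the three code-generation
// passes, then link everything through a LinkBuffer and install the result on the CodeBlock.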
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump stackOverflow;
    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }

        addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
        stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
    }

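    // Set the stack pointer for this frame and make sure the result is properly aligned.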
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

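    // For function code, emit the stack-overflow thunk and the arity-check entry point.
    // The arity check fixes up the argument count (via the arityFixup thunk) when needed
    // and then re-enters the normal code at beginLabel.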
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackOverflow.link(this);
        m_bytecodeOffset = 0;
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        if (returnValueGPR != regT0)
            move(returnValueGPR, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), regT5);
        loadPtr(BaseIndex(regT5, regT0, timesPtr()), regT5);
        emitNakedCall(m_vm->getCTIStub(arityFixup).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

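    // Record, for each by-val access, the code locations needed to repatch it later.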
    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
        info.calleeGPR = regT0;
    }

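    // Build the compact map from bytecode offsets to machine-code offsets for this CodeBlock.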
    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

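// Emit the shared exception-handling tail: all recorded exception checks are linked here,
// the VM and CallFrame arguments are set up, lookupExceptionHandler is called, and control
// then jumps to whatever handler it finds.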
void JIT::privateCompileExceptionHandlers()
{
    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
        return;

    Jump doLookup;

    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);
        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
        doLookup = jump();
    }

    if (!m_exceptionChecks.empty())
        m_exceptionChecks.link(this);

    // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

    if (doLookup.isSet())
        doLookup.link(this);

    move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);

#if CPU(X86)
    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
    poke(GPRInfo::argumentGPR0);
    poke(GPRInfo::argumentGPR1, 1);
#endif
    m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
    jumpToExceptionHandler();
}

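// Frame-size helpers: the register count includes the extra space needed for slow-path
// calls and is rounded so that the resulting frame keeps the stack properly aligned.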
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

} // namespace JSC

#endif // ENABLE(JIT)