/*
 * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "ProfilerDatabase.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

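// These helpers repatch a compiled call site, identified by its return address,
// so that it targets a new callee (either a trampoline or a C function).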
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

#if ENABLE(DFG_JIT)
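// Emitted at function entry when this block is a candidate for DFG optimization:
// bump the CodeBlock's execution counter and, once it crosses zero, call
// operationOptimize. If that returns an entry point, switch the stack pointer
// over and jump straight into the optimized code.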
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

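// The DEFINE_* macros below expand to switch cases that emit code for a single
// opcode and then advance m_bytecodeOffset past it. For example, DEFINE_OP(op_add)
// expands (roughly) to:
//     case op_add: {
//         emit_op_add(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_add);
//         break;
//     }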
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

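// First pass: walk the bytecode stream in order, emitting the fast-path code for
// each instruction and recording a label per bytecode offset for later linking.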
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        switch (opcodeID) {
        DEFINE_SLOW_OP(del_by_val)
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_touch_entry)
        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_captured_mov)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_captured_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

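// Second pass: every bytecode offset now has a label, so resolve the intra-method
// jumps the main pass collected in m_jmpTable.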
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

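// Third pass: emit the out-of-line slow paths. Each group of slow-case jumps
// recorded during the main pass is linked here, to code that ends by jumping
// back to the corresponding fast path.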
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_captured_mov)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_callee)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

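// Drives the whole baseline compilation: emits the prologue, stack and arity
// checks, runs the three code-generation passes, then links the result into a
// LinkBuffer and installs the finished JITCode on the CodeBlock.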
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump stackOverflow;
    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }

        addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
        stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
    }

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackOverflow.link(this);
        m_bytecodeOffset = 0;
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        if (returnValueGPR != regT0)
            move(returnValueGPR, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        GPRReg thunkReg;
#if USE(JSVALUE64)
        thunkReg = GPRInfo::regT7;
#else
        thunkReg = GPRInfo::regT5;
#endif
        move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), thunkReg);
        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
        emitNakedCall(m_vm->getCTIStub(arityFixup).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

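    // Everything has been generated into the assembler's buffer; copy it into
    // executable memory and resolve the recorded labels, calls, and jump tables.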
    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.callReturnLocation = patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(compilationInfo.hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(compilationInfo.hotPathOther);
    }

    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

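// Shared landing pads for exception checks: both flavors load the VM and
// CallFrame* arguments, call lookupExceptionHandler, and then jump to whatever
// handler it selected.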
void JIT::privateCompileExceptionHandlers()
{
    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
        return;

    Jump doLookup;

    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);
        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
        doLookup = jump();
    }

    if (!m_exceptionChecks.empty())
        m_exceptionChecks.link(this);

    // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

    if (doLookup.isSet())
        doLookup.link(this);

    move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);

#if CPU(X86)
    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
    poke(GPRInfo::argumentGPR0);
    poke(GPRInfo::argumentGPR1, 1);
#endif
    m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
    jumpToExceptionHandler();
}

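// The number of stack slots a baseline frame needs: the callee registers (already
// rounded up to the stack alignment) plus headroom for the largest slow-path call.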
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
}

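// Where the stack pointer should sit relative to the call frame, expressed as a
// (negative) offset in Register-sized slots; callers scale by sizeof(Register).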
int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

} // namespace JSC

#endif // ENABLE(JIT)