Introducing VMEntryScope to update the VM stack limit.
Source/JavaScriptCore/jit/JIT.cpp
/*
 * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "CodeBlock.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "Operations.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
#if ENABLE(VALUE_PROFILER)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
#endif
{
}

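// Entry-point tier-up check: bump the CodeBlock's execution counter and, once it crosses the
// entry threshold, ask operationOptimize for optimized code. A non-null result is an entry
// point we can jump straight into; otherwise we fall through to the baseline body.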
#if ENABLE(DFG_JIT)
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

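// Each DEFINE_* macro below expands to a case of the opcode switch in privateCompileMainPass /
// privateCompileSlowCases: it emits code for one bytecode and then advances m_bytecodeOffset
// by that opcode's length via NEXT_OPCODE. The binary/unary ops are identical on both value
// representations and simply call the shared C++ slow path through JITSlowPathCall.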
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_UNARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_UNARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

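// Main pass: walk the bytecode stream once, record a label for every instruction (so jumps and
// the code maps built at link time can find it), and emit the fast-path code for each opcode.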
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        switch (opcodeID) {
        DEFINE_BINARY_OP(del_by_val)
        DEFINE_BINARY_OP(in)
        DEFINE_BINARY_OP(less)
        DEFINE_BINARY_OP(lesseq)
        DEFINE_BINARY_OP(greater)
        DEFINE_BINARY_OP(greatereq)
        DEFINE_UNARY_OP(is_function)
        DEFINE_UNARY_OP(is_object)
        DEFINE_UNARY_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)

        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

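// Slow-case pass: for every slow-path jump recorded during the main pass, emit the out-of-line
// code for that bytecode and then jump back to the fast path. The indices reset here must match
// the counts checked by the RELEASE_ASSERTs at the end of the function.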
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

#if ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path executions of that
    // instruction and the slow-path executions. Furthermore, if the slow-path code created
    // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

#if ENABLE(VALUE_PROFILER)
        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_callee)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

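// Top-level driver for a baseline compilation: decide the profiling/optimization policy, emit
// the prologue, run the three passes above, emit the arity-check and exception-handling stubs,
// then link everything into executable memory and install the result on the CodeBlock.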
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
#if ENABLE(VALUE_PROFILER)
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }
#endif

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    preserveReturnAddressAfterCall(regT2);
    emitPutReturnPCToCallFrameHeader(regT2);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

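    // For function code, reserve space for the callee registers and make sure the resulting
    // frame top stays within the VM's JS stack limit; if it does not, stackCheck branches to
    // the slow path below that calls operationStackCheck.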
    Jump stackCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if ENABLE(DFG_JIT)
#if DFG_ENABLE(SUCCESS_STATS)
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), regT4);
            }
        }
#endif

        addPtr(TrustedImm32(virtualRegisterForLocal(m_codeBlock->m_numCalleeRegisters).offset() * sizeof(Register)), callFrameRegister, regT1);
        stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
    }

    Label functionBody = label();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackCheck.link(this);
        m_bytecodeOffset = 0;
        callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(functionBody);

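        // Arity check entry point: used when a caller passes fewer arguments than the callee
        // declares. operationCallArityCheck / operationConstructArityCheck decides whether a
        // fixup is required; if so, arityFixup rearranges the frame before we re-enter at
        // beginLabel.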
        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        preserveReturnAddressAfterCall(regT2);
        emitPutReturnPCToCallFrameHeader(regT2);
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (returnValueGPR != regT0)
            move(returnValueGPR, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        emitNakedCall(m_vm->getCTIStub(arityFixup).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
        info.calleeGPR = regT0;
    }

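    // Record a compact map from bytecode offsets to machine-code offsets in the finished code;
    // the other tiers (LLInt/DFG) use it when they need to find a corresponding point in this
    // baseline code, for example when transferring control between tiers.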
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    if (canBeOptimizedOrInlined()
#if ENABLE(LLINT)
        || true
#endif
        ) {
        CompactJITCodeMap::Encoder jitCodeMapEncoder;
        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
            if (m_labels[bytecodeOffset].isSet())
                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
        }
        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
    }
#endif

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)),
        withArityCheck);

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

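// Call-site linking: the first time a call site resolves its callee, record the callee in the
// CallLinkInfo, repatch the near call (hotPathOther) to jump directly to the callee's code, and
// repoint the slow path at the closure-call or virtual-call thunk so we stop trying to relink.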
void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    ASSERT(!callLinkInfo->isLinked());
    callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
    callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
    repatchBuffer.relink(callLinkInfo->hotPathOther, code);

    if (calleeCodeBlock)
        calleeCodeBlock->linkIncomingCall(exec, callLinkInfo);

    // Patch the slow path so we do not continue to try to link.
    if (kind == CodeForCall) {
        ASSERT(callLinkInfo->callType == CallLinkInfo::Call
               || callLinkInfo->callType == CallLinkInfo::CallVarargs);
        if (callLinkInfo->callType == CallLinkInfo::Call) {
            repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
            return;
        }

        repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
}

void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallThunkGenerator).code());
}

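// Emit the shared out-of-line exception paths: every exception check recorded during codegen
// funnels here, passes the current frame (or the caller's frame, for checks that roll back the
// call frame) to lookupExceptionHandler, and then jumps to the resulting handler.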
void JIT::privateCompileExceptionHandlers()
{
    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
        return;

    Jump doLookup;

    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);
        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
        doLookup = jump();
    }

    if (!m_exceptionChecks.empty())
        m_exceptionChecks.link(this);

    // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

    if (doLookup.isSet())
        doLookup.link(this);

#if CPU(X86)
    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
    poke(GPRInfo::argumentGPR0);
#endif
    m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
    jumpToExceptionHandler();
}


} // namespace JSC

#endif // ENABLE(JIT)