Add "get scope" byte code
[WebKit-https.git] / Source / JavaScriptCore / jit / JIT.cpp
/*
 * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "ProfilerDatabase.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

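// The ctiPatch* helpers below edit an existing call site in already-generated
// code, identified by the return address that the call pushes. RepatchBuffer
// scopes the edit and takes care of any instruction-cache flushing the target
// CPU needs once the call has been relinked.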
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

#if ENABLE(DFG_JIT)
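// Emitted at entry to any code block that may tier up to the DFG. Roughly: the
// execution counter is bumped and, while it is still negative, we skip ahead.
// Once it goes non-negative we call operationOptimize, which may hand back a
// pointer to optimized replacement code (with a new stack pointer delivered in
// returnValueGPR2) for us to jump into directly.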
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

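// Debug-only sanity check: trap if the stack pointer is not exactly where the
// code block's frame layout says it should be, relative to the call frame
// register. Compiled out when assertions are disabled.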
void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

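// The DEFINE_* macros below generate the switch cases used by the codegen
// passes. DEFINE_OP dispatches to a hand-written emit_<name> fast path,
// DEFINE_SLOW_OP emits a call to a shared slow_path_<name> C++ function, and
// DEFINE_SLOWCASE_OP dispatches to emitSlow_<name> for out-of-line slow cases.
// For example, DEFINE_OP(op_add) expands to roughly:
//
//     case op_add: {
//         emit_op_add(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_add);
//         break;
//     }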
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

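// First codegen pass: walk the bytecode stream linearly, emitting the fast
// path for each instruction and recording a label per bytecode offset so that
// jumps and slow-case re-entry points can be resolved by the later passes.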
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        switch (opcodeID) {
        DEFINE_SLOW_OP(del_by_val)
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_touch_entry)
        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_lexical_environment)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)

        DEFINE_OP(op_get_enumerable_length)
        DEFINE_OP(op_has_generic_property)
        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_get_structure_property_enumerator)
        DEFINE_OP(op_get_generic_property_enumerator)
        DEFINE_OP(op_next_enumerator_pname)
        DEFINE_OP(op_to_index_string)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

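// Second codegen pass: with every bytecode offset now labeled, resolve the
// intra-block jumps that the main pass recorded in m_jmpTable.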
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

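// Third codegen pass: emit the out-of-line slow cases. Each loop iteration
// links all the slow-case jumps recorded for one instruction, emits its slow
// path, and ends with a jump back into the main path at the next instruction
// (NEXT_OPCODE has already advanced m_bytecodeOffset inside the switch).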
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_callee)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_has_structure_property)
        DEFINE_SLOWCASE_OP(op_get_direct_pname)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

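// Drives one full baseline compile. It decides the profiling and tier-up
// policy from the DFG's capability analysis of this block, emits the prologue
// and stack check, runs the three codegen passes above, emits the arity-check
// entry point and exception handlers, and finally links the result into
// executable memory via LinkBuffer.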
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    // This ensures that we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump stackOverflow;
    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }

        addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
        stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
    }

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

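    // Out-of-line tail for functions: first the landing pad for the stack
    // overflow check emitted above, then a second entry point used when a
    // caller passed the wrong number of arguments. The arity path re-runs the
    // prologue, calls the arity-check operation, and, if the frame needs to
    // be moved, runs the arity fixup thunk before rejoining the normal entry
    // at beginLabel.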
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackOverflow.link(this);
        m_bytecodeOffset = 0;
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        if (returnValueGPR != regT0)
            move(returnValueGPR, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        GPRReg thunkReg;
#if USE(JSVALUE64)
        thunkReg = GPRInfo::regT7;
#else
        thunkReg = GPRInfo::regT5;
#endif
        move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), thunkReg);
        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.callReturnLocation = patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(compilationInfo.hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(compilationInfo.hotPathOther);
    }

    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

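// Emits the shared landing pads for the exception checks accumulated during
// codegen. Each pad calls into the runtime to locate the handler for the
// current exception and then jumps to it; the FromCallerFrame variant is for
// operations that unwind this frame first (such as the stack overflow and
// arity failures above, which are reported on the caller's behalf).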
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).

        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
        jumpToExceptionHandler();
    }
}

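// Number of registers the baseline frame reserves for this code block: its
// callee registers plus the worst-case extent any slow-path call needs,
// rounded so the frame keeps the stack pointer aligned. stackPointerOffsetFor
// turns that count into the call-frame-relative offset of the frame's lowest
// slot.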
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

} // namespace JSC

#endif // ENABLE(JIT)