baseline JIT should emit better code for UnresolvedProperty in resolve_scope/get_from...
[WebKit-https.git] / Source / JavaScriptCore / jit / JIT.cpp
/*
 * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "ProfilerDatabase.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

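// Repatch a previously emitted call, identified by the return address it pushes,
// so that it targets newCalleeFunction. This lets slow-path stubs redirect future
// calls without regenerating the surrounding code.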
void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    MacroAssembler::repatchCall(
        CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
        newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

#if ENABLE(DFG_JIT)
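// Counter-based tier-up check. Bump this code block's execution counter; while the
// (signed) result stays negative we have not crossed the optimization threshold and
// skip everything else. Otherwise call operationOptimize, which returns either null
// (keep running baseline code) or the entry point of newly compiled DFG code, in
// which case we restore the stack pointer it hands back in returnValueGPR2 and jump
// straight into the optimized code.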
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

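// Fast-path check for stores to watched variables. If the watchpoint set is already
// invalidated at codegen time there is nothing to notify and no code is emitted;
// otherwise any runtime state other than IsInvalidated takes the slow case, where
// the write can be reported.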
void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated)
        return;

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitNotifyWrite(GPRReg pointerToSet)
{
    addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

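// The dispatch macros below keep the main-pass and slow-case switch statements
// declarative. DEFINE_OP expands to a case that calls the matching emit_ function
// and advances m_bytecodeOffset by the opcode's length. DEFINE_SLOW_OP emits no
// fast path at all and instead calls the shared C++ slow path for the opcode.
// DEFINE_SLOWCASE_OP dispatches to the matching emitSlow_ function, which consumes
// the slow-case entries recorded by the main pass via the iterator.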
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

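// The baseline JIT compiles a CodeBlock in three passes: privateCompileMainPass
// walks the bytecode and emits the fast path for every instruction (recording a
// label per bytecode offset), privateCompileLinkPass resolves jumps between
// bytecode offsets, and privateCompileSlowCases emits the out-of-line slow paths
// that the fast paths branch to.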
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        switch (opcodeID) {
        DEFINE_SLOW_OP(del_by_val)
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_create_direct_arguments)
        DEFINE_OP(op_create_scoped_arguments)
        DEFINE_OP(op_create_out_of_band_arguments)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_load_arrowfunction_this)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_arrow_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_push_with_scope)
        DEFINE_OP(op_create_lexical_environment)
        DEFINE_OP(op_get_parent_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)

        DEFINE_OP(op_get_enumerable_length)
        DEFINE_OP(op_has_generic_property)
        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_get_property_enumerator)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)
        DEFINE_OP(op_to_index_string)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

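// Link pass: every jump recorded in m_jmpTable during the main pass targets a
// bytecode offset. Now that all per-offset labels exist, bind each jump to its
// target label.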
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

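// Slow-case pass: m_slowCases holds, in bytecode order, every slow-path branch the
// fast paths registered via addSlowCase. For each bytecode offset with slow cases
// we emit the corresponding emitSlow_ code, bump the rare-case profile counter when
// profiling, and jump back to the fast path of the following instruction. The
// asserts after the switch check that each emitSlow_ function linked exactly the
// slow-case entries recorded for its instruction.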
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path executions of that
    // instruction and the slow-path executions. Furthermore, if the slow-path code created
    // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_check_tdz)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_string)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_has_structure_property)
        DEFINE_SLOWCASE_OP(op_get_direct_pname)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

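// Top-level driver. Decides the optimization and profiling policy from the DFG
// capability level, emits the prologue and stack check, runs the three codegen
// passes, emits the arity-check entry point and the shared exception handlers,
// then links everything into executable memory and installs it on the CodeBlock.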
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case ModuleCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    // This ensures that we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }

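    // Compute this frame's stack pointer and take the slow path if it would fall
    // below the VM's stack limit. Note the operand order: the branch is taken when
    // the limit is Above the new stack pointer value in regT1.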
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

    stackOverflow.link(this);
    m_bytecodeOffset = 0;
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
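        // A nonzero result from the arity check operation means fixup is needed: it
        // indexes the table of fail-return PCs for this parameter count and is also
        // handed to the shared arity fixup thunk in argumentGPR0.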
        GPRReg thunkReg = GPRInfo::argumentGPR1;
        CodeLocationLabel* failThunkLabels =
            m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
        move(TrustedImmPtr(failThunkLabels), thunkReg);
        loadPtr(BaseIndex(thunkReg, returnValueGPR, timesPtr()), thunkReg);
        move(returnValueGPR, GPRInfo::argumentGPR0);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
        PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
        CodeLocationJump notIndexJump = CodeLocationJump();
        if (Jump(patchableNotIndexJump).isSet())
            notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
        CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);

        *byValCompilationInfo.byValInfo = ByValInfo(
            byValCompilationInfo.bytecodeIndex,
            notIndexJump,
            badTypeJump,
            byValCompilationInfo.arrayMode,
            byValCompilationInfo.arrayProfile,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
            patchBuffer.locationOf(compilationInfo.hotPathBegin),
            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
    }

    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = FINALIZE_CODE(
        patchBuffer,
        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

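// Emit the two shared exception landing pads. The rollback flavor serves the
// operations invoked before this frame is fully set up (the stack overflow and
// arity-check calls above), so it looks up the handler from the caller's frame;
// the plain flavor handles exceptions thrown once the frame is live.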
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).

        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
        jumpToExceptionHandler();
    }
}

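// The frame reserves the callee registers plus the scratch area used by slow-path
// calls, rounded so that the resulting stack pointer keeps the required alignment.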
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

} // namespace JSC

#endif // ENABLE(JIT)