/*
 * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "CodeBlock.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "Operations.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

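// The ctiPatch* helpers below rewrite an already-linked call in generated code:
// given the return address of a call site (which uniquely identifies the call
// instruction), they redirect that call to a new trampoline or C function.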
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : m_interpreter(vm->interpreter)
    , m_vm(vm)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_propertyAccessInstructionIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex(JSStack::ReturnPC)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
    , m_randomGenerator(cryptographicallyRandomNumber())
#if ENABLE(VALUE_PROFILER)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
#endif
{
}

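// Emitted at points where baseline code may tier up to the DFG. The CodeBlock's
// execute counter runs upward from a negative value; each entry adds
// executionCounterIncrementForEntry(), and once the counter becomes
// non-negative we call out to cti_optimize to request optimized compilation.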
#if ENABLE(DFG_JIT)
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    Jump skipOptimize = branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
    JITStubCall stubCall(this, cti_optimize);
    stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
    ASSERT(!m_bytecodeOffset);
    stubCall.call();
    skipOptimize.link(this);
}
#endif

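// The DEFINE_* macros generate one switch case per opcode for the compilation
// passes below. Each case emits code for the instruction, then advances
// m_bytecodeOffset past it. For example, DEFINE_OP(op_mov) expands (roughly) to:
//
//     case op_mov: {
//         emit_op_mov(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_mov);
//         break;
//     }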
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

// The binary and unary arithmetic/comparison ops dispatch to generic slow-path
// functions; the definitions are identical for JSVALUE32_64 and JSVALUE64, so a
// single definition suffices.
#define DEFINE_BINARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_UNARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

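// First pass: walk the bytecode linearly and emit fast-path code for each
// instruction, recording a label per bytecode offset so that jumps, exception
// handlers, and the JIT code map can later be resolved to machine addresses.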
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if USE(JSVALUE64)
        if (atJumpTarget())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        switch (opcodeID) {
        DEFINE_BINARY_OP(del_by_val)
        DEFINE_BINARY_OP(in)
        DEFINE_BINARY_OP(less)
        DEFINE_BINARY_OP(lesseq)
        DEFINE_BINARY_OP(greater)
        DEFINE_BINARY_OP(greatereq)
        DEFINE_UNARY_OP(is_function)
        DEFINE_UNARY_OP(is_object)
        DEFINE_UNARY_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)

        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

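// Second pass: link the intra-procedural jumps recorded during the main pass
// to the labels of their target bytecode offsets.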
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

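// Third pass: emit out-of-line slow paths. Every fast path that can fail
// appended entries to m_slowCases; here we link those jumps, emit the slow
// code, and jump back to the fast-path continuation.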
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

#if ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

#if ENABLE(VALUE_PROFILER)
        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

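// Copies the patch locations recorded while emitting a get_by_id/put_by_id
// fast path into the CodeBlock's StructureStubInfo, expressed as deltas from
// hotPathBegin so the inline cache can be repatched later.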
ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer& linkBuffer)
{
    ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
    info.codeOrigin = CodeOrigin(bytecodeIndex);
    info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);
    info.hotPathBegin = linkBuffer.locationOf(hotPathBegin);

    switch (m_type) {
    case GetById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
        info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
        info.patch.baseline.u.get.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
        info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
#else
        info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1));
        info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2));
#endif
        info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult));
        info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation));
        break;
    }
    case PutById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
        info.patch.baseline.u.put.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
        info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
#else
        info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1));
        info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2));
#endif
        break;
    }
    }
}

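// Top-level driver for baseline compilation: decide how much profiling to emit
// based on what the DFG could do with this code block, emit the prologue, run
// the three compilation passes, then link the result into a JITCode object.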
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
#if ENABLE(VALUE_PROFILER)
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }
#endif

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

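    // For function code, profile the incoming arguments and emit a stack
    // check before falling through into the function body.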
    Jump stackCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if ENABLE(DFG_JIT)
#if DFG_ENABLE(SUCCESS_STATS)
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), regT4);
            }
        }
#endif

        addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1);
    }

    Label functionBody = label();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

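    // Out-of-line tail for function code: the slow path of the stack check
    // (a call to cti_stack_check that then resumes at the function body),
    // followed by the arity-check entry point taken when the caller passed
    // the wrong number of arguments.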
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_stack_check).call();
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(functionBody);

        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        emitNakedCall(m_vm->getCTIStub(arityFixup).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
        m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
        info.calleeGPR = regT0;
    }

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    if (canBeOptimizedOrInlined()
#if ENABLE(LLINT)
        || true
#endif
        ) {
        CompactJITCodeMap::Encoder jitCodeMapEncoder;
        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
            if (m_labels[bytecodeOffset].isSet())
                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
        }
        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
    }
#endif

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)),
        withArityCheck);

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

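// Links a call site to a concrete callee: record the callee in the
// CallLinkInfo, repatch the near call to jump straight to the callee's code,
// and repoint the slow path at the appropriate closure-call or virtual-call
// thunk so we stop trying to link.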
void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    ASSERT(!callLinkInfo->isLinked());
    callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
    callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
    repatchBuffer.relink(callLinkInfo->hotPathOther, code);

    if (calleeCodeBlock)
        calleeCodeBlock->linkIncomingCall(exec, callLinkInfo);

    // Patch the slow path so we do not continue to try to link.
    if (kind == CodeForCall) {
        ASSERT(callLinkInfo->callType == CallLinkInfo::Call
            || callLinkInfo->callType == CallLinkInfo::CallVarargs);
        if (callLinkInfo->callType == CallLinkInfo::Call) {
            repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallGenerator).code());
            return;
        }

        repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallGenerator).code());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructGenerator).code());
}

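// Gives up on linking a call site, repointing its slow path at the generic
// virtual-call thunk.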
void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallGenerator).code());
}

} // namespace JSC

#endif // ENABLE(JIT)