/*
 * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "CodeBlock.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "Operations.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

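// Helpers that repatch an existing (near) call instruction, identified by the
// return address it pushes, so that it targets a new trampoline or function.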
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

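// Note the sentinel initializers ((unsigned)-1 / UINT_MAX): m_bytecodeOffset
// and the instruction indices are deliberately invalid outside of active code
// generation so that stray uses can be caught by ASSERTs.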
JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_propertyAccessInstructionIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex(JSStack::ReturnPC)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
    , m_randomGenerator(cryptographicallyRandomNumber())
#if ENABLE(VALUE_PROFILER)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
#endif
{
}

#if ENABLE(DFG_JIT)
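// Entry check for tier-up: bump the CodeBlock's execution counter and, if it
// becomes non-negative, call operationOptimize. A non-zero result is treated
// as the address of optimized code and jumped to directly; otherwise control
// falls through to the baseline code.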
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueRegister));
    jump(returnValueRegister);
    skipOptimize.link(this);
}
#endif

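// The DEFINE_*_OP macros below generate the switch cases for the compile
// passes. NEXT_OPCODE advances m_bytecodeOffset by the static length of the
// opcode just handled and breaks out of the switch. The binary/unary ops are
// routed through a generic JITSlowPathCall rather than bespoke emitters.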
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_UNARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_UNARY_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

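// The main pass walks the bytecode linearly, recording a label for every
// bytecode offset (so jumps can be linked later) and dispatching each opcode
// to its fast-path emitter; slow cases are queued in m_slowCases for the
// slow-case pass below. As a rough illustration, DEFINE_OP(op_add) in the
// switch expands to:
//
//     case op_add: {
//         emit_op_add(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_add);
//         break;
//     }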
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if USE(JSVALUE64)
        if (atJumpTarget())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        switch (opcodeID) {
        DEFINE_BINARY_OP(del_by_val)
        DEFINE_BINARY_OP(in)
        DEFINE_BINARY_OP(less)
        DEFINE_BINARY_OP(lesseq)
        DEFINE_BINARY_OP(greater)
        DEFINE_BINARY_OP(greatereq)
        DEFINE_UNARY_OP(is_function)
        DEFINE_UNARY_OP(is_object)
        DEFINE_UNARY_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)

        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

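// The link pass binds every jump recorded during the main pass to the label of
// its target bytecode offset.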
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

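// The slow-case pass emits the out-of-line code for every SlowCaseEntry queued
// by the main pass. Each slow path ends with emitJumpSlowToHot, which returns
// control to the corresponding fast path.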
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

#if ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path executions of that
    // instruction and the slow-path executions. Furthermore, if the slow-path code created
    // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

#if ENABLE(VALUE_PROFILER)
        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_callee)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

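// Copies the code offsets recorded for a property access during compilation
// into the CodeBlock's StructureStubInfo, expressed as deltas from the call
// return location so the inline caches can be repatched later.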
ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer& linkBuffer)
{
    ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
    info.codeOrigin = CodeOrigin(bytecodeIndex);
    info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);

    info.patch.deltaCheckImmToCall = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(structureToCompare), info.callReturnLocation);
    info.patch.deltaCallToStructCheck = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(structureCheck));

    info.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(coldPathBegin));
    info.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(done));
    info.patch.deltaCallToStorageLoad = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(propertyStorageLoad));

    info.patch.baseGPR = GPRInfo::regT0;

    RegisterSet usedRegisters;
    usedRegisters.set(GPRInfo::regT0);

#if USE(JSVALUE64) // JSVALUE cases
    switch (m_type) {
    case GetById:
        info.patch.deltaCallToLoadOrStore = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(getDisplacementLabel));
        info.patch.valueGPR = GPRInfo::regT0;
        break;
    case PutById:
        info.patch.deltaCallToLoadOrStore = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(putDisplacementLabel));
        info.patch.valueGPR = GPRInfo::regT1;
        usedRegisters.set(GPRInfo::regT1);
        break;
    }
#else // JSVALUE cases
    switch (m_type) {
    case GetById:
        info.patch.deltaCallToTagLoadOrStore = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(getDisplacementLabel2));
        info.patch.deltaCallToPayloadLoadOrStore = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(getDisplacementLabel1));
        info.patch.valueGPR = GPRInfo::regT0;
        info.patch.valueTagGPR = GPRInfo::regT1;
        usedRegisters.set(GPRInfo::regT1);
        break;
    case PutById:
        info.patch.deltaCallToTagLoadOrStore = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(putDisplacementLabel2));
        info.patch.deltaCallToPayloadLoadOrStore = MacroAssembler::differenceBetweenCodePtr(info.callReturnLocation, linkBuffer.locationOf(putDisplacementLabel1));
        info.patch.valueGPR = GPRInfo::regT2;
        info.patch.valueTagGPR = GPRInfo::regT3;
        usedRegisters.set(GPRInfo::regT2);
        usedRegisters.set(GPRInfo::regT3);
        break;
    }
#endif // JSVALUE cases

    info.patch.usedRegisters = usedRegisters;
    info.patch.registersFlushed = true;
}

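// Top-level driver for a baseline compile: pick the profiling/optimization
// policy for this CodeBlock, emit the prologue (including the stack and arity
// checks for function code), run the main/link/slow-case passes, emit the
// exception handlers, then link everything into executable memory and install
// the finished code on the CodeBlock.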
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
#if ENABLE(VALUE_PROFILER)
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }
#endif

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump stackCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if ENABLE(DFG_JIT)
#if DFG_ENABLE(SUCCESS_STATS)
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), regT4);
            }
        }
#endif

        addPtr(TrustedImm32(-m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1);
    }

    Label functionBody = label();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackCheck.link(this);
        m_bytecodeOffset = 0;
        callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(functionBody);

        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (returnValueRegister != regT0)
            move(returnValueRegister, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        emitNakedCall(m_vm->getCTIStub(arityFixup).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
        m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
        info.calleeGPR = regT0;
    }

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    if (canBeOptimizedOrInlined()
#if ENABLE(LLINT)
        || true
#endif
        ) {
        CompactJITCodeMap::Encoder jitCodeMapEncoder;
        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
            if (m_labels[bytecodeOffset].isSet())
                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
        }
        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
    }
#endif

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)),
        withArityCheck);

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

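// Links a call site to its resolved callee: record the callee on the
// CallLinkInfo, point the hot path at the callee's code, and repatch the slow
// path to the appropriate thunk so linking is not retried.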
void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    ASSERT(!callLinkInfo->isLinked());
    callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
    callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
    repatchBuffer.relink(callLinkInfo->hotPathOther, code);

    if (calleeCodeBlock)
        calleeCodeBlock->linkIncomingCall(exec, callLinkInfo);

    // Patch the slow path so we do not continue to try to link.
    if (kind == CodeForCall) {
        ASSERT(callLinkInfo->callType == CallLinkInfo::Call
               || callLinkInfo->callType == CallLinkInfo::CallVarargs);
        if (callLinkInfo->callType == CallLinkInfo::Call) {
            repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
            return;
        }

        repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
}

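// Re-points a call's slow path at the generic virtual call thunk; judging from
// this file alone, this forces the site back to fully generic dispatch.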
void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallThunkGenerator).code());
}

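// Emits the shared landing pad for all exception checks recorded during
// compilation: materialize the faulting CallFrame* as the single argument
// (masking off the host call frame flag where required), call
// lookupExceptionHandler, and jump to the handler address it returns.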
void JIT::privateCompileExceptionHandlers()
{
    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
        return;

    Jump doLookup;

    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        // Remove hostCallFrameFlag from the caller frame pointer.
        m_exceptionChecksWithCallFrameRollback.link(this);
        emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, GPRInfo::argumentGPR0);
        andPtr(TrustedImm32(safeCast<int32_t>(~CallFrame::hostCallFrameFlag())), GPRInfo::argumentGPR0);
        doLookup = jump();
    }

    if (!m_exceptionChecks.empty())
        m_exceptionChecks.link(this);

    // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

    if (doLookup.isSet())
        doLookup.link(this);

#if CPU(X86)
    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
    poke(GPRInfo::argumentGPR0);
#endif
    m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
    // lookupExceptionHandler leaves the handler CallFrame* in returnValueGPR,
    // and the address of the handler in returnValueGPR2.
    jump(GPRInfo::returnValueGPR2);
}

} // namespace JSC

#endif // ENABLE(JIT)