/*
 * Copyright (C) 2008, 2009, 2012-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "PCToCodeOriginMap.h"
#include "ProfilerDatabase.h"
#include "ResultType.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

double totalBaselineCompileTime;
double totalDFGCompileTime;
double totalFTLCompileTime;
double totalFTLDFGCompileTime;
double totalFTLB3CompileTime;

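// Patch the call instruction whose return address is |returnAddress| so that it
// targets |newCalleeFunction| instead. Inline cache code uses this to swap a
// generic slow-path operation for a specialized generated stub (and back).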
void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    MacroAssembler::repatchCall(
        CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
        newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_pcToCodeOriginMapBuilder(*vm)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

JIT::~JIT()
{
}

#if ENABLE(DFG_JIT)
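// Entry-point tier-up check: bump the execution counter and, while it is still
// below threshold (the add result stays negative), skip past. Otherwise call
// operationOptimize; a non-zero result is an entrypoint into optimized code,
// which we jump to after installing the stack pointer it hands back.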
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);

    copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer();

    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

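// If the watchpoint set may still be valid at runtime, emit a check so that
// writes which need to fire it fall off onto the slow path. An already
// invalidated set needs no check at all.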
void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated)
        return;

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitNotifyWrite(GPRReg pointerToSet)
{
    addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

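// Each DEFINE_* macro expands to one switch case in the compile loops below: it
// emits machine code for the opcode, then advances m_bytecodeOffset past the
// instruction so the loop moves on to the next one.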
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

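// For example, DEFINE_OP(op_add) expands (roughly) to:
//
//     case op_add: {
//         emit_op_add(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_add);
//         break;
//     }
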
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

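        // Record where this bytecode's machine code begins; jump linking, switch
        // tables, and exception handler lookup all resolve targets through m_labels.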
        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        unsigned bytecodeOffset = m_bytecodeOffset;

        switch (opcodeID) {
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_tail_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_tail_call_varargs)
        DEFINE_OP(op_tail_call_forward_arguments)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_create_direct_arguments)
        DEFINE_OP(op_create_scoped_arguments)
        DEFINE_OP(op_create_cloned_arguments)
        DEFINE_OP(op_argument_count)
        DEFINE_OP(op_copy_rest)
        DEFINE_OP(op_get_rest_length)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_assert)
        DEFINE_OP(op_save)
        DEFINE_OP(op_resume)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_del_by_val)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_try_get_by_id)
        case op_get_array_length:
        case op_get_by_id_proto_load:
        case op_get_by_id_unset:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_id_with_this)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_by_val_with_this)
        DEFINE_OP(op_overrides_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_instanceof_custom)
        DEFINE_OP(op_is_empty)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_watchdog)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_generator_func)
        DEFINE_OP(op_new_generator_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_push_with_scope)
        DEFINE_OP(op_create_lexical_environment)
        DEFINE_OP(op_get_parent_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_id_with_this)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_by_val_with_this)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter_by_id)
        DEFINE_OP(op_put_getter_by_val)
        DEFINE_OP(op_put_setter_by_val)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_set_function_name)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)

        DEFINE_OP(op_get_enumerable_length)
        DEFINE_OP(op_has_generic_property)
        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_get_property_enumerator)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)
        DEFINE_OP(op_to_index_string)

        DEFINE_OP(op_log_shadow_chicken_prologue)
        DEFINE_OP(op_log_shadow_chicken_tail)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (false)
            dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = nullptr;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_tail_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_forward_arguments)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_check_tdz)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_try_get_by_id)
        case op_get_array_length:
        case op_get_by_id_proto_load:
        case op_get_by_id_unset:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_instanceof_custom)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_watchdog)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_string)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_has_structure_property)
        DEFINE_SLOWCASE_OP(op_get_direct_pname)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (false)
            dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

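        // Rejoin the fast path. NEXT_OPCODE already advanced m_bytecodeOffset inside
        // the switch, so a relative offset of 0 targets the bytecode after this one.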
        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    double before = 0;
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();

    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case ModuleCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    m_codeBlock->setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); // Might be able to remove as this is probably already set to this value.

    // This ensures that we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));

    if (Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))
        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }

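    // Compute what the stack pointer will be for this frame and compare it against
    // the VM's stack limit before committing to it; overflow is handled out of line.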
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();

    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    stackOverflow.link(this);
    m_bytecodeOffset = 0;
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

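    // Functions get a second entrypoint that validates the incoming argument count
    // first: if too few arguments were passed, call the arity-check operation and
    // run the arity fixup thunk before falling through to the normal entry at
    // beginLabel.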
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
        move(returnValueGPR, GPRInfo::argumentGPR0);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
        PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
        CodeLocationJump notIndexJump = CodeLocationJump();
        if (Jump(patchableNotIndexJump).isSet())
            notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
        CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);

        *byValCompilationInfo.byValInfo = ByValInfo(
            byValCompilationInfo.bytecodeIndex,
            notIndexJump,
            badTypeJump,
            byValCompilationInfo.arrayMode,
            byValCompilationInfo.arrayProfile,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
            patchBuffer.locationOf(compilationInfo.hotPathBegin),
            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
    }

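    // Encode a compact bytecode-offset -> machine-code-offset map; the DFG consults
    // this when it needs an entrypoint into baseline code (e.g. for OSR exit).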
    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::dumpDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (m_compilation) {
        if (Options::disassembleBaselineForProfiler())
            m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, m_compilation);
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));

    CodeRef result = FINALIZE_CODE(
        patchBuffer,
        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();

        if (Options::reportTotalCompileTimes())
            totalBaselineCompileTime += after - before;
    }
    if (UNLIKELY(reportCompileTimes())) {
        CString codeBlockName = toCString(*m_codeBlock);

        dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", patchBuffer.size(), " bytes in ", after - before, " ms.\n");
    }

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

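// Out-of-line exception paths: every emitted exception check jumps here, where we
// save callee-saves into the VM entry frame buffer and hand control to the C++
// handler-lookup operations before jumping to the located handler.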
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
        jumpToExceptionHandler();
    }
}

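// The frame must be large enough that slow-path calls can be made without moving
// the stack pointer, and the total must stay aligned to stackAlignmentRegisters().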
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

bool JIT::reportCompileTimes()
{
    return Options::reportCompileTimes() || Options::reportBaselineCompileTimes();
}

bool JIT::computeCompileTimes()
{
    return reportCompileTimes() || Options::reportTotalCompileTimes();
}

HashMap<CString, double> JIT::compileTimeStats()
{
    HashMap<CString, double> result;
    if (Options::reportTotalCompileTimes()) {
        result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime);
        result.add("Baseline Compile Time", totalBaselineCompileTime);
#if ENABLE(DFG_JIT)
        result.add("DFG Compile Time", totalDFGCompileTime);
#if ENABLE(FTL_JIT)
        result.add("FTL Compile Time", totalFTLCompileTime);
        result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
        result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
#endif // ENABLE(FTL_JIT)
#endif // ENABLE(DFG_JIT)
    }
    return result;
}

} // namespace JSC

#endif // ENABLE(JIT)