Source/JavaScriptCore/jit/JIT.cpp
/*
 * Copyright (C) 2008, 2009, 2012-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "PCToCodeOriginMap.h"
#include "ProfilerDatabase.h"
#include "ResultType.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/SimpleStats.h>

using namespace std;

namespace JSC {

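// Running totals, in milliseconds, of time spent in each compiler tier. The baseline total
// is accumulated in compileWithoutLinking() when Options::reportTotalCompileTimes() is set;
// the other tiers update their totals from their own drivers. compileTimeStats() reports them.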
double totalBaselineCompileTime;
double totalDFGCompileTime;
double totalFTLCompileTime;
double totalFTLDFGCompileTime;
double totalFTLB3CompileTime;

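// Repatches the call whose return address is 'returnAddress' so that it targets
// newCalleeFunction. Slow-path callers use this to redirect a call site to a different
// operation after the fact.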
void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    MacroAssembler::repatchCall(
        CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
        newCalleeFunction);
}

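// Builds the call-to-native thunk for a host function. When the VM cannot use the JIT we
// hand back the LLInt's native call trampoline instead of generating any code.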
JIT::CodeRef JIT::compileCTINativeCall(VM* vm, NativeFunction func)
{
    if (!vm->canUseJIT())
        return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
    JIT jit(vm, 0);
    return jit.privateCompileCTINativeCall(vm, func);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_pcToCodeOriginMapBuilder(*vm)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

JIT::~JIT()
{
}

#if ENABLE(DFG_JIT)
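// Bumps this code block's execution counter and, once it goes non-negative, calls
// operationOptimize. A non-zero result means optimized code is ready: returnValueGPR holds
// the entry point and returnValueGPR2 the stack pointer to switch to, so we jump straight
// into the optimized code. Otherwise we fall through and keep running baseline code.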
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);

    copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer();

    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

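// Notification half of the watchpoint protocol for stores. If the set is absent or already
// invalidated there is nothing to fire, so an empty Jump keeps the slow-case list in sync;
// otherwise any state other than IsInvalidated branches to the slow path, which does the
// actual firing. The GPRReg overload handles sets whose address is only known at runtime.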
void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated) {
        addSlowCase(Jump());
        return;
    }

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitNotifyWrite(GPRReg pointerToSet)
{
    addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
}

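// Debug-only sanity check: recompute where the stack pointer ought to be for this code
// block and trap (breakpoint) if the real stack pointer disagrees.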
void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

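// Helper macros for the opcode dispatch switches below. NEXT_OPCODE advances
// m_bytecodeOffset by the length of the opcode just handled; DEFINE_SLOW_OP sends an opcode
// entirely through its C++ slow path, while DEFINE_OP and DEFINE_SLOWCASE_OP dispatch to the
// per-opcode emit_* and emitSlow_* helpers.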
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

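// First pass: walk the bytecode and emit the fast path for every instruction, recording a
// label per bytecode offset (used for jump linking and the JIT code map) and accumulating
// the slow-case jumps, IC records, and call link info consumed by the later passes.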
void JIT::privateCompileMainPass()
{
    if (false)
        dataLog("Compiling ", *m_codeBlock, "\n");

    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_instructions.size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        unsigned bytecodeOffset = m_bytecodeOffset;

        switch (opcodeID) {
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_tail_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_tail_call_varargs)
        DEFINE_OP(op_tail_call_forward_arguments)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_create_direct_arguments)
        DEFINE_OP(op_create_scoped_arguments)
        DEFINE_OP(op_create_cloned_arguments)
        DEFINE_OP(op_get_argument)
        DEFINE_OP(op_argument_count)
        DEFINE_OP(op_create_rest)
        DEFINE_OP(op_get_rest_length)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_assert)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_del_by_val)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_try_get_by_id)
        case op_get_array_length:
        case op_get_by_id_proto_load:
        case op_get_by_id_unset:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_id_with_this)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_by_val_with_this)
        DEFINE_OP(op_overrides_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_instanceof_custom)
        DEFINE_OP(op_is_empty)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_is_cell_with_type)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_watchdog)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_generator_func)
        DEFINE_OP(op_new_generator_func_exp)
        DEFINE_OP(op_new_async_func)
        DEFINE_OP(op_new_async_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_pow)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_push_with_scope)
        DEFINE_OP(op_create_lexical_environment)
        DEFINE_OP(op_get_parent_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_id_with_this)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_by_val_with_this)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter_by_id)
        DEFINE_OP(op_put_getter_by_val)
        DEFINE_OP(op_put_setter_by_val)
        DEFINE_OP(op_define_data_property)
        DEFINE_OP(op_define_accessor_property)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_set_function_name)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)

        DEFINE_OP(op_get_enumerable_length)
        DEFINE_OP(op_has_generic_property)
        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_get_property_enumerator)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)
        DEFINE_OP(op_to_index_string)

        DEFINE_OP(op_log_shadow_chicken_prologue)
        DEFINE_OP(op_log_shadow_chicken_tail)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (false)
            dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

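// Second pass: link the intra-code-block jumps recorded in m_jmpTable against the
// per-bytecode labels captured during the main pass.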
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

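// Third pass: emit the out-of-line slow paths for the slow-case jumps recorded by the main
// pass, keeping the get/put-by-id, by-val, and call-link indices in lockstep so each slow
// path pairs with the fast path that created it.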
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_tail_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_forward_arguments)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_check_tdz)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_try_get_by_id)
        case op_get_array_length:
        case op_get_by_id_proto_load:
        case op_get_by_id_unset:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_instanceof_custom)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_watchdog)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_string)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_has_structure_property)
        DEFINE_SLOWCASE_OP(op_get_direct_pname)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (false)
            dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

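// Top-level baseline code generation (everything except final linking). Snapshots the
// bytecode under the ConcurrentJITLocker, derives the profiling/tier-up policy from the DFG
// capability level, emits the prologue, stack check, and arity-check entry, runs the three
// compilation passes plus the exception handlers, and leaves the result in m_linkBuffer.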
void JIT::compileWithoutLinking(JITCompilationEffort effort)
{
    double before = 0;
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();

    {
        ConcurrentJITLocker locker(m_codeBlock->m_lock);
        m_instructions = m_codeBlock->instructions().clone();
    }

    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case ModuleCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    if (Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))
        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1);

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();

    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();

    RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    stackOverflow.link(this);
    m_bytecodeOffset = 0;
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    if (m_codeBlock->codeType() == FunctionCode) {
        m_arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

        load32(payloadFor(CallFrameSlot::argumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
        move(returnValueGPR, GPRInfo::argumentGPR0);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*m_vm, *this, m_codeBlock, effort));

    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();

        if (Options::reportTotalCompileTimes())
            totalBaselineCompileTime += after - before;
    }
    if (UNLIKELY(reportCompileTimes())) {
        CString codeBlockName = toCString(*m_codeBlock);

        dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", m_linkBuffer->size(), " bytes in ", after - before, " ms.\n");
    }
}

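// Finalizes the code produced by compileWithoutLinking(): resolves switch tables and
// exception handler targets, links outgoing calls, finalizes the property-access and by-val
// ICs, records the bytecode-to-machine-code map, and installs the resulting DirectJITCode
// (with its arity-check entry point) on the CodeBlock.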
CompilationResult JIT::link()
{
    LinkBuffer& patchBuffer = *m_linkBuffer;

    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    if (m_byValCompilationInfo.size()) {
        CodeLocationLabel exceptionHandler = patchBuffer.locationOf(m_exceptionHandler);

        for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
            PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
            CodeLocationJump notIndexJump = CodeLocationJump();
            if (Jump(patchableNotIndexJump).isSet())
                notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
            CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
            CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
            CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
            CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
            CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);

            *byValCompilationInfo.byValInfo = ByValInfo(
                byValCompilationInfo.bytecodeIndex,
                notIndexJump,
                badTypeJump,
                exceptionHandler,
                byValCompilationInfo.arrayMode,
                byValCompilationInfo.arrayProfile,
                differenceBetweenCodePtr(badTypeJump, doneTarget),
                differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
                differenceBetweenCodePtr(returnAddress, slowPathTarget));
        }
    }

    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(
            CodeLocationLabel(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation)),
            CodeLocationLabel(patchBuffer.locationOf(compilationInfo.hotPathBegin)),
            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
    }

    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(m_arityCheck);

    if (Options::dumpDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (m_compilation) {
        if (Options::disassembleBaselineForProfiler())
            m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, m_compilation);
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));

    CodeRef result = FINALIZE_CODE(
        patchBuffer,
        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_instructions.size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

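// Convenience entry point: main-thread preparation, code generation, then linking.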
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    doMainThreadPreparationBeforeCompile();
    compileWithoutLinking(effort);
    return link();
}

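// Emits the two shared exception-handling tails: one for checks that must unwind the
// current frame first (m_exceptionChecksWithCallFrameRollback, which calls
// lookupExceptionHandlerFromCallerFrame) and one for ordinary checks (m_exceptionChecks,
// which calls lookupExceptionHandler). Both then jump to the located handler.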
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).

        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) {
        m_exceptionHandler = label();
        m_exceptionChecks.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
        jumpToExceptionHandler();
    }
}

void JIT::doMainThreadPreparationBeforeCompile()
{
    // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));
}

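// Frame layout helpers. frameRegisterCountFor() gives the number of Register-sized slots
// the baseline frame reserves for a code block (callee locals plus the worst-case slow-path
// call extent), rounded so the frame stays aligned; stackPointerOffsetFor() turns that count
// into the stack pointer's offset from the call frame, in registers.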
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

bool JIT::reportCompileTimes()
{
    return Options::reportCompileTimes() || Options::reportBaselineCompileTimes();
}

bool JIT::computeCompileTimes()
{
    return reportCompileTimes() || Options::reportTotalCompileTimes();
}

HashMap<CString, double> JIT::compileTimeStats()
{
    HashMap<CString, double> result;
    if (Options::reportTotalCompileTimes()) {
        result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime);
        result.add("Baseline Compile Time", totalBaselineCompileTime);
#if ENABLE(DFG_JIT)
        result.add("DFG Compile Time", totalDFGCompileTime);
#if ENABLE(FTL_JIT)
        result.add("FTL Compile Time", totalFTLCompileTime);
        result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
        result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
#endif // ENABLE(FTL_JIT)
#endif // ENABLE(DFG_JIT)
    }
    return result;
}

} // namespace JSC

#endif // ENABLE(JIT)