/*
 * Copyright (C) 2008, 2009, 2012-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "PCToCodeOriginMap.h"
#include "ProfilerDatabase.h"
#include "ResultType.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

double totalBaselineCompileTime;
double totalDFGCompileTime;
double totalFTLCompileTime;
double totalFTLDFGCompileTime;
double totalFTLB3CompileTime;

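// Repatch the call instruction that returns to the given address so that it
// targets newCalleeFunction instead.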
void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    MacroAssembler::repatchCall(
        CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
        newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_pcToCodeOriginMapBuilder(*vm)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

#if ENABLE(DFG_JIT)
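// Add the entry increment to the execution counter; while the result is still
// negative, skip the optimization attempt. Otherwise call operationOptimize:
// a non-null return value is a pointer to optimized code, with returnValueGPR2
// holding the stack pointer to adopt before jumping into it.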
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);

    copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer();

    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

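// Take the slow path whenever the watchpoint set is in any state other than
// IsInvalidated; an already-invalidated set needs no notification.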
void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated)
        return;

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitNotifyWrite(GPRReg pointerToSet)
{
    addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
}

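// Debug-only check that the stack pointer sits exactly where
// stackPointerOffsetFor() says it should for this code block.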
void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

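// The DEFINE_* macros below expand each bytecode into a call to its emitter
// (emit_<op>, emitSlow_<op>, or a shared slow-path call) and then advance
// m_bytecodeOffset by the opcode's length.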
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

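// First pass: walk the bytecode stream in order, record a label for every
// instruction (so jumps can be linked later), and emit the fast-path code
// for each opcode.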
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        unsigned bytecodeOffset = m_bytecodeOffset;

        switch (opcodeID) {
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_tail_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_tail_call_varargs)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_create_direct_arguments)
        DEFINE_OP(op_create_scoped_arguments)
        DEFINE_OP(op_create_cloned_arguments)
        DEFINE_OP(op_copy_rest)
        DEFINE_OP(op_get_rest_length)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_assert)
        DEFINE_OP(op_save)
        DEFINE_OP(op_resume)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_del_by_val)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_try_get_by_id)
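        // op_get_array_length falls through to the op_get_by_id emitter.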
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_id_with_this)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_by_val_with_this)
        DEFINE_OP(op_overrides_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_instanceof_custom)
        DEFINE_OP(op_is_empty)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_watchdog)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_generator_func)
        DEFINE_OP(op_new_generator_func_exp)
        DEFINE_OP(op_new_arrow_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_push_with_scope)
        DEFINE_OP(op_create_lexical_environment)
        DEFINE_OP(op_get_parent_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_id_with_this)
        DEFINE_OP(op_put_by_index)
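        // op_put_by_val_direct falls through to the op_put_by_val emitter.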
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_by_val_with_this)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter_by_id)
        DEFINE_OP(op_put_getter_by_val)
        DEFINE_OP(op_put_setter_by_val)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_set_function_name)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)

        DEFINE_OP(op_get_enumerable_length)
        DEFINE_OP(op_has_generic_property)
        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_get_property_enumerator)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)
        DEFINE_OP(op_to_index_string)

        DEFINE_OP(op_log_shadow_chicken_prologue)
        DEFINE_OP(op_log_shadow_chicken_tail)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (false)
            dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

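// Second pass: bind every recorded intra-function jump to the label of its
// target bytecode offset.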
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

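// Third pass: emit the out-of-line slow paths for every slow-case jump
// recorded during the main pass. Each slow path ends by jumping back to its
// fast-path continuation.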
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_tail_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_check_tdz)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_try_get_by_id)
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_instanceof_custom)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_watchdog)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_string)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_has_structure_property)
        DEFINE_SLOWCASE_OP(op_get_direct_pname)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (false)
            dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

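// Top-level baseline compilation: decide the profiling/optimization policy
// from the DFG capability level, emit the prologue and the arity-check entry,
// run the three compilation passes, then link everything through a LinkBuffer.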
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    double before = 0;
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();

    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case ModuleCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    m_codeBlock->setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); // Might be able to remove this, as it is probably already set to this value.

    // Process the type profiler log so we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));

    if (Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))
        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }

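    // Stack overflow check: compute the frame's new stack pointer and take the
    // slow path if the VM's stack limit is above it (the stack grows down).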
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();

    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    stackOverflow.link(this);
    m_bytecodeOffset = 0;
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

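    // Functions get a second entry point that validates the argument count: if
    // enough arguments were passed we jump straight to the normal entry;
    // otherwise we call the arity-check operation and, when it requests a
    // fixup, run the arity fixup thunk before entering at beginLabel.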
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
        move(returnValueGPR, GPRInfo::argumentGPR0);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
        PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
        CodeLocationJump notIndexJump = CodeLocationJump();
        if (Jump(patchableNotIndexJump).isSet())
            notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
        CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);

        *byValCompilationInfo.byValInfo = ByValInfo(
            byValCompilationInfo.bytecodeIndex,
            notIndexJump,
            badTypeJump,
            byValCompilationInfo.arrayMode,
            byValCompilationInfo.arrayProfile,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
            patchBuffer.locationOf(compilationInfo.hotPathBegin),
            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
    }

    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::dumpDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (m_compilation) {
        if (Options::disassembleBaselineForProfiler())
            m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));

    CodeRef result = FINALIZE_CODE(
        patchBuffer,
        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();

        if (Options::reportTotalCompileTimes())
            totalBaselineCompileTime += after - before;
    }
    if (UNLIKELY(reportCompileTimes())) {
        CString codeBlockName = toCString(*m_codeBlock);

        dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", patchBuffer.size(), " bytes in ", after - before, " ms.\n");
    }

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

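// Link the pending exception checks: each branches to a stub that spills the
// callee saves into the VM's buffer, calls the appropriate handler-lookup
// operation, and jumps to the located exception handler.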
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToVMCalleeSavesBuffer();

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).

        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToVMCalleeSavesBuffer();

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
        jumpToExceptionHandler();
    }
}

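// Number of registers the frame must reserve: the code block's callee locals
// plus the worst-case extent of any slow-path call, rounded (via
// roundLocalRegisterCountForFramePointerOffset) to keep the frame aligned.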
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

bool JIT::reportCompileTimes()
{
    return Options::reportCompileTimes() || Options::reportBaselineCompileTimes();
}

bool JIT::computeCompileTimes()
{
    return reportCompileTimes() || Options::reportTotalCompileTimes();
}

HashMap<CString, double> JIT::compileTimeStats()
{
    HashMap<CString, double> result;
    if (Options::reportTotalCompileTimes()) {
        result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime);
        result.add("Baseline Compile Time", totalBaselineCompileTime);
#if ENABLE(DFG_JIT)
        result.add("DFG Compile Time", totalDFGCompileTime);
#if ENABLE(FTL_JIT)
        result.add("FTL Compile Time", totalFTLCompileTime);
        result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
        result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
#endif // ENABLE(FTL_JIT)
#endif // ENABLE(DFG_JIT)
    }
    return result;
}

} // namespace JSC

#endif // ENABLE(JIT)