[JSC] Remove per-host-function CTI stub in 32-bit environment
Source/JavaScriptCore/jit/JIT.cpp
1 /*
2  * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27
28 #if ENABLE(JIT)
29
30 #include "JIT.h"
31
32 #include "BytecodeGraph.h"
33 #include "CodeBlock.h"
34 #include "CodeBlockWithJITType.h"
35 #include "DFGCapabilities.h"
36 #include "InterpreterInlines.h"
37 #include "JITInlines.h"
38 #include "JITOperations.h"
39 #include "JSArray.h"
40 #include "JSCInlines.h"
41 #include "JSFunction.h"
42 #include "LinkBuffer.h"
43 #include "MaxFrameExtentForSlowPathCall.h"
44 #include "ModuleProgramCodeBlock.h"
45 #include "PCToCodeOriginMap.h"
46 #include "ProfilerDatabase.h"
47 #include "ProgramCodeBlock.h"
48 #include "ResultType.h"
49 #include "SlowPathCall.h"
50 #include "StackAlignment.h"
51 #include "ThunkGenerators.h"
52 #include "TypeProfilerLog.h"
53 #include <wtf/CryptographicallyRandomNumber.h>
54 #include <wtf/GraphNodeWorklist.h>
55 #include <wtf/SimpleStats.h>
56
57 using namespace std;
58
59 namespace JSC {
60
61 double totalBaselineCompileTime;
62 double totalDFGCompileTime;
63 double totalFTLCompileTime;
64 double totalFTLDFGCompileTime;
65 double totalFTLB3CompileTime;
66
67 void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
68 {
69     MacroAssembler::repatchCall(
70         CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
71         newCalleeFunction);
72 }
73
74 JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
75     : JSInterfaceJIT(vm, codeBlock)
76     , m_interpreter(vm->interpreter)
77     , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
78     , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
79     , m_getByIdIndex(UINT_MAX)
80     , m_getByIdWithThisIndex(UINT_MAX)
81     , m_putByIdIndex(UINT_MAX)
82     , m_byValInstructionIndex(UINT_MAX)
83     , m_callLinkInfoIndex(UINT_MAX)
84     , m_pcToCodeOriginMapBuilder(*vm)
85     , m_canBeOptimized(false)
86     , m_shouldEmitProfiling(false)
87     , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset)
88 {
89 }
90
91 JIT::~JIT()
92 {
93 }
94
95 #if ENABLE(DFG_JIT)
96 void JIT::emitEnterOptimizationCheck()
97 {
98     if (!canBeOptimized())
99         return;
100
101     JumpList skipOptimize;
102     
103     skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
104     ASSERT(!m_bytecodeOffset);
105
106     copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
107
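    // operationOptimize hands back a pair in (returnValueGPR, returnValueGPR2): the machine
    // code address to OSR into (null if we should keep running baseline code) and the value
    // to install as the stack pointer before jumping there.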
108     callOperation(operationOptimize, m_bytecodeOffset);
109     skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
110     move(returnValueGPR2, stackPointerRegister);
111     jump(returnValueGPR);
112     skipOptimize.link(this);
113 }
114 #endif
115
116 void JIT::emitNotifyWrite(WatchpointSet* set)
117 {
118     if (!set || set->state() == IsInvalidated) {
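        // An unset Jump still occupies a slot in m_slowCases, so the slow-path pass sees the
        // same number of entries for this site whether or not a runtime check was emitted.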
119         addSlowCase(Jump());
120         return;
121     }
122     
123     addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
124 }
125
126 void JIT::emitNotifyWrite(GPRReg pointerToSet)
127 {
128     addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
129 }
130
131 void JIT::assertStackPointerOffset()
132 {
133     if (ASSERT_DISABLED)
134         return;
135     
136     addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
137     Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
138     breakpoint();
139     ok.link(this);
140 }
141
142 #define NEXT_OPCODE(name) \
143     m_bytecodeOffset += OPCODE_LENGTH(name); \
144     break;
145
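// For loop-OSR-entry compiles, privateCompileMainPass computes startBytecodeOffset and proves
// everything before it unreachable; the macros below skip code generation for those opcodes.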
146 #define DEFINE_SLOW_OP(name) \
147     case op_##name: { \
148         if (m_bytecodeOffset >= startBytecodeOffset) { \
149             JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
150             slowPathCall.call(); \
151         } \
152         NEXT_OPCODE(op_##name); \
153     }
154
155 #define DEFINE_OP(name) \
156     case name: { \
157         if (m_bytecodeOffset >= startBytecodeOffset) { \
158             emit_##name(currentInstruction); \
159         } \
160         NEXT_OPCODE(name); \
161     }
162
163 #define DEFINE_SLOWCASE_OP(name) \
164     case name: { \
165         emitSlow_##name(currentInstruction, iter); \
166         NEXT_OPCODE(name); \
167     }
168
169 void JIT::privateCompileMainPass()
170 {
171     if (false)
172         dataLog("Compiling ", *m_codeBlock, "\n");
173     
174     jitAssertTagsInPlace();
175     jitAssertArgumentCountSane();
176     
177     Instruction* instructionsBegin = m_codeBlock->instructions().begin();
178     unsigned instructionCount = m_instructions.size();
179
180     m_callLinkInfoIndex = 0;
181
182     VM& vm = *m_codeBlock->vm();
183     unsigned startBytecodeOffset = 0;
184     if (m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits(vm, ProgramCodeBlock::info()) || m_codeBlock->inherits(vm, ModuleProgramCodeBlock::info()))) {
185         // We can only do this optimization because we execute ProgramCodeBlocks exactly once.
186         // This optimization would be invalid otherwise. When the LLInt determines it wants to
187         // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
188         // was executing at when it kicked off our compilation. We only need to compile code for
189         // anything reachable from that bytecode offset.
190
191         // We only bother building the bytecode graph if it could save time and executable
192         // memory. We pick an arbitrary offset where we deem this is profitable.
193         if (m_loopOSREntryBytecodeOffset >= 200) {
194             // As a simplification, we don't find all bytecode ranges that are unreachable.
195             // Instead, we just find the minimum bytecode offset that is reachable, and
196             // compile code from that bytecode offset onwards.
197
198             BytecodeGraph<CodeBlock> graph(m_codeBlock, m_instructions);
199             BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeOffset);
200             RELEASE_ASSERT(block);
201
202             GraphNodeWorklist<BytecodeBasicBlock*> worklist;
203             startBytecodeOffset = UINT_MAX;
204             worklist.push(block);
205             while (BytecodeBasicBlock* block = worklist.pop()) {
206                 startBytecodeOffset = std::min(startBytecodeOffset, block->leaderOffset());
207                 worklist.pushAll(block->successors());
208             }
209         }
210     }
211
212     for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
213         if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) {
214             // We've proven all bytecode instructions up until here are unreachable.
215             // Let's ensure that by crashing if it's ever hit.
216             breakpoint();
217         }
218
219         if (m_disassembler)
220             m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
221         Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
222         ASSERT_WITH_MESSAGE(Interpreter::isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
223
224         m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
225
226 #if ENABLE(OPCODE_SAMPLING)
227         if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
228             sampleInstruction(currentInstruction);
229 #endif
230
231         m_labels[m_bytecodeOffset] = label();
232
233 #if ENABLE(JIT_VERBOSE)
234         dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
235 #endif
236         
237         OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode);
238
239         if (UNLIKELY(m_compilation)) {
240             add64(
241                 TrustedImm32(1),
242                 AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
243                     m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
244         }
245         
246         if (Options::eagerlyUpdateTopCallFrame())
247             updateTopCallFrame();
248
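        // NEXT_OPCODE in the handlers below advances m_bytecodeOffset, so capture the offset of
        // the instruction we are about to compile for the logging after the switch.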
249         unsigned bytecodeOffset = m_bytecodeOffset;
250
251         switch (opcodeID) {
252         DEFINE_SLOW_OP(in)
253         DEFINE_SLOW_OP(less)
254         DEFINE_SLOW_OP(lesseq)
255         DEFINE_SLOW_OP(greater)
256         DEFINE_SLOW_OP(greatereq)
257         DEFINE_SLOW_OP(is_function)
258         DEFINE_SLOW_OP(is_object_or_null)
259         DEFINE_SLOW_OP(typeof)
260
261         DEFINE_OP(op_add)
262         DEFINE_OP(op_bitand)
263         DEFINE_OP(op_bitor)
264         DEFINE_OP(op_bitxor)
265         DEFINE_OP(op_call)
266         DEFINE_OP(op_tail_call)
267         DEFINE_OP(op_call_eval)
268         DEFINE_OP(op_call_varargs)
269         DEFINE_OP(op_tail_call_varargs)
270         DEFINE_OP(op_tail_call_forward_arguments)
271         DEFINE_OP(op_construct_varargs)
272         DEFINE_OP(op_catch)
273         DEFINE_OP(op_construct)
274         DEFINE_OP(op_create_this)
275         DEFINE_OP(op_to_this)
276         DEFINE_OP(op_create_direct_arguments)
277         DEFINE_OP(op_create_scoped_arguments)
278         DEFINE_OP(op_create_cloned_arguments)
279         DEFINE_OP(op_get_argument)
280         DEFINE_OP(op_argument_count)
281         DEFINE_OP(op_create_rest)
282         DEFINE_OP(op_get_rest_length)
283         DEFINE_OP(op_check_tdz)
284         DEFINE_OP(op_assert)
285         DEFINE_OP(op_identity_with_profile)
286         DEFINE_OP(op_unreachable)
287         DEFINE_OP(op_debug)
288         DEFINE_OP(op_del_by_id)
289         DEFINE_OP(op_del_by_val)
290         DEFINE_OP(op_div)
291         DEFINE_OP(op_end)
292         DEFINE_OP(op_enter)
293         DEFINE_OP(op_get_scope)
294         DEFINE_OP(op_eq)
295         DEFINE_OP(op_eq_null)
296         DEFINE_OP(op_below)
297         DEFINE_OP(op_beloweq)
298         DEFINE_OP(op_try_get_by_id)
299         case op_get_array_length:
300         case op_get_by_id_proto_load:
301         case op_get_by_id_unset:
302         DEFINE_OP(op_get_by_id)
303         DEFINE_OP(op_get_by_id_with_this)
304         DEFINE_OP(op_get_by_val)
305         DEFINE_OP(op_get_by_val_with_this)
306         DEFINE_OP(op_overrides_has_instance)
307         DEFINE_OP(op_instanceof)
308         DEFINE_OP(op_instanceof_custom)
309         DEFINE_OP(op_is_empty)
310         DEFINE_OP(op_is_undefined)
311         DEFINE_OP(op_is_boolean)
312         DEFINE_OP(op_is_number)
313         DEFINE_OP(op_is_object)
314         DEFINE_OP(op_is_cell_with_type)
315         DEFINE_OP(op_jeq_null)
316         DEFINE_OP(op_jfalse)
317         DEFINE_OP(op_jmp)
318         DEFINE_OP(op_jneq_null)
319         DEFINE_OP(op_jneq_ptr)
320         DEFINE_OP(op_jless)
321         DEFINE_OP(op_jlesseq)
322         DEFINE_OP(op_jgreater)
323         DEFINE_OP(op_jgreatereq)
324         DEFINE_OP(op_jnless)
325         DEFINE_OP(op_jnlesseq)
326         DEFINE_OP(op_jngreater)
327         DEFINE_OP(op_jngreatereq)
328         DEFINE_OP(op_jbelow)
329         DEFINE_OP(op_jbeloweq)
330         DEFINE_OP(op_jtrue)
331         DEFINE_OP(op_loop_hint)
332         DEFINE_OP(op_check_traps)
333         DEFINE_OP(op_nop)
334         DEFINE_OP(op_lshift)
335         DEFINE_OP(op_mod)
336         DEFINE_OP(op_mov)
337         DEFINE_OP(op_mul)
338         DEFINE_OP(op_negate)
339         DEFINE_OP(op_neq)
340         DEFINE_OP(op_neq_null)
341         DEFINE_OP(op_new_array)
342         DEFINE_OP(op_new_array_with_size)
343         DEFINE_OP(op_new_array_buffer)
344         DEFINE_OP(op_new_array_with_spread)
345         DEFINE_OP(op_spread)
346         DEFINE_OP(op_new_func)
347         DEFINE_OP(op_new_func_exp)
348         DEFINE_OP(op_new_generator_func)
349         DEFINE_OP(op_new_generator_func_exp)
350         DEFINE_OP(op_new_async_func)
351         DEFINE_OP(op_new_async_func_exp)
352         DEFINE_OP(op_new_async_generator_func)
353         DEFINE_OP(op_new_async_generator_func_exp)
354         DEFINE_OP(op_new_object)
355         DEFINE_OP(op_new_regexp)
356         DEFINE_OP(op_not)
357         DEFINE_OP(op_nstricteq)
358         DEFINE_OP(op_dec)
359         DEFINE_OP(op_inc)
360         DEFINE_OP(op_pow)
361         DEFINE_OP(op_profile_type)
362         DEFINE_OP(op_profile_control_flow)
363         DEFINE_OP(op_push_with_scope)
364         DEFINE_OP(op_create_lexical_environment)
365         DEFINE_OP(op_get_parent_scope)
366         DEFINE_OP(op_put_by_id)
367         DEFINE_OP(op_put_by_id_with_this)
368         DEFINE_OP(op_put_by_index)
369         case op_put_by_val_direct:
370         DEFINE_OP(op_put_by_val)
371         DEFINE_OP(op_put_by_val_with_this)
372         DEFINE_OP(op_put_getter_by_id)
373         DEFINE_OP(op_put_setter_by_id)
374         DEFINE_OP(op_put_getter_setter_by_id)
375         DEFINE_OP(op_put_getter_by_val)
376         DEFINE_OP(op_put_setter_by_val)
377         DEFINE_OP(op_define_data_property)
378         DEFINE_OP(op_define_accessor_property)
379
380         DEFINE_OP(op_ret)
381         DEFINE_OP(op_rshift)
382         DEFINE_OP(op_unsigned)
383         DEFINE_OP(op_urshift)
384         DEFINE_OP(op_set_function_name)
385         DEFINE_OP(op_strcat)
386         DEFINE_OP(op_stricteq)
387         DEFINE_OP(op_sub)
388         DEFINE_OP(op_switch_char)
389         DEFINE_OP(op_switch_imm)
390         DEFINE_OP(op_switch_string)
391         DEFINE_OP(op_throw)
392         DEFINE_OP(op_throw_static_error)
393         DEFINE_OP(op_to_number)
394         DEFINE_OP(op_to_string)
395         DEFINE_OP(op_to_primitive)
396
397         DEFINE_OP(op_resolve_scope)
398         DEFINE_OP(op_resolve_scope_for_hoisting_func_decl_in_eval)
399         DEFINE_OP(op_get_from_scope)
400         DEFINE_OP(op_put_to_scope)
401         DEFINE_OP(op_get_from_arguments)
402         DEFINE_OP(op_put_to_arguments)
403
404         DEFINE_OP(op_get_enumerable_length)
405         DEFINE_OP(op_has_generic_property)
406         DEFINE_OP(op_has_structure_property)
407         DEFINE_OP(op_has_indexed_property)
408         DEFINE_OP(op_get_direct_pname)
409         DEFINE_OP(op_get_property_enumerator)
410         DEFINE_OP(op_enumerator_structure_pname)
411         DEFINE_OP(op_enumerator_generic_pname)
412         DEFINE_OP(op_to_index_string)
413             
414         DEFINE_OP(op_log_shadow_chicken_prologue)
415         DEFINE_OP(op_log_shadow_chicken_tail)
416         default:
417             RELEASE_ASSERT_NOT_REACHED();
418         }
419
420         if (false)
421             dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
422     }
423
424     RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
425
426 #ifndef NDEBUG
427     // Reset this, in order to guard its use with ASSERTs.
428     m_bytecodeOffset = std::numeric_limits<unsigned>::max();
429 #endif
430 }
431
432 void JIT::privateCompileLinkPass()
433 {
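    // Every bytecode offset now has a label, so resolve the forward jumps that the main pass
    // recorded before their targets had been emitted.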
434     unsigned jmpTableCount = m_jmpTable.size();
435     for (unsigned i = 0; i < jmpTableCount; ++i)
436         m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
437     m_jmpTable.clear();
438 }
439
440 void JIT::privateCompileSlowCases()
441 {
442     Instruction* instructionsBegin = m_codeBlock->instructions().begin();
443
444     m_getByIdIndex = 0;
445     m_getByIdWithThisIndex = 0;
446     m_putByIdIndex = 0;
447     m_byValInstructionIndex = 0;
448     m_callLinkInfoIndex = 0;
449     
450     // Use this to assert that slow-path code associates new profiling sites with existing
451     // ValueProfiles rather than creating new ones. This ensures that for a given instruction
452     // (say, get_by_id) we get combined statistics for both the fast-path executions of that
453     // instruction and the slow-path executions. Furthermore, if the slow-path code created
454     // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
455     // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
456     unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
457
458     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
459         m_bytecodeOffset = iter->to;
460
461         m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
462
463         unsigned firstTo = m_bytecodeOffset;
464
465         Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
466         
467         RareCaseProfile* rareCaseProfile = 0;
468         if (shouldEmitProfiling())
469             rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
470
471 #if ENABLE(JIT_VERBOSE)
472         dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
473 #endif
474         
475         if (m_disassembler)
476             m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());
477
478         switch (Interpreter::getOpcodeID(currentInstruction->u.opcode)) {
479         DEFINE_SLOWCASE_OP(op_add)
480         DEFINE_SLOWCASE_OP(op_bitand)
481         DEFINE_SLOWCASE_OP(op_bitor)
482         DEFINE_SLOWCASE_OP(op_bitxor)
483         DEFINE_SLOWCASE_OP(op_call)
484         DEFINE_SLOWCASE_OP(op_tail_call)
485         DEFINE_SLOWCASE_OP(op_call_eval)
486         DEFINE_SLOWCASE_OP(op_call_varargs)
487         DEFINE_SLOWCASE_OP(op_tail_call_varargs)
488         DEFINE_SLOWCASE_OP(op_tail_call_forward_arguments)
489         DEFINE_SLOWCASE_OP(op_construct_varargs)
490         DEFINE_SLOWCASE_OP(op_construct)
491         DEFINE_SLOWCASE_OP(op_to_this)
492         DEFINE_SLOWCASE_OP(op_check_tdz)
493         DEFINE_SLOWCASE_OP(op_create_this)
494         DEFINE_SLOWCASE_OP(op_div)
495         DEFINE_SLOWCASE_OP(op_eq)
496         DEFINE_SLOWCASE_OP(op_try_get_by_id)
497         case op_get_array_length:
498         case op_get_by_id_proto_load:
499         case op_get_by_id_unset:
500         DEFINE_SLOWCASE_OP(op_get_by_id)
501         DEFINE_SLOWCASE_OP(op_get_by_id_with_this)
502         DEFINE_SLOWCASE_OP(op_get_by_val)
503         DEFINE_SLOWCASE_OP(op_instanceof)
504         DEFINE_SLOWCASE_OP(op_instanceof_custom)
505         DEFINE_SLOWCASE_OP(op_jless)
506         DEFINE_SLOWCASE_OP(op_jlesseq)
507         DEFINE_SLOWCASE_OP(op_jgreater)
508         DEFINE_SLOWCASE_OP(op_jgreatereq)
509         DEFINE_SLOWCASE_OP(op_jnless)
510         DEFINE_SLOWCASE_OP(op_jnlesseq)
511         DEFINE_SLOWCASE_OP(op_jngreater)
512         DEFINE_SLOWCASE_OP(op_jngreatereq)
513         DEFINE_SLOWCASE_OP(op_loop_hint)
514         DEFINE_SLOWCASE_OP(op_check_traps)
515         DEFINE_SLOWCASE_OP(op_lshift)
516         DEFINE_SLOWCASE_OP(op_mod)
517         DEFINE_SLOWCASE_OP(op_mul)
518         DEFINE_SLOWCASE_OP(op_negate)
519         DEFINE_SLOWCASE_OP(op_neq)
520         DEFINE_SLOWCASE_OP(op_new_object)
521         DEFINE_SLOWCASE_OP(op_not)
522         DEFINE_SLOWCASE_OP(op_nstricteq)
523         DEFINE_SLOWCASE_OP(op_dec)
524         DEFINE_SLOWCASE_OP(op_inc)
525         DEFINE_SLOWCASE_OP(op_put_by_id)
526         case op_put_by_val_direct:
527         DEFINE_SLOWCASE_OP(op_put_by_val)
528         DEFINE_SLOWCASE_OP(op_rshift)
529         DEFINE_SLOWCASE_OP(op_unsigned)
530         DEFINE_SLOWCASE_OP(op_urshift)
531         DEFINE_SLOWCASE_OP(op_stricteq)
532         DEFINE_SLOWCASE_OP(op_sub)
533         DEFINE_SLOWCASE_OP(op_to_number)
534         DEFINE_SLOWCASE_OP(op_to_string)
535         DEFINE_SLOWCASE_OP(op_to_primitive)
536         DEFINE_SLOWCASE_OP(op_has_indexed_property)
537         DEFINE_SLOWCASE_OP(op_has_structure_property)
538         DEFINE_SLOWCASE_OP(op_get_direct_pname)
539
540         DEFINE_SLOWCASE_OP(op_resolve_scope)
541         DEFINE_SLOWCASE_OP(op_get_from_scope)
542         DEFINE_SLOWCASE_OP(op_put_to_scope)
543
544         default:
545             RELEASE_ASSERT_NOT_REACHED();
546         }
547
548         if (false)
549             dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");
550
551         RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
552         RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
553         
554         if (shouldEmitProfiling())
555             add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
556
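        // The DEFINE_SLOWCASE_OP handler advanced m_bytecodeOffset past this instruction, so a
        // relative offset of 0 rejoins the main path at the start of the following instruction.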
557         emitJumpSlowToHot(jump(), 0);
558     }
559
560     RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
561     RELEASE_ASSERT(m_getByIdWithThisIndex == m_getByIdsWithThis.size());
562     RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
563     RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
564     RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
565
566 #ifndef NDEBUG
567     // Reset this, in order to guard its use with ASSERTs.
568     m_bytecodeOffset = std::numeric_limits<unsigned>::max();
569 #endif
570 }
571
572 void JIT::compileWithoutLinking(JITCompilationEffort effort)
573 {
574     double before = 0;
575     if (UNLIKELY(computeCompileTimes()))
576         before = monotonicallyIncreasingTimeMS();
577     
578     {
579         ConcurrentJSLocker locker(m_codeBlock->m_lock);
580         m_instructions = m_codeBlock->instructions().clone();
581     }
582
583     DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
584     switch (level) {
585     case DFG::CannotCompile:
586         m_canBeOptimized = false;
587         m_canBeOptimizedOrInlined = false;
588         m_shouldEmitProfiling = false;
589         break;
590     case DFG::CanCompile:
591     case DFG::CanCompileAndInline:
592         m_canBeOptimized = true;
593         m_canBeOptimizedOrInlined = true;
594         m_shouldEmitProfiling = true;
595         break;
596     default:
597         RELEASE_ASSERT_NOT_REACHED();
598         break;
599     }
600     
601     switch (m_codeBlock->codeType()) {
602     case GlobalCode:
603     case ModuleCode:
604     case EvalCode:
605         m_codeBlock->m_shouldAlwaysBeInlined = false;
606         break;
607     case FunctionCode:
608         // We could have already set it to false because we detected an uninlineable call.
609         // Don't override that observation.
610         m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
611         break;
612     }
613
614     if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())))
615         m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
616     if (UNLIKELY(m_vm->m_perBytecodeProfiler)) {
617         m_compilation = adoptRef(
618             new Profiler::Compilation(
619                 m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
620                 Profiler::Baseline));
621         m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
622     }
623     
624     m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));
625
626     if (m_disassembler)
627         m_disassembler->setStartOfCode(label());
628
629     // Just add a little bit of randomness to the codegen
630     if (random() & 1)
631         nop();
632
633     emitFunctionPrologue();
634     emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
635
636     Label beginLabel(this);
637
638     sampleCodeBlock(m_codeBlock);
639 #if ENABLE(OPCODE_SAMPLING)
640     sampleInstruction(m_codeBlock->instructions().begin());
641 #endif
642
643     if (m_codeBlock->codeType() == FunctionCode) {
644         ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
645         if (shouldEmitProfiling()) {
646             for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
647                 // If this is a constructor, then we want to put in a dummy profiling site (to
648                 // keep things consistent) but we don't actually want to record the dummy value.
649                 if (m_codeBlock->m_isConstructor && !argument)
650                     continue;
651                 int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
652 #if USE(JSVALUE64)
653                 load64(Address(callFrameRegister, offset), regT0);
654 #elif USE(JSVALUE32_64)
655                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
656                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
657 #endif
658                 emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
659             }
660         }
661     }
662
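    // frameTopOffset is negative (the stack grows down), so -frameTopOffset is the frame size.
    // For frames larger than the reserved zone the pointer arithmetic can wrap, leaving regT1
    // above the call frame; the extra branch catches that. Otherwise it is enough to compare
    // the prospective stack pointer against the VM's soft stack limit.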
663     int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
664     unsigned maxFrameSize = -frameTopOffset;
665     addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1);
666     JumpList stackOverflow;
667     if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
668         stackOverflow.append(branchPtr(Above, regT1, callFrameRegister));
669     stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1));
670
671     move(regT1, stackPointerRegister);
672     checkStackPointerAlignment();
673
674     emitSaveCalleeSaves();
675     emitMaterializeTagCheckRegisters();
676     
677     RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));
678
679     privateCompileMainPass();
680     privateCompileLinkPass();
681     privateCompileSlowCases();
682     
683     if (m_disassembler)
684         m_disassembler->setEndOfSlowPath(label());
685     m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
686
687     stackOverflow.link(this);
688     m_bytecodeOffset = 0;
689     if (maxFrameExtentForSlowPathCall)
690         addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
691     callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
692
693     if (m_codeBlock->codeType() == FunctionCode) {
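        // Second entry point for callers whose argument count may not match the callee's
        // parameter count. The arity-check operation returns zero when no fixup is needed;
        // otherwise its result is handed to the arity fixup thunk. Either way we continue at
        // beginLabel, skipping the main entry's prologue because this path emits its own.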
694         m_arityCheck = label();
695         store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
696         emitFunctionPrologue();
697         emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
698
699         load32(payloadFor(CallFrameSlot::argumentCount), regT1);
700         branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
701
702         m_bytecodeOffset = 0;
703
704         if (maxFrameExtentForSlowPathCall)
705             addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
706         callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
707         if (maxFrameExtentForSlowPathCall)
708             addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
709         branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
710         move(returnValueGPR, GPRInfo::argumentGPR0);
711         emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());
712
713 #if !ASSERT_DISABLED
714         m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
715 #endif
716
717         jump(beginLabel);
718     }
719
720     ASSERT(m_jmpTable.isEmpty());
721     
722     privateCompileExceptionHandlers();
723     
724     if (m_disassembler)
725         m_disassembler->setEndOfCode(label());
726     m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
727
728     m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, effort));
729
730     double after = 0;
731     if (UNLIKELY(computeCompileTimes())) {
732         after = monotonicallyIncreasingTimeMS();
733
734         if (Options::reportTotalCompileTimes())
735             totalBaselineCompileTime += after - before;
736     }
737     if (UNLIKELY(reportCompileTimes())) {
738         CString codeBlockName = toCString(*m_codeBlock);
739         
740         dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", m_linkBuffer->size(), " bytes in ", after - before, " ms.\n");
741     }
742 }
743
744 CompilationResult JIT::link()
745 {
746     LinkBuffer& patchBuffer = *m_linkBuffer;
747     
748     if (patchBuffer.didFailToAllocate())
749         return CompilationFailed;
750
751     // Translate vPC offsets into addresses in JIT generated code, for switch tables.
752     for (auto& record : m_switches) {
753         unsigned bytecodeOffset = record.bytecodeOffset;
754
755         if (record.type != SwitchRecord::String) {
756             ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); 
757             ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
758
759             record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
760
761             for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
762                 unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
763                 record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
764             }
765         } else {
766             ASSERT(record.type == SwitchRecord::String);
767
768             record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
769
770             for (auto& location : record.jumpTable.stringJumpTable->offsetTable.values()) {
771                 unsigned offset = location.branchOffset;
772                 location.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
773             }
774         }
775     }
776
777     for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
778         HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
779         handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
780     }
781
782     for (auto& record : m_calls) {
783         if (record.to)
784             patchBuffer.link(record.from, FunctionPtr(record.to));
785     }
786
787     for (unsigned i = m_getByIds.size(); i--;)
788         m_getByIds[i].finalize(patchBuffer);
789     for (unsigned i = m_getByIdsWithThis.size(); i--;)
790         m_getByIdsWithThis[i].finalize(patchBuffer);
791     for (unsigned i = m_putByIds.size(); i--;)
792         m_putByIds[i].finalize(patchBuffer);
793
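    // Each ByValInfo records the code locations its by-val access needs for later repatching:
    // the not-an-index and bad-type jumps, where to rejoin the fast path, and where the slow
    // path lives, the latter stored as offsets from the bad-type jump and the return address.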
794     if (m_byValCompilationInfo.size()) {
795         CodeLocationLabel exceptionHandler = patchBuffer.locationOf(m_exceptionHandler);
796
797         for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
798             PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
799             CodeLocationJump notIndexJump = CodeLocationJump();
800             if (Jump(patchableNotIndexJump).isSet())
801                 notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
802             CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
803             CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
804             CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
805             CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
806             CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
807
808             *byValCompilationInfo.byValInfo = ByValInfo(
809                 byValCompilationInfo.bytecodeIndex,
810                 notIndexJump,
811                 badTypeJump,
812                 exceptionHandler,
813                 byValCompilationInfo.arrayMode,
814                 byValCompilationInfo.arrayProfile,
815                 differenceBetweenCodePtr(badTypeJump, doneTarget),
816                 differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
817                 differenceBetweenCodePtr(returnAddress, slowPathTarget));
818         }
819     }
820
821     for (auto& compilationInfo : m_callCompilationInfo) {
822         CallLinkInfo& info = *compilationInfo.callLinkInfo;
823         info.setCallLocations(
824             CodeLocationLabel(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation)),
825             CodeLocationLabel(patchBuffer.locationOf(compilationInfo.hotPathBegin)),
826             patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
827     }
828
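    // Build a compact map from bytecode offsets to machine code offsets so this code can be
    // entered at arbitrary bytecode boundaries (for example when the LLInt OSR-enters a loop).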
829     CompactJITCodeMap::Encoder jitCodeMapEncoder;
830     for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
831         if (m_labels[bytecodeOffset].isSet())
832             jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
833     }
834     m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
835
836     MacroAssemblerCodePtr withArityCheck;
837     if (m_codeBlock->codeType() == FunctionCode)
838         withArityCheck = patchBuffer.locationOf(m_arityCheck);
839
840     if (Options::dumpDisassembly()) {
841         m_disassembler->dump(patchBuffer);
842         patchBuffer.didAlreadyDisassemble();
843     }
844     if (UNLIKELY(m_compilation)) {
845         if (Options::disassembleBaselineForProfiler())
846             m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
847         m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
848     }
849
850     if (m_pcToCodeOriginMapBuilder.didBuildMapping())
851         m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));
852     
853     CodeRef result = FINALIZE_CODE(
854         patchBuffer,
855         ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));
856     
857     m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
858         static_cast<double>(result.size()) /
859         static_cast<double>(m_instructions.size()));
860
861     m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
862     m_codeBlock->setJITCode(
863         adoptRef(*new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
864
865 #if ENABLE(JIT_VERBOSE)
866     dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
867 #endif
868     
869     return CompilationSuccessful;
870 }
871
872 CompilationResult JIT::privateCompile(JITCompilationEffort effort)
873 {
874     doMainThreadPreparationBeforeCompile();
875     compileWithoutLinking(effort);
876     return link();
877 }
878
879 void JIT::privateCompileExceptionHandlers()
880 {
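    // m_exceptionChecksWithCallFrameRollback is used by operations that roll the call frame
    // back to the caller when they throw (stack overflow, arity check), so the handler lookup
    // starts from the caller's frame; plain m_exceptionChecks handles exceptions raised while
    // this frame is still current.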
881     if (!m_exceptionChecksWithCallFrameRollback.empty()) {
882         m_exceptionChecksWithCallFrameRollback.link(this);
883
884         copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
885
886         // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
887
888         move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
889         move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
890
891 #if CPU(X86)
892         // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
893         poke(GPRInfo::argumentGPR0);
894         poke(GPRInfo::argumentGPR1, 1);
895 #endif
896         m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
897         jumpToExceptionHandler(*vm());
898     }
899
900     if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) {
901         m_exceptionHandler = label();
902         m_exceptionChecks.link(this);
903
904         copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
905
906         // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
907         move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
908         move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
909
910 #if CPU(X86)
911         // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
912         poke(GPRInfo::argumentGPR0);
913         poke(GPRInfo::argumentGPR1, 1);
914 #endif
915         m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
916         jumpToExceptionHandler(*vm());
917     }
918 }
919
920 void JIT::doMainThreadPreparationBeforeCompile()
921 {
922     // This ensures that we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
923     if (m_vm->typeProfiler())
924         m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));
925 }
926
927 unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
928 {
929     ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));
930
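    // Reserve space for the callee locals plus the largest frame extent any slow-path call
    // needs, rounded so that the resulting frame pointer offset stays properly aligned.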
931     return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
932 }
933
934 int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
935 {
936     return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
937 }
938
939 bool JIT::reportCompileTimes()
940 {
941     return Options::reportCompileTimes() || Options::reportBaselineCompileTimes();
942 }
943
944 bool JIT::computeCompileTimes()
945 {
946     return reportCompileTimes() || Options::reportTotalCompileTimes();
947 }
948
949 HashMap<CString, double> JIT::compileTimeStats()
950 {
951     HashMap<CString, double> result;
952     if (Options::reportTotalCompileTimes()) {
953         result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime);
954         result.add("Baseline Compile Time", totalBaselineCompileTime);
955 #if ENABLE(DFG_JIT)
956         result.add("DFG Compile Time", totalDFGCompileTime);
957 #if ENABLE(FTL_JIT)
958         result.add("FTL Compile Time", totalFTLCompileTime);
959         result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
960         result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
961 #endif // ENABLE(FTL_JIT)
962 #endif // ENABLE(DFG_JIT)
963     }
964     return result;
965 }
966
967 } // namespace JSC
968
969 #endif // ENABLE(JIT)