/*
 * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "BytecodeGraph.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "InterpreterInlines.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "ModuleProgramCodeBlock.h"
#include "PCToCodeOriginMap.h"
#include "ProfilerDatabase.h"
#include "ProgramCodeBlock.h"
#include "ResultType.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "ThunkGenerators.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/GraphNodeWorklist.h>
#include <wtf/SimpleStats.h>

using namespace std;

namespace JSC {

double totalBaselineCompileTime;
double totalDFGCompileTime;
double totalFTLCompileTime;
double totalFTLDFGCompileTime;
double totalFTLB3CompileTime;

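// Repatch the call instruction whose return address is given so that it calls
// newCalleeFunction instead of its current target.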
void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    MacroAssembler::repatchCall(
        CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
        newCalleeFunction);
}

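// Produce the thunk used to call the given host function. If the JIT is
// unavailable, fall back to the LLInt's native call trampoline.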
JIT::CodeRef JIT::compileCTINativeCall(VM* vm, NativeFunction func)
{
    if (!vm->canUseJIT())
        return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
    JIT jit(vm, 0);
    return jit.privateCompileCTINativeCall(vm, func);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
    , m_getByIdIndex(UINT_MAX)
    , m_getByIdWithThisIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_pcToCodeOriginMapBuilder(*vm)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
    , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset)
{
}

JIT::~JIT()
{
}

#if ENABLE(DFG_JIT)
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

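    // Bump the execution counter; while it stays negative we have not reached
    // the tier-up threshold, so skip the optimization attempt. Otherwise call
    // operationOptimize: if it returns an entry point, install the stack
    // pointer it hands back and jump straight into optimized code.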
    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);

    copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(*vm());

    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

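// Emit the check that detects writes to a watched location. If the set is
// absent or already invalidated there is nothing left to watch, but we still
// record an unset Jump so the slow-case bookkeeping stays in sync.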
void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated) {
        addSlowCase(Jump());
        return;
    }

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitNotifyWrite(GPRReg pointerToSet)
{
    addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
}

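// Debug-only check that the stack pointer matches the offset we computed for
// this code block's frame; trap if it does not.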
void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

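// Opcode dispatch for the two compilation passes: DEFINE_OP emits an opcode's
// fast path, DEFINE_SLOWCASE_OP emits its slow path, and DEFINE_SLOW_OP routes
// the opcode entirely through a C++ slow-path call.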
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        if (m_bytecodeOffset >= startBytecodeOffset) { \
            JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
            slowPathCall.call(); \
        } \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        if (m_bytecodeOffset >= startBytecodeOffset) { \
            emit_##name(currentInstruction); \
        } \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

void JIT::privateCompileMainPass()
{
    if (false)
        dataLog("Compiling ", *m_codeBlock, "\n");

    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_instructions.size();

    m_callLinkInfoIndex = 0;

    VM& vm = *m_codeBlock->vm();
    unsigned startBytecodeOffset = 0;
    if (m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits(vm, ProgramCodeBlock::info()) || m_codeBlock->inherits(vm, ModuleProgramCodeBlock::info()))) {
        // We can only do this optimization because we execute ProgramCodeBlocks exactly once.
        // This optimization would be invalid otherwise. When the LLInt determines it wants to
        // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
        // was executing at when it kicked off our compilation. We only need to compile code for
        // anything reachable from that bytecode offset.

        // We only bother building the bytecode graph if it could save time and executable
        // memory. We pick an arbitrary offset where we deem this is profitable.
        if (m_loopOSREntryBytecodeOffset >= 200) {
            // As a simplification, we don't find all bytecode ranges that are unreachable.
            // Instead, we just find the minimum bytecode offset that is reachable, and
            // compile code from that bytecode offset onwards.

            BytecodeGraph<CodeBlock> graph(m_codeBlock, m_instructions);
            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeOffset);
            RELEASE_ASSERT(block);

            GraphNodeWorklist<BytecodeBasicBlock*> worklist;
            startBytecodeOffset = UINT_MAX;
            worklist.push(block);
            while (BytecodeBasicBlock* block = worklist.pop()) {
                startBytecodeOffset = std::min(startBytecodeOffset, block->leaderOffset());
                worklist.pushAll(block->successors());
            }
        }
    }

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) {
            // We've proven all bytecode instructions up until here are unreachable.
            // Let's ensure that by crashing if it's ever hit.
            breakpoint();
        }

        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(Interpreter::isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode);

        if (UNLIKELY(m_compilation)) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        unsigned bytecodeOffset = m_bytecodeOffset;

        switch (opcodeID) {
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_tail_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_tail_call_varargs)
        DEFINE_OP(op_tail_call_forward_arguments)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_create_direct_arguments)
        DEFINE_OP(op_create_scoped_arguments)
        DEFINE_OP(op_create_cloned_arguments)
        DEFINE_OP(op_get_argument)
        DEFINE_OP(op_argument_count)
        DEFINE_OP(op_create_rest)
        DEFINE_OP(op_get_rest_length)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_assert)
        DEFINE_OP(op_identity_with_profile)
        DEFINE_OP(op_unreachable)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_del_by_val)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_try_get_by_id)
        case op_get_array_length:
        case op_get_by_id_proto_load:
        case op_get_by_id_unset:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_id_with_this)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_by_val_with_this)
        DEFINE_OP(op_overrides_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_instanceof_custom)
        DEFINE_OP(op_is_empty)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_is_cell_with_type)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_check_traps)
        DEFINE_OP(op_nop)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_array_with_spread)
        DEFINE_OP(op_spread)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_generator_func)
        DEFINE_OP(op_new_generator_func_exp)
        DEFINE_OP(op_new_async_func)
        DEFINE_OP(op_new_async_func_exp)
        DEFINE_OP(op_new_async_generator_func)
        DEFINE_OP(op_new_async_generator_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_pow)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_push_with_scope)
        DEFINE_OP(op_create_lexical_environment)
        DEFINE_OP(op_get_parent_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_id_with_this)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_by_val_with_this)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter_by_id)
        DEFINE_OP(op_put_getter_by_val)
        DEFINE_OP(op_put_setter_by_val)
        DEFINE_OP(op_define_data_property)
        DEFINE_OP(op_define_accessor_property)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_set_function_name)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_resolve_scope_for_hoisting_func_decl_in_eval)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)

        DEFINE_OP(op_get_enumerable_length)
        DEFINE_OP(op_has_generic_property)
        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_get_property_enumerator)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)
        DEFINE_OP(op_to_index_string)

        DEFINE_OP(op_log_shadow_chicken_prologue)
        DEFINE_OP(op_log_shadow_chicken_tail)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (false)
            dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

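// Now that every bytecode offset has a label, bind the intra-procedural jumps
// recorded during the main pass.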
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_getByIdWithThisIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (Interpreter::getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_tail_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_forward_arguments)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_check_tdz)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_try_get_by_id)
        case op_get_array_length:
        case op_get_by_id_proto_load:
        case op_get_by_id_unset:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_id_with_this)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_instanceof_custom)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_check_traps)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_string)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_has_structure_property)
        DEFINE_SLOWCASE_OP(op_get_direct_pname)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (false)
            dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

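        // Rejoin the fast path. NEXT_OPCODE in the case above already advanced
        // m_bytecodeOffset, so a relative offset of 0 targets the next instruction.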
        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_getByIdWithThisIndex == m_getByIdsWithThis.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

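// Generate all of the baseline code for this code block into a LinkBuffer.
// Addresses are not final until link() runs.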
void JIT::compileWithoutLinking(JITCompilationEffort effort)
{
    double before = 0;
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();

    {
        ConcurrentJSLocker locker(m_codeBlock->m_lock);
        m_instructions = m_codeBlock->instructions().clone();
    }

    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case ModuleCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())))
        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
    if (UNLIKELY(m_vm->m_perBytecodeProfiler)) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (random() & 1)
        nop();

    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }

    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;
    addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1);
    JumpList stackOverflow;
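    // If the frame can be larger than the stack's reserved zone, the addPtr
    // above may have wrapped around address zero; detect that by checking
    // whether the supposed frame top landed above the frame pointer.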
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(branchPtr(Above, regT1, callFrameRegister));
    stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1));

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();

    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();

    RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    stackOverflow.link(this);
    m_bytecodeOffset = 0;
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

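    // The arity check entry point. Calls that might pass too few arguments
    // enter here: check the argument count, run the arity fixup thunk if
    // needed, then fall through to the normal entry at beginLabel.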
    if (m_codeBlock->codeType() == FunctionCode) {
        m_arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

        load32(payloadFor(CallFrameSlot::argumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
        move(returnValueGPR, GPRInfo::argumentGPR0);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, effort));

    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();

        if (Options::reportTotalCompileTimes())
            totalBaselineCompileTime += after - before;
    }
    if (UNLIKELY(reportCompileTimes())) {
        CString codeBlockName = toCString(*m_codeBlock);

        dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", m_linkBuffer->size(), " bytes in ", after - before, " ms.\n");
    }
}

CompilationResult JIT::link()
{
    LinkBuffer& patchBuffer = *m_linkBuffer;

    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (auto& record : m_switches) {
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (auto& location : record.jumpTable.stringJumpTable->offsetTable.values()) {
                unsigned offset = location.branchOffset;
                location.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (auto& record : m_calls) {
        if (record.to)
            patchBuffer.link(record.from, FunctionPtr(record.to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_getByIdsWithThis.size(); i--;)
        m_getByIdsWithThis[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

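    // With final code addresses known, flesh out each ByValInfo so the
    // by-val fast paths can be repatched later.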
    if (m_byValCompilationInfo.size()) {
        CodeLocationLabel exceptionHandler = patchBuffer.locationOf(m_exceptionHandler);

        for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
            PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
            CodeLocationJump notIndexJump = CodeLocationJump();
            if (Jump(patchableNotIndexJump).isSet())
                notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
            CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
            CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
            CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
            CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
            CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);

            *byValCompilationInfo.byValInfo = ByValInfo(
                byValCompilationInfo.bytecodeIndex,
                notIndexJump,
                badTypeJump,
                exceptionHandler,
                byValCompilationInfo.arrayMode,
                byValCompilationInfo.arrayProfile,
                differenceBetweenCodePtr(badTypeJump, doneTarget),
                differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
                differenceBetweenCodePtr(returnAddress, slowPathTarget));
        }
    }

    for (auto& compilationInfo : m_callCompilationInfo) {
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(
            CodeLocationLabel(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation)),
            CodeLocationLabel(patchBuffer.locationOf(compilationInfo.hotPathBegin)),
            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
    }

    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(m_arityCheck);

    if (Options::dumpDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (UNLIKELY(m_compilation)) {
        if (Options::disassembleBaselineForProfiler())
            m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));

    CodeRef result = FINALIZE_CODE(
        patchBuffer,
        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_instructions.size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(*new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    doMainThreadPreparationBeforeCompile();
    compileWithoutLinking(effort);
    return link();
}

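// Emit the shared out-of-line exception paths: one that first unwinds the
// partially-constructed frame (for checks made with call frame rollback) and
// one for ordinary exception checks.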
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).

        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
        jumpToExceptionHandler(*vm());
    }

    if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) {
        m_exceptionHandler = label();
        m_exceptionChecks.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
        jumpToExceptionHandler(*vm());
    }
}

void JIT::doMainThreadPreparationBeforeCompile()
{
    // This ensures that we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));
}

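// Number of registers the frame must cover: the callee locals plus the largest
// scratch area any slow-path call needs, rounded to keep the stack aligned.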
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

bool JIT::reportCompileTimes()
{
    return Options::reportCompileTimes() || Options::reportBaselineCompileTimes();
}

bool JIT::computeCompileTimes()
{
    return reportCompileTimes() || Options::reportTotalCompileTimes();
}

HashMap<CString, double> JIT::compileTimeStats()
{
    HashMap<CString, double> result;
    if (Options::reportTotalCompileTimes()) {
        result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime);
        result.add("Baseline Compile Time", totalBaselineCompileTime);
#if ENABLE(DFG_JIT)
        result.add("DFG Compile Time", totalDFGCompileTime);
#if ENABLE(FTL_JIT)
        result.add("FTL Compile Time", totalFTLCompileTime);
        result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
        result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
#endif // ENABLE(FTL_JIT)
#endif // ENABLE(DFG_JIT)
    }
    return result;
}

} // namespace JSC

#endif // ENABLE(JIT)