d175f44c3556e63cd2863aa6479626e75acd2040
[WebKit.git] / JavaScriptCore / jit / JIT.h
1 /*
2  * Copyright (C) 2008 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #ifndef JIT_h
27 #define JIT_h
28
29 #include <wtf/Platform.h>
30
31 #if ENABLE(JIT)
32
33 // We've run into some problems where changing the size of the class JIT leads to
// performance fluctuations.  Try forcing alignment in an attempt to stabilize this.
35 #if COMPILER(GCC)
36 #define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
37 #else
38 #define JIT_CLASS_ALIGNMENT
39 #endif
40
41 #include "CodeBlock.h"
42 #include "Interpreter.h"
43 #include "JITCode.h"
44 #include "JITStubs.h"
45 #include "Opcode.h"
46 #include "RegisterFile.h"
47 #include "MacroAssembler.h"
48 #include "Profiler.h"
49 #include <bytecode/SamplingTool.h>
50 #include <wtf/AlwaysInline.h>
51 #include <wtf/Vector.h>
52
53 namespace JSC {
54
55     class CodeBlock;
56     class JIT;
57     class JSPropertyNameIterator;
58     class Interpreter;
59     class Register;
60     class RegisterFile;
61     class ScopeChainNode;
62     class SimpleJumpTable;
63     class StringJumpTable;
64     class StructureChain;
65
66     struct CallLinkInfo;
67     struct Instruction;
68     struct OperandTypes;
69     struct PolymorphicAccessStructureList;
70     struct StructureStubInfo;
71
72     struct CallRecord {
73         MacroAssembler::Call from;
74         unsigned bytecodeIndex;
75         void* to;
76
77         CallRecord()
78         {
79         }
80
81         CallRecord(MacroAssembler::Call from, unsigned bytecodeIndex, void* to = 0)
82             : from(from)
83             , bytecodeIndex(bytecodeIndex)
84             , to(to)
85         {
86         }
87     };
88
89     struct JumpTable {
90         MacroAssembler::Jump from;
91         unsigned toBytecodeIndex;
92
93         JumpTable(MacroAssembler::Jump f, unsigned t)
94             : from(f)
95             , toBytecodeIndex(t)
96         {
97         }
98     };
99
100     struct SlowCaseEntry {
101         MacroAssembler::Jump from;
102         unsigned to;
103         unsigned hint;
104         
105         SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
106             : from(f)
107             , to(t)
108             , hint(h)
109         {
110         }
111     };
112
113     struct SwitchRecord {
114         enum Type {
115             Immediate,
116             Character,
117             String
118         };
119
120         Type type;
121
122         union {
123             SimpleJumpTable* simpleJumpTable;
124             StringJumpTable* stringJumpTable;
125         } jumpTable;
126
127         unsigned bytecodeIndex;
128         unsigned defaultOffset;
129
130         SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset, Type type)
131             : type(type)
132             , bytecodeIndex(bytecodeIndex)
133             , defaultOffset(defaultOffset)
134         {
135             this->jumpTable.simpleJumpTable = jumpTable;
136         }
137
138         SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset)
139             : type(String)
140             , bytecodeIndex(bytecodeIndex)
141             , defaultOffset(defaultOffset)
142         {
143             this->jumpTable.stringJumpTable = jumpTable;
144         }
145     };
146
    // Locations recorded while compiling a property access so the stub
    // machinery can later find and patch the generated code.
    struct PropertyStubCompilationInfo {
        MacroAssembler::Call callReturnLocation;
        MacroAssembler::Label hotPathBegin;
    };
151
    // Locations recorded while compiling a call site so that linking and
    // unlinking can later patch the generated code.
    struct StructureStubCompilationInfo {
        MacroAssembler::DataLabelPtr hotPathBegin;
        MacroAssembler::Call hotPathOther;
        MacroAssembler::Call callReturnLocation;
        MacroAssembler::Label coldPathOther;
    };
158
    // Repatch the (near) call instruction that returns to returnAddress so
    // that it calls newCalleeFunction instead (implementations in JIT.cpp).
    void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction);
    void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction);
161
    // The JIT translates a CodeBlock's bytecode into native machine code.
    // All public entry points are static: each constructs a transient JIT
    // instance, runs one of the privateCompile* methods, and discards it.
    class JIT : private MacroAssembler {
        friend class JITStubCall;
        friend class CallEvalJITStub;

        using MacroAssembler::Jump;
        using MacroAssembler::JumpList;
        using MacroAssembler::Label;

        // NOTES:
        //
        // regT0 has two special meanings.  The return value from a stub
        // call will always be in regT0, and by default (unless
        // a register is specified) emitPutVirtualRegister() will store
        // the value from regT0.
        //
        // tempRegister2 has no such dependencies.  It is important that
        // on x86/x86-64 it is ecx for performance reasons, since the
        // MacroAssembler will need to plant register swaps if it is not -
        // however the code will still function correctly.
#if PLATFORM(X86_64)
        static const RegisterID returnValueRegister = X86::eax;
        static const RegisterID cachedResultRegister = X86::eax;
        static const RegisterID firstArgumentRegister = X86::edi;

        static const RegisterID timeoutCheckRegister = X86::r12;
        static const RegisterID callFrameRegister = X86::r13;
        static const RegisterID tagTypeNumberRegister = X86::r14;
        static const RegisterID tagMaskRegister = X86::r15;

        static const RegisterID regT0 = X86::eax;
        static const RegisterID regT1 = X86::edx;
        static const RegisterID regT2 = X86::ecx;
        // NOTE: privateCompileCTIMachineTrampolines() relies on this being callee preserved; this should be considered non-interface.
        static const RegisterID regT3 = X86::ebx;

        static const FPRegisterID fpRegT0 = X86::xmm0;
        static const FPRegisterID fpRegT1 = X86::xmm1;
        static const FPRegisterID fpRegT2 = X86::xmm2;
#elif PLATFORM(X86)
        static const RegisterID returnValueRegister = X86::eax;
        static const RegisterID cachedResultRegister = X86::eax;
        // On x86 we always use fastcall conventions - but on
        // OS X it might make more sense to just use regparm.
        static const RegisterID firstArgumentRegister = X86::ecx;

        static const RegisterID timeoutCheckRegister = X86::esi;
        static const RegisterID callFrameRegister = X86::edi;

        static const RegisterID regT0 = X86::eax;
        static const RegisterID regT1 = X86::edx;
        static const RegisterID regT2 = X86::ecx;
        // NOTE: privateCompileCTIMachineTrampolines() relies on this being callee preserved; this should be considered non-interface.
        static const RegisterID regT3 = X86::ebx;

        static const FPRegisterID fpRegT0 = X86::xmm0;
        static const FPRegisterID fpRegT1 = X86::xmm1;
        static const FPRegisterID fpRegT2 = X86::xmm2;
#else
    #error "JIT not supported on this platform."
#endif

        static const int patchGetByIdDefaultStructure = -1;
        // Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler
        // will compress the displacement, and we may not be able to fit a patched offset.
        static const int patchGetByIdDefaultOffset = 256;

        // Size (in bytes) of the code that loads the stub argument pointer,
        // which varies with the stub-argument passing strategy in use.
#if USE(JIT_STUB_ARGUMENT_REGISTER)
#if PLATFORM(X86_64)
        static const int ctiArgumentInitSize = 6;
#else
        static const int ctiArgumentInitSize = 2;
#endif
#elif USE(JIT_STUB_ARGUMENT_STACK)
        static const int ctiArgumentInitSize = 4;
#else // JIT_STUB_ARGUMENT_VA_LIST
        static const int ctiArgumentInitSize = 0;
#endif

#if PLATFORM(X86_64)
        // These architecture specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 10;
        static const int patchOffsetPutByIdExternalLoad = 20;
        static const int patchLengthPutByIdExternalLoad = 4;
        static const int patchLengthPutByIdExternalLoadPrefix = 1;
        static const int patchOffsetPutByIdPropertyMapOffset = 31;
        // These architecture specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 10;
        static const int patchOffsetGetByIdBranchToSlowCase = 20;
        static const int patchOffsetGetByIdExternalLoad = 20;
        static const int patchLengthGetByIdExternalLoad = 4;
        static const int patchLengthGetByIdExternalLoadPrefix = 1;
        static const int patchOffsetGetByIdPropertyMapOffset = 31;
        static const int patchOffsetGetByIdPutResult = 31;
#if ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 61 + ctiArgumentInitSize;
#else
        static const int patchOffsetGetByIdSlowCaseCall = 38 + ctiArgumentInitSize;
#endif
        static const int patchOffsetOpCallCompareToJump = 9;
#else
        // These architecture specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 7;
        static const int patchOffsetPutByIdExternalLoad = 13;
        static const int patchLengthPutByIdExternalLoad = 3;
        static const int patchLengthPutByIdExternalLoadPrefix = 0;
        static const int patchOffsetPutByIdPropertyMapOffset = 22;
        // These architecture specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 7;
        static const int patchOffsetGetByIdBranchToSlowCase = 13;
        static const int patchOffsetGetByIdExternalLoad = 13;
        static const int patchLengthGetByIdExternalLoad = 3;
        static const int patchLengthGetByIdExternalLoadPrefix = 0;
        static const int patchOffsetGetByIdPropertyMapOffset = 22;
        static const int patchOffsetGetByIdPutResult = 22;
#if ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 31 + ctiArgumentInitSize;
#else
        static const int patchOffsetGetByIdSlowCaseCall = 21 + ctiArgumentInitSize;
#endif
        static const int patchOffsetOpCallCompareToJump = 6;
#endif

    public:
        // Compile the given CodeBlock's bytecode to native code.
        static void compile(JSGlobalData* globalData, CodeBlock* codeBlock)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompile();
        }

        // The compileGetById* / compilePutByIdTransition helpers below
        // generate specialized property-access stubs for the inline caches.
        static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
        }

        static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, cachedOffset);
        }
        static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, cachedOffset, callFrame);
        }
        static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
        }

        static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
        }
        
        static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
        }

        // Generate the shared trampolines used by all compiled code; the
        // resulting entry points are returned through the out-parameters.
        static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall, void** ctiNativeCallThunk)
        {
            JIT jit(globalData);
            jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
        }

        static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
        static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);

        static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ProcessorReturnAddress returnAddress)
        {
            JIT jit(globalData, codeBlock);
            return jit.privateCompilePatchGetArrayLength(returnAddress);
        }

        // Link/unlink a compiled call site to/from a concrete callee.
        static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount);
        static void unlinkCall(CallLinkInfo*);

    private:
        // Records where op_jsr stores its return target so op_sret can
        // be linked to it after compilation.
        struct JSRInfo {
            DataLabelPtr storeLocation;
            Label target;

            JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
                : storeLocation(storeLocation)
                , target(targetLocation)
            {
            }
        };

        JIT(JSGlobalData*, CodeBlock* = 0);

        // Compilation proceeds in passes: main pass emits fast paths,
        // slow-case pass emits out-of-line fallbacks, link pass resolves jumps.
        void privateCompileMainPass();
        void privateCompileLinkPass();
        void privateCompileSlowCases();
        void privateCompile();
        void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
        void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
        void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
        void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
        void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
        void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ProcessorReturnAddress returnAddress);

        void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall, void** ctiNativeCallThunk);
        void privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress);

        void addSlowCase(Jump);
        void addJump(Jump, int);
        void emitJumpSlowToHot(Jump, int);

        void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
        void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex);
        void compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned propertyAccessInstructionIndex);
        void compilePutByIdSlowCase(int baseVReg, Identifier* ident, int valueVReg, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex);
        void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
        void compileOpCallVarargs(Instruction* instruction);
        void compileOpCallInitializeCallFrame();
        void compileOpCallSetupArgs(Instruction*);
        void compileOpCallVarargsSetupArgs(Instruction*);
        void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
        void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
        void compileOpConstructSetupArgs(Instruction*);
        enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
        void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);

        void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
        void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
        void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);

        // Arithmetic Ops

        void emit_op_add(Instruction*);
        void emit_op_sub(Instruction*);
        void emit_op_mul(Instruction*);
        void emit_op_mod(Instruction*);
        void emit_op_bitand(Instruction*);
        void emit_op_lshift(Instruction*);
        void emit_op_rshift(Instruction*);
        void emit_op_jnless(Instruction*);
        void emit_op_jnlesseq(Instruction*);
        void emit_op_pre_inc(Instruction*);
        void emit_op_pre_dec(Instruction*);
        void emit_op_post_inc(Instruction*);
        void emit_op_post_dec(Instruction*);
        void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);

        // Property access ops

        void emit_op_get_by_val(Instruction*);
        void emit_op_put_by_val(Instruction*);
        void emit_op_put_by_index(Instruction*);
        void emit_op_put_getter(Instruction*);
        void emit_op_put_setter(Instruction*);
        void emit_op_del_by_id(Instruction*);

        // One emit_op_* per bytecode opcode; each plants the fast path
        // for a single instruction during the main compilation pass.
        void emit_op_mov(Instruction*);
        void emit_op_end(Instruction*);
        void emit_op_jmp(Instruction*);
        void emit_op_loop(Instruction*);
        void emit_op_loop_if_less(Instruction*);
        void emit_op_loop_if_lesseq(Instruction*);
        void emit_op_new_object(Instruction*);
        void emit_op_put_by_id(Instruction*);
        void emit_op_get_by_id(Instruction*);
        void emit_op_instanceof(Instruction*);
        void emit_op_new_func(Instruction*);
        void emit_op_call(Instruction*);
        void emit_op_call_eval(Instruction*);
        void emit_op_load_varargs(Instruction*);
        void emit_op_call_varargs(Instruction*);
        void emit_op_construct(Instruction*);
        void emit_op_get_global_var(Instruction*);
        void emit_op_put_global_var(Instruction*);
        void emit_op_get_scoped_var(Instruction*);
        void emit_op_put_scoped_var(Instruction*);
        void emit_op_tear_off_activation(Instruction*);
        void emit_op_tear_off_arguments(Instruction*);
        void emit_op_ret(Instruction*);
        void emit_op_new_array(Instruction*);
        void emit_op_resolve(Instruction*);
        void emit_op_construct_verify(Instruction*);
        void emit_op_to_primitive(Instruction*);
        void emit_op_strcat(Instruction*);
        void emit_op_resolve_func(Instruction*);
        void emit_op_loop_if_true(Instruction*);
        void emit_op_resolve_base(Instruction*);
        void emit_op_resolve_skip(Instruction*);
        void emit_op_resolve_global(Instruction*);
        void emit_op_not(Instruction*);
        void emit_op_jfalse(Instruction*);
        void emit_op_jeq_null(Instruction*);
        void emit_op_jneq_null(Instruction*);
        void emit_op_jneq_ptr(Instruction*);
        void emit_op_unexpected_load(Instruction*);
        void emit_op_jsr(Instruction*);
        void emit_op_sret(Instruction*);
        void emit_op_eq(Instruction*);
        void emit_op_bitnot(Instruction*);
        void emit_op_resolve_with_base(Instruction*);
        void emit_op_new_func_exp(Instruction*);
        void emit_op_jtrue(Instruction*);
        void emit_op_neq(Instruction*);
        void emit_op_bitxor(Instruction*);
        void emit_op_new_regexp(Instruction*);
        void emit_op_bitor(Instruction*);
        void emit_op_throw(Instruction*);
        void emit_op_next_pname(Instruction*);
        void emit_op_push_scope(Instruction*);
        void emit_op_pop_scope(Instruction*);
        void emit_op_stricteq(Instruction*);
        void emit_op_nstricteq(Instruction*);
        void emit_op_to_jsnumber(Instruction*);
        void emit_op_push_new_scope(Instruction*);
        void emit_op_catch(Instruction*);
        void emit_op_jmp_scopes(Instruction*);
        void emit_op_switch_imm(Instruction*);
        void emit_op_switch_char(Instruction*);
        void emit_op_switch_string(Instruction*);
        void emit_op_new_error(Instruction*);
        void emit_op_debug(Instruction*);
        void emit_op_eq_null(Instruction*);
        void emit_op_neq_null(Instruction*);
        void emit_op_enter(Instruction*);
        void emit_op_enter_with_activation(Instruction*);
        void emit_op_init_arguments(Instruction*);
        void emit_op_create_arguments(Instruction*);
        void emit_op_convert_this(Instruction*);
        void emit_op_profile_will_call(Instruction*);
        void emit_op_profile_did_call(Instruction*);

        // emitSlow_op_* counterparts plant the out-of-line slow cases,
        // consuming the SlowCaseEntry records added by the fast paths.
        void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);

#if ENABLE(JIT_OPTIMIZE_ARITHMETIC)
        void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
        void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
#endif

        void emitGetVirtualRegister(int src, RegisterID dst);
        void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
        void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);

        // Marshal arguments for calls into the C++ stub functions.
        void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
        void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
        void emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber);
        void emitPutJITStubArgConstant(void* value, unsigned argumentNumber);
        void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);

        void emitInitRegister(unsigned dst);

        void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
        void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
        void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
        void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);

        JSValue getConstantOperand(unsigned src);
        int32_t getConstantOperandImmediateInt(unsigned src);
        bool isOperandConstantImmediateInt(unsigned src);

        // Type-check helpers: emit branches taken when the value in the
        // register is (not) a JSCell / immediate of the expected kind.
        Jump emitJumpIfJSCell(RegisterID);
        Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
        void emitJumpSlowCaseIfJSCell(RegisterID);
        Jump emitJumpIfNotJSCell(RegisterID);
        void emitJumpSlowCaseIfNotJSCell(RegisterID);
        void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
#if USE(ALTERNATE_JSIMMEDIATE)
        JIT::Jump emitJumpIfImmediateNumber(RegisterID);
        JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
#else
        // Without the alternate immediate encoding the only immediate
        // numbers are integers, so these reduce to the integer checks.
        JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
        {
            return emitJumpIfImmediateInteger(reg);
        }
        
        JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
        {
            return emitJumpIfNotImmediateInteger(reg);
        }
#endif

        // Consume the next SlowCaseEntry from the iterator, returning its jump.
        Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
        {
            return iter++->from;
        }
        // Consume the next SlowCaseEntry, linking its jump to the current location.
        void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
        {
            iter->from.link(this);
            ++iter;
        }
        void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);

        JIT::Jump emitJumpIfImmediateInteger(RegisterID);
        JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
        JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
        void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
        void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);

        Jump checkStructure(RegisterID reg, Structure* structure);

#if !USE(ALTERNATE_JSIMMEDIATE)
        void emitFastArithDeTagImmediate(RegisterID);
        Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
#endif
        void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
        void emitFastArithImmToInt(RegisterID);
        void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);

        void emitTagAsBoolImmediate(RegisterID reg);

        void restoreArgumentReference();
        void restoreArgumentReferenceForTrampoline();

        Call emitNakedCall(void* function);

        void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
        void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
        
        void emitTimeoutCheck();
#ifndef NDEBUG
        void printBytecodeOperandTypes(unsigned src1, unsigned src2);
#endif

        void killLastResultRegister();


#if ENABLE(SAMPLING_FLAGS)
        void setSamplingFlag(int32_t);
        void clearSamplingFlag(int32_t);
#endif

#if ENABLE(SAMPLING_COUNTERS)
        void emitCount(AbstractSamplingCounter&, uint32_t = 1);
#endif

#if ENABLE(OPCODE_SAMPLING)
        void sampleInstruction(Instruction*, bool = false);
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
        void sampleCodeBlock(CodeBlock*);
#else
        void sampleCodeBlock(CodeBlock*) {}
#endif

        Interpreter* m_interpreter;
        JSGlobalData* m_globalData;
        CodeBlock* m_codeBlock;

        // Records accumulated during compilation, resolved in the link pass.
        Vector<CallRecord> m_calls;
        Vector<Label> m_labels;
        Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
        Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
        Vector<JumpTable> m_jmpTable;

        unsigned m_bytecodeIndex;
        Vector<JSRInfo> m_jsrSites;
        Vector<SlowCaseEntry> m_slowCases;
        Vector<SwitchRecord> m_switches;

        int m_lastResultBytecodeRegister;
        unsigned m_jumpTargetsPosition;

        // Indices into the CodeBlock's side tables, advanced as compilation proceeds.
        unsigned m_propertyAccessInstructionIndex;
        unsigned m_globalResolveInfoIndex;
        unsigned m_callLinkInfoIndex;
    } JIT_CLASS_ALIGNMENT;
665
666 }
667
668 #endif // ENABLE(JIT)
669
670 #endif // JIT_h