/*
 * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "Arguments.h"
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Operations.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include <wtf/StringPrintStream.h>

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

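// Under JSVALUE32_64, a JSValue travels as an 8-byte tag/payload pair; by
// convention the call machinery below keeps the tag in regT1 and the payload
// in regT0. A call result is therefore profiled and then stored back to the
// dst virtual register as two 32-bit words (regT4 appears to serve as a
// scratch register for the profiling site).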
void JIT::emitPutCallResult(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite(regT4);
    emitStore(dst, regT1, regT0);
}

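// op_ret: load the return value into regT1:regT0, then unwind manually by
// pulling the saved return PC and caller frame out of the call frame header.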
void JIT::emit_op_ret(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    emitLoad(dst, regT1, regT0);
    emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();
}

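// op_ret_object_or_this implements the return rule for constructors: if the
// returned value is not an object (not a cell, or a cell whose structure is
// not an object type), the callee's 'this' is returned instead.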
void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned thisReg = currentInstruction[2].u.operand;

    emitLoad(result, regT1, regT0);
    Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    Jump notObject = emitJumpIfNotObject(regT2);

    emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();

    notJSCell.link(this);
    notObject.link(this);
    emitLoad(thisReg, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();
}

void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
}

void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}

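// Fast path for op_call_varargs (e.g. f.apply(thisValue, arguments)): when
// this code block's 'arguments' object has not been materialized (its
// register still holds the empty value) and there are no slow arguments, the
// actual arguments can be copied directly out of the current frame; anything
// else falls through to the cti_op_load_varargs stub below.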
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

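        // Compute the address of the new call frame:
        //   regT3 = callFrameRegister
        //       + (firstFreeRegister - CallFrameHeaderSize - argumentCountIncludingThis) * sizeof(Register)
        // The left shift by 3 is the multiply by sizeof(Register), which is
        // 8 bytes (a 4-byte payload plus a 4-byte tag) in this configuration.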
        move(regT2, regT3);
        neg32(regT3);
        add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        end.append(branchSub32(Zero, TrustedImm32(1), regT2));
        // regT2: argumentCount

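        // Copy arguments from the current frame into the new frame, walking
        // regT2 from the last argument index down to 1 ('this' was stored
        // above). Each iteration moves one Register as two 32-bit words;
        // TimesEight scales the index by sizeof(Register).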
        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue);
    stubCall.addArgument(arguments);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT3);

    if (canOptimize)
        end.link(this);
}

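// op_call_eval: the stub performs the entire call when the callee really is
// the global eval. When the callee turns out not to be eval, the stub signals
// this by returning the empty value tag, and the slow case below redoes the
// call as an ordinary virtual call.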
void JIT::compileCallEval(Instruction* instruction)
{
    JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
    stubCall.call();
    addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
    emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}

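// Slow case for op_call_eval: the callee was not actually eval. The callee
// frame header was already populated on the hot path, so reload the Callee
// slot from it and dispatch through the generic virtual call thunk.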
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    emitLoad(JSStack::Callee, regT1, regT0);
    emitNakedCall(m_vm->getCTIStub(oldStyleVirtualCallGenerator).code());

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}

void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

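        // When profiling is on, remember the structure of 'this' in the call
        // site's array profile (skipping non-cell 'this' values) so the
        // optimizing JIT can specialize on it later.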
        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
            Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
            loadPtr(Address(regT1, JSCell::structureOffset()), regT1);
            storePtr(regT1, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);

        store32(TrustedImm32(argCount), payloadFor(JSStack::ArgumentCount, regT3));
    }
    // regT3 holds newCallFrame with ArgumentCount initialized.

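    // The tag word of the ArgumentCount header slot does double duty: it holds
    // the caller's location, encoded here as the bytecode offset of this call
    // instruction, for use when walking the stack (exceptions, debugging).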
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
    store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
    emitLoad(callee, regT1, regT0); // regT1:regT0 hold the callee (tag:payload).

    storePtr(callFrameRegister, Address(regT3, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
    emitStore(JSStack::Callee, regT1, regT0, regT3);
    move(regT3, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

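    // The call inline cache: a patchable compare of the callee payload against
    // an expected JSFunction, wrapped in an uninterrupted sequence so that
    // repatching code can rely on its exact layout. The patchable constant
    // starts out as 0, so the first execution always takes the slow path,
    // which links the call and patches in the observed callee.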
    DataLabelPtr addressOfLinkedFunctionCheck;
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(slowCase);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}

void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(instruction, iter);
        return;
    }

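    // The hot path registered two slow cases (patched callee pointer mismatch,
    // and callee tag not CellTag); link both of them here.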
    linkSlowCase(iter);
    linkSlowCase(iter);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(oldStyleLinkConstructGenerator).code() : m_vm->getCTIStub(oldStyleLinkCallGenerator).code());

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}

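// Installed when a call site keeps seeing different JSFunction objects that
// share one underlying executable. Rather than checking the exact callee
// pointer, the generated stub checks the callee's structure and executable,
// so distinct closures of the same function all stay on the fast path. On
// entry, regT1:regT0 are expected to hold the callee's tag and payload.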
void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
    JumpList slowCases;

    slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));

    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);

    Call call = nearCall();
    Jump done = jump();

    slowCases.link(this);
    move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
    restoreReturnAddressBeforeReturn(regT2);
    Jump slow = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
    patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
    patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(oldStyleVirtualCallGenerator).code()));

    RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline closure call stub for %s, return point %p, target %p (%s)",
                toCString(*m_codeBlock).data(),
                callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
                codePtr.executableAddress(),
                toCString(pointerDump(calleeCodeBlock)).data())),
        *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
        callLinkInfo->codeOrigin));

    RepatchBuffer repatchBuffer(m_codeBlock);

    repatchBuffer.replaceWithJump(
        RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
        CodeLocationLabel(stubRoutine->code().code()));
    repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(oldStyleVirtualCallGenerator).code());

    callLinkInfo->stub = stubRoutine.release();
}

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)