/*
 * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "Arguments.h"
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SetupVarargsFrame.h"
#include "StackAlignment.h"
#include <wtf/StringPrintStream.h>

namespace JSC {
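
// A note on the JSVALUE32_64 value representation assumed throughout this file:
// each JSValue occupies one 8-byte Register, split into a 32-bit payload word and
// a 32-bit tag word (selected via PayloadOffset/TagOffset). emitLoad()/emitStore()
// move such tag/payload pairs between the stack and a pair of GPRs, which is why
// call results and callees travel in the (regT1 = tag, regT0 = payload) register
// pair below.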
void JIT::emitPutCallResult(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite();
    emitStore(dst, regT1, regT0);
}

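// Return: load the result into the tag/payload pair our calling convention expects
// (regT1/regT0), verify the stack pointer, and tear down the frame.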
void JIT::emit_op_ret(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    emitLoad(dst, regT1, regT0);

    checkStackPointerAlignment();
    emitFunctionEpilogue();
    ret();
}

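// Slow-path trampolines, one per call-site flavor. Note that op_call_eval reads
// m_callLinkInfoIndex without incrementing it: the eval hot path makes no linkable
// call, so it consumes no CallLinkInfo slot (the matching emit_op_call_eval below
// does the same).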
void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
}

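// Hot-path emitters. Each advances m_callLinkInfoIndex in lockstep with its slow-path
// counterpart above, so both halves of a call site index the same CallCompilationInfo.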
void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
}

void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}

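// Build the frame for op_call_varargs / op_construct_varargs. Fast case: the bytecode
// forwards the callee's own 'arguments' object and it has not been materialized (its
// register still holds the empty value), so the frame can be copied straight off the
// current frame. Slow case: call out to the VM twice, first to size the varargs frame,
// then to populate it.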
void JIT::compileSetupVarargsFrame(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        move(TrustedImm32(-firstFreeRegister), regT1);
        emitSetupVarargsFrameFastCase(*this, regT1, regT0, regT1, regT2, 0, firstVarArgOffset, slowCase);
        end.append(jump());
        slowCase.link(this);
    }

    emitLoad(arguments, regT1, regT0);
    callOperation(operationSizeFrameForVarargs, regT1, regT0, -firstFreeRegister, firstVarArgOffset);
    // This is spectacularly dirty. We want to pass four arguments to operationSetupVarargsFrame. On x86-32 we
    // will pass them on the stack. We want four stack slots, or 16 bytes. Extending the stack by 8 bytes
    // over where we planned on pointing the FP gives us enough room. The reason is that the FP gives an
    // extra CallerFrameAndPC bytes beyond where SP should point prior to the call. So if we just did
    // move(returnValueGPR, stackPointerRegister), we'd have enough room for passing two args, or 8
    // bytes - except that we'd have a misaligned stack. So if we subtract *another* CallerFrameAndPC
    // bytes, we are up to 16 bytes of spare room *and* we have an aligned stack. Gross, but correct!
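    // Restating that arithmetic under the same assumptions: on a 32-bit target,
    // sizeof(CallerFrameAndPC) is two machine words (saved FP plus return PC), or 8 bytes.
    // Setting SP = returnValueGPR - 8 therefore yields 8 + 8 = 16 bytes of scratch, exactly
    // four 4-byte argument slots, while keeping SP 16-byte aligned.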
    addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), returnValueGPR, stackPointerRegister);
    emitLoad(arguments, regT2, regT1);
    callOperation(operationSetupVarargsFrame, returnValueGPR, regT2, regT1, firstVarArgOffset);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);

    // Initialize 'this'.
    emitLoad(thisValue, regT2, regT0);
    store32(regT0, Address(regT1, PayloadOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
    store32(regT2, Address(regT1, TagOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}

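// op_call_eval hot path: hand the prepared frame to operationCallEval, which either runs
// the eval (result returned directly) or signals "not actually eval" by returning the
// empty value; the EmptyValueTag check below diverts that case to the slow path, which
// performs an ordinary call.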
void JIT::compileCallEval(Instruction* instruction)
{
    addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
    storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);

    callOperation(operationCallEval, regT1);

    addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}

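// Slow path for op_call_eval: the callee was not the real eval, so perform an ordinary
// virtual call. A dummy CallLinkInfo is passed in regT2 because eval call sites are never
// linked; the virtual call thunk redoes the callee dispatch every time.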
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    int registerOffset = -instruction[4].u.operand;

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

    // Reload the callee's tag/payload pair from the frame we just set up. The two loads
    // must address the two halves of the Callee slot, not the same word twice.
    loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee + PayloadOffset - sizeof(CallerFrameAndPC)), regT0);
    loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee + TagOffset - sizeof(CallerFrameAndPC)), regT1);
    move(TrustedImmPtr(&CallLinkInfo::dummy()), regT2);

    emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}

void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
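
    // Both paths below leave SP pointing sizeof(CallerFrameAndPC) bytes into the new
    // frame, so header slot N of the callee frame is addressed as
    //     SP + N * sizeof(Register) + {PayloadOffset,TagOffset} - sizeof(CallerFrameAndPC)
    // which is the pattern used for the ArgumentCount and Callee stores that follow.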

    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileSetupVarargsFrame(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

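        // With profiling on, record the structure of 'this' in the call's array profile,
        // presumably so the optimizing tiers can specialize on the likely receiver shape.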
        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
            Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
            loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
            storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

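    // The ArgumentCount header slot keeps the 32-bit argument count in its payload half;
    // its otherwise-unused tag half is recycled to record this call site's bytecode
    // location, which lets the stack walker attribute the callee frame to this instruction.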
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
    store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));

    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    // Allocate the CallLinkInfo only for linkable calls; op_call_eval returned above and
    // never uses one.
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    checkStackPointerAlignment();
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}

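// Generic call slow path. Two slow cases were registered on the hot path (the CellTag
// check and the patchable linked-function check), hence the two linkSlowCase() calls.
// The CallLinkInfo is handed to the link thunk in regT2, which appears to be the register
// the linking thunks consume it from on this tier.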
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(instruction, iter);
        return;
    }

    linkSlowCase(iter);
    linkSlowCase(iter);

    ThunkGenerator generator = linkThunkGeneratorFor(
        (opcodeID == op_construct || opcodeID == op_construct_varargs) ? CodeForConstruct : CodeForCall,
        RegisterPreservationNotRequired);

    move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
    m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(generator).code());

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)