/*
 * Copyright (C) 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "DFGOperations.h"
#include "JSCJSValueInlines.h"
#include "Operations.h"

namespace JSC { namespace DFG {

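// Emitted on every OSR exit. Bumps the per-exit-site and CodeBlock-wide exit
// counters, then either triggers immediate reoptimization (once the CodeBlock
// has exited too many times) or backs off the baseline CodeBlock's execution
// counter so the optimized code is not re-entered too eagerly.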
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);
    
    AssemblyHelpers::Jump tooFewFails;
    
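    // Bump the CodeBlock-wide OSR exit counter and write it back. If we are
    // still at or below the reoptimization threshold, branch ahead and just
    // adjust the baseline CodeBlock's execution counter. Note that regT0 is
    // repointed at the baseline CodeBlock before the branch, so both paths
    // can use it.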
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));
    
    // Reoptimize as soon as possible.
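    // Pass the baseline CodeBlock (currently in regT0) as the argument to
    // triggerReoptimizationNow. On targets with no argument registers the
    // argument goes on the stack via poke(); otherwise it goes in argumentGPR0.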
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
    jit.call(GPRInfo::regT1);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();
    
    tooFewFails.link(&jit);
    
    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp();
    int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue =
        ExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
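    // Install the new counter state in the baseline CodeBlock; regT0 still
    // points at it on this path.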
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
    
    doneAdjusting.link(&jit);
}
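// Rebuilds the call frame headers for every inlined call frame between the
// exit's code origin and the machine (outermost) frame, so the baseline JIT
// sees a stack laid out as if each inlined call had actually been made. Walks
// the CodeOrigin chain from innermost to outermost, filling in each frame's
// CodeBlock, ScopeChain, CallerFrame, ReturnPC, ArgumentCount, and Callee
// slots. For closure calls the callee and scope vary at run time, so they are
// not stored as constants here.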
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
#if USE(JSVALUE64)
    ASSERT(jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));
    
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
        
        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);
        
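        // The reconstructed return PC is the caller's baseline machine code
        // address for the bytecode just after the call that was inlined.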
        void* jumpTarget = baselineCodeBlockForCaller->getJITCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
        
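        // Fill in the call frame header slots at this inlined frame's stack offset.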
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall())
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall())
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }
#else // not USE(JSVALUE64), i.e. the 32-bit path
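    // This mirrors the 64-bit path above, except that each JSValue slot is
    // written as a separate tag/payload pair.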
    ASSERT(jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));
    
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
        
        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);
        
        void* jumpTarget = baselineCodeBlockForCaller->getJITCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.add32(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
        
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall())
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall())
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }
#endif // USE(JSVALUE64)
}

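// Repoints the call frame register at the frame for the exit's code origin
// (if the exit happened inside an inlined frame), finds the baseline machine
// code address for the exit's bytecode index, and jumps there.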
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

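    // Map the exit's bytecode index to a machine code offset in the baseline
    // CodeBlock, using the same decoded code map as reifyInlinedCallFrames.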
    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);
    
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
    
    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
    
    void* jumpTarget = baselineCodeBlock->getJITCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("   -> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)