 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "LowLevelInterpreter.h"
31 #include "LLIntOfflineAsmConfig.h"
32 #include <wtf/InlineASM.h>
34 #if ENABLE(LLINT_C_LOOP)
35 #include "CodeBlock.h"
36 #include "LLIntCLoop.h"
37 #include "LLIntSlowPaths.h"
38 #include "Operations.h"
39 #include "VMInspector.h"
40 #include <wtf/Assertions.h>
41 #include <wtf/MathExtras.h>
43 using namespace JSC::LLInt;
45 // LLInt C Loop opcodes
46 // ====================
47 // In the implementation of the C loop, the LLint trampoline glue functions
48 // (e.g. llint_program_prologue, llint_eval_prologue, etc) are addressed as
49 // if they are bytecode handlers. That means the names of the trampoline
50 // functions will be added to the OpcodeID list via the
51 // FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
54 // In addition, some JIT trampoline functions which are needed by LLInt
55 // (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
56 // bytecodes, and the CLoop will provide bytecode handlers for them.
58 // In the CLoop, we can only dispatch indirectly to these bytecodes
59 // (including the LLInt and JIT extensions). All other dispatches
60 // (i.e. goto's) must be to a known label (i.e. local / global labels).
63 // How are the opcodes named?
64 // ==========================
65 // Here is a table to show examples of how each of the manifestation of the
68 // Type: Opcode Trampoline Glue
69 // ====== ===============
70 // [In the llint .asm files]
71 // llint labels: llint_op_enter llint_program_prologue
73 // OpcodeID: op_enter llint_program
74 // [in Opcode.h] [in LLIntOpcode.h]
76 // When using a switch statement dispatch in the CLoop, each "opcode" is
78 // Opcode: case op_enter: case llint_program_prologue:
80 // When using a computed goto dispatch in the CLoop, each opcode is a label:
81 // Opcode: op_enter: llint_program_prologue:
84 //============================================================================
85 // Define the opcode dispatch mechanism when using the C loop:
88 // These are for building a C Loop interpreter:
89 #define OFFLINE_ASM_BEGIN
90 #define OFFLINE_ASM_END
93 #define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode)
94 #if ENABLE(COMPUTED_GOTO_OPCODES)
95 #define OFFLINE_ASM_GLUE_LABEL(label) label:
97 #define OFFLINE_ASM_GLUE_LABEL(label) case label: label:
100 #define OFFLINE_ASM_LOCAL_LABEL(label) label:
103 //============================================================================
110 #if USE(JSVALUE32_64)
// Reassembles a double from its two 32-bit halves.
//
// In the 32-bit (JSVALUE32_64) value representation a double travels as a
// pair of 32-bit words; this packs `hi` into the upper and `lo` into the
// lower 32 bits of a 64-bit integer and reinterprets that bit pattern as an
// IEEE-754 double. The union-based type punning matches the file's existing
// convention (see Double2Ints below).
//
// NOTE(review): this chunk of the file is garbled (fused line numbers and
// dropped lines); the wrapper lines here are reconstructed around the one
// surviving statement, which fully determines the function's behavior.
static double Ints2Double(uint32_t lo, uint32_t hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
    return u.dval;
}
// Splits a double into its two 32-bit halves (the inverse of Ints2Double).
//
// The bit pattern of `val` is reinterpreted as a 64-bit integer; `hi`
// receives the upper 32 bits and `lo` the lower 32 bits. Used by the 32-bit
// (JSVALUE32_64) value representation, where a double is stored as a pair of
// 32-bit words.
//
// NOTE(review): this chunk of the file is garbled (fused line numbers and
// dropped lines); the opening brace and union declaration are reconstructed —
// both assignment statements survive and pin the behavior.
static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.dval = val;
    hi = static_cast<uint32_t>(u.ival64 >> 32);
    lo = static_cast<uint32_t>(u.ival64);
}
131 #endif // USE(JSVALUE32_64)
136 //============================================================================
137 // CLoopRegister is the storage for an emulated CPU register.
138 // It defines the policy of how ints smaller than intptr_t are packed into the
139 // pseudo register, as well as hides endianness differences.
// CLoopRegister: storage for one emulated CPU register (see the comment
// block above). The #if structure selects the layout per build:
// USE(JSVALUE64) vs 32-bit, crossed with CPU(BIG_ENDIAN) vs little-endian,
// so that sub-register-width fields land on the correct bytes regardless of
// endianness.
//
// NOTE(review): this chunk is garbled — most of the union's member
// declarations are missing and only the lines below survive. The code is
// therefore left byte-identical; comments only.
141 struct CLoopRegister {
// 7 bytes of padding next to a 1-byte field fills out the 8-byte (64-bit
// build) register width; the mirrored declaration below covers the
// opposite-endian layout.
160 uint8_t u8padding[7];
163 #else // !CPU(BIG_ENDIAN)
178 uint8_t u8padding[7];
180 #endif // !CPU(BIG_ENDIAN)
181 #else // !USE(JSVALUE64)
// On 32-bit builds the register is 4 bytes wide, so only 3 bytes of padding
// are needed beside a 1-byte field.
191 uint8_t u8padding[3];
195 #else // !CPU(BIG_ENDIAN)
202 uint8_t u8padding[3];
204 #endif // !CPU(BIG_ENDIAN)
205 #endif // !USE(JSVALUE64)
// Typed views of the register used by the bytecode handlers below
// (e.g. t0.encodedJSValue in the getHostCallReturnValue glue).
209 ExecState* execState;
211 NativeFunction nativeFunc;
215 EncodedJSValue encodedJSValue;
// clearHighWord() zeroes the upper 32 bits of the register where an
// i32padding member exists; the second variant is the no-op counterpart for
// the configuration that has no high word to clear.
222 inline void clearHighWord() { i32padding = 0; }
224 inline void clearHighWord() { }
228 //============================================================================
229 // The llint C++ interpreter loop:
// CLoop::execute: the C++ interpreter loop that stands in for the assembly
// LLInt when ENABLE(LLINT_C_LOOP) is set. It emulates CPU registers
// (CLoopRegister t0..t3, tagTypeNumber, tagMask, ...), a pseudo JIT stack
// frame (jitStackFrame), and dispatches bytecode either via computed goto or
// a C switch depending on ENABLE(COMPUTED_GOTO_OPCODES). The bytecode
// handler bodies themselves come from the offlineasm-generated
// "LLIntAssembly.h", #included directly into this function so the handler
// labels are in scope.
//
// When isInitializationPass is true, the function only populates the opcode
// map and exception-instruction tables and (presumably) returns without
// executing any bytecode — TODO confirm, the early-return line is not
// visible in this chunk.
//
// NOTE(review): this chunk is garbled — each line carries a fused original
// line number and many lines are missing — so the code is left byte-identical
// and only comments are added.
232 JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
233 bool isInitializationPass)
235 #define CAST reinterpret_cast
236 #define SIGN_BIT32(x) ((x) & 0x80000000)
238 // One-time initialization of our address tables. We have to put this code
239 // here because our labels are only in scope inside this function. The
240 // caller (or one of its ancestors) is responsible for ensuring that this
241 // is only called once during the initialization of the VM before threads
243 if (UNLIKELY(isInitializationPass)) {
244 #if ENABLE(COMPUTED_GOTO_OPCODES)
// With computed goto, each opcode maps to the address of its handler label
// (&&__opcode); the LLInt native helpers get entries via the same mechanism.
245 Opcode* opcodeMap = LLInt::opcodeMap();
246 #define OPCODE_ENTRY(__opcode, length) \
247 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
248 FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
251 #define LLINT_OPCODE_ENTRY(__opcode, length) \
252 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
254 FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
255 #undef LLINT_OPCODE_ENTRY
257 // Note: we can only set the exceptionInstructions after we have
258 // initialized the opcodeMap above. This is because getCodePtr()
259 // can depend on the opcodeMap.
260 Instruction* exceptionInstructions = LLInt::exceptionInstructions();
261 for (int i = 0; i < maxOpcodeLength + 1; ++i)
262 exceptionInstructions[i].u.pointer =
263 LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
268 ASSERT(callFrame->globalData().topCallFrame == callFrame);
270 // Define the pseudo registers used by the LLINT C Loop backend:
271 ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
273 union CLoopDoubleRegister {
280 // The CLoop llint backend is initially based on the ARMv7 backend, and
281 // then further enhanced with a few instructions from the x86 backend to
282 // support building for X64 targets. Hence, the shape of the generated
283 // code and the usage convention of registers will look a lot like the
286 // For example, on a 32-bit build:
287 // 1. Outgoing args will be set up as follows:
288 // arg1 in t0 (r0 on ARM)
289 // arg2 in t1 (r1 on ARM)
290 // 2. 32 bit return values will be in t0 (r0 on ARM).
291 // 3. 64 bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
293 // But instead of naming these simulator registers based on their ARM
294 // counterparts, we'll name them based on their original llint asm names.
295 // This will make it easier to correlate the generated code with the
296 // original llint asm code.
298 // On a 64-bit build, it more like x64 in that the registers are 64 bit.
300 // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
301 // 2. 32 bit result values will be in the low 32-bit of t0.
302 // 3. 64 bit result values will be in t0.
304 CLoopRegister t0, t1, t2, t3;
306 CLoopRegister rBasePC, tagTypeNumber, tagMask;
308 CLoopRegister rRetVPC;
309 CLoopDoubleRegister d0, d1;
312 // Keep the compiler happy. We don't really need this, but the compiler
313 // will complain. This makes the warning go away.
318 // Instantiate the pseudo JIT stack frame used by the LLINT C Loop backend:
319 JITStackFrame jitStackFrame;
321 // The llint expects the native stack pointer, sp, to be pointing to the
322 // jitStackFrame (which is the simulation of the native stack frame):
323 JITStackFrame* const sp = &jitStackFrame;
324 sp->globalData = &callFrame->globalData();
326 // Set up an alias for the globalData ptr in the JITStackFrame:
327 JSGlobalData* &globalData = sp->globalData;
329 CodeBlock* codeBlock = callFrame->codeBlock();
332 // rPC is an alias for vPC. Set up the alias:
333 CLoopRegister& rPC = *CAST<CLoopRegister*>(&vPC);
335 #if USE(JSVALUE32_64)
// 32-bit: vPC is a direct Instruction pointer. 64-bit: the PC is split into
// a base pointer (rBasePC) plus a bytecode offset (rPC), matching the 64-bit
// llint asm convention.
336 vPC = codeBlock->instructions().begin();
337 #else // USE(JSVALUE64)
339 rBasePC.vp = codeBlock->instructions().begin();
341 // For the ASM llint, JITStubs takes care of this initialization. We do
342 // it explicitly here for the C loop:
343 tagTypeNumber.i = 0xFFFF000000000000;
344 tagMask.i = 0xFFFF000000000002;
345 #endif // USE(JSVALUE64)
347 // cfr is an alias for callFrame. Set up this alias:
348 CLoopRegister& cfr = *CAST<CLoopRegister*>(&callFrame);
350 // Simulate a native return PC which should never be used:
351 rRetVPC.i = 0xbbadbeef;
353 // Interpreter variables for value passing between opcodes and/or helpers:
354 NativeFunction nativeFunc = 0;
355 JSValue functionReturnValue;
// Bootstrap: begin dispatch at the requested trampoline opcode (e.g. one of
// the llint_*_prologue glue opcodes described at the top of this file).
358 opcode = LLInt::getOpcode(bootstrapOpcodeId);
360 #if ENABLE(OPCODE_STATS)
361 #define RECORD_OPCODE_STATS(__opcode) \
362 OpcodeStats::recordInstruction(__opcode)
364 #define RECORD_OPCODE_STATS(__opcode)
367 #if USE(JSVALUE32_64)
368 #define FETCH_OPCODE() vPC->u.opcode
369 #else // USE(JSVALUE64)
// 64-bit: the next opcode lives at rBasePC + rPC * sizeof(Instruction)
// (8 bytes), since rPC holds a bytecode offset rather than a real address.
370 #define FETCH_OPCODE() *bitwise_cast<Opcode*>(rBasePC.i8p + rPC.i * 8)
371 #endif // USE(JSVALUE64)
373 #define NEXT_INSTRUCTION() \
375 opcode = FETCH_OPCODE(); \
379 #if ENABLE(COMPUTED_GOTO_OPCODES)
381 //========================================================================
382 // Loop dispatch mechanism using computed goto statements:
384 #define DISPATCH_OPCODE() goto *opcode
386 #define DEFINE_OPCODE(__opcode) \
388 RECORD_OPCODE_STATS(__opcode);
390 // Dispatch to the current PC's bytecode:
393 #else // !ENABLE(COMPUTED_GOTO_OPCODES)
394 //========================================================================
395 // Loop dispatch mechanism using a C switch statement:
397 #define DISPATCH_OPCODE() goto dispatchOpcode
399 #define DEFINE_OPCODE(__opcode) \
402 RECORD_OPCODE_STATS(__opcode);
404 // Dispatch to the current PC's bytecode:
408 #endif // !ENABLE(COMPUTED_GOTO_OPCODES)
410 //========================================================================
411 // Bytecode handlers:
413 // This is the file generated by offlineasm, which contains all of the
414 // bytecode handlers for the interpreter, as compiled from
415 // LowLevelInterpreter.asm and its peers.
417 #include "LLIntAssembly.h"
419 // In the ASM llint, getHostCallReturnValue() is a piece of glue
420 // function provided by the JIT (see dfg/DFGOperations.cpp).
421 // We simulate it here with a pseduo-opcode handler.
422 OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
424 // The ASM part pops the frame:
425 callFrame = callFrame->callerFrame();
427 // The part in getHostCallReturnValueWithExecState():
428 JSValue result = globalData->hostCallReturnValue;
429 #if USE(JSVALUE32_64)
431 t0.i = result.payload();
433 t0.encodedJSValue = JSValue::encode(result);
// ctiOpThrowNotCaught: JIT glue simulated as a pseudo-opcode; an uncaught
// throw exits the interpreter loop with the pending exception.
438 OFFLINE_ASM_GLUE_LABEL(ctiOpThrowNotCaught)
440 return globalData->exception;
443 #if !ENABLE(COMPUTED_GOTO_OPCODES)
448 } // END bytecode handler cases.
450 //========================================================================
// doReturnHelper: if the frame we returned to is the host's (marked with the
// host call-frame flag), leave the interpreter, returning the value in t0
// (with its tag in t1 on 32-bit builds).
455 if (callFrame->hasHostCallFrameFlag()) {
456 #if USE(JSVALUE32_64)
457 return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
459 return JSValue::decode(t0.encodedJSValue);
463 // The normal ASM llint call implementation returns to the caller as
464 // recorded in rRetVPC, and the caller would fetch the return address
465 // from ArgumentCount.tag() (see the dispatchAfterCall() macro used in
466 // the callTargetFunction() macro in the llint asm files).
468 // For the C loop, we don't have the JIT stub to this work for us.
469 // So, we need to implement the equivalent of dispatchAfterCall() here
470 // before dispatching to the PC.
472 vPC = callFrame->currentVPC();
475 // Based on LowLevelInterpreter64.asm's dispatchAfterCall():
477 // When returning from a native trampoline call, unlike the assembly
478 // LLInt, we can't simply return to the caller. In our case, we grab
479 // the caller's VPC and resume execution there. However, the caller's
480 // VPC returned by callFrame->currentVPC() is in the form of the real
481 // address of the target bytecode, but the 64-bit llint expects the
482 // VPC to be a bytecode offset. Hence, we need to map it back to a
483 // bytecode offset before we dispatch via the usual dispatch mechanism
484 // i.e. NEXT_INSTRUCTION():
486 codeBlock = callFrame->codeBlock();
488 rPC.vp = callFrame->currentVPC();
489 rPC.i = rPC.i8p - reinterpret_cast<int8_t*>(codeBlock->instructions().begin());
492 rBasePC.vp = codeBlock->instructions().begin();
493 #endif // USE(JSVALUE64)
497 } // END doReturnHelper.
500 // Keep the compiler happy so that it doesn't complain about unused
501 // labels for the LLInt trampoline glue. The labels are automatically
502 // emitted by label macros above, and some of them are referenced by
503 // the llint generated code. Since we can't tell ahead of time which
504 // will be referenced and which will be not, we'll just passify the
505 // compiler on all such labels:
506 #define LLINT_OPCODE_ENTRY(__opcode, length) \
507 UNUSED_LABEL(__opcode);
508 FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
509 #undef LLINT_OPCODE_ENTRY
512 #undef NEXT_INSTRUCTION
514 #undef CHECK_FOR_TIMEOUT
518 } // Interpreter::llintCLoopExecute()
522 #else // !ENABLE(LLINT_C_LOOP)
524 //============================================================================
525 // Define the opcode dispatch mechanism when using an ASM loop:
528 // These are for building an interpreter from generated assembly code:
529 #define OFFLINE_ASM_BEGIN asm (
530 #define OFFLINE_ASM_END );
532 #define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(llint_##__opcode)
533 #define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(__opcode)
536 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
537 ".globl " SYMBOL_STRING(label) "\n" \
538 HIDE_SYMBOL(label) "\n" \
540 ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
541 SYMBOL_STRING(label) ":\n"
543 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
544 ".globl " SYMBOL_STRING(label) "\n" \
545 HIDE_SYMBOL(label) "\n" \
546 SYMBOL_STRING(label) ":\n"
549 #define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
551 // This is a file generated by offlineasm, which contains all of the assembly code
552 // for the interpreter, as compiled from LowLevelInterpreter.asm.
553 #include "LLIntAssembly.h"
555 #endif // !ENABLE(LLINT_C_LOOP)
557 #endif // ENABLE(LLINT)