Enable JIT on ARM/Linux
author: dinfuehr@igalia.com <dinfuehr@igalia.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 21 Nov 2018 11:03:29 +0000 (11:03 +0000)
committer: dinfuehr@igalia.com <dinfuehr@igalia.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 21 Nov 2018 11:03:29 +0000 (11:03 +0000)
https://bugs.webkit.org/show_bug.cgi?id=191548

Reviewed by Yusuke Suzuki.

.:

Enable JIT by default on ARMv7/Linux after it was disabled with
the recent bytecode format change.

* Source/cmake/WebKitFeatures.cmake:

JSTests:

Disable the test on systems with limited memory. The program was killed by
the OS before the exception was thrown.

* slowMicrobenchmarks/function-constructor-with-huge-strings.js:

Source/bmalloc:

* bmalloc/IsoPageInlines.h:
(bmalloc::IsoPage<Config>::startAllocating):

Source/JavaScriptCore:

Enable JIT by default on ARMv7/Linux after it was disabled with
the recent bytecode format change.

* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::getICStatusMap):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::metadata):
* bytecode/InByIdStatus.cpp:
(JSC::InByIdStatus::computeFor):
* bytecode/Instruction.h:
(JSC::Instruction::cast):
* bytecode/MetadataTable.h:
(JSC::MetadataTable::forEach):
* bytecode/PutByIdStatus.cpp:
(JSC::PutByIdStatus::computeFor):
(JSC::PutByIdStatus::hasExitSite): Deleted.
* bytecode/PutByIdStatus.h:
* dfg/DFGOSRExit.cpp:
(JSC::DFG::reifyInlinedCallFrames):
* dfg/DFGOSRExitCompilerCommon.cpp:
(JSC::DFG::reifyInlinedCallFrames):
* generator/Argument.rb:
* generator/Opcode.rb:
* jit/GPRInfo.h:
* jit/JIT.h:
* jit/JITArithmetic32_64.cpp:
(JSC::JIT::emit_compareAndJump):
(JSC::JIT::emit_compareUnsignedAndJump):
(JSC::JIT::emit_compareUnsigned):
(JSC::JIT::emit_compareAndJumpSlow):
(JSC::JIT::emit_op_unsigned):
(JSC::JIT::emit_op_inc):
(JSC::JIT::emit_op_dec):
(JSC::JIT::emitBinaryDoubleOp):
(JSC::JIT::emit_op_mod):
(JSC::JIT::emitSlow_op_mod):
* jit/JITCall32_64.cpp:
(JSC::JIT::emitPutCallResult):
(JSC::JIT::emit_op_ret):
(JSC::JIT::emitSlow_op_call):
(JSC::JIT::emitSlow_op_tail_call):
(JSC::JIT::emitSlow_op_call_eval):
(JSC::JIT::emitSlow_op_call_varargs):
(JSC::JIT::emitSlow_op_tail_call_varargs):
(JSC::JIT::emitSlow_op_tail_call_forward_arguments):
(JSC::JIT::emitSlow_op_construct_varargs):
(JSC::JIT::emitSlow_op_construct):
(JSC::JIT::emit_op_call):
(JSC::JIT::emit_op_tail_call):
(JSC::JIT::emit_op_call_eval):
(JSC::JIT::emit_op_call_varargs):
(JSC::JIT::emit_op_tail_call_varargs):
(JSC::JIT::emit_op_tail_call_forward_arguments):
(JSC::JIT::emit_op_construct_varargs):
(JSC::JIT::emit_op_construct):
(JSC::JIT::compileSetupFrame):
(JSC::JIT::compileCallEval):
(JSC::JIT::compileCallEvalSlowCase):
(JSC::JIT::compileOpCall):
(JSC::JIT::compileOpCallSlowCase):
(JSC::JIT::compileSetupVarargsFrame): Deleted.
* jit/JITInlines.h:
(JSC::JIT::updateTopCallFrame):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_catch):
(JSC::JIT::emitSlow_op_loop_hint):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::emit_op_mov):
(JSC::JIT::emit_op_end):
(JSC::JIT::emit_op_jmp):
(JSC::JIT::emit_op_new_object):
(JSC::JIT::emitSlow_op_new_object):
(JSC::JIT::emit_op_overrides_has_instance):
(JSC::JIT::emit_op_instanceof):
(JSC::JIT::emit_op_instanceof_custom):
(JSC::JIT::emitSlow_op_instanceof):
(JSC::JIT::emitSlow_op_instanceof_custom):
(JSC::JIT::emit_op_is_empty):
(JSC::JIT::emit_op_is_undefined):
(JSC::JIT::emit_op_is_boolean):
(JSC::JIT::emit_op_is_number):
(JSC::JIT::emit_op_is_cell_with_type):
(JSC::JIT::emit_op_is_object):
(JSC::JIT::emit_op_to_primitive):
(JSC::JIT::emit_op_set_function_name):
(JSC::JIT::emit_op_not):
(JSC::JIT::emit_op_jfalse):
(JSC::JIT::emit_op_jtrue):
(JSC::JIT::emit_op_jeq_null):
(JSC::JIT::emit_op_jneq_null):
(JSC::JIT::emit_op_jneq_ptr):
(JSC::JIT::emit_op_eq):
(JSC::JIT::emitSlow_op_eq):
(JSC::JIT::emit_op_jeq):
(JSC::JIT::emitSlow_op_jeq):
(JSC::JIT::emit_op_neq):
(JSC::JIT::emitSlow_op_neq):
(JSC::JIT::emit_op_jneq):
(JSC::JIT::emitSlow_op_jneq):
(JSC::JIT::compileOpStrictEq):
(JSC::JIT::emit_op_stricteq):
(JSC::JIT::emit_op_nstricteq):
(JSC::JIT::compileOpStrictEqJump):
(JSC::JIT::emit_op_jstricteq):
(JSC::JIT::emit_op_jnstricteq):
(JSC::JIT::emitSlow_op_jstricteq):
(JSC::JIT::emitSlow_op_jnstricteq):
(JSC::JIT::emit_op_eq_null):
(JSC::JIT::emit_op_neq_null):
(JSC::JIT::emit_op_throw):
(JSC::JIT::emit_op_to_number):
(JSC::JIT::emit_op_to_string):
(JSC::JIT::emit_op_to_object):
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_identity_with_profile):
(JSC::JIT::emit_op_get_parent_scope):
(JSC::JIT::emit_op_switch_imm):
(JSC::JIT::emit_op_switch_char):
(JSC::JIT::emit_op_switch_string):
(JSC::JIT::emit_op_debug):
(JSC::JIT::emit_op_enter):
(JSC::JIT::emit_op_get_scope):
(JSC::JIT::emit_op_create_this):
(JSC::JIT::emit_op_to_this):
(JSC::JIT::emit_op_check_tdz):
(JSC::JIT::emit_op_has_structure_property):
(JSC::JIT::privateCompileHasIndexedProperty):
(JSC::JIT::emit_op_has_indexed_property):
(JSC::JIT::emitSlow_op_has_indexed_property):
(JSC::JIT::emit_op_get_direct_pname):
(JSC::JIT::emit_op_enumerator_structure_pname):
(JSC::JIT::emit_op_enumerator_generic_pname):
(JSC::JIT::emit_op_profile_type):
(JSC::JIT::emit_op_log_shadow_chicken_prologue):
(JSC::JIT::emit_op_log_shadow_chicken_tail):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emit_op_put_getter_by_id):
(JSC::JIT::emit_op_put_setter_by_id):
(JSC::JIT::emit_op_put_getter_setter_by_id):
(JSC::JIT::emit_op_put_getter_by_val):
(JSC::JIT::emit_op_put_setter_by_val):
(JSC::JIT::emit_op_del_by_id):
(JSC::JIT::emit_op_del_by_val):
(JSC::JIT::emit_op_get_by_val):
(JSC::JIT::emitGetByValWithCachedId):
(JSC::JIT::emitSlow_op_get_by_val):
(JSC::JIT::emit_op_put_by_val_direct):
(JSC::JIT::emit_op_put_by_val):
(JSC::JIT::emitGenericContiguousPutByVal):
(JSC::JIT::emitArrayStoragePutByVal):
(JSC::JIT::emitPutByValWithCachedId):
(JSC::JIT::emitSlow_op_put_by_val):
(JSC::JIT::emit_op_try_get_by_id):
(JSC::JIT::emitSlow_op_try_get_by_id):
(JSC::JIT::emit_op_get_by_id_direct):
(JSC::JIT::emitSlow_op_get_by_id_direct):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emitSlow_op_get_by_id):
(JSC::JIT::emit_op_get_by_id_with_this):
(JSC::JIT::emitSlow_op_get_by_id_with_this):
(JSC::JIT::emit_op_put_by_id):
(JSC::JIT::emitSlow_op_put_by_id):
(JSC::JIT::emit_op_in_by_id):
(JSC::JIT::emitSlow_op_in_by_id):
(JSC::JIT::emit_op_resolve_scope):
(JSC::JIT::emit_op_get_from_scope):
(JSC::JIT::emitSlow_op_get_from_scope):
(JSC::JIT::emit_op_put_to_scope):
(JSC::JIT::emitSlow_op_put_to_scope):
(JSC::JIT::emit_op_get_from_arguments):
(JSC::JIT::emit_op_put_to_arguments):
* jit/RegisterSet.cpp:
(JSC::RegisterSet::vmCalleeSaveRegisters):
* llint/LLIntData.cpp:
(JSC::LLInt::Data::performAssertions):
* llint/LowLevelInterpreter.asm:
* runtime/SamplingProfiler.cpp:
(JSC::tryGetBytecodeIndex):

Source/WTF:

Enable JIT by default on ARMv7/Linux after it was disabled with
the recent bytecode format change.

* wtf/Platform.h:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@238414 268f45cc-cd09-0410-ab3c-d52691b4dbfc

31 files changed:
CMakeLists.txt
ChangeLog
JSTests/ChangeLog
JSTests/slowMicrobenchmarks/function-constructor-with-huge-strings.js
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/bytecode/CodeBlock.h
Source/JavaScriptCore/bytecode/Instruction.h
Source/JavaScriptCore/bytecode/InstructionStream.cpp
Source/JavaScriptCore/bytecode/InstructionStream.h
Source/JavaScriptCore/bytecode/MetadataTable.h
Source/JavaScriptCore/dfg/DFGOSRExit.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
Source/JavaScriptCore/generator/Argument.rb
Source/JavaScriptCore/generator/Opcode.rb
Source/JavaScriptCore/jit/GPRInfo.h
Source/JavaScriptCore/jit/JIT.h
Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
Source/JavaScriptCore/jit/JITCall32_64.cpp
Source/JavaScriptCore/jit/JITInlines.h
Source/JavaScriptCore/jit/JITOpcodes.cpp
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
Source/JavaScriptCore/jit/RegisterSet.cpp
Source/JavaScriptCore/llint/LLIntData.cpp
Source/JavaScriptCore/llint/LowLevelInterpreter.asm
Source/JavaScriptCore/runtime/SamplingProfiler.cpp
Source/WTF/ChangeLog
Source/WTF/wtf/Platform.h
Source/bmalloc/ChangeLog
Source/bmalloc/bmalloc/IsoPageInlines.h
Source/cmake/WebKitFeatures.cmake

index 8e180d4..fea7e30 100644 (file)
@@ -112,6 +112,8 @@ endif ()
 if (UNIX)
     if (APPLE)
         set(WTF_OS_MAC_OS_X 1)
+    elseif (CMAKE_SYSTEM_NAME MATCHES "Linux")
+        set(WTF_OS_LINUX 1)
     else ()
         set(WTF_OS_UNIX 1)
     endif ()
index 00556bb..2735c61 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2018-11-21  Dominik Infuehr  <dinfuehr@igalia.com>
+
+        Enable JIT on ARM/Linux
+        https://bugs.webkit.org/show_bug.cgi?id=191548
+
+        Reviewed by Yusuke Suzuki.
+
+        Enable JIT by default on ARMv7/Linux after it was disabled with
+        the recent bytecode format change.
+
+        * Source/cmake/WebKitFeatures.cmake:
+
 2018-11-16  Don Olmstead  <don.olmstead@sony.com>
 
         Add USE(LIBWPE) to WebCore
index 6467d07..a47ae1f 100644 (file)
@@ -1,3 +1,15 @@
+2018-11-21  Dominik Infuehr  <dinfuehr@igalia.com>
+
+        Enable JIT on ARM/Linux
+        https://bugs.webkit.org/show_bug.cgi?id=191548
+
+        Reviewed by Yusuke Suzuki.
+
+        Disable the test on systems with limited memory. The program was killed by
+        the OS before the exception was thrown.
+
+        * slowMicrobenchmarks/function-constructor-with-huge-strings.js:
+
 2018-11-20  Saam barati  <sbarati@apple.com>
 
         Merging an IC variant may lead to the IC status containing overlapping structure sets
index bb465f5..23bcdf5 100644 (file)
@@ -1,3 +1,189 @@
+2018-11-21  Dominik Infuehr  <dinfuehr@igalia.com>
+
+        Enable JIT on ARM/Linux
+        https://bugs.webkit.org/show_bug.cgi?id=191548
+
+        Reviewed by Yusuke Suzuki.
+
+        Enable JIT by default on ARMv7/Linux after it was disabled with
+        the recent bytecode format change.
+
+        * bytecode/CodeBlock.cpp:
+        (JSC::CodeBlock::getICStatusMap):
+        * bytecode/CodeBlock.h:
+        (JSC::CodeBlock::metadata):
+        * bytecode/InByIdStatus.cpp:
+        (JSC::InByIdStatus::computeFor):
+        * bytecode/Instruction.h:
+        (JSC::Instruction::cast):
+        * bytecode/MetadataTable.h:
+        (JSC::MetadataTable::forEach):
+        * bytecode/PutByIdStatus.cpp:
+        (JSC::PutByIdStatus::computeFor):
+        (JSC::PutByIdStatus::hasExitSite): Deleted.
+        * bytecode/PutByIdStatus.h:
+        * dfg/DFGOSRExit.cpp:
+        (JSC::DFG::reifyInlinedCallFrames):
+        * dfg/DFGOSRExitCompilerCommon.cpp:
+        (JSC::DFG::reifyInlinedCallFrames):
+        * generator/Argument.rb:
+        * generator/Opcode.rb:
+        * jit/GPRInfo.h:
+        * jit/JIT.h:
+        * jit/JITArithmetic32_64.cpp:
+        (JSC::JIT::emit_compareAndJump):
+        (JSC::JIT::emit_compareUnsignedAndJump):
+        (JSC::JIT::emit_compareUnsigned):
+        (JSC::JIT::emit_compareAndJumpSlow):
+        (JSC::JIT::emit_op_unsigned):
+        (JSC::JIT::emit_op_inc):
+        (JSC::JIT::emit_op_dec):
+        (JSC::JIT::emitBinaryDoubleOp):
+        (JSC::JIT::emit_op_mod):
+        (JSC::JIT::emitSlow_op_mod):
+        * jit/JITCall32_64.cpp:
+        (JSC::JIT::emitPutCallResult):
+        (JSC::JIT::emit_op_ret):
+        (JSC::JIT::emitSlow_op_call):
+        (JSC::JIT::emitSlow_op_tail_call):
+        (JSC::JIT::emitSlow_op_call_eval):
+        (JSC::JIT::emitSlow_op_call_varargs):
+        (JSC::JIT::emitSlow_op_tail_call_varargs):
+        (JSC::JIT::emitSlow_op_tail_call_forward_arguments):
+        (JSC::JIT::emitSlow_op_construct_varargs):
+        (JSC::JIT::emitSlow_op_construct):
+        (JSC::JIT::emit_op_call):
+        (JSC::JIT::emit_op_tail_call):
+        (JSC::JIT::emit_op_call_eval):
+        (JSC::JIT::emit_op_call_varargs):
+        (JSC::JIT::emit_op_tail_call_varargs):
+        (JSC::JIT::emit_op_tail_call_forward_arguments):
+        (JSC::JIT::emit_op_construct_varargs):
+        (JSC::JIT::emit_op_construct):
+        (JSC::JIT::compileSetupFrame):
+        (JSC::JIT::compileCallEval):
+        (JSC::JIT::compileCallEvalSlowCase):
+        (JSC::JIT::compileOpCall):
+        (JSC::JIT::compileOpCallSlowCase):
+        (JSC::JIT::compileSetupVarargsFrame): Deleted.
+        * jit/JITInlines.h:
+        (JSC::JIT::updateTopCallFrame):
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::emit_op_catch):
+        (JSC::JIT::emitSlow_op_loop_hint):
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::emit_op_mov):
+        (JSC::JIT::emit_op_end):
+        (JSC::JIT::emit_op_jmp):
+        (JSC::JIT::emit_op_new_object):
+        (JSC::JIT::emitSlow_op_new_object):
+        (JSC::JIT::emit_op_overrides_has_instance):
+        (JSC::JIT::emit_op_instanceof):
+        (JSC::JIT::emit_op_instanceof_custom):
+        (JSC::JIT::emitSlow_op_instanceof):
+        (JSC::JIT::emitSlow_op_instanceof_custom):
+        (JSC::JIT::emit_op_is_empty):
+        (JSC::JIT::emit_op_is_undefined):
+        (JSC::JIT::emit_op_is_boolean):
+        (JSC::JIT::emit_op_is_number):
+        (JSC::JIT::emit_op_is_cell_with_type):
+        (JSC::JIT::emit_op_is_object):
+        (JSC::JIT::emit_op_to_primitive):
+        (JSC::JIT::emit_op_set_function_name):
+        (JSC::JIT::emit_op_not):
+        (JSC::JIT::emit_op_jfalse):
+        (JSC::JIT::emit_op_jtrue):
+        (JSC::JIT::emit_op_jeq_null):
+        (JSC::JIT::emit_op_jneq_null):
+        (JSC::JIT::emit_op_jneq_ptr):
+        (JSC::JIT::emit_op_eq):
+        (JSC::JIT::emitSlow_op_eq):
+        (JSC::JIT::emit_op_jeq):
+        (JSC::JIT::emitSlow_op_jeq):
+        (JSC::JIT::emit_op_neq):
+        (JSC::JIT::emitSlow_op_neq):
+        (JSC::JIT::emit_op_jneq):
+        (JSC::JIT::emitSlow_op_jneq):
+        (JSC::JIT::compileOpStrictEq):
+        (JSC::JIT::emit_op_stricteq):
+        (JSC::JIT::emit_op_nstricteq):
+        (JSC::JIT::compileOpStrictEqJump):
+        (JSC::JIT::emit_op_jstricteq):
+        (JSC::JIT::emit_op_jnstricteq):
+        (JSC::JIT::emitSlow_op_jstricteq):
+        (JSC::JIT::emitSlow_op_jnstricteq):
+        (JSC::JIT::emit_op_eq_null):
+        (JSC::JIT::emit_op_neq_null):
+        (JSC::JIT::emit_op_throw):
+        (JSC::JIT::emit_op_to_number):
+        (JSC::JIT::emit_op_to_string):
+        (JSC::JIT::emit_op_to_object):
+        (JSC::JIT::emit_op_catch):
+        (JSC::JIT::emit_op_identity_with_profile):
+        (JSC::JIT::emit_op_get_parent_scope):
+        (JSC::JIT::emit_op_switch_imm):
+        (JSC::JIT::emit_op_switch_char):
+        (JSC::JIT::emit_op_switch_string):
+        (JSC::JIT::emit_op_debug):
+        (JSC::JIT::emit_op_enter):
+        (JSC::JIT::emit_op_get_scope):
+        (JSC::JIT::emit_op_create_this):
+        (JSC::JIT::emit_op_to_this):
+        (JSC::JIT::emit_op_check_tdz):
+        (JSC::JIT::emit_op_has_structure_property):
+        (JSC::JIT::privateCompileHasIndexedProperty):
+        (JSC::JIT::emit_op_has_indexed_property):
+        (JSC::JIT::emitSlow_op_has_indexed_property):
+        (JSC::JIT::emit_op_get_direct_pname):
+        (JSC::JIT::emit_op_enumerator_structure_pname):
+        (JSC::JIT::emit_op_enumerator_generic_pname):
+        (JSC::JIT::emit_op_profile_type):
+        (JSC::JIT::emit_op_log_shadow_chicken_prologue):
+        (JSC::JIT::emit_op_log_shadow_chicken_tail):
+        * jit/JITPropertyAccess32_64.cpp:
+        (JSC::JIT::emit_op_put_getter_by_id):
+        (JSC::JIT::emit_op_put_setter_by_id):
+        (JSC::JIT::emit_op_put_getter_setter_by_id):
+        (JSC::JIT::emit_op_put_getter_by_val):
+        (JSC::JIT::emit_op_put_setter_by_val):
+        (JSC::JIT::emit_op_del_by_id):
+        (JSC::JIT::emit_op_del_by_val):
+        (JSC::JIT::emit_op_get_by_val):
+        (JSC::JIT::emitGetByValWithCachedId):
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::emit_op_put_by_val_direct):
+        (JSC::JIT::emit_op_put_by_val):
+        (JSC::JIT::emitGenericContiguousPutByVal):
+        (JSC::JIT::emitArrayStoragePutByVal):
+        (JSC::JIT::emitPutByValWithCachedId):
+        (JSC::JIT::emitSlow_op_put_by_val):
+        (JSC::JIT::emit_op_try_get_by_id):
+        (JSC::JIT::emitSlow_op_try_get_by_id):
+        (JSC::JIT::emit_op_get_by_id_direct):
+        (JSC::JIT::emitSlow_op_get_by_id_direct):
+        (JSC::JIT::emit_op_get_by_id):
+        (JSC::JIT::emitSlow_op_get_by_id):
+        (JSC::JIT::emit_op_get_by_id_with_this):
+        (JSC::JIT::emitSlow_op_get_by_id_with_this):
+        (JSC::JIT::emit_op_put_by_id):
+        (JSC::JIT::emitSlow_op_put_by_id):
+        (JSC::JIT::emit_op_in_by_id):
+        (JSC::JIT::emitSlow_op_in_by_id):
+        (JSC::JIT::emit_op_resolve_scope):
+        (JSC::JIT::emit_op_get_from_scope):
+        (JSC::JIT::emitSlow_op_get_from_scope):
+        (JSC::JIT::emit_op_put_to_scope):
+        (JSC::JIT::emitSlow_op_put_to_scope):
+        (JSC::JIT::emit_op_get_from_arguments):
+        (JSC::JIT::emit_op_put_to_arguments):
+        * jit/RegisterSet.cpp:
+        (JSC::RegisterSet::vmCalleeSaveRegisters):
+        * llint/LLIntData.cpp:
+        (JSC::LLInt::Data::performAssertions):
+        * llint/LowLevelInterpreter.asm:
+        * runtime/SamplingProfiler.cpp:
+        (JSC::tryGetBytecodeIndex):
+
 2018-11-20  Saam barati  <sbarati@apple.com>
 
         Merging an IC variant may lead to the IC status containing overlapping structure sets
index 29a605b..e7c66e2 100644 (file)
@@ -860,7 +860,7 @@ public:
     Metadata& metadata(OpcodeID opcodeID, unsigned metadataID)
     {
         ASSERT(m_metadata);
-        return reinterpret_cast<Metadata*>(m_metadata->get(opcodeID))[metadataID];
+        return bitwise_cast<Metadata*>(m_metadata->get(opcodeID))[metadataID];
     }
 
     size_t metadataSizeInBytes()
index d94fc67..cb826cd 100644 (file)
@@ -91,7 +91,7 @@ public:
     T* cast()
     {
         ASSERT(is<T>());
-        return reinterpret_cast<T*>(this);
+        return bitwise_cast<T*>(this);
     }
 
     template<class T>
index 1461afd..514cf11 100644 (file)
@@ -40,4 +40,11 @@ size_t InstructionStream::sizeInBytes() const
     return m_instructions.size();
 }
 
+bool InstructionStream::contains(Instruction* instruction) const
+{
+
+    const uint8_t* pointer = bitwise_cast<const uint8_t*>(instruction);
+    return pointer >= m_instructions.data() && pointer < (m_instructions.data() + m_instructions.size());
+}
+
 }
index 44b4be7..70ede3e 100644 (file)
@@ -167,6 +167,8 @@ public:
         return m_instructions.data();
     }
 
+    bool contains(Instruction *) const;
+
 protected:
     explicit InstructionStream(InstructionBuffer&&);
 
index 60cde5f..a5d4121 100644 (file)
@@ -55,8 +55,8 @@ public:
     template<typename Op, typename Functor>
     ALWAYS_INLINE void forEach(const Functor& func)
     {
-        auto* metadata = reinterpret_cast<typename Op::Metadata*>(get(Op::opcodeID));
-        auto* end = reinterpret_cast<typename Op::Metadata*>(getImpl(Op::opcodeID + 1));
+        auto* metadata = bitwise_cast<typename Op::Metadata*>(get(Op::opcodeID));
+        auto* end = bitwise_cast<typename Op::Metadata*>(getImpl(Op::opcodeID + 1));
         for (; metadata + 1 <= end; ++metadata)
             func(*metadata);
     }
index 12b34e2..6ea9776 100644 (file)
@@ -811,7 +811,7 @@ static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselin
         if (!inlineCallFrame->isClosureCall)
             frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, JSValue(inlineCallFrame->calleeConstant()));
 #else // USE(JSVALUE64) // so this is the 32-bit part
-        Instruction* instruction = &baselineCodeBlock->instructions()[codeOrigin->bytecodeIndex];
+        const Instruction* instruction = baselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex).ptr();
         uint32_t locationBits = CallSiteIndex(instruction).bits();
         frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
         frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::callee, TagOffset, static_cast<uint32_t>(JSValue::CellTag));
@@ -825,7 +825,7 @@ static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselin
 #if USE(JSVALUE64)
         uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
 #else
-        Instruction* instruction = &outermostBaselineCodeBlock->instructions()[codeOrigin->bytecodeIndex];
+        const Instruction* instruction = outermostBaselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex).ptr();
         uint32_t locationBits = CallSiteIndex(instruction).bits();
 #endif
         frame.setOperand<uint32_t>(CallFrameSlot::argumentCount, TagOffset, locationBits);
index 111ebd3..c1ab846 100644 (file)
@@ -237,7 +237,7 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
             jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
 #else // USE(JSVALUE64) // so this is the 32-bit part
         jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
-        Instruction* instruction = &baselineCodeBlock->instructions()[codeOrigin->bytecodeIndex];
+        const Instruction* instruction = baselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex).ptr();
         uint32_t locationBits = CallSiteIndex(instruction).bits();
         jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
         jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
@@ -251,7 +251,7 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
 #if USE(JSVALUE64)
         uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
 #else
-        Instruction* instruction = &jit.baselineCodeBlock()->instructions()[codeOrigin->bytecodeIndex];
+        const Instruction* instruction = jit.baselineCodeBlock()->instructions().at(codeOrigin->bytecodeIndex).ptr();
         uint32_t locationBits = CallSiteIndex(instruction).bits();
 #endif
         jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(CallFrameSlot::argumentCount)));
index f5ddf50..e69f8ab 100644 (file)
@@ -74,7 +74,7 @@ class Argument
         {
             if (!#{Fits::check "size", "value", @type})
                 value = func();
-            auto* stream = reinterpret_cast<typename TypeBySize<size>::type*>(reinterpret_cast<uint8_t*>(this) + #{@index} * size + PaddingBySize<size>::value);
+            auto* stream = bitwise_cast<typename TypeBySize<size>::type*>(reinterpret_cast<uint8_t*>(this) + #{@index} * size + PaddingBySize<size>::value);
             *stream = #{Fits::convert "size", "value", @type};
         }
         EOF
index b0a4e0a..eae00c5 100644 (file)
@@ -192,7 +192,7 @@ class Opcode
             if (*stream != op_wide)
                 return { stream };
 
-            auto wideStream = reinterpret_cast<const uint32_t*>(stream + 1);
+            auto wideStream = bitwise_cast<const uint32_t*>(stream + 1);
             return { wideStream };
         }
 
index f15ae83..1a0a5c9 100644 (file)
@@ -528,7 +528,11 @@ public:
 
 #if CPU(ARM)
 #define NUMBER_OF_ARGUMENT_REGISTERS 4u
+#if CPU(ARM_THUMB2)
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 1u
+#else
 #define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
+#endif
 
 class GPRInfo {
 public:
index 6db4d7f..395f538 100644 (file)
@@ -457,7 +457,8 @@ namespace JSC {
         void compileGetByIdHotPath(const Identifier*);
 
         // Arithmetic opcode helpers
-        void emitBinaryDoubleOp(OpcodeID, int dst, int op1, int op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
+        template <typename Op>
+        void emitBinaryDoubleOp(const Instruction *, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
 
 #else // USE(JSVALUE32_64)
         void emitGetVirtualRegister(int src, RegisterID dst);
index d3ebdb6..8be745d 100644 (file)
 
 namespace JSC {
 
-void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target, RelationalCondition condition)
+template <typename Op>
+void JIT::emit_compareAndJump(const Instruction* instruction, RelationalCondition condition)
 {
     JumpList notInt32Op1;
     JumpList notInt32Op2;
 
+    auto bytecode = instruction->as<Op>();
+    int op1 = bytecode.lhs.offset();
+    int op2 = bytecode.rhs.offset();
+    unsigned target = jumpTarget(instruction, bytecode.target);
+
     // Character less.
     if (isOperandConstantChar(op1)) {
         emitLoad(op2, regT1, regT0);
@@ -88,12 +94,18 @@ void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target
     Jump end = jump();
 
     // Double less.
-    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantInt(op1), isOperandConstantInt(op1) || !isOperandConstantInt(op2));
+    emitBinaryDoubleOp<Op>(instruction, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantInt(op1), isOperandConstantInt(op1) || !isOperandConstantInt(op2));
     end.link(this);
 }
 
-void JIT::emit_compareUnsignedAndJump(int op1, int op2, unsigned target, RelationalCondition condition)
+template <typename Op>
+void JIT::emit_compareUnsignedAndJump(const Instruction* instruction, RelationalCondition condition)
 {
+    auto bytecode = instruction->as<Op>();
+    int op1 = bytecode.lhs.offset();
+    int op2 = bytecode.rhs.offset();
+    unsigned target = jumpTarget(instruction, bytecode.target);
+
     if (isOperandConstantInt(op1)) {
         emitLoad(op2, regT3, regT2);
         addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
@@ -106,9 +118,14 @@ void JIT::emit_compareUnsignedAndJump(int op1, int op2, unsigned target, Relatio
     }
 }
 
-
-void JIT::emit_compareUnsigned(int dst, int op1, int op2, RelationalCondition condition)
+template <typename Op>
+void JIT::emit_compareUnsigned(const Instruction* instruction, RelationalCondition condition)
 {
+    auto bytecode = instruction->as<Op>();
+    int dst = bytecode.dst.offset();
+    int op1 = bytecode.lhs.offset();
+    int op2 = bytecode.rhs.offset();
+
     if (isOperandConstantInt(op1)) {
         emitLoad(op2, regT3, regT2);
         compare32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32()), regT0);
@@ -122,8 +139,14 @@ void JIT::emit_compareUnsigned(int dst, int op1, int op2, RelationalCondition co
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
+template <typename Op>
+void JIT::emit_compareAndJumpSlow(const Instruction *instruction, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
 {
+    auto bytecode = instruction->as<Op>();
+    int op1 = bytecode.lhs.offset();
+    int op2 = bytecode.rhs.offset();
+    unsigned target = jumpTarget(instruction, bytecode.target);
+
     linkAllSlowCases(iter);
 
     emitLoad(op1, regT1, regT0);
@@ -132,10 +155,11 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
     emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
 }
 
-void JIT::emit_op_unsigned(Instruction* currentInstruction)
+void JIT::emit_op_unsigned(const Instruction* currentInstruction)
 {
-    int result = currentInstruction[1].u.operand;
-    int op1 = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpUnsigned>();
+    int result = bytecode.dst.offset();
+    int op1 = bytecode.operand.offset();
     
     emitLoad(op1, regT1, regT0);
     
@@ -144,9 +168,10 @@ void JIT::emit_op_unsigned(Instruction* currentInstruction)
     emitStoreInt32(result, regT0, result == op1);
 }
 
-void JIT::emit_op_inc(Instruction* currentInstruction)
+void JIT::emit_op_inc(const Instruction* currentInstruction)
 {
-    int srcDst = currentInstruction[1].u.operand;
+    auto bytecode = currentInstruction->as<OpInc>();
+    int srcDst = bytecode.srcDst.offset();
 
     emitLoad(srcDst, regT1, regT0);
 
@@ -155,9 +180,10 @@ void JIT::emit_op_inc(Instruction* currentInstruction)
     emitStoreInt32(srcDst, regT0, true);
 }
 
-void JIT::emit_op_dec(Instruction* currentInstruction)
+void JIT::emit_op_dec(const Instruction* currentInstruction)
 {
-    int srcDst = currentInstruction[1].u.operand;
+    auto bytecode = currentInstruction->as<OpDec>();
+    int srcDst = bytecode.srcDst.offset();
 
     emitLoad(srcDst, regT1, regT0);
 
@@ -166,10 +192,17 @@ void JIT::emit_op_dec(Instruction* currentInstruction)
     emitStoreInt32(srcDst, regT0, true);
 }
 
-void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+template <typename Op>
+void JIT::emitBinaryDoubleOp(const Instruction *instruction, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
 {
     JumpList end;
 
+    auto bytecode = instruction->as<Op>();
+    int opcodeID = Op::opcodeID;
+    int target = jumpTarget(instruction, bytecode.target);
+    int op1 = bytecode.lhs.offset();
+    int op2 = bytecode.rhs.offset();
+
     if (!notInt32Op1.empty()) {
         // Double case 1: Op1 is not int32; Op2 is unknown.
         notInt32Op1.link(this);
@@ -200,35 +233,35 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
         switch (opcodeID) {
             case op_jless:
                 emitLoadDouble(op1, fpRegT2);
-                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
+                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), target);
                 break;
             case op_jlesseq:
                 emitLoadDouble(op1, fpRegT2);
-                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
+                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), target);
                 break;
             case op_jgreater:
                 emitLoadDouble(op1, fpRegT2);
-                addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
+                addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), target);
                 break;
             case op_jgreatereq:
                 emitLoadDouble(op1, fpRegT2);
-                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
+                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), target);
                 break;
             case op_jnless:
                 emitLoadDouble(op1, fpRegT2);
-                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
+                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), target);
                 break;
             case op_jnlesseq:
                 emitLoadDouble(op1, fpRegT2);
-                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
+                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), target);
                 break;
             case op_jngreater:
                 emitLoadDouble(op1, fpRegT2);
-                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
+                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), target);
                 break;
             case op_jngreatereq:
                 emitLoadDouble(op1, fpRegT2);
-                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
+                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), target);
                 break;
             default:
                 RELEASE_ASSERT_NOT_REACHED();
@@ -257,35 +290,35 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
         switch (opcodeID) {
             case op_jless:
                 emitLoadDouble(op2, fpRegT1);
-                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
+                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
                 break;
             case op_jlesseq:
                 emitLoadDouble(op2, fpRegT1);
-                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
+                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), target);
                 break;
             case op_jgreater:
                 emitLoadDouble(op2, fpRegT1);
-                addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
+                addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), target);
                 break;
             case op_jgreatereq:
                 emitLoadDouble(op2, fpRegT1);
-                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
+                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), target);
                 break;
             case op_jnless:
                 emitLoadDouble(op2, fpRegT1);
-                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
+                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
                 break;
             case op_jnlesseq:
                 emitLoadDouble(op2, fpRegT1);
-                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
+                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
                 break;
             case op_jngreater:
                 emitLoadDouble(op2, fpRegT1);
-                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
+                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
                 break;
             case op_jngreatereq:
                 emitLoadDouble(op2, fpRegT1);
-                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
+                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), target);
                 break;
             default:
                 RELEASE_ASSERT_NOT_REACHED();
@@ -299,12 +332,13 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
 
 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
 
-void JIT::emit_op_mod(Instruction* currentInstruction)
+void JIT::emit_op_mod(const Instruction* currentInstruction)
 {
 #if CPU(X86)
-    int dst = currentInstruction[1].u.operand;
-    int op1 = currentInstruction[2].u.operand;
-    int op2 = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpMod>();
+    int dst = bytecode.dst.offset();
+    int op1 = bytecode.lhs.offset();
+    int op2 = bytecode.rhs.offset();
 
     // Make sure registers are correct for x86 IDIV instructions.
     ASSERT(regT0 == X86Registers::eax);
@@ -333,7 +367,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
 #endif
 }
 
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_mod(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
 #if CPU(X86)
     linkAllSlowCases(iter);
index aefaecc..09c4d03 100644 (file)
 
 namespace JSC {
 
-void JIT::emitPutCallResult(Instruction* instruction)
+template<typename Op>
+void JIT::emitPutCallResult(const Op& bytecode)
 {
-    int dst = instruction[1].u.operand;
-    emitValueProfilingSite();
-    emitStore(dst, regT1, regT0);
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
+    emitStore(bytecode.dst.offset(), regT1, regT0);
 }
 
-void JIT::emit_op_ret(Instruction* currentInstruction)
+void JIT::emit_op_ret(const Instruction* currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
+    auto bytecode = currentInstruction->as<OpRet>();
+    int value = bytecode.value.offset();
 
-    emitLoad(dst, regT1, regT0);
+    emitLoad(value, regT1, regT0);
 
     checkStackPointerAlignment();
     emitRestoreCalleeSaves();
@@ -64,96 +65,125 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
     ret();
 }
 
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_call(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
+    compileOpCallSlowCase<OpCall>(currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
-void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_tail_call(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
+    compileOpCallSlowCase<OpTailCall>(currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_call_eval(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
+    compileOpCallSlowCase<OpCallEval>(currentInstruction, iter, m_callLinkInfoIndex);
 }
  
-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_call_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+    compileOpCallSlowCase<OpCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
-void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_tail_call_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+    compileOpCallSlowCase<OpTailCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
-void JIT::emitSlow_op_tail_call_forward_arguments(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_tail_call_forward_arguments(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(op_tail_call_forward_arguments, currentInstruction, iter, m_callLinkInfoIndex++);
+    compileOpCallSlowCase<OpTailCallForwardArguments>(currentInstruction, iter, m_callLinkInfoIndex++);
 }
     
-void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_construct_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+    compileOpCallSlowCase<OpConstructVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
 }
     
-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_construct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
+    compileOpCallSlowCase<OpConstruct>(currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
-void JIT::emit_op_call(Instruction* currentInstruction)
+void JIT::emit_op_call(const Instruction* currentInstruction)
 {
-    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+    compileOpCall<OpCall>(currentInstruction, m_callLinkInfoIndex++);
 }
 
-void JIT::emit_op_tail_call(Instruction* currentInstruction)
+void JIT::emit_op_tail_call(const Instruction* currentInstruction)
 {
-    compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
+    compileOpCall<OpTailCall>(currentInstruction, m_callLinkInfoIndex++);
 }
 
-void JIT::emit_op_call_eval(Instruction* currentInstruction)
+void JIT::emit_op_call_eval(const Instruction* currentInstruction)
 {
-    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
+    compileOpCall<OpCallEval>(currentInstruction, m_callLinkInfoIndex);
 }
 
-void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+void JIT::emit_op_call_varargs(const Instruction* currentInstruction)
 {
-    compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+    compileOpCall<OpCallVarargs>(currentInstruction, m_callLinkInfoIndex++);
 }
 
-void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
+void JIT::emit_op_tail_call_varargs(const Instruction* currentInstruction)
 {
-    compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+    compileOpCall<OpTailCallVarargs>(currentInstruction, m_callLinkInfoIndex++);
 }
 
-void JIT::emit_op_tail_call_forward_arguments(Instruction* currentInstruction)
+void JIT::emit_op_tail_call_forward_arguments(const Instruction* currentInstruction)
 {
-    compileOpCall(op_tail_call_forward_arguments, currentInstruction, m_callLinkInfoIndex++);
+    compileOpCall<OpTailCallForwardArguments>(currentInstruction, m_callLinkInfoIndex++);
 }
     
-void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
+void JIT::emit_op_construct_varargs(const Instruction* currentInstruction)
 {
-    compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
+    compileOpCall<OpConstructVarargs>(currentInstruction, m_callLinkInfoIndex++);
 }
     
-void JIT::emit_op_construct(Instruction* currentInstruction)
+void JIT::emit_op_construct(const Instruction* currentInstruction)
 {
-    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+    compileOpCall<OpConstruct>(currentInstruction, m_callLinkInfoIndex++);
 }
 
-void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, CallLinkInfo* info)
+template <typename Op>
+std::enable_if_t<
+    Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
+    && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
+, void>
+JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo*)
 {
-    int thisValue = instruction[3].u.operand;
-    int arguments = instruction[4].u.operand;
-    int firstFreeRegister = instruction[5].u.operand;
-    int firstVarArgOffset = instruction[6].u.operand;
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int argCount = bytecode.argc;
+    int registerOffset = -static_cast<int>(bytecode.argv);
+
+    if (Op::opcodeID == op_call && shouldEmitProfiling()) {
+        emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
+        Jump done = branchIfNotCell(regT0);
+        load32(Address(regT1, JSCell::structureIDOffset()), regT1);
+        store32(regT1, metadata.arrayProfile.addressOfLastSeenStructureID());
+        done.link(this);
+    }
+
+    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+    store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
+}
+
+template<typename Op>
+std::enable_if_t<
+    Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs
+    || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments
+, void>
+JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo* info)
+{
+    OpcodeID opcodeID = Op::opcodeID;
+    int thisValue = bytecode.thisValue.offset();
+    int arguments = bytecode.arguments.offset();
+    int firstFreeRegister = bytecode.firstFree.offset();
+    int firstVarArgOffset = bytecode.firstVarArg;
 
     emitLoad(arguments, regT1, regT0);
     Z_JITOperation_EJZZ sizeOperation;
-    if (opcode == op_tail_call_forward_arguments)
+    if (Op::opcodeID == op_tail_call_forward_arguments)
         sizeOperation = operationSizeFrameForForwardArguments;
     else
         sizeOperation = operationSizeFrameForVarargs;
@@ -163,7 +193,7 @@ void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, Ca
     addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister);
     emitLoad(arguments, regT2, regT4);
     F_JITOperation_EFJZZ setupOperation;
-    if (opcode == op_tail_call_forward_arguments)
+    if (opcodeID == op_tail_call_forward_arguments)
         setupOperation = operationSetupForwardArgumentsFrame;
     else
         setupOperation = operationSetupVarargsFrame;
@@ -185,7 +215,14 @@ void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, Ca
     addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
 }
 
-void JIT::compileCallEval(Instruction* instruction)
+template<typename Op>
+bool JIT::compileCallEval(const Op&)
+{
+    return false;
+}
+
+template<>
+bool JIT::compileCallEval(const OpCallEval& bytecode)
 {
     addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
     storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
@@ -198,18 +235,21 @@ void JIT::compileCallEval(Instruction* instruction)
 
     sampleCodeBlock(m_codeBlock);
     
-    emitPutCallResult(instruction);
+    emitPutCallResult(bytecode);
+
+    return true;
 }
 
-void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::compileCallEvalSlowCase(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
+    auto bytecode = instruction->as<OpCallEval>();
     CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
     info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
 
-    int registerOffset = -instruction[4].u.operand;
-    int callee = instruction[2].u.operand;
+    int registerOffset = -bytecode.argv;
+    int callee = bytecode.callee.offset();
 
     addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
 
@@ -220,12 +260,15 @@ void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry
 
     sampleCodeBlock(m_codeBlock);
     
-    emitPutCallResult(instruction);
+    emitPutCallResult(bytecode);
 }
 
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+template <typename Op>
+void JIT::compileOpCall(const Instruction* instruction, unsigned callLinkInfoIndex)
 {
-    int callee = instruction[2].u.operand;
+    OpcodeID opcodeID = Op::opcodeID;
+    auto bytecode = instruction->as<Op>();
+    int callee = bytecode.callee.offset();
 
     /* Caller always:
         - Updates callFrameRegister to callee callFrame.
@@ -242,24 +285,8 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     CallLinkInfo* info = nullptr;
     if (opcodeID != op_call_eval)
         info = m_codeBlock->addCallLinkInfo();
-    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
-        compileSetupVarargsFrame(opcodeID, instruction, info);
-    else {
-        int argCount = instruction[3].u.operand;
-        int registerOffset = -instruction[4].u.operand;
-        
-        if (opcodeID == op_call && shouldEmitProfiling()) {
-            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
-            Jump done = branchIfNotCell(regT0);
-            loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
-            storePtr(regT1, arrayProfileFor<OpCallShape>(instruction)->addressOfLastSeenStructureID());
-            done.link(this);
-        }
-    
-        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
-
-        store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
-    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
+    compileSetupFrame(bytecode, info);
+    // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
     
     uint32_t locationBits = CallSiteIndex(instruction).bits();
     store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCount));
@@ -268,10 +295,8 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     store32(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
     store32(regT1, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));
 
-    if (opcodeID == op_call_eval) {
-        compileCallEval(instruction);
+    if (compileCallEval(bytecode))
         return;
-    }
 
     if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
         emitRestoreCalleeSaves();
@@ -302,11 +327,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     checkStackPointerAlignment();
 
     sampleCodeBlock(m_codeBlock);
-    emitPutCallResult(instruction);
+    emitPutCallResult(bytecode);
 }
 
-void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
+template <typename Op>
+void JIT::compileOpCallSlowCase(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
 {
+    OpcodeID opcodeID = Op::opcodeID;
+
     if (opcodeID == op_call_eval) {
         compileCallEvalSlowCase(instruction, iter);
         return;
@@ -316,7 +344,7 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
 
     move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
 
-    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
         emitRestoreCalleeSaves();
 
     m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).retaggedCode<NoPtrTag>());
@@ -330,7 +358,9 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
     checkStackPointerAlignment();
 
     sampleCodeBlock(m_codeBlock);
-    emitPutCallResult(instruction);
+
+    auto bytecode = instruction->as<Op>();
+    emitPutCallResult(bytecode);
 }
 
 } // namespace JSC
index 6897709..d6c7b7f 100644 (file)
@@ -26,7 +26,6 @@
 #pragma once
 
 #if ENABLE(JIT)
-
 #include "JSCInlines.h"
 
 namespace JSC {
@@ -131,7 +130,7 @@ ALWAYS_INLINE void JIT::updateTopCallFrame()
 {
     ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
 #if USE(JSVALUE32_64)
-    Instruction* instruction = &m_codeBlock->instructions()[m_bytecodeOffset]; 
+    const Instruction* instruction = m_codeBlock->instructions().at(m_bytecodeOffset).ptr();
     uint32_t locationBits = CallSiteIndex(instruction).bits();
 #else
     uint32_t locationBits = CallSiteIndex(m_bytecodeOffset).bits();
@@ -706,6 +705,8 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotNumber(RegisterID reg)
     addSlowCase(branchIfNotNumber(reg));
 }
 
+#endif // USE(JSVALUE32_64)
+
 ALWAYS_INLINE int JIT::jumpTarget(const Instruction* instruction, int target)
 {
     if (target)
@@ -736,8 +737,6 @@ ALWAYS_INLINE ArithProfile JIT::copiedArithProfile(BinaryOp bytecode)
     return arithProfile;
 }
 
-#endif // USE(JSVALUE32_64)
-
 } // namespace JSC
 
 #endif // ENABLE(JIT)
index f4270be..801a6a8 100644 (file)
@@ -1016,6 +1016,7 @@ void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<Sl
         emitJumpSlowToHot(jump(), currentInstruction->size());
     }
 #else
+    UNUSED_PARAM(currentInstruction);
     UNUSED_PARAM(iter);
 #endif
 }
index ef03102..87c1fa0 100644 (file)
 
 namespace JSC {
 
-void JIT::emit_op_mov(Instruction* currentInstruction)
+void JIT::emit_op_mov(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpMov>();
+    int dst = bytecode.dst.offset();
+    int src = bytecode.src.offset();
     
     if (m_codeBlock->isConstantRegisterIndex(src))
         emitStore(dst, getConstantOperand(src));
@@ -60,24 +61,28 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
     }
 }
 
-void JIT::emit_op_end(Instruction* currentInstruction)
+void JIT::emit_op_end(const Instruction* currentInstruction)
 {
     ASSERT(returnValueGPR != callFrameRegister);
-    emitLoad(currentInstruction[1].u.operand, regT1, returnValueGPR);
+    auto bytecode = currentInstruction->as<OpEnd>();
+    emitLoad(bytecode.value.offset(), regT1, returnValueGPR);
     emitRestoreCalleeSaves();
     emitFunctionEpilogue();
     ret();
 }
 
-void JIT::emit_op_jmp(Instruction* currentInstruction)
+void JIT::emit_op_jmp(const Instruction* currentInstruction)
 {
-    unsigned target = currentInstruction[1].u.operand;
+    auto bytecode = currentInstruction->as<OpJmp>();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
     addJump(jump(), target);
 }
 
-void JIT::emit_op_new_object(Instruction* currentInstruction)
+void JIT::emit_op_new_object(const Instruction* currentInstruction)
 {
-    Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+    auto bytecode = currentInstruction->as<OpNewObject>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    Structure* structure = metadata.objectAllocationProfile.structure();
     size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
     Allocator allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
 
@@ -93,26 +98,28 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
         emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, scratchReg, slowCases);
         emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
         addSlowCase(slowCases);
-        emitStoreCell(currentInstruction[1].u.operand, resultReg);
+        emitStoreCell(bytecode.dst.offset(), resultReg);
     }
 }
 
-void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int dst = currentInstruction[1].u.operand;
-    Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+    auto bytecode = currentInstruction->as<OpNewObject>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int dst = bytecode.dst.offset();
+    Structure* structure = metadata.objectAllocationProfile.structure();
     callOperation(operationNewObject, structure);
     emitStoreCell(dst, returnValueGPR);
 }
 
-void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
+void JIT::emit_op_overrides_has_instance(const Instruction* currentInstruction)
 {
-    auto& bytecode = *reinterpret_cast<OpOverridesHasInstance*>(currentInstruction);
-    int dst = bytecode.dst();
-    int constructor = bytecode.constructor();
-    int hasInstanceValue = bytecode.hasInstanceValue();
+    auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
+    int dst = bytecode.dst.offset();
+    int constructor = bytecode.constructor.offset();
+    int hasInstanceValue = bytecode.hasInstanceValue.offset();
 
     emitLoadPayload(hasInstanceValue, regT0);
     // We don't jump if we know what Symbol.hasInstance would do.
@@ -135,12 +142,12 @@ void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
 
 }
 
-void JIT::emit_op_instanceof(Instruction* currentInstruction)
+void JIT::emit_op_instanceof(const Instruction* currentInstruction)
 {
-    auto& bytecode = *reinterpret_cast<OpInstanceof*>(currentInstruction);
-    int dst = bytecode.dst();
-    int value = bytecode.value();
-    int proto = bytecode.prototype();
+    auto bytecode = currentInstruction->as<OpInstanceof>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.value.offset();
+    int proto = bytecode.prototype.offset();
 
     // Load the operands into registers.
     // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
@@ -164,20 +171,20 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_instanceof_custom(Instruction*)
+void JIT::emit_op_instanceof_custom(const Instruction*)
 {
     // This always goes to slow path since we expect it to be rare.
     addSlowCase(jump());
 }
 
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_instanceof(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
     
-    auto& bytecode = *reinterpret_cast<OpInstanceof*>(currentInstruction);
-    int dst = bytecode.dst();
-    int value = bytecode.value();
-    int proto = bytecode.prototype();
+    auto bytecode = currentInstruction->as<OpInstanceof>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.value.offset();
+    int proto = bytecode.prototype.offset();
     
     JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++];
     
@@ -188,15 +195,15 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas
     gen.reportSlowPathCall(coldPathBegin, call);
 }
 
-void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_instanceof_custom(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    auto& bytecode = *reinterpret_cast<OpInstanceofCustom*>(currentInstruction);
-    int dst = bytecode.dst();
-    int value = bytecode.value();
-    int constructor = bytecode.constructor();
-    int hasInstanceValue = bytecode.hasInstanceValue();
+    auto bytecode = currentInstruction->as<OpInstanceofCustom>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.value.offset();
+    int constructor = bytecode.constructor.offset();
+    int hasInstanceValue = bytecode.hasInstanceValue.offset();
 
     emitLoad(value, regT1, regT0);
     emitLoadPayload(constructor, regT2);
@@ -205,10 +212,11 @@ void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<
     emitStoreBool(dst, returnValueGPR);
 }
     
-void JIT::emit_op_is_empty(Instruction* currentInstruction)
+void JIT::emit_op_is_empty(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int value = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpIsEmpty>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.operand.offset();
     
     emitLoad(value, regT1, regT0);
     compare32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag), regT0);
@@ -216,10 +224,11 @@ void JIT::emit_op_is_empty(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_is_undefined(Instruction* currentInstruction)
+void JIT::emit_op_is_undefined(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int value = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpIsUndefined>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.operand.offset();
     
     emitLoad(value, regT1, regT0);
     Jump isCell = branchIfCell(regT1);
@@ -243,20 +252,22 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_is_boolean(Instruction* currentInstruction)
+void JIT::emit_op_is_boolean(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int value = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpIsBoolean>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.operand.offset();
     
     emitLoadTag(value, regT0);
     compare32(Equal, regT0, TrustedImm32(JSValue::BooleanTag), regT0);
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_is_number(Instruction* currentInstruction)
+void JIT::emit_op_is_number(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int value = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpIsNumber>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.operand.offset();
     
     emitLoadTag(value, regT0);
     add32(TrustedImm32(1), regT0);
@@ -264,11 +275,12 @@ void JIT::emit_op_is_number(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_is_cell_with_type(Instruction* currentInstruction)
+void JIT::emit_op_is_cell_with_type(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int value = currentInstruction[2].u.operand;
-    int type = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpIsCellWithType>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.operand.offset();
+    int type = bytecode.type;
 
     emitLoad(value, regT1, regT0);
     Jump isNotCell = branchIfNotCell(regT1);
@@ -283,10 +295,11 @@ void JIT::emit_op_is_cell_with_type(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_is_object(Instruction* currentInstruction)
+void JIT::emit_op_is_object(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int value = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpIsObject>();
+    int dst = bytecode.dst.offset();
+    int value = bytecode.operand.offset();
 
     emitLoad(value, regT1, regT0);
     Jump isNotCell = branchIfNotCell(regT1);
@@ -301,10 +314,11 @@ void JIT::emit_op_is_object(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+void JIT::emit_op_to_primitive(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpToPrimitive>();
+    int dst = bytecode.dst.offset();
+    int src = bytecode.src.offset();
 
     emitLoad(src, regT1, regT0);
 
@@ -316,19 +330,21 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
         emitStore(dst, regT1, regT0);
 }
 
-void JIT::emit_op_set_function_name(Instruction* currentInstruction)
+void JIT::emit_op_set_function_name(const Instruction* currentInstruction)
 {
-    int func = currentInstruction[1].u.operand;
-    int name = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpSetFunctionName>();
+    int func = bytecode.function.offset();
+    int name = bytecode.name.offset();
     emitLoadPayload(func, regT1);
     emitLoad(name, regT3, regT2);
     callOperation(operationSetFunctionName, regT1, JSValueRegs(regT3, regT2));
 }
 
-void JIT::emit_op_not(Instruction* currentInstruction)
+void JIT::emit_op_not(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpNot>();
+    int dst = bytecode.dst.offset();
+    int src = bytecode.operand.offset();
 
     emitLoadTag(src, regT0);
 
@@ -339,10 +355,11 @@ void JIT::emit_op_not(Instruction* currentInstruction)
     emitStoreBool(dst, regT0, (dst == src));
 }
 
-void JIT::emit_op_jfalse(Instruction* currentInstruction)
+void JIT::emit_op_jfalse(const Instruction* currentInstruction)
 {
-    int cond = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpJfalse>();
+    int cond = bytecode.condition.offset();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
 
     emitLoad(cond, regT1, regT0);
 
@@ -353,10 +370,11 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
     addJump(branchIfFalsey(*vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
 }
 
-void JIT::emit_op_jtrue(Instruction* currentInstruction)
+void JIT::emit_op_jtrue(const Instruction* currentInstruction)
 {
-    int cond = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpJtrue>();
+    int cond = bytecode.condition.offset();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
 
     emitLoad(cond, regT1, regT0);
     bool shouldCheckMasqueradesAsUndefined = true;
@@ -366,10 +384,11 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
     addJump(branchIfTruthy(*vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
 }
 
-void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+void JIT::emit_op_jeq_null(const Instruction* currentInstruction)
 {
-    int src = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpJeqNull>();
+    int src = bytecode.value.offset();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
 
     emitLoad(src, regT1, regT0);
 
@@ -391,10 +410,11 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
     masqueradesGlobalObjectIsForeign.link(this);
 }
 
-void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+void JIT::emit_op_jneq_null(const Instruction* currentInstruction)
 {
-    int src = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpJneqNull>();
+    int src = bytecode.value.offset();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
 
     emitLoad(src, regT1, regT0);
 
@@ -416,26 +436,30 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
     wasNotImmediate.link(this);
 }
 
-void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction)
 {
-    int src = currentInstruction[1].u.operand;
-    Special::Pointer ptr = currentInstruction[2].u.specialPointer;
-    unsigned target = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpJneqPtr>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int src = bytecode.value.offset();
+    Special::Pointer ptr = bytecode.specialPointer;
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
 
     emitLoad(src, regT1, regT0);
     Jump notCell = branchIfNotCell(regT1);
     Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr)));
     notCell.link(this);
-    store32(TrustedImm32(1), &currentInstruction[4].u.operand);
+    store8(TrustedImm32(1), &metadata.hasJumped);
     addJump(jump(), target);
     equal.link(this);
 }
 
-void JIT::emit_op_eq(Instruction* currentInstruction)
+void JIT::emit_op_eq(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src1 = currentInstruction[2].u.operand;
-    int src2 = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpEq>();
+
+    int dst = bytecode.dst.offset();
+    int src1 = bytecode.lhs.offset();
+    int src2 = bytecode.rhs.offset();
 
     emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
     addSlowCase(branch32(NotEqual, regT1, regT3));
@@ -447,9 +471,10 @@ void JIT::emit_op_eq(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_eq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    int dst = currentInstruction[1].u.operand;
+    auto bytecode = currentInstruction->as<OpEq>();
+    int dst = bytecode.dst.offset();
 
     JumpList storeResult;
     JumpList genericCase;
@@ -473,11 +498,12 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>:
     emitStoreBool(dst, returnValueGPR);
 }
 
-void JIT::emit_op_jeq(Instruction* currentInstruction)
+void JIT::emit_op_jeq(const Instruction* currentInstruction)
 {
-    int target = currentInstruction[3].u.operand;
-    int src1 = currentInstruction[1].u.operand;
-    int src2 = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpJeq>();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
+    int src1 = bytecode.lhs.offset();
+    int src2 = bytecode.rhs.offset();
 
     emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
     addSlowCase(branch32(NotEqual, regT1, regT3));
@@ -512,16 +538,19 @@ void JIT::compileOpEqJumpSlow(Vector<SlowCaseEntry>::iterator& iter, CompileOpEq
     done.link(this);
 }
 
-void JIT::emitSlow_op_jeq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_jeq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpEqJumpSlow(iter, CompileOpEqType::Eq, currentInstruction[3].u.operand);
+    auto bytecode = currentInstruction->as<OpJeq>();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
+    compileOpEqJumpSlow(iter, CompileOpEqType::Eq, target);
 }
 
-void JIT::emit_op_neq(Instruction* currentInstruction)
+void JIT::emit_op_neq(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src1 = currentInstruction[2].u.operand;
-    int src2 = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpNeq>();
+    int dst = bytecode.dst.offset();
+    int src1 = bytecode.lhs.offset();
+    int src2 = bytecode.rhs.offset();
 
     emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
     addSlowCase(branch32(NotEqual, regT1, regT3));
@@ -533,9 +562,10 @@ void JIT::emit_op_neq(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_neq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    int dst = currentInstruction[1].u.operand;
+    auto bytecode = currentInstruction->as<OpNeq>();
+    int dst = bytecode.dst.offset();
 
     JumpList storeResult;
     JumpList genericCase;
@@ -560,11 +590,12 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
     emitStoreBool(dst, returnValueGPR);
 }
 
-void JIT::emit_op_jneq(Instruction* currentInstruction)
+void JIT::emit_op_jneq(const Instruction* currentInstruction)
 {
-    int target = currentInstruction[3].u.operand;
-    int src1 = currentInstruction[1].u.operand;
-    int src2 = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpJneq>();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
+    int src1 = bytecode.lhs.offset();
+    int src2 = bytecode.rhs.offset();
 
     emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
     addSlowCase(branch32(NotEqual, regT1, regT3));
@@ -574,16 +605,20 @@ void JIT::emit_op_jneq(Instruction* currentInstruction)
     addJump(branch32(NotEqual, regT0, regT2), target);
 }
 
-void JIT::emitSlow_op_jneq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_jneq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpEqJumpSlow(iter, CompileOpEqType::NEq, currentInstruction[3].u.operand);
+    auto bytecode = currentInstruction->as<OpJneq>();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
+    compileOpEqJumpSlow(iter, CompileOpEqType::NEq, target);
 }
 
-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+template<typename Op>
+void JIT::compileOpStrictEq(const Instruction* currentInstruction, CompileOpStrictEqType type)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src1 = currentInstruction[2].u.operand;
-    int src2 = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<Op>();
+    int dst = bytecode.dst.offset();
+    int src1 = bytecode.lhs.offset();
+    int src2 = bytecode.rhs.offset();
 
     emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
 
@@ -607,21 +642,23 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
     emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_stricteq(Instruction* currentInstruction)
+void JIT::emit_op_stricteq(const Instruction* currentInstruction)
 {
-    compileOpStrictEq(currentInstruction, CompileOpStrictEqType::StrictEq);
+    compileOpStrictEq<OpStricteq>(currentInstruction, CompileOpStrictEqType::StrictEq);
 }
 
-void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+void JIT::emit_op_nstricteq(const Instruction* currentInstruction)
 {
-    compileOpStrictEq(currentInstruction, CompileOpStrictEqType::NStrictEq);
+    compileOpStrictEq<OpNstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq);
 }
 
-void JIT::compileOpStrictEqJump(Instruction* currentInstruction, CompileOpStrictEqType type)
+template<typename Op>
+void JIT::compileOpStrictEqJump(const Instruction* currentInstruction, CompileOpStrictEqType type)
 {
-    int target = currentInstruction[3].u.operand;
-    int src1 = currentInstruction[1].u.operand;
-    int src2 = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<Op>();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
+    int src1 = bytecode.lhs.offset();
+    int src2 = bytecode.rhs.offset();
 
     emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
 
@@ -643,38 +680,41 @@ void JIT::compileOpStrictEqJump(Instruction* currentInstruction, CompileOpStrict
         addJump(branch32(NotEqual, regT0, regT2), target);
 }
 
-void JIT::emit_op_jstricteq(Instruction* currentInstruction)
+void JIT::emit_op_jstricteq(const Instruction* currentInstruction)
 {
-    compileOpStrictEqJump(currentInstruction, CompileOpStrictEqType::StrictEq);
+    compileOpStrictEqJump<OpJstricteq>(currentInstruction, CompileOpStrictEqType::StrictEq);
 }
 
-void JIT::emit_op_jnstricteq(Instruction* currentInstruction)
+void JIT::emit_op_jnstricteq(const Instruction* currentInstruction)
 {
-    compileOpStrictEqJump(currentInstruction, CompileOpStrictEqType::NStrictEq);
+    compileOpStrictEqJump<OpJnstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq);
 }
 
-void JIT::emitSlow_op_jstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_jstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    unsigned target = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpJstricteq>();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
     callOperation(operationCompareStrictEq, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
     emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
 }
 
-void JIT::emitSlow_op_jnstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_jnstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    unsigned target = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpJnstricteq>();
+    unsigned target = jumpTarget(currentInstruction, bytecode.target);
     callOperation(operationCompareStrictEq, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
     emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
 }
 
-void JIT::emit_op_eq_null(Instruction* currentInstruction)
+void JIT::emit_op_eq_null(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpEqNull>();
+    int dst = bytecode.dst.offset();
+    int src = bytecode.operand.offset();
 
     emitLoad(src, regT1, regT0);
     Jump isImmediate = branchIfNotCell(regT1);
@@ -702,10 +742,11 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
     emitStoreBool(dst, regT1);
 }
 
-void JIT::emit_op_neq_null(Instruction* currentInstruction)
+void JIT::emit_op_neq_null(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpNeqNull>();
+    int dst = bytecode.dst.offset();
+    int src = bytecode.operand.offset();
 
     emitLoad(src, regT1, regT0);
     Jump isImmediate = branchIfNotCell(regT1);
@@ -733,19 +774,21 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
     emitStoreBool(dst, regT1);
 }
 
-void JIT::emit_op_throw(Instruction* currentInstruction)
+void JIT::emit_op_throw(const Instruction* currentInstruction)
 {
+    auto bytecode = currentInstruction->as<OpThrow>();
     ASSERT(regT0 == returnValueGPR);
     copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
-    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+    emitLoad(bytecode.value.offset(), regT1, regT0);
     callOperationNoExceptionCheck(operationThrow, JSValueRegs(regT1, regT0));
     jumpToExceptionHandler(*vm());
 }
 
-void JIT::emit_op_to_number(Instruction* currentInstruction)
+void JIT::emit_op_to_number(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpToNumber>();
+    int dst = bytecode.dst.offset();
+    int src = bytecode.operand.offset();
 
     emitLoad(src, regT1, regT0);
 
@@ -753,15 +796,16 @@ void JIT::emit_op_to_number(Instruction* currentInstruction)
     addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
     isInt32.link(this);
 
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     if (src != dst)
         emitStore(dst, regT1, regT0);
 }
 
-void JIT::emit_op_to_string(Instruction* currentInstruction)
+void JIT::emit_op_to_string(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpToString>();
+    int dst = bytecode.dst.offset();
+    int src = bytecode.operand.offset();
 
     emitLoad(src, regT1, regT0);
 
@@ -772,23 +816,26 @@ void JIT::emit_op_to_string(Instruction* currentInstruction)
         emitStore(dst, regT1, regT0);
 }
 
-void JIT::emit_op_to_object(Instruction* currentInstruction)
+void JIT::emit_op_to_object(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpToObject>();
+    int dst = bytecode.dst.offset();
+    int src = bytecode.operand.offset();
 
     emitLoad(src, regT1, regT0);
 
     addSlowCase(branchIfNotCell(regT1));
     addSlowCase(branchIfNotObject(regT0));
 
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     if (src != dst)
         emitStore(dst, regT1, regT0);
 }
 
-void JIT::emit_op_catch(Instruction* currentInstruction)
+void JIT::emit_op_catch(const Instruction* currentInstruction)
 {
+    auto bytecode = currentInstruction->as<OpCatch>();
+
     restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
 
     move(TrustedImmPtr(m_vm), regT3);
@@ -811,13 +858,13 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
 
     store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset()));
 
-    unsigned exception = currentInstruction[1].u.operand;
+    unsigned exception = bytecode.exception.offset();
     emitStore(exception, regT1, regT2);
 
     load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
     load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
 
-    unsigned thrownValue = currentInstruction[2].u.operand;
+    unsigned thrownValue = bytecode.thrownValue.offset();
     emitStore(thrownValue, regT1, regT0);
 
 #if ENABLE(DFG_JIT)
@@ -825,7 +872,8 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
     // argument type proofs, storing locals to the buffer, etc
     // https://bugs.webkit.org/show_bug.cgi?id=175598
 
-    ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(currentInstruction[3].u.pointer);
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    ValueProfileAndOperandBuffer* buffer = metadata.buffer;
     if (buffer || !shouldEmitProfiling())
         callOperation(operationTryOSREnterAtCatch, m_bytecodeOffset);
     else
@@ -844,24 +892,26 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
 #endif // ENABLE(DFG_JIT)
 }
 
-void JIT::emit_op_identity_with_profile(Instruction*)
+void JIT::emit_op_identity_with_profile(const Instruction*)
 {
     // We don't need to do anything here...
 }
 
-void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
+void JIT::emit_op_get_parent_scope(const Instruction* currentInstruction)
 {
-    int currentScope = currentInstruction[2].u.operand;
+    auto bytecode = currentInstruction->as<OpGetParentScope>();
+    int currentScope = bytecode.scope.offset();
     emitLoadPayload(currentScope, regT0);
     loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
-    emitStoreCell(currentInstruction[1].u.operand, regT0);
+    emitStoreCell(bytecode.dst.offset(), regT0);
 }
 
-void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+void JIT::emit_op_switch_imm(const Instruction* currentInstruction)
 {
-    size_t tableIndex = currentInstruction[1].u.operand;
-    unsigned defaultOffset = currentInstruction[2].u.operand;
-    unsigned scrutinee = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpSwitchImm>();
+    size_t tableIndex = bytecode.tableIndex;
+    unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.defaultOffset);
+    unsigned scrutinee = bytecode.scrutinee.offset();
 
     // create jump table for switch destinations, track this switch statement.
     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
@@ -873,11 +923,12 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
     jump(returnValueGPR, NoPtrTag);
 }
 
-void JIT::emit_op_switch_char(Instruction* currentInstruction)
+void JIT::emit_op_switch_char(const Instruction* currentInstruction)
 {
-    size_t tableIndex = currentInstruction[1].u.operand;
-    unsigned defaultOffset = currentInstruction[2].u.operand;
-    unsigned scrutinee = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpSwitchChar>();
+    size_t tableIndex = bytecode.tableIndex;
+    unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.defaultOffset);
+    unsigned scrutinee = bytecode.scrutinee.offset();
 
     // create jump table for switch destinations, track this switch statement.
     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
@@ -889,11 +940,12 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
     jump(returnValueGPR, NoPtrTag);
 }
 
-void JIT::emit_op_switch_string(Instruction* currentInstruction)
+void JIT::emit_op_switch_string(const Instruction* currentInstruction)
 {
-    size_t tableIndex = currentInstruction[1].u.operand;
-    unsigned defaultOffset = currentInstruction[2].u.operand;
-    unsigned scrutinee = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpSwitchString>();
+    size_t tableIndex = bytecode.tableIndex;
+    unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.defaultOffset);
+    unsigned scrutinee = bytecode.scrutinee.offset();
 
     // create jump table for switch destinations, track this switch statement.
     StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
@@ -904,41 +956,45 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
     jump(returnValueGPR, NoPtrTag);
 }
 
-void JIT::emit_op_debug(Instruction* currentInstruction)
+void JIT::emit_op_debug(const Instruction* currentInstruction)
 {
+    auto bytecode = currentInstruction->as<OpDebug>();
     load32(codeBlock()->debuggerRequestsAddress(), regT0);
     Jump noDebuggerRequests = branchTest32(Zero, regT0);
-    callOperation(operationDebug, currentInstruction[1].u.operand);
+    callOperation(operationDebug, static_cast<int>(bytecode.debugHookType));
     noDebuggerRequests.link(this);
 }
 
 
-void JIT::emit_op_enter(Instruction* currentInstruction)
+void JIT::emit_op_enter(const Instruction* currentInstruction)
 {
     emitEnterOptimizationCheck();
     
     // Even though JIT code doesn't use them, we initialize our constant
     // registers to zap stale pointers, to avoid unnecessarily prolonging
     // object lifetime and increasing GC pressure.
-    for (int i = 0; i < m_codeBlock->numVars(); ++i)
+    for (int i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_codeBlock->numVars(); ++i)
         emitStore(virtualRegisterForLocal(i).offset(), jsUndefined());
 
     JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter);
     slowPathCall.call();
 }
 
-void JIT::emit_op_get_scope(Instruction* currentInstruction)
+void JIT::emit_op_get_scope(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
+    auto bytecode = currentInstruction->as<OpGetScope>();
+    int dst = bytecode.dst.offset();
     emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0);
     loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
     emitStoreCell(dst, regT0);
 }
 
-void JIT::emit_op_create_this(Instruction* currentInstruction)
+void JIT::emit_op_create_this(const Instruction* currentInstruction)
 {
-    int callee = currentInstruction[2].u.operand;
-    WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
+    auto bytecode = currentInstruction->as<OpCreateThis>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int callee = bytecode.callee.offset();
+    WriteBarrierBase<JSCell>* cachedFunction = &metadata.cachedCallee;
     RegisterID calleeReg = regT0;
     RegisterID rareDataReg = regT4;
     RegisterID resultReg = regT0;
@@ -967,10 +1023,10 @@ void JIT::emit_op_create_this(Instruction* currentInstruction)
     load32(Address(scratchReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfInlineCapacity()), scratchReg);
     emitInitializeInlineStorage(resultReg, scratchReg);
     addSlowCase(slowCases);
-    emitStoreCell(currentInstruction[1].u.operand, resultReg);
+    emitStoreCell(bytecode.dst.offset(), resultReg);
 }
 
-void JIT::emit_op_to_this(Instruction* currentInstruction)
+void JIT::emit_op_to_this(const Instruction* currentInstruction)
 {
     auto bytecode = currentInstruction->as<OpToThis>();
     auto& metadata = bytecode.metadata(m_codeBlock);
@@ -986,17 +1042,19 @@ void JIT::emit_op_to_this(Instruction* currentInstruction)
     addSlowCase(branchPtr(NotEqual, regT0, regT2));
 }
 
-void JIT::emit_op_check_tdz(Instruction* currentInstruction)
+void JIT::emit_op_check_tdz(const Instruction* currentInstruction)
 {
-    emitLoadTag(currentInstruction[1].u.operand, regT0);
+    auto bytecode = currentInstruction->as<OpCheckTdz>();
+    emitLoadTag(bytecode.target.offset(), regT0);
     addSlowCase(branchIfEmpty(regT0));
 }
 
-void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
+void JIT::emit_op_has_structure_property(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int enumerator = currentInstruction[4].u.operand;
+    auto bytecode = currentInstruction->as<OpHasStructureProperty>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int enumerator = bytecode.enumerator.offset();
 
     emitLoadPayload(base, regT0);
     emitJumpSlowCaseIfNotJSCell(base);
@@ -1012,7 +1070,7 @@ void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
 
 void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
 {
-    Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex];
+    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
     
     PatchableJump badType;
     
@@ -1037,12 +1095,14 @@ void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPt
     MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationHasIndexedPropertyGeneric));
 }
 
-void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
+void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
-    ArrayProfile* profile = arrayProfileFor<OpHasIndexedPropertyShape>(currentInstruction);
+    auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int property = bytecode.property.offset();
+    ArrayProfile* profile = &metadata.arrayProfile;
     ByValInfo* byValInfo = m_codeBlock->addByValInfo();
     
     emitLoadPayload(base, regT0);
@@ -1081,13 +1141,14 @@ void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
     m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
 }
 
-void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int property = bytecode.property.offset();
     ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
 
     Label slowPath = label();
@@ -1101,12 +1162,13 @@ void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vect
     m_byValInstructionIndex++;
 }
 
-void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
+void JIT::emit_op_get_direct_pname(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int index = currentInstruction[4].u.operand;
-    int enumerator = currentInstruction[5].u.operand;
+    auto bytecode = currentInstruction->as<OpGetDirectPname>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int index = bytecode.index.offset();
+    int enumerator = bytecode.enumerator.offset();
 
     // Check that base is a cell
     emitLoadPayload(base, regT0);
@@ -1137,15 +1199,16 @@ void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
     load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
     
     done.link(this);
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
+void JIT::emit_op_enumerator_structure_pname(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int enumerator = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
+    int dst = bytecode.dst.offset();
+    int enumerator = bytecode.enumerator.offset();
+    int index = bytecode.index.offset();
 
     emitLoadPayload(index, regT0);
     emitLoadPayload(enumerator, regT1);
@@ -1165,11 +1228,12 @@ void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
     emitStore(dst, regT2, regT0);
 }
 
-void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+void JIT::emit_op_enumerator_generic_pname(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int enumerator = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
+    int dst = bytecode.dst.offset();
+    int enumerator = bytecode.enumerator.offset();
+    int index = bytecode.index.offset();
 
     emitLoadPayload(index, regT0);
     emitLoadPayload(enumerator, regT1);
@@ -1189,10 +1253,12 @@ void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
     emitStore(dst, regT2, regT0);
 }
 
-void JIT::emit_op_profile_type(Instruction* currentInstruction)
+void JIT::emit_op_profile_type(const Instruction* currentInstruction)
 {
-    TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
-    int valueToProfile = currentInstruction[1].u.operand;
+    auto bytecode = currentInstruction->as<OpProfileType>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    TypeLocation* cachedTypeLocation = metadata.typeLocation;
+    int valueToProfile = bytecode.target.offset();
 
     // Load payload in T0. Load tag in T3.
     emitLoadPayload(valueToProfile, regT0);
@@ -1254,33 +1320,34 @@ void JIT::emit_op_profile_type(Instruction* currentInstruction)
     jumpToEnd.link(this);
 }
 
-void JIT::emit_op_log_shadow_chicken_prologue(Instruction* currentInstruction)
+void JIT::emit_op_log_shadow_chicken_prologue(const Instruction* currentInstruction)
 {
     updateTopCallFrame();
     static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
+    auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
     GPRReg shadowPacketReg = regT0;
     GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
     GPRReg scratch2Reg = regT2;
     ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
 
     scratch1Reg = regT4;
-    emitLoadPayload(currentInstruction[1].u.operand, regT3);
+    emitLoadPayload(bytecode.scope.offset(), regT3);
     logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
 }
 
-void JIT::emit_op_log_shadow_chicken_tail(Instruction* currentInstruction)
+void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction)
 {
     updateTopCallFrame();
     static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
+    auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
     GPRReg shadowPacketReg = regT0;
     GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
     GPRReg scratch2Reg = regT2;
     ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
-
-    emitLoadPayload(currentInstruction[1].u.operand, regT2);
-    emitLoadTag(currentInstruction[1].u.operand, regT1);
+    emitLoadPayload(bytecode.thisValue.offset(), regT2);
+    emitLoadTag(bytecode.thisValue.offset(), regT1);
     JSValueRegs thisRegs(regT1, regT2);
-    emitLoadPayload(currentInstruction[2].u.operand, regT3);
+    emitLoadPayload(bytecode.scope.offset(), regT3);
     logShadowChickenTailPacket(shadowPacketReg, thisRegs, regT3, m_codeBlock, CallSiteIndex(currentInstruction));
 }
 
index 9126f69..40ac4f3 100644 (file)
 
 namespace JSC {
     
-void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_by_id(const Instruction* currentInstruction)
 {
-    int base = currentInstruction[1].u.operand;
-    int property = currentInstruction[2].u.operand;
-    int options = currentInstruction[3].u.operand;
-    int getter = currentInstruction[4].u.operand;
+    auto bytecode = currentInstruction->as<OpPutGetterById>();
+    int base = bytecode.base.offset();
+    int property = bytecode.property;
+    int options = bytecode.attributes;
+    int getter = bytecode.accessor.offset();
 
     emitLoadPayload(base, regT1);
     emitLoadPayload(getter, regT3);
     callOperation(operationPutGetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3);
 }
 
-void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
+void JIT::emit_op_put_setter_by_id(const Instruction* currentInstruction)
 {
-    int base = currentInstruction[1].u.operand;
-    int property = currentInstruction[2].u.operand;
-    int options = currentInstruction[3].u.operand;
-    int setter = currentInstruction[4].u.operand;
+    auto bytecode = currentInstruction->as<OpPutSetterById>();
+    int base = bytecode.base.offset();
+    int property = bytecode.property;
+    int options = bytecode.attributes;
+    int setter = bytecode.accessor.offset();
 
     emitLoadPayload(base, regT1);
     emitLoadPayload(setter, regT3);
     callOperation(operationPutSetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3);
 }
 
-void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_setter_by_id(const Instruction* currentInstruction)
 {
-    int base = currentInstruction[1].u.operand;
-    int property = currentInstruction[2].u.operand;
-    int attribute = currentInstruction[3].u.operand;
-    int getter = currentInstruction[4].u.operand;
-    int setter = currentInstruction[5].u.operand;
+    auto bytecode = currentInstruction->as<OpPutGetterSetterById>();
+    int base = bytecode.base.offset();
+    int property = bytecode.property;
+    int attributes = bytecode.attributes;
+    int getter = bytecode.getter.offset();
+    int setter = bytecode.setter.offset();
 
     emitLoadPayload(base, regT1);
     emitLoadPayload(getter, regT3);
     emitLoadPayload(setter, regT4);
-    callOperation(operationPutGetterSetter, regT1, m_codeBlock->identifier(property).impl(), attribute, regT3, regT4);
+    callOperation(operationPutGetterSetter, regT1, m_codeBlock->identifier(property).impl(), attributes, regT3, regT4);
 }
 
-void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_by_val(const Instruction* currentInstruction)
 {
-    int base = currentInstruction[1].u.operand;
-    int property = currentInstruction[2].u.operand;
-    int32_t attributes = currentInstruction[3].u.operand;
-    int getter = currentInstruction[4].u.operand;
+    auto bytecode = currentInstruction->as<OpPutGetterByVal>();
+    int base = bytecode.base.offset();
+    int property = bytecode.property.offset();
+    int32_t attributes = bytecode.attributes;
+    int getter = bytecode.accessor.offset();
 
     emitLoadPayload(base, regT2);
     emitLoad(property, regT1, regT0);
@@ -98,43 +102,48 @@ void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
     callOperation(operationPutGetterByVal, regT2, JSValueRegs(regT1, regT0), attributes, regT3);
 }
 
-void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction)
+void JIT::emit_op_put_setter_by_val(const Instruction* currentInstruction)
 {
-    int base = currentInstruction[1].u.operand;
-    int property = currentInstruction[2].u.operand;
-    int32_t attributes = currentInstruction[3].u.operand;
-    int getter = currentInstruction[4].u.operand;
+    auto bytecode = currentInstruction->as<OpPutSetterByVal>();
+    int base = bytecode.base.offset();
+    int property = bytecode.property.offset();
+    int32_t attributes = bytecode.attributes;
+    int setter = bytecode.accessor.offset();
 
     emitLoadPayload(base, regT2);
     emitLoad(property, regT1, regT0);
-    emitLoadPayload(getter, regT3);
+    emitLoadPayload(setter, regT3);
     callOperation(operationPutSetterByVal, regT2, JSValueRegs(regT1, regT0), attributes, regT3);
 }
 
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+void JIT::emit_op_del_by_id(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpDelById>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int property = bytecode.property;
     emitLoad(base, regT1, regT0);
     callOperation(operationDeleteByIdJSResult, dst, JSValueRegs(regT1, regT0), m_codeBlock->identifier(property).impl());
 }
 
-void JIT::emit_op_del_by_val(Instruction* currentInstruction)
+void JIT::emit_op_del_by_val(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpDelByVal>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int property = bytecode.property.offset();
     emitLoad2(base, regT1, regT0, property, regT3, regT2);
     callOperation(operationDeleteByValJSResult, dst, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
 }
 
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+void JIT::emit_op_get_by_val(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
-    ArrayProfile* profile = arrayProfileFor<OpGetByValShape>(currentInstruction);
+    auto bytecode = currentInstruction->as<OpGetByVal>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int property = bytecode.property.offset();
+    ArrayProfile* profile = &metadata.arrayProfile;
     ByValInfo* byValInfo = m_codeBlock->addByValInfo();
 
     emitLoad2(base, regT1, regT0, property, regT3, regT2);
@@ -177,7 +186,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
         resultOK.link(this);
     }
 
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     emitStore(dst, regT1, regT0);
 
     Label nextHotPath = label();
@@ -185,17 +194,18 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
 }
 
-JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
+JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, OpGetByVal bytecode, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
 {
-    int dst = currentInstruction[1].u.operand;
-
     // base: tag(regT1), payload(regT0)
     // property: tag(regT3), payload(regT2)
     // scratch: regT4
 
+    int dst = bytecode.dst.offset();
+
     slowCases.append(branchIfNotCell(regT3));
     emitByValIdentifierCheck(byValInfo, regT2, regT4, propertyName, slowCases);
 
+    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
     JITGetByIdGenerator gen(
         m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
         propertyName.impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::Get);
@@ -206,18 +216,19 @@ JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruct
     Label coldPathBegin = label();
     gen.slowPathJump().link(this);
 
-    Call call = callOperationWithProfile(operationGetByIdOptimize, dst, gen.stubInfo(), JSValueRegs(regT1, regT0), propertyName.impl());
+    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, dst, gen.stubInfo(), JSValueRegs(regT1, regT0), propertyName.impl());
     gen.reportSlowPathCall(coldPathBegin, call);
     slowDoneCase = jump();
 
     return gen;
 }
 
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpGetByVal>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int property = bytecode.property.offset();
     ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
 
     linkSlowCaseIfNotJSCell(iter, base); // base cell check
@@ -229,7 +240,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
     emitNakedCall(CodeLocationLabel<NoPtrTag>(m_vm->getCTIStub(stringGetByValGenerator).retaggedCode<NoPtrTag>()));
     Jump failed = branchTestPtr(Zero, regT0);
     emitStoreCell(dst, regT0);
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+    emitJumpSlowToHot(jump(), currentInstruction->size());
     failed.link(this);
     notString.link(this);
     nonCell.link(this);
@@ -247,14 +258,22 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
     m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
     m_byValInstructionIndex++;
 
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
+}
+
+void JIT::emit_op_put_by_val_direct(const Instruction* currentInstruction)
+{
+    emit_op_put_by_val<OpPutByValDirect>(currentInstruction);
 }
 
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+template<typename Op>
+void JIT::emit_op_put_by_val(const Instruction* currentInstruction)
 {
-    int base = currentInstruction[1].u.operand;
-    int property = currentInstruction[2].u.operand;
-    ArrayProfile* profile = arrayProfileFor<OpPutByValShape>(currentInstruction);
+    auto bytecode = currentInstruction->as<Op>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int base = bytecode.base.offset();
+    int property = bytecode.property.offset();
+    ArrayProfile* profile = &metadata.arrayProfile;
     ByValInfo* byValInfo = m_codeBlock->addByValInfo();
     
     emitLoad2(base, regT1, regT0, property, regT3, regT2);
@@ -274,16 +293,16 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
     JITArrayMode mode = chooseArrayMode(profile);
     switch (mode) {
     case JITInt32:
-        slowCases = emitInt32PutByVal(currentInstruction, badType);
+        slowCases = emitInt32PutByVal(bytecode, badType);
         break;
     case JITDouble:
-        slowCases = emitDoublePutByVal(currentInstruction, badType);
+        slowCases = emitDoublePutByVal(bytecode, badType);
         break;
     case JITContiguous:
-        slowCases = emitContiguousPutByVal(currentInstruction, badType);
+        slowCases = emitContiguousPutByVal(bytecode, badType);
         break;
     case JITArrayStorage:
-        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+        slowCases = emitArrayStoragePutByVal(bytecode, badType);
         break;
     default:
         CRASH();
@@ -298,11 +317,13 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
     m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
 }
 
-JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
+template <typename Op>
+JIT::JumpList JIT::emitGenericContiguousPutByVal(Op bytecode, PatchableJump& badType, IndexingType indexingShape)
 {
-    int base = currentInstruction[1].u.operand;
-    int value = currentInstruction[3].u.operand;
-    ArrayProfile* profile = arrayProfileFor<OpPutByValShape>(currentInstruction);
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int base = bytecode.base.offset();
+    int value = bytecode.value.offset();
+    ArrayProfile* profile = &metadata.arrayProfile;
 
     JumpList slowCases;
     
@@ -357,11 +378,13 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction
     return slowCases;
 }
 
-JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
+template <typename Op>
+JIT::JumpList JIT::emitArrayStoragePutByVal(Op bytecode, PatchableJump& badType)
 {
-    int base = currentInstruction[1].u.operand;
-    int value = currentInstruction[3].u.operand;
-    ArrayProfile* profile = arrayProfileFor<OpPutByValShape>(currentInstruction);
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int base = bytecode.base.offset();
+    int value = bytecode.value.offset();
+    ArrayProfile* profile = &metadata.arrayProfile;
 
     JumpList slowCases;
     
@@ -394,13 +417,14 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
     return slowCases;
 }
 
-JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
+template <typename Op>
+JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Op bytecode, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
 {
     // base: tag(regT1), payload(regT0)
     // property: tag(regT3), payload(regT2)
 
-    int base = currentInstruction[1].u.operand;
-    int value = currentInstruction[3].u.operand;
+    int base = bytecode.base.offset();
+    int value = bytecode.value.offset();
 
     slowCases.append(branchIfNotCell(regT3));
     emitByValIdentifierCheck(byValInfo, regT2, regT2, propertyName, slowCases);
@@ -411,6 +435,7 @@ JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Instruct
     emitLoadPayload(base, regT0);
     emitLoad(value, regT3, regT2);
 
+    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
     JITPutByIdGenerator gen(
         m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
         JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, m_codeBlock->ecmaMode(), putKind);
@@ -430,18 +455,29 @@ JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Instruct
     return gen;
 }
 
-void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_put_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    int base = currentInstruction[1].u.operand;
-    int property = currentInstruction[2].u.operand;
-    int value = currentInstruction[3].u.operand;
+    bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct;
+    int base;
+    int property;
+    int value;
+
+    auto load = [&](auto bytecode) {
+        base = bytecode.base.offset();
+        property = bytecode.property.offset();
+        value = bytecode.value.offset();
+    };
+
+    if (isDirect)
+        load(currentInstruction->as<OpPutByValDirect>());
+    else
+        load(currentInstruction->as<OpPutByVal>());
+
     ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
     
     linkAllSlowCases(iter);
     Label slowPath = label();
     
-    bool isDirect = Interpreter::getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
-
     // The register selection below is chosen to reduce register swapping on ARM.
     // Swapping shouldn't happen on other platforms.
     emitLoad(base, regT2, regT1);
@@ -454,11 +490,12 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
     m_byValInstructionIndex++;
 }
 
-void JIT::emit_op_try_get_by_id(Instruction* currentInstruction)
+void JIT::emit_op_try_get_by_id(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    auto bytecode = currentInstruction->as<OpTryGetById>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     emitLoad(base, regT1, regT0);
     emitJumpSlowCaseIfNotJSCell(base, regT1);
@@ -470,16 +507,17 @@ void JIT::emit_op_try_get_by_id(Instruction* currentInstruction)
     addSlowCase(gen.slowPathJump());
     m_getByIds.append(gen);
     
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emitSlow_op_try_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_try_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int resultVReg = currentInstruction[1].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    auto bytecode = currentInstruction->as<OpTryGetById>();
+    int resultVReg = bytecode.dst.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
 
@@ -491,11 +529,12 @@ void JIT::emitSlow_op_try_get_by_id(Instruction* currentInstruction, Vector<Slow
 }
 
 
-void JIT::emit_op_get_by_id_direct(Instruction* currentInstruction)
+void JIT::emit_op_get_by_id_direct(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    auto bytecode = currentInstruction->as<OpGetByIdDirect>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     emitLoad(base, regT1, regT0);
     emitJumpSlowCaseIfNotJSCell(base, regT1);
@@ -507,32 +546,34 @@ void JIT::emit_op_get_by_id_direct(Instruction* currentInstruction)
     addSlowCase(gen.slowPathJump());
     m_getByIds.append(gen);
 
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emitSlow_op_get_by_id_direct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_by_id_direct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int resultVReg = currentInstruction[1].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    auto bytecode = currentInstruction->as<OpGetByIdDirect>();
+    int resultVReg = bytecode.dst.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
 
     Label coldPathBegin = label();
 
-    Call call = callOperationWithProfile(operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), ident->impl());
+    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), ident->impl());
 
     gen.reportSlowPathCall(coldPathBegin, call);
 }
 
 
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+void JIT::emit_op_get_by_id(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    auto bytecode = currentInstruction->as<OpGetById>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
     
     emitLoad(base, regT1, regT0);
     emitJumpSlowCaseIfNotJSCell(base, regT1);
@@ -547,32 +588,34 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
     addSlowCase(gen.slowPathJump());
     m_getByIds.append(gen);
 
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int resultVReg = currentInstruction[1].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    auto bytecode = currentInstruction->as<OpGetById>();
+    int resultVReg = bytecode.dst.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
     
     Label coldPathBegin = label();
     
-    Call call = callOperationWithProfile(operationGetByIdOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), ident->impl());
+    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), ident->impl());
     
     gen.reportSlowPathCall(coldPathBegin, call);
 }
 
-void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction)
+void JIT::emit_op_get_by_id_with_this(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int thisVReg = currentInstruction[3].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand));
+    auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    int thisVReg = bytecode.thisValue.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
     
     emitLoad(base, regT1, regT0);
     emitLoad(thisVReg, regT4, regT3);
@@ -586,35 +629,38 @@ void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction)
     addSlowCase(gen.slowPathJump());
     m_getByIdsWithThis.append(gen);
 
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emitSlow_op_get_by_id_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_by_id_with_this(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int resultVReg = currentInstruction[1].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand));
+    auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
+    int resultVReg = bytecode.dst.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++];
     
     Label coldPathBegin = label();
     
-    Call call = callOperationWithProfile(operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), JSValueRegs(regT4, regT3), ident->impl());
+    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), JSValueRegs(regT4, regT3), ident->impl());
     
     gen.reportSlowPathCall(coldPathBegin, call);
 }
 
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+void JIT::emit_op_put_by_id(const Instruction* currentInstruction)
 {
     // In order to be able to patch both the Structure, and the object offset, we store one pointer,
     // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
     // such that the Structure & offset are always at the same distance from this.
     
-    int base = currentInstruction[1].u.operand;
-    int value = currentInstruction[3].u.operand;
-    int direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
+    auto bytecode = currentInstruction->as<OpPutById>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int base = bytecode.base.offset();
+    int value = bytecode.value.offset();
+    int direct = metadata.flags & PutByIdIsDirect;
     
     emitLoad2(base, regT1, regT0, value, regT3, regT2);
     
@@ -633,12 +679,13 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
     m_putByIds.append(gen);
 }
 
-void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_put_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int base = currentInstruction[1].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+    auto bytecode = currentInstruction->as<OpPutById>();
+    int base = bytecode.base.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     Label coldPathBegin(this);
 
@@ -653,11 +700,12 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
     gen.reportSlowPathCall(coldPathBegin, call);
 }
 
-void JIT::emit_op_in_by_id(Instruction* currentInstruction)
+void JIT::emit_op_in_by_id(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    auto bytecode = currentInstruction->as<OpInById>();
+    int dst = bytecode.dst.offset();
+    int base = bytecode.base.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     emitLoad(base, regT1, regT0);
     emitJumpSlowCaseIfNotJSCell(base, regT1);
@@ -672,12 +720,13 @@ void JIT::emit_op_in_by_id(Instruction* currentInstruction)
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emitSlow_op_in_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_in_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int resultVReg = currentInstruction[1].u.operand;
-    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    auto bytecode = currentInstruction->as<OpInById>();
+    int resultVReg = bytecode.dst.offset();
+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.property));
 
     JITInByIdGenerator& gen = m_inByIds[m_inByIdIndex++];
 
@@ -705,12 +754,15 @@ void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, u
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
+void JIT::emit_op_resolve_scope(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int scope = currentInstruction[2].u.operand;
-    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
-    unsigned depth = currentInstruction[5].u.operand;
+    auto bytecode = currentInstruction->as<OpResolveScope>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int dst = bytecode.dst.offset();
+    int scope = bytecode.scope.offset();
+    ResolveType resolveType = metadata.resolveType;
+    unsigned depth = metadata.localScopeDepth;
+
     auto emitCode = [&] (ResolveType resolveType) {
         switch (resolveType) {
         case GlobalProperty:
@@ -733,7 +785,7 @@ void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
             break;
         case ModuleVar:
             move(TrustedImm32(JSValue::CellTag), regT1);
-            move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0);
+            move(TrustedImmPtr(metadata.lexicalEnvironment.get()), regT0);
             emitStore(dst, regT1, regT0);
             break;
         case Dynamic:
@@ -809,13 +861,15 @@ void JIT::emitGetClosureVar(int scope, uintptr_t operand)
     load32(Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset), regT0);
 }
 
-void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
+void JIT::emit_op_get_from_scope(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int scope = currentInstruction[2].u.operand;
-    ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
-    Structure** structureSlot = currentInstruction[5].u.structure.slot();
-    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+    auto bytecode = currentInstruction->as<OpGetFromScope>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int dst = bytecode.dst.offset();
+    int scope = bytecode.scope.offset();
+    ResolveType resolveType = metadata.getPutInfo.resolveType();
+    Structure** structureSlot = metadata.structure.slot();
+    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.operand);
     
     auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
         switch (resolveType) {
@@ -902,16 +956,17 @@ void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
         emitCode(resolveType, false);
         break;
     }
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_from_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    int dst = currentInstruction[1].u.operand;
-    callOperationWithProfile(operationGetFromScope, dst, currentInstruction);
+    auto bytecode = currentInstruction->as<OpGetFromScope>();
+    int dst = bytecode.dst.offset();
+    callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, currentInstruction);
 }
 
 void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
@@ -942,14 +997,16 @@ void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointS
     store32(regT2, Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset));
 }
 
-void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
+void JIT::emit_op_put_to_scope(const Instruction* currentInstruction)
 {
-    int scope = currentInstruction[1].u.operand;
-    int value = currentInstruction[3].u.operand;
-    GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
+    auto bytecode = currentInstruction->as<OpPutToScope>();
+    auto& metadata = bytecode.metadata(m_codeBlock);
+    int scope = bytecode.scope.offset();
+    int value = bytecode.value.offset();
+    GetPutInfo getPutInfo = copiedGetPutInfo(bytecode);
     ResolveType resolveType = getPutInfo.resolveType();
-    Structure** structureSlot = currentInstruction[5].u.structure.slot();
-    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+    Structure** structureSlot = metadata.structure.slot();
+    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.operand);
     
     auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
         switch (resolveType) {
@@ -983,9 +1040,9 @@ void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
                 addSlowCase(branchIfEmpty(regT1));
             }
             if (indirectLoadForOperand)
-                emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5]));
+                emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, &metadata.watchpointSet);
             else
-                emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);
+                emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, metadata.watchpointSet);
             break;
         }
         case LocalClosureVar:
@@ -993,7 +1050,7 @@ void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
         case ClosureVarWithVarInjectionChecks:
             emitWriteBarrier(scope, value, ShouldFilterValue);
             emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
-            emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
+            emitPutClosureVar(scope, *operandSlot, value, metadata.watchpointSet);
             break;
         case ModuleVar:
         case Dynamic:
@@ -1041,12 +1098,12 @@ void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
     }
 }
 
-void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_put_to_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkAllSlowCases(iter);
 
-    GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
-    ResolveType resolveType = getPutInfo.resolveType();
+    auto bytecode = currentInstruction->as<OpPutToScope>();
+    ResolveType resolveType = copiedGetPutInfo(bytecode).resolveType();
     if (resolveType == ModuleVar) {
         JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
         slowPathCall.call();
@@ -1054,24 +1111,26 @@ void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowC
         callOperation(operationPutToScope, currentInstruction);
 }
 
-void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
+void JIT::emit_op_get_from_arguments(const Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int arguments = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpGetFromArguments>();
+    int dst = bytecode.dst.offset();
+    int arguments = bytecode.arguments.offset();
+    int index = bytecode.index;
     
     emitLoadPayload(arguments, regT0);
     load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset), regT1);
     load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset), regT0);
-    emitValueProfilingSite();
+    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
+void JIT::emit_op_put_to_arguments(const Instruction* currentInstruction)
 {
-    int arguments = currentInstruction[1].u.operand;
-    int index = currentInstruction[2].u.operand;
-    int value = currentInstruction[3].u.operand;
+    auto bytecode = currentInstruction->as<OpPutToArguments>();
+    int arguments = bytecode.arguments.offset();
+    int index = bytecode.index;
+    int value = bytecode.value.offset();
     
     emitWriteBarrier(arguments, value, ShouldFilterValue);
     
index a3ee747..5bad3ef 100644 (file)
@@ -201,6 +201,8 @@ RegisterSet RegisterSet::vmCalleeSaveRegisters()
     result.set(FPRInfo::fpRegCS5);
     result.set(FPRInfo::fpRegCS6);
     result.set(FPRInfo::fpRegCS7);
+#elif CPU(ARM_THUMB2)
+    result.set(GPRInfo::regCS0);
 #endif
     return result;
 }
index 359d5ef..fa9a7ed 100644 (file)
@@ -134,7 +134,7 @@ void Data::performAssertions(VM& vm)
 #if ENABLE(C_LOOP)
     ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 1);
 #elif USE(JSVALUE32_64)
-    ASSERT(!CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters());
+    ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 1);
 #elif (CPU(X86_64) && !OS(WINDOWS))  || CPU(ARM64)
     ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 4);
 #elif (CPU(X86_64) && OS(WINDOWS))
index 2f9ae88..65d5e8d 100644 (file)
@@ -558,10 +558,10 @@ end
 #         end
 #     )
 #
-if X86_64 or ARM64 or ARM64E
+if X86_64 or ARM64 or ARM64E or ARMv7
     macro probe(action)
         # save all the registers that the LLInt may use.
-        if ARM64 or ARM64E
+        if ARM64 or ARM64E or ARMv7
             push cfr, lr
         end
         push a0, a1
@@ -575,6 +575,8 @@ if X86_64 or ARM64 or ARM64E
             push csr4, csr5
             push csr6, csr7
             push csr8, csr9
+        elsif ARMv7
+            push csr0
         end
 
         action()
@@ -586,13 +588,15 @@ if X86_64 or ARM64 or ARM64E
             pop csr5, csr4
             pop csr3, csr2
             pop csr1, csr0
+        elsif ARMv7
+            pop csr0
         end
         pop t5, t4
         pop t3, t2
         pop t1, t0
         pop a3, a2
         pop a1, a0
-        if ARM64 or ARM64E
+        if ARM64 or ARM64E or ARMv7
             pop lr, cfr
         end
     end
@@ -759,7 +763,7 @@ macro restoreCalleeSavesUsedByLLInt()
 end
 
 macro copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm, temp)
-    if ARM64 or ARM64E or X86_64 or X86_64_WIN
+    if ARM64 or ARM64E or X86_64 or X86_64_WIN or ARMv7
         loadp VM::topEntryFrame[vm], temp
         vmEntryRecord(temp, temp)
         leap VMEntryRecord::calleeSaveRegistersBuffer[temp], temp
@@ -796,12 +800,14 @@ macro copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm, temp)
             storeq csr4, 32[temp]
             storeq csr5, 40[temp]
             storeq csr6, 48[temp]
+        elsif ARMv7
+            storep csr0, [temp]
         end
     end
 end
 
 macro restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(vm, temp)
-    if ARM64 or ARM64E or X86_64 or X86_64_WIN
+    if ARM64 or ARM64E or X86_64 or X86_64_WIN or ARMv7
         loadp VM::topEntryFrame[vm], temp
         vmEntryRecord(temp, temp)
         leap VMEntryRecord::calleeSaveRegistersBuffer[temp], temp
@@ -838,6 +844,8 @@ macro restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(vm, temp)
             loadq 32[temp], csr4
             loadq 40[temp], csr5
             loadq 48[temp], csr6
+        elsif ARMv7
+            loadp [temp], csr0
         end
     end
 end
index e2a7393..551fe34 100644 (file)
@@ -440,7 +440,8 @@ static ALWAYS_INLINE unsigned tryGetBytecodeIndex(unsigned llintPC, CodeBlock* c
     return 0;
 #else
     Instruction* instruction = bitwise_cast<Instruction*>(llintPC);
-    if (instruction >= codeBlock->instructions().begin() && instruction < codeBlock->instructions().end()) {
+
+    if (codeBlock->instructions().contains(instruction)) {
         isValid = true;
         return codeBlock->bytecodeOffset(instruction);
     }
index d11d9de..25a0ac2 100644 (file)
@@ -1,3 +1,15 @@
+2018-11-21  Dominik Infuehr  <dinfuehr@igalia.com>
+
+        Enable JIT on ARM/Linux
+        https://bugs.webkit.org/show_bug.cgi?id=191548
+
+        Reviewed by Yusuke Suzuki.
+
+        Enable JIT by default on ARMv7/Linux after it was disabled with
+        recent bytecode format change.
+
+        * wtf/Platform.h:
+
 2018-11-14  Keith Rollin  <krollin@apple.com>
 
         Fix #end vs. #endif typo.
index e2eb75b..b3f124d 100644 (file)
 #define ENABLE_JIT 1
 #endif
 
-/* Disable JIT for 32-bit builds. */
 #if USE(JSVALUE32_64)
+#if CPU(ARM_THUMB2) && OS(LINUX)
+/* On ARMv7/Linux the JIT is enabled unless explicitly disabled. */
+#if !defined(ENABLE_JIT)
+#define ENABLE_JIT 1
+#endif
+/* But still disable DFG for now. */
+#undef ENABLE_DFG_JIT
+#define ENABLE_DFG_JIT 0
+#else
+/* Disable JIT and force C_LOOP on all 32-bit architectures except ARMv7-Thumb2/Linux. */
 #undef ENABLE_JIT
 #define ENABLE_JIT 0
-/* Force C_LOOP on all architectures but ARMv7-Thumb2/Linux. */
-#if !(CPU(ARM_THUMB2) && OS(LINUX))
 #undef ENABLE_C_LOOP
 #define ENABLE_C_LOOP 1
 #endif
index 37adfe8..9fef771 100644 (file)
@@ -1,3 +1,13 @@
+2018-11-21  Dominik Infuehr  <dinfuehr@igalia.com>
+
+        Enable JIT on ARM/Linux
+        https://bugs.webkit.org/show_bug.cgi?id=191548
+
+        Reviewed by Yusuke Suzuki.
+
+        * bmalloc/IsoPageInlines.h:
+        (bmalloc::IsoPage<Config>::startAllocating):
+
 2018-11-01  Jiewen Tan  <jiewen_tan@apple.com>
 
         Replace CommonRandom SPI with API
index 0c47864..12b3183 100644 (file)
@@ -188,7 +188,7 @@ FreeList IsoPage<Config>::startAllocating()
         char* cellByte = reinterpret_cast<char*>(this) + index * Config::objectSize;
         if (verbose)
             fprintf(stderr, "%p: putting %p on free list.\n", this, cellByte);
-        FreeCell* cell = reinterpret_cast<FreeCell*>(cellByte);
+        FreeCell* cell = bitwise_cast<FreeCell*>(cellByte);
         cell->setNext(head, secret);
         head = cell;
         bytes += Config::objectSize;
index 2d55f95..04bcfe4 100644 (file)
@@ -68,12 +68,12 @@ macro(WEBKIT_OPTION_BEGIN)
         set(USE_SYSTEM_MALLOC_DEFAULT OFF)
         set(ENABLE_C_LOOP_DEFAULT OFF)
         set(ENABLE_SAMPLING_PROFILER_DEFAULT ON)
-    elseif (WTF_CPU_ARM AND WTF_OS_UNIX)
-        set(ENABLE_JIT_DEFAULT OFF)
+    elseif (WTF_CPU_ARM AND WTF_OS_LINUX)
+        set(ENABLE_JIT_DEFAULT ON)
         set(ENABLE_FTL_DEFAULT OFF)
         set(USE_SYSTEM_MALLOC_DEFAULT OFF)
         set(ENABLE_C_LOOP_DEFAULT OFF)
-        set(ENABLE_SAMPLING_PROFILER_DEFAULT OFF)
+        set(ENABLE_SAMPLING_PROFILER_DEFAULT ON)
     else ()
         set(ENABLE_JIT_DEFAULT OFF)
         set(ENABLE_FTL_DEFAULT OFF)