BytecodeIndex should be a proper C++ class
author     keith_miller@apple.com <keith_miller@apple.com>
Wed, 23 Oct 2019 00:55:38 +0000 (00:55 +0000)
committer  keith_miller@apple.com <keith_miller@apple.com>
Wed, 23 Oct 2019 00:55:38 +0000 (00:55 +0000)
https://bugs.webkit.org/show_bug.cgi?id=203276

Reviewed by Mark Lam.

This patch changes how we refer to a position in a bytecode
stream. Previously we used a bare unsigned number to represent the
index; this patch changes most of the code to use a BytecodeIndex
class instead. The only places this patch leaves unchanged are jump
and switch targets / deltas.

Additionally, this patch canonicalizes the terminology around
bytecode positions: "index" now refers to the BytecodeIndex class,
and "offset" refers to the unsigned byte offset into the
instruction stream.
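
For illustration, a minimal sketch of the convention (not code from
this patch; it assumes an instruction ref and a codeBlock as in the
hunks below, and handlerForBytecodeIndex is one of the converted APIs):

    // "offset" is the raw unsigned byte position in the stream.
    unsigned offset = instruction.offset();

    // "index" is that position wrapped in the new class; the
    // constructor is explicit, so a bare unsigned no longer
    // converts silently.
    BytecodeIndex index(offset);

    auto* handler = codeBlock->handlerForBytecodeIndex(index);
    unsigned raw = index.offset(); // recover the byte offset when needed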

* JavaScriptCore.xcodeproj/project.pbxproj:
* Sources.txt:
* bytecode/ByValInfo.h:
(JSC::ByValInfo::ByValInfo):
(JSC::getByValInfoBytecodeIndex):
* bytecode/BytecodeBasicBlock.cpp:
(JSC::BytecodeBasicBlock::computeImpl):
* bytecode/BytecodeGeneratorification.cpp:
(JSC::GeneratorLivenessAnalysis::run):
* bytecode/BytecodeIndex.cpp: Added.
(JSC::BytecodeIndex::dump const):
* bytecode/BytecodeIndex.h: Added.
(JSC::BytecodeIndex::BytecodeIndex):
(JSC::BytecodeIndex::offset const):
(JSC::BytecodeIndex::asBits const):
(JSC::BytecodeIndex::hash const):
(JSC::BytecodeIndex::deletedValue):
(JSC::BytecodeIndex::isHashTableDeletedValue const):
(JSC::BytecodeIndex::operator bool const):
(JSC::BytecodeIndex::operator == const):
(JSC::BytecodeIndex::operator != const):
(JSC::BytecodeIndex::operator < const):
(JSC::BytecodeIndex::operator > const):
(JSC::BytecodeIndex::operator <= const):
(JSC::BytecodeIndex::operator >= const):
(JSC::BytecodeIndex::fromBits):
(JSC::BytecodeIndexHash::hash):
(JSC::BytecodeIndexHash::equal):
* bytecode/BytecodeLivenessAnalysis.cpp:
(JSC::BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeIndex):
(JSC::BytecodeLivenessAnalysis::computeFullLiveness):
(JSC::BytecodeLivenessAnalysis::computeKills):
(JSC::BytecodeLivenessAnalysis::dumpResults):
(JSC::BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset): Deleted.
* bytecode/BytecodeLivenessAnalysis.h:
* bytecode/BytecodeLivenessAnalysisInlines.h:
(JSC::BytecodeLivenessPropagation::stepOverInstruction):
(JSC::BytecodeLivenessPropagation::computeLocalLivenessForBytecodeIndex):
(JSC::BytecodeLivenessPropagation::computeLocalLivenessForBlock):
(JSC::BytecodeLivenessPropagation::getLivenessInfoAtBytecodeIndex):
(JSC::BytecodeLivenessPropagation::computeLocalLivenessForBytecodeOffset): Deleted.
(JSC::BytecodeLivenessPropagation::getLivenessInfoAtBytecodeOffset): Deleted.
* bytecode/BytecodeUseDef.h:
(JSC::computeUsesForBytecodeIndex):
(JSC::computeDefsForBytecodeIndex):
(JSC::computeUsesForBytecodeOffset): Deleted.
(JSC::computeDefsForBytecodeOffset): Deleted.
* bytecode/CallLinkStatus.cpp:
(JSC::CallLinkStatus::computeFromLLInt):
(JSC::CallLinkStatus::computeFor):
(JSC::CallLinkStatus::computeExitSiteData):
* bytecode/CallLinkStatus.h:
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::getCallLinkInfoForBytecodeIndex):
(JSC::CodeBlock::addRareCaseProfile):
(JSC::CodeBlock::rareCaseProfileForBytecodeIndex):
(JSC::CodeBlock::rareCaseProfileCountForBytecodeIndex):
(JSC::CodeBlock::handlerForBytecodeIndex):
(JSC::CodeBlock::ensureCatchLivenessIsComputedForBytecodeIndex):
(JSC::CodeBlock::ensureCatchLivenessIsComputedForBytecodeIndexSlow):
(JSC::CodeBlock::lineNumberForBytecodeIndex):
(JSC::CodeBlock::columnNumberForBytecodeIndex):
(JSC::CodeBlock::expressionRangeForBytecodeIndex const):
(JSC::CodeBlock::hasOpDebugForLineAndColumn):
(JSC::CodeBlock::getArrayProfile):
(JSC::CodeBlock::tryGetValueProfileForBytecodeIndex):
(JSC::CodeBlock::valueProfilePredictionForBytecodeIndex):
(JSC::CodeBlock::valueProfileForBytecodeIndex):
(JSC::CodeBlock::validate):
(JSC::CodeBlock::arithProfileForBytecodeIndex):
(JSC::CodeBlock::couldTakeSpecialArithFastCase):
(JSC::CodeBlock::bytecodeIndexFromCallSiteIndex):
(JSC::CodeBlock::rareCaseProfileForBytecodeOffset): Deleted.
(JSC::CodeBlock::rareCaseProfileCountForBytecodeOffset): Deleted.
(JSC::CodeBlock::handlerForBytecodeOffset): Deleted.
(JSC::CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset): Deleted.
(JSC::CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow): Deleted.
(JSC::CodeBlock::lineNumberForBytecodeOffset): Deleted.
(JSC::CodeBlock::columnNumberForBytecodeOffset): Deleted.
(JSC::CodeBlock::expressionRangeForBytecodeOffset const): Deleted.
(JSC::CodeBlock::tryGetValueProfileForBytecodeOffset): Deleted.
(JSC::CodeBlock::valueProfilePredictionForBytecodeOffset): Deleted.
(JSC::CodeBlock::valueProfileForBytecodeOffset): Deleted.
(JSC::CodeBlock::arithProfileForBytecodeOffset): Deleted.
(JSC::CodeBlock::couldTakeSpecialFastCase): Deleted.
(JSC::CodeBlock::bytecodeOffsetFromCallSiteIndex): Deleted.
* bytecode/CodeBlock.h:
(JSC::CodeBlock::likelyToTakeSlowCase):
(JSC::CodeBlock::couldTakeSlowCase):
(JSC::CodeBlock::bytecodeIndex):
* bytecode/CodeOrigin.cpp:
(JSC::CodeOrigin::approximateHash const):
(JSC::CodeOrigin::dump const):
* bytecode/CodeOrigin.h:
(JSC::CodeOrigin::CodeOrigin):
(JSC::CodeOrigin::isSet const):
(JSC::CodeOrigin::isHashTableDeletedValue const):
(JSC::CodeOrigin::bytecodeIndex const):
(JSC::CodeOrigin::OutOfLineCodeOrigin::OutOfLineCodeOrigin):
(JSC::CodeOrigin::buildCompositeValue):
(JSC::CodeOrigin::hash const):
* bytecode/DFGExitProfile.cpp:
(JSC::DFG::FrequentExitSite::dump const):
(JSC::DFG::ExitProfile::exitSitesFor):
* bytecode/DFGExitProfile.h:
(JSC::DFG::FrequentExitSite::FrequentExitSite):
(JSC::DFG::FrequentExitSite::operator== const):
(JSC::DFG::FrequentExitSite::subsumes const):
(JSC::DFG::FrequentExitSite::hash const):
(JSC::DFG::FrequentExitSite::bytecodeIndex const):
(JSC::DFG::FrequentExitSite::isHashTableDeletedValue const):
(JSC::DFG::QueryableExitProfile::hasExitSite const):
(JSC::DFG::FrequentExitSite::bytecodeOffset const): Deleted.
* bytecode/DeferredSourceDump.cpp:
(JSC::DeferredSourceDump::DeferredSourceDump):
(JSC::DeferredSourceDump::dump):
* bytecode/DeferredSourceDump.h:
(): Deleted.
* bytecode/FullBytecodeLiveness.h:
(JSC::FullBytecodeLiveness::getLiveness const):
(JSC::FullBytecodeLiveness::operandIsLive const):
* bytecode/GetByIdStatus.cpp:
(JSC::GetByIdStatus::computeFromLLInt):
(JSC::GetByIdStatus::computeFor):
(JSC::GetByIdStatus::computeForStubInfo):
* bytecode/GetByIdStatus.h:
* bytecode/ICStatusUtils.cpp:
(JSC::hasBadCacheExitSite):
* bytecode/ICStatusUtils.h:
* bytecode/InByIdStatus.cpp:
(JSC::InByIdStatus::computeFor):
* bytecode/InByIdStatus.h:
* bytecode/InlineCallFrame.cpp:
(JSC::InlineCallFrame::dumpInContext const):
* bytecode/InstanceOfStatus.cpp:
(JSC::InstanceOfStatus::computeFor):
* bytecode/InstanceOfStatus.h:
* bytecode/InstructionStream.h:
(JSC::InstructionStream::BaseRef::offset const):
(JSC::InstructionStream::BaseRef::index const):
(JSC::InstructionStream::at const):
* bytecode/LazyOperandValueProfile.h:
(JSC::LazyOperandValueProfileKey::LazyOperandValueProfileKey):
(JSC::LazyOperandValueProfileKey::operator== const):
(JSC::LazyOperandValueProfileKey::hash const):
(JSC::LazyOperandValueProfileKey::bytecodeIndex const):
(JSC::LazyOperandValueProfileKey::isHashTableDeletedValue const):
(JSC::LazyOperandValueProfileKey::bytecodeOffset const): Deleted.
* bytecode/MethodOfGettingAValueProfile.cpp:
(JSC::MethodOfGettingAValueProfile::fromLazyOperand):
* bytecode/MethodOfGettingAValueProfile.h:
* bytecode/PutByIdStatus.cpp:
(JSC::PutByIdStatus::computeFromLLInt):
(JSC::PutByIdStatus::computeFor):
* bytecode/PutByIdStatus.h:
* bytecode/StructureStubInfo.cpp:
(JSC::StructureStubInfo::StructureStubInfo):
* bytecode/UnlinkedCodeBlock.cpp:
(JSC::UnlinkedCodeBlock::lineNumberForBytecodeIndex):
(JSC::UnlinkedCodeBlock::expressionRangeForBytecodeIndex const):
(JSC::UnlinkedCodeBlock::handlerForBytecodeIndex):
(JSC::UnlinkedCodeBlock::lineNumberForBytecodeOffset): Deleted.
(JSC::UnlinkedCodeBlock::expressionRangeForBytecodeOffset const): Deleted.
(JSC::UnlinkedCodeBlock::handlerForBytecodeOffset): Deleted.
* bytecode/UnlinkedCodeBlock.h:
* bytecode/ValueProfile.h:
(JSC::RareCaseProfile::RareCaseProfile):
(JSC::getRareCaseProfileBytecodeIndex):
(JSC::getRareCaseProfileBytecodeOffset): Deleted.
* bytecompiler/BytecodeGenerator.cpp:
(JSC::ForInContext::finalize):
* debugger/DebuggerCallFrame.cpp:
(JSC::DebuggerCallFrame::currentPosition):
* dfg/DFGBasicBlock.cpp:
(JSC::DFG::BasicBlock::BasicBlock):
* dfg/DFGBasicBlock.h:
(JSC::DFG::getBytecodeBeginForBlock):
(JSC::DFG::blockForBytecodeIndex):
(JSC::DFG::blockForBytecodeOffset): Deleted.
* dfg/DFGBlockInsertionSet.cpp:
(JSC::DFG::BlockInsertionSet::insert):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::flushForTerminalImpl):
(JSC::DFG::ByteCodeParser::flushIfTerminal):
(JSC::DFG::ByteCodeParser::branchData):
(JSC::DFG::ByteCodeParser::getPredictionWithoutOSRExit):
(JSC::DFG::ByteCodeParser::getPrediction):
(JSC::DFG::ByteCodeParser::getArrayMode):
(JSC::DFG::ByteCodeParser::makeSafe):
(JSC::DFG::ByteCodeParser::makeDivSafe):
(JSC::DFG::ByteCodeParser::allocateTargetableBlock):
(JSC::DFG::ByteCodeParser::allocateUntargetableBlock):
(JSC::DFG::ByteCodeParser::makeBlockTargetable):
(JSC::DFG::ByteCodeParser::handleCall):
(JSC::DFG::ByteCodeParser::handleRecursiveTailCall):
(JSC::DFG::ByteCodeParser::inlineCall):
(JSC::DFG::ByteCodeParser::handleCallVariant):
(JSC::DFG::ByteCodeParser::handleInlining):
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::linkBlock):
(JSC::DFG::ByteCodeParser::parseCodeBlock):
(JSC::DFG::ByteCodeParser::parse):
* dfg/DFGCommonData.cpp:
(JSC::DFG::CommonData::addCodeOrigin):
(JSC::DFG::CommonData::addUniqueCallSiteIndex):
(JSC::DFG::CommonData::lastCallSite const):
* dfg/DFGCommonData.h:
(JSC::DFG::CommonData::catchOSREntryDataForBytecodeIndex):
(JSC::DFG::CommonData::appendCatchEntrypoint):
* dfg/DFGDriver.cpp:
(JSC::DFG::compileImpl):
(JSC::DFG::compile):
* dfg/DFGDriver.h:
* dfg/DFGGraph.cpp:
(JSC::DFG::Graph::dump):
(JSC::DFG::Graph::methodOfGettingAValueProfileFor):
(JSC::DFG::Graph::willCatchExceptionInMachineFrame):
* dfg/DFGGraph.h:
* dfg/DFGJITCode.cpp:
(JSC::DFG::JITCode::clearOSREntryBlockAndResetThresholds):
* dfg/DFGJITCode.h:
(JSC::DFG::JITCode::appendOSREntryData):
(JSC::DFG::JITCode::osrEntryDataForBytecodeIndex):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::JITCompiler):
(JSC::DFG::JITCompiler::compile):
(JSC::DFG::JITCompiler::compileFunction):
* dfg/DFGJITCompiler.h:
(JSC::DFG::JITCompiler::setStartOfCode):
* dfg/DFGLiveCatchVariablePreservationPhase.cpp:
(JSC::DFG::LiveCatchVariablePreservationPhase::handleBlockForTryCatch):
* dfg/DFGOSREntry.cpp:
(JSC::DFG::OSREntryData::dumpInContext const):
(JSC::DFG::prepareOSREntry):
(JSC::DFG::prepareCatchOSREntry):
* dfg/DFGOSREntry.h:
(JSC::DFG::getOSREntryDataBytecodeIndex):
(JSC::DFG::prepareOSREntry):
* dfg/DFGOSREntrypointCreationPhase.cpp:
(JSC::DFG::OSREntrypointCreationPhase::run):
* dfg/DFGOSRExit.cpp:
(JSC::DFG::OSRExit::executeOSRExit):
(JSC::DFG::reifyInlinedCallFrames):
(JSC::DFG::adjustAndJumpToTarget):
(JSC::DFG::printOSRExit):
(JSC::DFG::OSRExit::compileExit):
(JSC::DFG::OSRExit::debugOperationPrintSpeculationFailure):
* dfg/DFGOSRExit.h:
* dfg/DFGOSRExitCompilerCommon.cpp:
(JSC::DFG::callerReturnPC):
(JSC::DFG::reifyInlinedCallFrames):
(JSC::DFG::adjustAndJumpToTarget):
* dfg/DFGOSRExitCompilerCommon.h:
* dfg/DFGOperations.cpp:
* dfg/DFGOperations.h:
* dfg/DFGPlan.cpp:
(JSC::DFG::Plan::Plan):
(JSC::DFG::Plan::compileInThreadImpl):
(JSC::DFG::Plan::cleanMustHandleValuesIfNecessary):
* dfg/DFGPlan.h:
(JSC::DFG::Plan::osrEntryBytecodeIndex const):
(JSC::DFG::Plan::tierUpInLoopHierarchy):
(JSC::DFG::Plan::tierUpAndOSREnterBytecodes):
* dfg/DFGSSAConversionPhase.cpp:
(JSC::DFG::SSAConversionPhase::run):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::compileCurrentBlock):
(JSC::DFG::SpeculativeJIT::checkArgumentTypes):
(JSC::DFG::SpeculativeJIT::compileValueAdd):
(JSC::DFG::SpeculativeJIT::compileValueSub):
(JSC::DFG::SpeculativeJIT::compileValueNegate):
(JSC::DFG::SpeculativeJIT::compileValueMul):
(JSC::DFG::SpeculativeJIT::emitSwitchCharStringJump):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGTierUpCheckInjectionPhase.cpp:
(JSC::DFG::TierUpCheckInjectionPhase::run):
(JSC::DFG::TierUpCheckInjectionPhase::buildNaturalLoopToLoopHintMap):
* dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp:
(JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete):
* dfg/DFGValidate.cpp:
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* ftl/FTLForOSREntryJITCode.h:
(JSC::FTL::ForOSREntryJITCode::setBytecodeIndex):
(JSC::FTL::ForOSREntryJITCode::bytecodeIndex const):
* ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::lower):
(JSC::FTL::DFG::LowerDFGToB3::compileValueAdd):
(JSC::FTL::DFG::LowerDFGToB3::compileValueSub):
(JSC::FTL::DFG::LowerDFGToB3::compileValueMul):
(JSC::FTL::DFG::LowerDFGToB3::compileArithAddOrSub):
(JSC::FTL::DFG::LowerDFGToB3::compileValueNegate):
* ftl/FTLOSREntry.cpp:
(JSC::FTL::prepareOSREntry):
* ftl/FTLOSREntry.h:
* interpreter/CallFrame.cpp:
(JSC::CallFrame::callSiteIndex const):
(JSC::CallFrame::unsafeCallSiteIndex const):
(JSC::CallFrame::setCurrentVPC):
(JSC::CallFrame::bytecodeIndex):
(JSC::CallFrame::codeOrigin):
(JSC::CallFrame::dump):
(JSC::CallFrame::bytecodeOffset): Deleted.
* interpreter/CallFrame.h:
(JSC::CallSiteIndex::CallSiteIndex):
(JSC::CallSiteIndex::operator bool const):
(JSC::CallSiteIndex::operator== const):
(JSC::CallSiteIndex::bits const):
(JSC::CallSiteIndex::bytecodeIndex const):
(JSC::DisposableCallSiteIndex::DisposableCallSiteIndex):
(): Deleted.
* interpreter/Interpreter.cpp:
(JSC::GetStackTraceFunctor::operator() const):
(JSC::findExceptionHandler):
* interpreter/ShadowChicken.cpp:
(JSC::ShadowChicken::update):
* interpreter/StackVisitor.cpp:
(JSC::StackVisitor::readNonInlinedFrame):
(JSC::StackVisitor::readInlinedFrame):
(JSC::StackVisitor::Frame::retrieveExpressionInfo const):
(JSC::StackVisitor::Frame::dump const):
* interpreter/StackVisitor.h:
(JSC::StackVisitor::Frame::bytecodeIndex const):
(JSC::StackVisitor::Frame::bytecodeOffset const): Deleted.
* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::emitEnterOptimizationCheck):
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
(JSC::JIT::compileWithoutLinking):
(JSC::JIT::link):
(JSC::JIT::privateCompileExceptionHandlers):
* jit/JIT.h:
(JSC::CallRecord::CallRecord):
(JSC::SlowCaseEntry::SlowCaseEntry):
(JSC::SwitchRecord::SwitchRecord):
(JSC::ByValCompilationInfo::ByValCompilationInfo):
* jit/JITCall.cpp:
(JSC::JIT::compileCallEvalSlowCase):
(JSC::JIT::compileOpCall):
* jit/JITCodeMap.h:
(JSC::JITCodeMap::Entry::Entry):
(JSC::JITCodeMap::Entry::bytecodeIndex const):
(JSC::JITCodeMap::append):
(JSC::JITCodeMap::find const):
* jit/JITDisassembler.cpp:
(JSC::JITDisassembler::dumpVectorForInstructions):
(JSC::JITDisassembler::reportInstructions):
* jit/JITDisassembler.h:
* jit/JITInlines.h:
(JSC::JIT::emitNakedCall):
(JSC::JIT::emitNakedTailCall):
(JSC::JIT::updateTopCallFrame):
(JSC::JIT::linkAllSlowCasesForBytecodeIndex):
(JSC::JIT::addSlowCase):
(JSC::JIT::addJump):
(JSC::JIT::emitJumpSlowToHot):
(JSC::JIT::emitGetVirtualRegister):
(JSC::JIT::linkAllSlowCasesForBytecodeOffset): Deleted.
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_instanceof):
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_switch_imm):
(JSC::JIT::emit_op_switch_char):
(JSC::JIT::emit_op_switch_string):
(JSC::JIT::emitSlow_op_loop_hint):
(JSC::JIT::emit_op_has_indexed_property):
(JSC::JIT::emit_op_log_shadow_chicken_tail):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::emit_op_instanceof):
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_switch_imm):
(JSC::JIT::emit_op_switch_char):
(JSC::JIT::emit_op_switch_string):
(JSC::JIT::emit_op_has_indexed_property):
* jit/JITOperations.cpp:
(JSC::getByVal):
(JSC::tryGetByValOptimize):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emit_op_get_by_val):
(JSC::JIT::emitGetByValWithCachedId):
(JSC::JIT::emit_op_put_by_val):
(JSC::JIT::emitPutByValWithCachedId):
(JSC::JIT::emit_op_try_get_by_id):
(JSC::JIT::emit_op_get_by_id_direct):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_get_by_id_with_this):
(JSC::JIT::emit_op_put_by_id):
(JSC::JIT::emit_op_in_by_id):
* jit/JITWorklist.cpp:
(JSC::JITWorklist::Plan::Plan):
(JSC::JITWorklist::Plan::compileNow):
(JSC::JITWorklist::compileLater):
(JSC::JITWorklist::compileNow):
* jit/JITWorklist.h:
* jit/PCToCodeOriginMap.cpp:
(JSC::PCToCodeOriginMap::PCToCodeOriginMap):
(JSC::PCToCodeOriginMap::findPC const):
* jit/PCToCodeOriginMap.h:
(JSC::PCToCodeOriginMapBuilder::defaultCodeOrigin):
* jit/SlowPathCall.h:
(JSC::JITSlowPathCall::call):
* llint/LLIntSlowPaths.cpp:
(JSC::LLInt::jitCompileAndSetHeuristics):
(JSC::LLInt::LLINT_SLOW_PATH_DECL):
* profiler/ProfilerOrigin.cpp:
(JSC::Profiler::Origin::Origin):
(JSC::Profiler::Origin::dump const):
(JSC::Profiler::Origin::toJS const):
* profiler/ProfilerOrigin.h:
(JSC::Profiler::Origin::Origin):
(JSC::Profiler::Origin::operator! const):
(JSC::Profiler::Origin::bytecodeIndex const):
(JSC::Profiler::Origin::hash const):
(JSC::Profiler::Origin::isHashTableDeletedValue const):
* runtime/Error.cpp:
(JSC::getBytecodeIndex):
(JSC::getBytecodeOffset): Deleted.
* runtime/Error.h:
* runtime/ErrorInstance.cpp:
(JSC::appendSourceToError):
(JSC::ErrorInstance::finishCreation):
* runtime/SamplingProfiler.cpp:
(JSC::tryGetBytecodeIndex):
(JSC::SamplingProfiler::processUnverifiedStackTraces):
(JSC::SamplingProfiler::reportTopBytecodes):
* runtime/SamplingProfiler.h:
(JSC::SamplingProfiler::StackFrame::CodeLocation::hasBytecodeIndex const):
* runtime/StackFrame.cpp:
(JSC::StackFrame::StackFrame):
(JSC::StackFrame::computeLineAndColumn const):
* runtime/StackFrame.h:
(JSC::StackFrame::hasBytecodeIndex const):
(JSC::StackFrame::bytecodeIndex):
(JSC::StackFrame::hasBytecodeOffset const): Deleted.
(JSC::StackFrame::bytecodeOffset): Deleted.
* tools/VMInspector.cpp:
(JSC::VMInspector::dumpRegisters):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@251468 268f45cc-cd09-0410-ab3c-d52691b4dbfc

115 files changed:
Source/JavaScriptCore/CMakeLists.txt
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/Sources.txt
Source/JavaScriptCore/bytecode/ByValInfo.h
Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
Source/JavaScriptCore/bytecode/BytecodeIndex.cpp [new file with mode: 0644]
Source/JavaScriptCore/bytecode/BytecodeIndex.h [new file with mode: 0644]
Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
Source/JavaScriptCore/bytecode/BytecodeUseDef.h
Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
Source/JavaScriptCore/bytecode/CallLinkStatus.h
Source/JavaScriptCore/bytecode/CodeBlock.cpp
Source/JavaScriptCore/bytecode/CodeBlock.h
Source/JavaScriptCore/bytecode/CodeOrigin.cpp
Source/JavaScriptCore/bytecode/CodeOrigin.h
Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
Source/JavaScriptCore/bytecode/DFGExitProfile.h
Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
Source/JavaScriptCore/bytecode/DeferredSourceDump.h
Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
Source/JavaScriptCore/bytecode/GetByIdStatus.h
Source/JavaScriptCore/bytecode/ICStatusUtils.cpp
Source/JavaScriptCore/bytecode/ICStatusUtils.h
Source/JavaScriptCore/bytecode/InByIdStatus.cpp
Source/JavaScriptCore/bytecode/InByIdStatus.h
Source/JavaScriptCore/bytecode/InlineCallFrame.cpp
Source/JavaScriptCore/bytecode/InstanceOfStatus.cpp
Source/JavaScriptCore/bytecode/InstanceOfStatus.h
Source/JavaScriptCore/bytecode/InstructionStream.h
Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
Source/JavaScriptCore/bytecode/PutByIdStatus.h
Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
Source/JavaScriptCore/bytecode/ValueProfile.h
Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
Source/JavaScriptCore/debugger/DebuggerCallFrame.cpp
Source/JavaScriptCore/dfg/DFGBasicBlock.cpp
Source/JavaScriptCore/dfg/DFGBasicBlock.h
Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Source/JavaScriptCore/dfg/DFGCommonData.cpp
Source/JavaScriptCore/dfg/DFGCommonData.h
Source/JavaScriptCore/dfg/DFGDriver.cpp
Source/JavaScriptCore/dfg/DFGDriver.h
Source/JavaScriptCore/dfg/DFGGraph.cpp
Source/JavaScriptCore/dfg/DFGGraph.h
Source/JavaScriptCore/dfg/DFGJITCode.cpp
Source/JavaScriptCore/dfg/DFGJITCode.h
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
Source/JavaScriptCore/dfg/DFGJITCompiler.h
Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.cpp
Source/JavaScriptCore/dfg/DFGOSREntry.cpp
Source/JavaScriptCore/dfg/DFGOSREntry.h
Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
Source/JavaScriptCore/dfg/DFGOSRExit.cpp
Source/JavaScriptCore/dfg/DFGOSRExit.h
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h
Source/JavaScriptCore/dfg/DFGOperations.cpp
Source/JavaScriptCore/dfg/DFGOperations.h
Source/JavaScriptCore/dfg/DFGPlan.cpp
Source/JavaScriptCore/dfg/DFGPlan.h
Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
Source/JavaScriptCore/dfg/DFGValidate.cpp
Source/JavaScriptCore/ftl/FTLCompile.cpp
Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h
Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
Source/JavaScriptCore/ftl/FTLOSREntry.cpp
Source/JavaScriptCore/ftl/FTLOSREntry.h
Source/JavaScriptCore/interpreter/CallFrame.cpp
Source/JavaScriptCore/interpreter/CallFrame.h
Source/JavaScriptCore/interpreter/Interpreter.cpp
Source/JavaScriptCore/interpreter/ShadowChicken.cpp
Source/JavaScriptCore/interpreter/StackVisitor.cpp
Source/JavaScriptCore/interpreter/StackVisitor.h
Source/JavaScriptCore/jit/JIT.cpp
Source/JavaScriptCore/jit/JIT.h
Source/JavaScriptCore/jit/JITCall.cpp
Source/JavaScriptCore/jit/JITCodeMap.h
Source/JavaScriptCore/jit/JITDisassembler.cpp
Source/JavaScriptCore/jit/JITDisassembler.h
Source/JavaScriptCore/jit/JITInlines.h
Source/JavaScriptCore/jit/JITOpcodes.cpp
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
Source/JavaScriptCore/jit/JITOperations.cpp
Source/JavaScriptCore/jit/JITPropertyAccess.cpp
Source/JavaScriptCore/jit/JITWorklist.cpp
Source/JavaScriptCore/jit/JITWorklist.h
Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp
Source/JavaScriptCore/jit/PCToCodeOriginMap.h
Source/JavaScriptCore/jit/SlowPathCall.h
Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
Source/JavaScriptCore/profiler/ProfilerOrigin.cpp
Source/JavaScriptCore/profiler/ProfilerOrigin.h
Source/JavaScriptCore/runtime/Error.cpp
Source/JavaScriptCore/runtime/Error.h
Source/JavaScriptCore/runtime/ErrorInstance.cpp
Source/JavaScriptCore/runtime/SamplingProfiler.cpp
Source/JavaScriptCore/runtime/SamplingProfiler.h
Source/JavaScriptCore/runtime/StackFrame.cpp
Source/JavaScriptCore/runtime/StackFrame.h
Source/JavaScriptCore/tools/VMInspector.cpp

diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt
index 513dba4..ce1b8c3 100644
@@ -474,6 +474,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS
     bytecode/ArrayProfile.h
     bytecode/ByValInfo.h
     bytecode/BytecodeConventions.h
+    bytecode/BytecodeIndex.h
     bytecode/BytecodeIntrinsicRegistry.h
     bytecode/CallEdge.h
     bytecode/CallLinkInfo.h
diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index f088b95..e664e40 100644
                535557141D9D9EA5006D583B /* WasmMemory.h in Headers */ = {isa = PBXBuildFile; fileRef = 535557131D9D9EA5006D583B /* WasmMemory.h */; settings = {ATTRIBUTES = (Private, ); }; };
                535C24611F78928E006EC40E /* create_regex_tables in Headers */ = {isa = PBXBuildFile; fileRef = A718F8211178EB4B002465A7 /* create_regex_tables */; };
                535C246C1F7A1778006EC40E /* UnifiedSource136.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 535C246B1F7A1777006EC40E /* UnifiedSource136.cpp */; };
+               53663FDA23562F96005EA68C /* BytecodeIndex.h in Headers */ = {isa = PBXBuildFile; fileRef = 53663FD923562F90005EA68C /* BytecodeIndex.h */; settings = {ATTRIBUTES = (Private, ); }; };
                536B310C1F71C5990037FC33 /* UnifiedSource119.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 536B30871F71C5380037FC33 /* UnifiedSource119.cpp */; };
                536B310D1F71C5990037FC33 /* UnifiedSource125.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 536B30881F71C5380037FC33 /* UnifiedSource125.cpp */; };
                536B310E1F71C5990037FC33 /* UnifiedSource131.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 536B30891F71C5380037FC33 /* UnifiedSource131.cpp */; };
                535557151D9DFA32006D583B /* WasmMemory.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WasmMemory.cpp; sourceTree = "<group>"; };
                535C246B1F7A1777006EC40E /* UnifiedSource136.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = UnifiedSource136.cpp; path = "DerivedSources/JavaScriptCore/unified-sources/UnifiedSource136.cpp"; sourceTree = BUILT_PRODUCTS_DIR; };
                535E08C9225460AB00DF00CA /* postprocess-header-rule */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-header-rule"; sourceTree = "<group>"; };
+               5360DABB2356ADCA003F6AB8 /* BytecodeIndex.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = BytecodeIndex.cpp; sourceTree = "<group>"; };
+               53663FD923562F90005EA68C /* BytecodeIndex.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BytecodeIndex.h; sourceTree = "<group>"; };
                53696E5720A3A70200D7E01E /* BytecodeStructs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BytecodeStructs.h; sourceTree = "<group>"; };
                536B30871F71C5380037FC33 /* UnifiedSource119.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = UnifiedSource119.cpp; path = "DerivedSources/JavaScriptCore/unified-sources/UnifiedSource119.cpp"; sourceTree = BUILT_PRODUCTS_DIR; };
                536B30881F71C5380037FC33 /* UnifiedSource125.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = UnifiedSource125.cpp; path = "DerivedSources/JavaScriptCore/unified-sources/UnifiedSource125.cpp"; sourceTree = BUILT_PRODUCTS_DIR; };
                                E3D264261D38C042000BE174 /* BytecodeGeneratorification.cpp */,
                                E3D264271D38C042000BE174 /* BytecodeGeneratorification.h */,
                                E3D264281D38C042000BE174 /* BytecodeGraph.h */,
+                               5360DABB2356ADCA003F6AB8 /* BytecodeIndex.cpp */,
+                               53663FD923562F90005EA68C /* BytecodeIndex.h */,
                                7094C4DC1AE439530041A2EE /* BytecodeIntrinsicRegistry.cpp */,
                                7094C4DD1AE439530041A2EE /* BytecodeIntrinsicRegistry.h */,
                                0F2DD80A1AB3D85800BBB8E8 /* BytecodeKills.h */,
                                969A07230ED1CE3300F1F681 /* BytecodeGenerator.h in Headers */,
                                E328DAE81D38D005001A2529 /* BytecodeGeneratorification.h in Headers */,
                                E328DAE91D38D005001A2529 /* BytecodeGraph.h in Headers */,
+                               53663FDA23562F96005EA68C /* BytecodeIndex.h in Headers */,
                                7094C4DF1AE439530041A2EE /* BytecodeIntrinsicRegistry.h in Headers */,
                                0F2DD80B1AB3D85800BBB8E8 /* BytecodeKills.h in Headers */,
                                C2FCAE1317A9C24E0034C735 /* BytecodeLivenessAnalysis.h in Headers */,
diff --git a/Source/JavaScriptCore/Sources.txt b/Source/JavaScriptCore/Sources.txt
index 7ced68b..c328fba 100644
@@ -197,6 +197,7 @@ bytecode/ArrayProfile.cpp
 bytecode/BytecodeBasicBlock.cpp
 bytecode/BytecodeDumper.cpp
 bytecode/BytecodeGeneratorification.cpp
+bytecode/BytecodeIndex.cpp
 bytecode/BytecodeIntrinsicRegistry.cpp
 bytecode/BytecodeLivenessAnalysis.cpp
 bytecode/BytecodeRewriter.cpp
diff --git a/Source/JavaScriptCore/bytecode/ByValInfo.h b/Source/JavaScriptCore/bytecode/ByValInfo.h
index 3399d58..b7e9379 100644
@@ -225,7 +225,7 @@ inline JITArrayMode jitArrayModeForStructure(Structure* structure)
 struct ByValInfo {
     ByValInfo() { }
 
-    ByValInfo(unsigned bytecodeIndex, CodeLocationJump<JSInternalPtrTag> notIndexJump, CodeLocationJump<JSInternalPtrTag> badTypeJump, CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler, JITArrayMode arrayMode, ArrayProfile* arrayProfile, CodeLocationLabel<JSInternalPtrTag> badTypeDoneTarget, CodeLocationLabel<JSInternalPtrTag> badTypeNextHotPathTarget, CodeLocationLabel<JSInternalPtrTag> slowPathTarget)
+    ByValInfo(BytecodeIndex bytecodeIndex, CodeLocationJump<JSInternalPtrTag> notIndexJump, CodeLocationJump<JSInternalPtrTag> badTypeJump, CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler, JITArrayMode arrayMode, ArrayProfile* arrayProfile, CodeLocationLabel<JSInternalPtrTag> badTypeDoneTarget, CodeLocationLabel<JSInternalPtrTag> badTypeNextHotPathTarget, CodeLocationLabel<JSInternalPtrTag> slowPathTarget)
         : notIndexJump(notIndexJump)
         , badTypeJump(badTypeJump)
         , exceptionHandler(exceptionHandler)
@@ -249,7 +249,7 @@ struct ByValInfo {
     CodeLocationLabel<JSInternalPtrTag> badTypeNextHotPathTarget;
     CodeLocationLabel<JSInternalPtrTag> slowPathTarget;
     ArrayProfile* arrayProfile;
-    unsigned bytecodeIndex;
+    BytecodeIndex bytecodeIndex;
     unsigned slowPathCount;
     RefPtr<JITStubRoutine> stubRoutine;
     Identifier cachedId;
@@ -260,7 +260,7 @@ struct ByValInfo {
     bool seen : 1;
 };
 
-inline unsigned getByValInfoBytecodeIndex(ByValInfo* info)
+inline BytecodeIndex getByValInfoBytecodeIndex(ByValInfo* info)
 {
     return info->bytecodeIndex;
 }
diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
index 10c65bb..02d644b 100644
@@ -127,7 +127,7 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, const InstructionStream&
             // block because the finally block will create its own catch, which will generate a HandlerInfo.
             if (isThrow(opcodeID)) {
                 ASSERT(bytecodeOffset + instruction->size() == block->leaderOffset() + block->totalLength());
-                auto* handler = codeBlock->handlerForBytecodeOffset(instruction.offset());
+                auto* handler = codeBlock->handlerForBytecodeIndex(BytecodeIndex(instruction.offset()));
                 fallsThrough = false;
                 if (!handler) {
                     linkBlocks(block, exit.get());
diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
index 286051c..43ce561 100644
@@ -200,7 +200,7 @@ public:
         runLivenessFixpoint(codeBlock, instructions, m_generatorification.graph());
 
         for (YieldData& data : m_generatorification.yields())
-            data.liveness = getLivenessInfoAtBytecodeOffset(codeBlock, instructions, m_generatorification.graph(), m_generatorification.instructions().at(data.point).next().offset());
+            data.liveness = getLivenessInfoAtBytecodeIndex(codeBlock, instructions, m_generatorification.graph(), BytecodeIndex(m_generatorification.instructions().at(data.point).next().offset()));
     }
 
 private:
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIndex.cpp b/Source/JavaScriptCore/bytecode/BytecodeIndex.cpp
new file mode 100644
index 0000000..17a385b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeIndex.cpp
@@ -0,0 +1,36 @@
+/*
+* Copyright (C) 2019 Apple Inc. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+*    notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+*    notice, this list of conditions and the following disclaimer in the
+*    documentation and/or other materials provided with the distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "config.h"
+#include "BytecodeIndex.h"
+
+namespace JSC {
+
+void BytecodeIndex::dump(WTF::PrintStream& out) const
+{
+    out.print("bc#", offset());
+}
+
+} // namespace JSC
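
Aside: since WTF's printing machinery, by default, forwards to a
class's dump(PrintStream&) member, a BytecodeIndex should be printable
directly through the usual logging helpers. A sketch of the expected
behavior (assuming wtf/DataLog.h):

    dataLogLn(BytecodeIndex(42)); // expected to print "bc#42"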
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIndex.h b/Source/JavaScriptCore/bytecode/BytecodeIndex.h
new file mode 100644
index 0000000..4ead855
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeIndex.h
@@ -0,0 +1,98 @@
+/*
+* Copyright (C) 2019 Apple Inc. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+*    notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+*    notice, this list of conditions and the following disclaimer in the
+*    documentation and/or other materials provided with the distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#pragma once
+
+#include <wtf/HashTraits.h>
+
+namespace WTF {
+class PrintStream;
+}
+
+namespace JSC {
+
+class BytecodeIndex {
+public:
+    BytecodeIndex() = default;
+    explicit BytecodeIndex(uint32_t bytecodeOffset)
+        : m_offset(bytecodeOffset)
+    { }
+
+    uint32_t offset() const { return m_offset; }
+    uint32_t asBits() const { return m_offset; }
+
+    unsigned hash() const { return WTF::intHash(m_offset); }
+    static BytecodeIndex deletedValue() { return fromBits(invalidOffset - 1); }
+    bool isHashTableDeletedValue() const { return *this == deletedValue(); }
+
+    static BytecodeIndex fromBits(uint32_t bits);
+
+    // Comparison operators.
+    explicit operator bool() const { return m_offset != invalidOffset && m_offset != deletedValue().offset(); }
+    bool operator ==(const BytecodeIndex& other) const { return asBits() == other.asBits(); }
+    bool operator !=(const BytecodeIndex& other) const { return !(*this == other); }
+
+    bool operator <(const BytecodeIndex& other) const { return asBits() < other.asBits(); }
+    bool operator >(const BytecodeIndex& other) const { return asBits() > other.asBits(); }
+    bool operator <=(const BytecodeIndex& other) const { return asBits() <= other.asBits(); }
+    bool operator >=(const BytecodeIndex& other) const { return asBits() >= other.asBits(); }
+
+
+    void dump(WTF::PrintStream&) const;
+
+private:
+    static constexpr uint32_t invalidOffset = std::numeric_limits<uint32_t>::max();
+
+    uint32_t m_offset { invalidOffset };
+};
+
+inline BytecodeIndex BytecodeIndex::fromBits(uint32_t bits)
+{
+    BytecodeIndex result;
+    result.m_offset = bits;
+    return result;
+}
+
+struct BytecodeIndexHash {
+    static unsigned hash(const BytecodeIndex& key) { return key.hash(); }
+    static bool equal(const BytecodeIndex& a, const BytecodeIndex& b) { return a == b; }
+    static constexpr bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::BytecodeIndex> {
+    typedef JSC::BytecodeIndexHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::BytecodeIndex> : SimpleClassHashTraits<JSC::BytecodeIndex> {
+    static constexpr bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
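The new header is the core of the patch: BytecodeIndex is a strongly typed wrapper over the unsigned byte offset, reserving UINT32_MAX as the empty value and UINT32_MAX - 1 as the hash-table deleted value so that it can serve directly as a hash key, and it dumps in the familiar "bc#" notation. As a minimal standalone sketch of the same idea, assuming std::unordered_map in place of WTF::HashMap so it compiles outside WebKit:

#include <cstdint>
#include <iostream>
#include <limits>
#include <unordered_map>

// Stand-in for JSC::BytecodeIndex; the real class additionally reserves
// UINT32_MAX - 1 as a hash-table deleted value and plugs into WTF's hash traits.
class Index {
public:
    Index() = default;
    explicit Index(uint32_t offset) : m_offset(offset) { }

    uint32_t offset() const { return m_offset; }
    explicit operator bool() const { return m_offset != invalidOffset; }
    bool operator==(Index other) const { return m_offset == other.m_offset; }

private:
    static constexpr uint32_t invalidOffset = std::numeric_limits<uint32_t>::max();
    uint32_t m_offset { invalidOffset }; // default-constructed means "no index"
};

struct IndexHash {
    size_t operator()(Index index) const { return std::hash<uint32_t>()(index.offset()); }
};

int main()
{
    std::unordered_map<Index, const char*, IndexHash> opcodeAt;
    opcodeAt[Index(0)] = "op_enter";
    opcodeAt[Index(7)] = "op_add";
    std::cout << "bc#" << Index(7).offset() << " -> " << opcodeAt[Index(7)] << "\n"; // bc#7 -> op_add
}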
index 330c7c7..a1c2821 100644
@@ -46,20 +46,20 @@ BytecodeLivenessAnalysis::BytecodeLivenessAnalysis(CodeBlock* codeBlock)
         dumpResults(codeBlock);
 }
 
-void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, FastBitVector& result)
+void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeIndex(CodeBlock* codeBlock, BytecodeIndex bytecodeIndex, FastBitVector& result)
 {
-    BytecodeBasicBlock* block = m_graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
+    BytecodeBasicBlock* block = m_graph.findBasicBlockForBytecodeOffset(bytecodeIndex.offset());
     ASSERT(block);
     ASSERT(!block->isEntryBlock());
     ASSERT(!block->isExitBlock());
     result.resize(block->out().numBits());
-    computeLocalLivenessForBytecodeOffset(codeBlock, codeBlock->instructions(), m_graph, block, bytecodeOffset, result);
+    computeLocalLivenessForBytecodeIndex(codeBlock, codeBlock->instructions(), m_graph, block, bytecodeIndex, result);
 }
 
-FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset)
+FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeIndex(CodeBlock* codeBlock, BytecodeIndex bytecodeIndex)
 {
     FastBitVector out;
-    getLivenessInfoAtBytecodeOffset(codeBlock, bytecodeOffset, out);
+    getLivenessInfoAtBytecodeIndex(codeBlock, bytecodeIndex, out);
     return out;
 }
 
@@ -77,7 +77,7 @@ void BytecodeLivenessAnalysis::computeFullLiveness(CodeBlock* codeBlock, FullByt
         
         for (unsigned i = block->offsets().size(); i--;) {
             unsigned bytecodeOffset = block->offsets()[i];
-            stepOverInstruction(codeBlock, codeBlock->instructions(), m_graph, bytecodeOffset, out);
+            stepOverInstruction(codeBlock, codeBlock->instructions(), m_graph, BytecodeIndex(bytecodeOffset), out);
             result.m_map[bytecodeOffset] = out;
         }
     }
@@ -100,7 +100,7 @@ void BytecodeLivenessAnalysis::computeKills(CodeBlock* codeBlock, BytecodeKills&
         for (unsigned i = block->offsets().size(); i--;) {
             unsigned bytecodeOffset = block->offsets()[i];
             stepOverInstruction(
-                codeBlock, codeBlock->instructions(), m_graph, bytecodeOffset,
+                codeBlock, codeBlock->instructions(), m_graph, BytecodeIndex(bytecodeOffset),
                 [&] (unsigned index) {
                     // This is for uses.
                     if (out[index])
@@ -170,7 +170,7 @@ void BytecodeLivenessAnalysis::dumpResults(CodeBlock* codeBlock)
             const auto currentInstruction = instructions.at(bytecodeOffset);
 
             dataLogF("Live variables:");
-            FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(codeBlock, bytecodeOffset);
+            FastBitVector liveBefore = getLivenessInfoAtBytecodeIndex(codeBlock, BytecodeIndex(bytecodeOffset));
             dumpBitVector(liveBefore);
             dataLogF("\n");
             codeBlock->dumpBytecode(WTF::dataFile(), currentInstruction);
index 7bef4cf..eac185d 100644
@@ -37,15 +37,15 @@ class FullBytecodeLiveness;
 
 class BytecodeLivenessPropagation {
 protected:
-    template<typename CodeBlockType, typename UseFunctor, typename DefFunctor> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, InstructionStream::Offset bytecodeOffset, const UseFunctor&, const DefFunctor&);
+    template<typename CodeBlockType, typename UseFunctor, typename DefFunctor> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, BytecodeIndex, const UseFunctor&, const DefFunctor&);
 
-    template<typename CodeBlockType> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, InstructionStream::Offset bytecodeOffset, FastBitVector& out);
+    template<typename CodeBlockType> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, BytecodeIndex, FastBitVector& out);
 
-    template<typename CodeBlockType, typename Instructions> bool computeLocalLivenessForBytecodeOffset(CodeBlockType*, const Instructions&, BytecodeGraph&, BytecodeBasicBlock*, unsigned targetOffset, FastBitVector& result);
+    template<typename CodeBlockType, typename Instructions> bool computeLocalLivenessForBytecodeIndex(CodeBlockType*, const Instructions&, BytecodeGraph&, BytecodeBasicBlock*, BytecodeIndex, FastBitVector& result);
 
     template<typename CodeBlockType, typename Instructions> bool computeLocalLivenessForBlock(CodeBlockType*, const Instructions&, BytecodeGraph&, BytecodeBasicBlock*);
 
-    template<typename CodeBlockType, typename Instructions> FastBitVector getLivenessInfoAtBytecodeOffset(CodeBlockType*, const Instructions&, BytecodeGraph&, unsigned bytecodeOffset);
+    template<typename CodeBlockType, typename Instructions> FastBitVector getLivenessInfoAtBytecodeIndex(CodeBlockType*, const Instructions&, BytecodeGraph&, BytecodeIndex);
 
     template<typename CodeBlockType, typename Instructions> void runLivenessFixpoint(CodeBlockType*, const Instructions&, BytecodeGraph&);
 };
@@ -57,7 +57,7 @@ public:
     friend class BytecodeLivenessPropagation;
     BytecodeLivenessAnalysis(CodeBlock*);
     
-    FastBitVector getLivenessInfoAtBytecodeOffset(CodeBlock*, unsigned bytecodeOffset);
+    FastBitVector getLivenessInfoAtBytecodeIndex(CodeBlock*, BytecodeIndex);
     
     void computeFullLiveness(CodeBlock*, FullBytecodeLiveness& result);
     void computeKills(CodeBlock*, BytecodeKills& result);
@@ -65,7 +65,7 @@ public:
 private:
     void dumpResults(CodeBlock*);
 
-    void getLivenessInfoAtBytecodeOffset(CodeBlock*, unsigned bytecodeOffset, FastBitVector&);
+    void getLivenessInfoAtBytecodeIndex(CodeBlock*, BytecodeIndex, FastBitVector&);
 
     BytecodeGraph m_graph;
 };
index 71ff56f..7a90032 100644
@@ -61,7 +61,7 @@ inline bool isValidRegisterForLiveness(VirtualRegister operand)
 // Simplified interface to bytecode use/def, which determines defs first and then uses, and includes
 // exception handlers in the uses.
 template<typename CodeBlockType, typename UseFunctor, typename DefFunctor>
-inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, InstructionStream::Offset bytecodeOffset, const UseFunctor& use, const DefFunctor& def)
+inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, BytecodeIndex bytecodeIndex, const UseFunctor& use, const DefFunctor& def)
 {
     // This abstractly executes the instruction in reverse. Instructions logically first use operands and
     // then define operands. This logical ordering is necessary for operations that use and def the same
@@ -78,17 +78,17 @@ inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* code
     // uses before defs, then the add operation above would appear to not have loc1 live, since we'd
     // first add it to the out set (the use), and then we'd remove it (the def).
 
-    auto* instruction = instructions.at(bytecodeOffset).ptr();
+    auto* instruction = instructions.at(bytecodeIndex).ptr();
     OpcodeID opcodeID = instruction->opcodeID();
 
-    computeDefsForBytecodeOffset(
+    computeDefsForBytecodeIndex(
         codeBlock, opcodeID, instruction,
         [&] (VirtualRegister operand) {
             if (isValidRegisterForLiveness(operand))
                 def(operand.toLocal());
         });
 
-    computeUsesForBytecodeOffset(
+    computeUsesForBytecodeIndex(
         codeBlock, opcodeID, instruction,
         [&] (VirtualRegister operand) {
             if (isValidRegisterForLiveness(operand))
@@ -97,7 +97,7 @@ inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* code
 
     // If we have an exception handler, we want the live-in variables of the 
     // exception handler block to be included in the live-in of this particular bytecode.
-    if (auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
+    if (auto* handler = codeBlock->handlerForBytecodeIndex(bytecodeIndex)) {
         BytecodeBasicBlock* handlerBlock = graph.findBasicBlockWithLeaderOffset(handler->target);
         ASSERT(handlerBlock);
         handlerBlock->in().forEachSetBit(use);
@@ -105,10 +105,10 @@ inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* code
 }
 
 template<typename CodeBlockType>
-inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, InstructionStream::Offset bytecodeOffset, FastBitVector& out)
+inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, BytecodeIndex bytecodeIndex, FastBitVector& out)
 {
     stepOverInstruction(
-        codeBlock, instructions, graph, bytecodeOffset,
+        codeBlock, instructions, graph, bytecodeIndex,
         [&] (unsigned bitIndex) {
             // This is the use functor, so we set the bit.
             out[bitIndex] = true;
@@ -120,7 +120,7 @@ inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* code
 }
 
 template<typename CodeBlockType, typename Instructions>
-inline bool BytecodeLivenessPropagation::computeLocalLivenessForBytecodeOffset(CodeBlockType* codeBlock, const Instructions& instructions, BytecodeGraph& graph, BytecodeBasicBlock* block, unsigned targetOffset, FastBitVector& result)
+inline bool BytecodeLivenessPropagation::computeLocalLivenessForBytecodeIndex(CodeBlockType* codeBlock, const Instructions& instructions, BytecodeGraph& graph, BytecodeBasicBlock* block, BytecodeIndex targetIndex, FastBitVector& result)
 {
     ASSERT(!block->isExitBlock());
     ASSERT(!block->isEntryBlock());
@@ -129,9 +129,9 @@ inline bool BytecodeLivenessPropagation::computeLocalLivenessForBytecodeOffset(C
 
     for (int i = block->offsets().size() - 1; i >= 0; i--) {
         unsigned bytecodeOffset = block->offsets()[i];
-        if (targetOffset > bytecodeOffset)
+        if (targetIndex.offset() > bytecodeOffset)
             break;
-        stepOverInstruction(codeBlock, instructions, graph, bytecodeOffset, out);
+        stepOverInstruction(codeBlock, instructions, graph, BytecodeIndex(bytecodeOffset), out);
     }
 
     return result.setAndCheck(out);
@@ -142,19 +142,19 @@ inline bool BytecodeLivenessPropagation::computeLocalLivenessForBlock(CodeBlockT
 {
     if (block->isExitBlock() || block->isEntryBlock())
         return false;
-    return computeLocalLivenessForBytecodeOffset(codeBlock, instructions, graph, block, block->leaderOffset(), block->in());
+    return computeLocalLivenessForBytecodeIndex(codeBlock, instructions, graph, block, BytecodeIndex(block->leaderOffset()), block->in());
 }
 
 template<typename CodeBlockType, typename Instructions>
-inline FastBitVector BytecodeLivenessPropagation::getLivenessInfoAtBytecodeOffset(CodeBlockType* codeBlock, const Instructions& instructions, BytecodeGraph& graph, unsigned bytecodeOffset)
+inline FastBitVector BytecodeLivenessPropagation::getLivenessInfoAtBytecodeIndex(CodeBlockType* codeBlock, const Instructions& instructions, BytecodeGraph& graph, BytecodeIndex bytecodeIndex)
 {
-    BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
+    BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(bytecodeIndex.offset());
     ASSERT(block);
     ASSERT(!block->isEntryBlock());
     ASSERT(!block->isExitBlock());
     FastBitVector out;
     out.resize(block->out().numBits());
-    computeLocalLivenessForBytecodeOffset(codeBlock, instructions, graph, block, bytecodeOffset, out);
+    computeLocalLivenessForBytecodeIndex(codeBlock, instructions, graph, block, bytecodeIndex, out);
     return out;
 }
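The renamed helpers above keep the same backward dataflow: starting from a block's live-out set, each instruction is stepped over in reverse, clearing its defs before setting its uses, so an operand that is both used and defined (loc1 in "add loc1, loc1, loc2") correctly stays live-in. A toy sketch of that single step, with a hypothetical instruction shape standing in for JSC's InstructionStream:

#include <bitset>
#include <iostream>
#include <vector>

// Hypothetical instruction: lists of local indices it reads and writes.
struct ToyInstruction {
    std::vector<unsigned> uses;
    std::vector<unsigned> defs;
};

// Walking backward, defs kill liveness first, then uses revive it.
void stepOverInstruction(const ToyInstruction& inst, std::bitset<8>& live)
{
    for (unsigned local : inst.defs)
        live[local] = false;
    for (unsigned local : inst.uses)
        live[local] = true;
}

int main()
{
    std::bitset<8> live;
    live[1] = true; // loc1 is live after the instruction
    ToyInstruction add { /* uses */ {1, 2}, /* defs */ {1} };
    stepOverInstruction(add, live);
    std::cout << "loc1: " << live[1] << ", loc2: " << live[2] << "\n"; // both live-in: 1, 1
}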
 
index 022f3bf..ca33479 100644
@@ -46,7 +46,7 @@ namespace JSC {
 #define DEFS USES_OR_DEFS
 
 template<typename Block, typename Functor>
-void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor)
+void computeUsesForBytecodeIndex(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor)
 {
     if (opcodeID != op_enter && (codeBlock->wasCompiledWithDebuggingOpcodes() || codeBlock->usesEval()) && codeBlock->scopeRegister().isValid())
         functor(codeBlock->scopeRegister());
@@ -297,7 +297,7 @@ void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Ins
 }
 
 template<typename Block, typename Functor>
-void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor)
+void computeDefsForBytecodeIndex(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor)
 {
     switch (opcodeID) {
     case op_wide16:
index 9a000fc..f13d173 100644
@@ -55,7 +55,7 @@ CallLinkStatus::CallLinkStatus(JSValue value)
     m_variants.append(CallVariant(value.asCell()));
 }
 
-CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker&, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker&, CodeBlock* profiledBlock, BytecodeIndex bytecodeIndex)
 {
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
@@ -67,7 +67,7 @@ CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker&, CodeB
     }
 #endif
 
-    auto instruction = profiledBlock->instructions().at(bytecodeIndex);
+    auto instruction = profiledBlock->instructions().at(bytecodeIndex.offset());
     OpcodeID op = instruction->opcodeID();
 
     LLIntCallLinkInfo* callLinkInfo;
@@ -90,7 +90,7 @@ CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker&, CodeB
 }
 
 CallLinkStatus CallLinkStatus::computeFor(
-    CodeBlock* profiledBlock, unsigned bytecodeIndex, const ICStatusMap& map,
+    CodeBlock* profiledBlock, BytecodeIndex bytecodeIndex, const ICStatusMap& map,
     ExitSiteData exitSiteData)
 {
     ConcurrentJSLocker locker(profiledBlock->m_lock);
@@ -114,12 +114,12 @@ CallLinkStatus CallLinkStatus::computeFor(
 }
 
 CallLinkStatus CallLinkStatus::computeFor(
-    CodeBlock* profiledBlock, unsigned bytecodeIndex, const ICStatusMap& map)
+    CodeBlock* profiledBlock, BytecodeIndex bytecodeIndex, const ICStatusMap& map)
 {
     return computeFor(profiledBlock, bytecodeIndex, map, computeExitSiteData(profiledBlock, bytecodeIndex));
 }
 
-CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(CodeBlock* profiledBlock, BytecodeIndex bytecodeIndex)
 {
     ExitSiteData exitSiteData;
 #if ENABLE(DFG_JIT)
index 0a101ca..468a3db 100644
@@ -66,10 +66,10 @@ public:
         ExitFlag takesSlowPath;
         ExitFlag badFunction;
     };
-    static ExitSiteData computeExitSiteData(CodeBlock*, unsigned bytecodeIndex);
+    static ExitSiteData computeExitSiteData(CodeBlock*, BytecodeIndex);
     
-    static CallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex, const ICStatusMap&, ExitSiteData);
-    static CallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex, const ICStatusMap&);
+    static CallLinkStatus computeFor(CodeBlock*, BytecodeIndex, const ICStatusMap&, ExitSiteData);
+    static CallLinkStatus computeFor(CodeBlock*, BytecodeIndex, const ICStatusMap&);
 
 #if ENABLE(JIT)
     // Computes the status assuming that we never took slow path and never previously
@@ -117,7 +117,7 @@ public:
 private:
     void makeClosureCall();
     
-    static CallLinkStatus computeFromLLInt(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+    static CallLinkStatus computeFromLLInt(const ConcurrentJSLocker&, CodeBlock*, BytecodeIndex);
 #if ENABLE(JIT)
     static CallLinkStatus computeFromCallLinkInfo(
         const ConcurrentJSLocker&, CallLinkInfo&);
index 4e947a3..7e21168 100644
@@ -1526,7 +1526,7 @@ CallLinkInfo* CodeBlock::addCallLinkInfo()
     return ensureJITData(locker).m_callLinkInfos.add();
 }
 
-CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
+CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(BytecodeIndex index)
 {
     ConcurrentJSLocker locker(m_lock);
     if (auto* jitData = m_jitData.get()) {
@@ -1538,27 +1538,27 @@ CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
     return nullptr;
 }
 
-RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
+RareCaseProfile* CodeBlock::addRareCaseProfile(BytecodeIndex bytecodeIndex)
 {
     ConcurrentJSLocker locker(m_lock);
     auto& jitData = ensureJITData(locker);
-    jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+    jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeIndex));
     return &jitData.m_rareCaseProfiles.last();
 }
 
-RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
+RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex bytecodeIndex)
 {
     if (auto* jitData = m_jitData.get()) {
-        return tryBinarySearch<RareCaseProfile, int>(
-            jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
-            getRareCaseProfileBytecodeOffset);
+        return tryBinarySearch<RareCaseProfile, BytecodeIndex>(
+            jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeIndex,
+            getRareCaseProfileBytecodeIndex);
     }
     return nullptr;
 }
 
-unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
+unsigned CodeBlock::rareCaseProfileCountForBytecodeIndex(const ConcurrentJSLocker& locker, BytecodeIndex bytecodeIndex)
 {
-    RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
+    RareCaseProfile* profile = rareCaseProfileForBytecodeIndex(locker, bytecodeIndex);
     if (profile)
         return profile->m_counter;
     return 0;
@@ -1730,10 +1730,10 @@ bool CodeBlock::hasOptimizedReplacement()
 }
 #endif
 
-HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
+HandlerInfo* CodeBlock::handlerForBytecodeIndex(BytecodeIndex bytecodeIndex, RequiredHandler requiredHandler)
 {
-    RELEASE_ASSERT(bytecodeOffset < instructions().size());
-    return handlerForIndex(bytecodeOffset, requiredHandler);
+    RELEASE_ASSERT(bytecodeIndex.offset() < instructions().size());
+    return handlerForIndex(bytecodeIndex.offset(), requiredHandler);
 }
 
 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
@@ -1762,9 +1762,9 @@ DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteInd
 
 
 
-void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
+void CodeBlock::ensureCatchLivenessIsComputedForBytecodeIndex(BytecodeIndex bytecodeIndex)
 {
-    auto& instruction = instructions().at(bytecodeOffset);
+    auto& instruction = instructions().at(bytecodeIndex);
     OpCatch op = instruction->as<OpCatch>();
     auto& metadata = op.metadata(this);
     if (!!metadata.m_buffer) {
@@ -1784,10 +1784,10 @@ void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream
         return;
     }
 
-    ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
+    ensureCatchLivenessIsComputedForBytecodeIndexSlow(op, bytecodeIndex);
 }
 
-void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
+void CodeBlock::ensureCatchLivenessIsComputedForBytecodeIndexSlow(const OpCatch& op, BytecodeIndex bytecodeIndex)
 {
     BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
 
@@ -1796,8 +1796,8 @@ void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch
     // we can avoid profiling them and extracting them when doing OSR entry
     // into the DFG.
 
-    auto nextOffset = instructions().at(bytecodeOffset).next().offset();
-    FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
+    auto nextOffset = instructions().at(bytecodeIndex).next().offset();
+    FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeIndex(this, BytecodeIndex(nextOffset));
     Vector<VirtualRegister> liveOperands;
     liveOperands.reserveInitialCapacity(liveLocals.bitCount());
     liveLocals.forEachSetBit([&] (unsigned liveLocal) {
@@ -1842,26 +1842,26 @@ void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSi
     RELEASE_ASSERT_NOT_REACHED();
 }
 
-unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
+unsigned CodeBlock::lineNumberForBytecodeIndex(BytecodeIndex bytecodeIndex)
 {
-    RELEASE_ASSERT(bytecodeOffset < instructions().size());
-    return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+    RELEASE_ASSERT(bytecodeIndex.offset() < instructions().size());
+    return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeIndex(bytecodeIndex);
 }
 
-unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
+unsigned CodeBlock::columnNumberForBytecodeIndex(BytecodeIndex bytecodeIndex)
 {
     int divot;
     int startOffset;
     int endOffset;
     unsigned line;
     unsigned column;
-    expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+    expressionRangeForBytecodeIndex(bytecodeIndex, divot, startOffset, endOffset, line, column);
     return column;
 }
 
-void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
+void CodeBlock::expressionRangeForBytecodeIndex(BytecodeIndex bytecodeIndex, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
 {
-    m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+    m_unlinkedCode->expressionRangeForBytecodeIndex(bytecodeIndex, divot, startOffset, endOffset, line, column);
     divot += sourceOffset();
     column += line ? 1 : firstLineColumnOffset();
     line += ownerExecutable()->firstLine();
@@ -1875,7 +1875,7 @@ bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, Optional<unsigned> col
             int unused;
             unsigned opDebugLine;
             unsigned opDebugColumn;
-            expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
+            expressionRangeForBytecodeIndex(it.index(), unused, unused, unused, opDebugLine, opDebugColumn);
             if (line == opDebugLine && (!column || column == opDebugColumn))
                 return true;
         }
@@ -2623,9 +2623,9 @@ bool CodeBlock::shouldReoptimizeFromLoopNow()
 }
 #endif
 
-ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, BytecodeIndex bytecodeIndex)
 {
-    auto instruction = instructions().at(bytecodeOffset);
+    auto instruction = instructions().at(bytecodeIndex);
     switch (instruction->opcodeID()) {
 #define CASE1(Op) \
     case Op::opcodeID: \
@@ -2655,10 +2655,10 @@ ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned byt
     return nullptr;
 }
 
-ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(BytecodeIndex bytecodeIndex)
 {
     ConcurrentJSLocker locker(m_lock);
-    return getArrayProfile(locker, bytecodeOffset);
+    return getArrayProfile(locker, bytecodeIndex);
 }
 
 #if ENABLE(DFG_JIT)
@@ -2966,9 +2966,9 @@ String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
     return emptyString();
 }
 
-ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
+ValueProfile* CodeBlock::tryGetValueProfileForBytecodeIndex(BytecodeIndex bytecodeIndex)
 {
-    auto instruction = instructions().at(bytecodeOffset);
+    auto instruction = instructions().at(bytecodeIndex);
     switch (instruction->opcodeID()) {
 
 #define CASE(Op) \
@@ -2985,23 +2985,23 @@ ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
     }
 }
 
-SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
+SpeculatedType CodeBlock::valueProfilePredictionForBytecodeIndex(const ConcurrentJSLocker& locker, BytecodeIndex bytecodeIndex)
 {
-    if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
+    if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeIndex(bytecodeIndex))
         return valueProfile->computeUpdatedPrediction(locker);
     return SpecNone;
 }
 
-ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
+ValueProfile& CodeBlock::valueProfileForBytecodeIndex(BytecodeIndex bytecodeIndex)
 {
-    return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
+    return *tryGetValueProfileForBytecodeIndex(bytecodeIndex);
 }
 
 void CodeBlock::validate()
 {
     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
     
-    FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
+    FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeIndex(this, BytecodeIndex(0));
     
     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
         beginValidationDidFail();
@@ -3025,7 +3025,7 @@ void CodeBlock::validate()
     const InstructionStream& instructionStream = instructions();
     for (const auto& instruction : instructionStream) {
         OpcodeID opcode = instruction->opcodeID();
-        if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
+        if (!!baselineAlternative()->handlerForBytecodeIndex(BytecodeIndex(instruction.offset()))) {
             if (opcode == op_catch || opcode == op_enter) {
                 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
                 // inside of a try block because they are responsible for bootstrapping state. And they
@@ -3083,9 +3083,9 @@ const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
     return instructions().at(offset + target).ptr();
 }
 
-ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
+ArithProfile* CodeBlock::arithProfileForBytecodeIndex(BytecodeIndex bytecodeIndex)
 {
-    return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
+    return arithProfileForPC(instructions().at(bytecodeIndex.offset()).ptr());
 }
 
 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
@@ -3108,11 +3108,11 @@ ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
     return nullptr;
 }
 
-bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
+bool CodeBlock::couldTakeSpecialArithFastCase(BytecodeIndex bytecodeIndex)
 {
     if (!hasBaselineJITProfiling())
         return false;
-    ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
+    ArithProfile* profile = arithProfileForBytecodeIndex(bytecodeIndex);
     if (!profile)
         return false;
     return profile->tookSpecialFastPath();
@@ -3231,28 +3231,28 @@ Optional<CodeOrigin> CodeBlock::findPC(void* pc)
 }
 #endif // ENABLE(JIT)
 
-Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
+Optional<BytecodeIndex> CodeBlock::bytecodeIndexFromCallSiteIndex(CallSiteIndex callSiteIndex)
 {
-    Optional<unsigned> bytecodeOffset;
+    Optional<BytecodeIndex> bytecodeIndex;
     JITType jitType = this->jitType();
     if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
 #if USE(JSVALUE64)
-        bytecodeOffset = callSiteIndex.bits();
+        bytecodeIndex = callSiteIndex.bytecodeIndex();
 #else
         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
-        bytecodeOffset = this->bytecodeOffset(instruction);
+        bytecodeIndex = this->bytecodeIndex(instruction);
 #endif
     } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
 #if ENABLE(DFG_JIT)
         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
         CodeOrigin origin = codeOrigin(callSiteIndex);
-        bytecodeOffset = origin.bytecodeIndex();
+        bytecodeIndex = origin.bytecodeIndex();
 #else
         RELEASE_ASSERT_NOT_REACHED();
 #endif
     }
 
-    return bytecodeOffset;
+    return bytecodeIndex;
 }
 
 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
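The rareCaseProfileForBytecodeIndex() hunk above retypes the lookup without changing its strategy: profiles are appended in bytecode order and located by binary search on the index. A self-contained sketch of that lookup, using std::lower_bound in place of WTF's tryBinarySearch and a simplified RareCaseProfile standing in for the JSC struct:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct RareCaseProfile {
    uint32_t bytecodeOffset; // the key the real code extracts via getRareCaseProfileBytecodeIndex
    uint32_t counter;
};

RareCaseProfile* rareCaseProfileForOffset(std::vector<RareCaseProfile>& profiles, uint32_t offset)
{
    // Profiles are kept sorted by bytecode offset, so a binary search suffices.
    auto it = std::lower_bound(profiles.begin(), profiles.end(), offset,
        [](const RareCaseProfile& p, uint32_t key) { return p.bytecodeOffset < key; });
    if (it != profiles.end() && it->bytecodeOffset == offset)
        return &*it;
    return nullptr;
}

int main()
{
    std::vector<RareCaseProfile> profiles { {3, 1}, {9, 40}, {21, 2} };
    if (auto* p = rareCaseProfileForOffset(profiles, 9))
        std::cout << "bc#9 slow-case count: " << p->counter << "\n"; // 40
}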
index d200fd5..996a87d 100644
@@ -239,15 +239,15 @@ public:
         return index >= m_numVars;
     }
 
-    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+    HandlerInfo* handlerForBytecodeIndex(BytecodeIndex, RequiredHandler = RequiredHandler::AnyHandler);
     HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
     void removeExceptionHandlerForCallSite(DisposableCallSiteIndex);
-    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
-    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
-    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
+    unsigned lineNumberForBytecodeIndex(BytecodeIndex);
+    unsigned columnNumberForBytecodeIndex(BytecodeIndex);
+    void expressionRangeForBytecodeIndex(BytecodeIndex, int& divot,
         int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
 
-    Optional<unsigned> bytecodeOffsetFromCallSiteIndex(CallSiteIndex);
+    Optional<BytecodeIndex> bytecodeIndexFromCallSiteIndex(CallSiteIndex);
 
     void getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result);
     void getICStatusMap(ICStatusMap& result);
@@ -309,7 +309,7 @@ public:
     // This is a slow function call used primarily for compiling OSR exits in the case
     // that there had been inlining. Chances are if you want to use this, you're really
     // looking for a CallLinkInfoMap to amortize the cost of calling this.
-    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
+    CallLinkInfo* getCallLinkInfoForBytecodeIndex(BytecodeIndex);
     
     void setJITCodeMap(JITCodeMap&& jitCodeMap)
     {
@@ -328,25 +328,25 @@ public:
     void setCalleeSaveRegisters(RegisterSet);
     void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);
 
-    RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
-    RareCaseProfile* rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);
-    unsigned rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);
+    RareCaseProfile* addRareCaseProfile(BytecodeIndex);
+    RareCaseProfile* rareCaseProfileForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex);
+    unsigned rareCaseProfileCountForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex);
 
-    bool likelyToTakeSlowCase(int bytecodeOffset)
+    bool likelyToTakeSlowCase(BytecodeIndex bytecodeIndex)
     {
         if (!hasBaselineJITProfiling())
             return false;
         ConcurrentJSLocker locker(m_lock);
-        unsigned value = rareCaseProfileCountForBytecodeOffset(locker, bytecodeOffset);
+        unsigned value = rareCaseProfileCountForBytecodeIndex(locker, bytecodeIndex);
         return value >= Options::likelyToTakeSlowCaseMinimumCount();
     }
 
-    bool couldTakeSlowCase(int bytecodeOffset)
+    bool couldTakeSlowCase(BytecodeIndex bytecodeIndex)
     {
         if (!hasBaselineJITProfiling())
             return false;
         ConcurrentJSLocker locker(m_lock);
-        unsigned value = rareCaseProfileCountForBytecodeOffset(locker, bytecodeOffset);
+        unsigned value = rareCaseProfileCountForBytecodeIndex(locker, bytecodeIndex);
         return value >= Options::couldTakeSlowCaseMinimumCount();
     }
 
@@ -382,6 +382,11 @@ public:
         return returnAddress - instructionsBegin;
     }
 
+    inline BytecodeIndex bytecodeIndex(const Instruction* returnAddress)
+    {
+        return BytecodeIndex(bytecodeOffset(returnAddress));
+    }
+
     const InstructionStream& instructions() const { return m_unlinkedCode->instructions(); }
 
     size_t predictedMachineCodeSize();
@@ -483,8 +488,8 @@ public:
         return result;
     }
 
-    ValueProfile& valueProfileForBytecodeOffset(int bytecodeOffset);
-    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);
+    ValueProfile& valueProfileForBytecodeIndex(BytecodeIndex);
+    SpeculatedType valueProfilePredictionForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex);
 
     template<typename Functor> void forEachValueProfile(const Functor&);
     template<typename Functor> void forEachArrayProfile(const Functor&);
@@ -492,13 +497,13 @@ public:
     template<typename Functor> void forEachObjectAllocationProfile(const Functor&);
     template<typename Functor> void forEachLLIntCallLinkInfo(const Functor&);
 
-    ArithProfile* arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset);
+    ArithProfile* arithProfileForBytecodeIndex(BytecodeIndex);
     ArithProfile* arithProfileForPC(const Instruction*);
 
-    bool couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset);
+    bool couldTakeSpecialArithFastCase(BytecodeIndex);
 
-    ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
-    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
+    ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, BytecodeIndex);
+    ArrayProfile* getArrayProfile(BytecodeIndex);
 
     // Exception handling support
 
@@ -876,7 +881,7 @@ public:
 
     DisposableCallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);
 
-    void ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset);
+    void ensureCatchLivenessIsComputedForBytecodeIndex(BytecodeIndex);
 
     bool hasTailCalls() const { return m_unlinkedCode->hasTailCalls(); }
 
@@ -943,7 +948,7 @@ private:
 
     unsigned numberOfNonArgumentValueProfiles() { return m_numberOfNonArgumentValueProfiles; }
     unsigned totalNumberOfValueProfiles() { return numberOfArgumentValueProfiles() + numberOfNonArgumentValueProfiles(); }
-    ValueProfile* tryGetValueProfileForBytecodeOffset(int bytecodeOffset);
+    ValueProfile* tryGetValueProfileForBytecodeIndex(BytecodeIndex);
 
     Seconds timeSinceCreation()
     {
@@ -960,7 +965,7 @@ private:
     }
 
     void insertBasicBlockBoundariesForControlFlowProfiler();
-    void ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch&, InstructionStream::Offset);
+    void ensureCatchLivenessIsComputedForBytecodeIndexSlow(const OpCatch&, BytecodeIndex);
 
     int m_numCalleeLocals;
     int m_numVars;
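likelyToTakeSlowCase() and couldTakeSlowCase() above are thin wrappers that compare the profiled slow-case count for an index against tunable thresholds. A sketch of the heuristic, with a made-up constant standing in for Options::likelyToTakeSlowCaseMinimumCount():

#include <iostream>

// Hypothetical threshold; the real value comes from JSC::Options and is tunable.
constexpr unsigned likelyToTakeSlowCaseMinimumCount = 20;

// Without baseline JIT profiling there is no counter to consult, so assume the fast path.
bool likelyToTakeSlowCase(bool hasBaselineJITProfiling, unsigned rareCaseCount)
{
    if (!hasBaselineJITProfiling)
        return false;
    return rareCaseCount >= likelyToTakeSlowCaseMinimumCount;
}

int main()
{
    std::cout << likelyToTakeSlowCase(true, 3) << " " << likelyToTakeSlowCase(true, 40) << "\n"; // 0 1
}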
index 2de8048..83fbb90 100644
@@ -91,7 +91,7 @@ unsigned CodeOrigin::approximateHash(InlineCallFrame* terminal) const
     unsigned result = 2;
     CodeOrigin codeOrigin = *this;
     for (;;) {
-        result += codeOrigin.bytecodeIndex();
+        result += codeOrigin.bytecodeIndex().asBits();
 
         auto* inlineCallFrame = codeOrigin.inlineCallFrame();
 
@@ -152,7 +152,7 @@ void CodeOrigin::dump(PrintStream& out) const
                 out.print("(closure) ");
         }
         
-        out.print("bc#", stack[i].bytecodeIndex());
+        out.print(stack[i].bytecodeIndex());
     }
 }
 
index d5f30c2..3414d4f 100644
@@ -25,6 +25,8 @@
 
 #pragma once
 
+#include "BytecodeIndex.h"
+
 #include <limits.h>
 #include <wtf/HashMap.h>
 #include <wtf/PrintStream.h>
@@ -41,25 +43,23 @@ class CodeOrigin {
 public:
     CodeOrigin()
 #if CPU(ADDRESS64)
-        : m_compositeValue(buildCompositeValue(nullptr, s_invalidBytecodeIndex))
+        : m_compositeValue(buildCompositeValue(nullptr, BytecodeIndex()))
 #else
-        : m_bytecodeIndex(s_invalidBytecodeIndex)
-        , m_inlineCallFrame(nullptr)
+        : m_inlineCallFrame(nullptr)
 #endif
     {
     }
-    
+
     CodeOrigin(WTF::HashTableDeletedValueType)
 #if CPU(ADDRESS64)
-        : m_compositeValue(buildCompositeValue(deletedMarker(), s_invalidBytecodeIndex))
+        : m_compositeValue(buildCompositeValue(deletedMarker(), BytecodeIndex()))
 #else
-        : m_bytecodeIndex(s_invalidBytecodeIndex)
-        , m_inlineCallFrame(deletedMarker())
+        : m_inlineCallFrame(deletedMarker())
 #endif
     {
     }
     
-    explicit CodeOrigin(unsigned bytecodeIndex, InlineCallFrame* inlineCallFrame = nullptr)
+    explicit CodeOrigin(BytecodeIndex bytecodeIndex, InlineCallFrame* inlineCallFrame = nullptr)
 #if CPU(ADDRESS64)
         : m_compositeValue(buildCompositeValue(inlineCallFrame, bytecodeIndex))
 #else
@@ -67,7 +67,7 @@ public:
         , m_inlineCallFrame(inlineCallFrame)
 #endif
     {
-        ASSERT(bytecodeIndex < s_invalidBytecodeIndex);
+        ASSERT(!!bytecodeIndex);
 #if CPU(ADDRESS64)
         ASSERT(!(bitwise_cast<uintptr_t>(inlineCallFrame) & ~s_maskCompositeValueForPointer));
 #endif
@@ -124,7 +124,7 @@ public:
 #if CPU(ADDRESS64)
         return !(m_compositeValue & s_maskIsBytecodeIndexInvalid);
 #else
-        return m_bytecodeIndex != s_invalidBytecodeIndex;
+        return !!m_bytecodeIndex;
 #endif
     }
     explicit operator bool() const { return isSet(); }
@@ -134,7 +134,7 @@ public:
 #if CPU(ADDRESS64)
         return !isSet() && (m_compositeValue & s_maskCompositeValueForPointer);
 #else
-        return m_bytecodeIndex == s_invalidBytecodeIndex && !!m_inlineCallFrame;
+        return !m_bytecodeIndex && !!m_inlineCallFrame;
 #endif
     }
     
@@ -167,14 +167,14 @@ public:
     JS_EXPORT_PRIVATE void dump(PrintStream&) const;
     void dumpInContext(PrintStream&, DumpContext*) const;
 
-    unsigned bytecodeIndex() const
+    BytecodeIndex bytecodeIndex() const
     {
 #if CPU(ADDRESS64)
         if (!isSet())
-            return s_invalidBytecodeIndex;
+            return BytecodeIndex();
         if (UNLIKELY(isOutOfLine()))
             return outOfLineCodeOrigin()->bytecodeIndex;
-        return m_compositeValue >> (64 - s_freeBitsAtTop);
+        return BytecodeIndex::fromBits(m_compositeValue >> (64 - s_freeBitsAtTop));
 #else
         return m_bytecodeIndex;
 #endif
@@ -192,8 +192,6 @@ public:
     }
 
 private:
-    static constexpr unsigned s_invalidBytecodeIndex = UINT_MAX;
-
 #if CPU(ADDRESS64)
     static constexpr uintptr_t s_maskIsOutOfLine = 1;
     static constexpr uintptr_t s_maskIsBytecodeIndexInvalid = 2;
@@ -202,9 +200,9 @@ private:
         WTF_MAKE_FAST_ALLOCATED;
     public:
         InlineCallFrame* inlineCallFrame;
-        unsigned bytecodeIndex;
+        BytecodeIndex bytecodeIndex;
         
-        OutOfLineCodeOrigin(InlineCallFrame* inlineCallFrame, unsigned bytecodeIndex)
+        OutOfLineCodeOrigin(InlineCallFrame* inlineCallFrame, BytecodeIndex bytecodeIndex)
             : inlineCallFrame(inlineCallFrame)
             , bytecodeIndex(bytecodeIndex)
         {
@@ -235,17 +233,17 @@ private:
 #if CPU(ADDRESS64)
     static constexpr unsigned s_freeBitsAtTop = 64 - WTF_CPU_EFFECTIVE_ADDRESS_WIDTH;
     static constexpr uintptr_t s_maskCompositeValueForPointer = ((1ULL << WTF_CPU_EFFECTIVE_ADDRESS_WIDTH) - 1) & ~(8ULL - 1);
-    static uintptr_t buildCompositeValue(InlineCallFrame* inlineCallFrame, unsigned bytecodeIndex)
+    static uintptr_t buildCompositeValue(InlineCallFrame* inlineCallFrame, BytecodeIndex bytecodeIndex)
     {
-        if (bytecodeIndex == s_invalidBytecodeIndex)
+        if (!bytecodeIndex)
             return bitwise_cast<uintptr_t>(inlineCallFrame) | s_maskIsBytecodeIndexInvalid;
 
-        if (UNLIKELY(bytecodeIndex >= 1 << s_freeBitsAtTop)) {
+        if (UNLIKELY(bytecodeIndex.asBits() >= 1 << s_freeBitsAtTop)) {
             auto* outOfLine = new OutOfLineCodeOrigin(inlineCallFrame, bytecodeIndex);
             return bitwise_cast<uintptr_t>(outOfLine) | s_maskIsOutOfLine;
         }
 
-        uintptr_t encodedBytecodeIndex = static_cast<uintptr_t>(bytecodeIndex) << (64 - s_freeBitsAtTop);
+        uintptr_t encodedBytecodeIndex = static_cast<uintptr_t>(bytecodeIndex.asBits()) << (64 - s_freeBitsAtTop);
         ASSERT(!(encodedBytecodeIndex & bitwise_cast<uintptr_t>(inlineCallFrame)));
         return encodedBytecodeIndex | bitwise_cast<uintptr_t>(inlineCallFrame);
     }
@@ -258,14 +256,14 @@ private:
     // Finally the last s_freeBitsAtTop are the bytecodeIndex if it is inline
     uintptr_t m_compositeValue;
 #else
-    unsigned m_bytecodeIndex;
+    BytecodeIndex m_bytecodeIndex;
     InlineCallFrame* m_inlineCallFrame;
 #endif
 };
 
 inline unsigned CodeOrigin::hash() const
 {
-    return WTF::IntHash<unsigned>::hash(bytecodeIndex()) +
+    return WTF::IntHash<unsigned>::hash(bytecodeIndex().asBits()) +
         WTF::PtrHash<InlineCallFrame*>::hash(inlineCallFrame());
 }
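On 64-bit targets the whole CodeOrigin fits in one word: the InlineCallFrame pointer occupies the low WTF_CPU_EFFECTIVE_ADDRESS_WIDTH bits and the bytecode index's raw bits ride in the free bits at the top, spilling to an out-of-line allocation when they don't fit. A standalone sketch of the packing arithmetic, assuming a 48-bit effective address width and ignoring the low tag bits the real code reserves for the out-of-line and invalid-index markers:

#include <cassert>
#include <cstdint>
#include <iostream>

constexpr unsigned addressWidth = 48;                  // assumed WTF_CPU_EFFECTIVE_ADDRESS_WIDTH
constexpr unsigned freeBitsAtTop = 64 - addressWidth;  // 16 bits left for the bytecode index
constexpr uint64_t pointerMask = (1ULL << addressWidth) - 1;

uint64_t pack(uint64_t inlineCallFramePtr, uint32_t bytecodeOffset)
{
    assert(bytecodeOffset < (1u << freeBitsAtTop)); // the real code spills out of line here
    return (static_cast<uint64_t>(bytecodeOffset) << addressWidth) | inlineCallFramePtr;
}

uint32_t unpackIndex(uint64_t composite) { return static_cast<uint32_t>(composite >> addressWidth); }
uint64_t unpackPointer(uint64_t composite) { return composite & pointerMask; }

int main()
{
    uint64_t composite = pack(0x7f001234abc0, 42);
    std::cout << "bc#" << unpackIndex(composite)
              << " frame=0x" << std::hex << unpackPointer(composite) << "\n"; // bc#42 frame=0x7f001234abc0
}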
 
index b1a01ff..a82f92d 100644
@@ -35,7 +35,7 @@ namespace JSC { namespace DFG {
 
 void FrequentExitSite::dump(PrintStream& out) const
 {
-    out.print("bc#", m_bytecodeOffset, ": ", m_kind, "/", m_jitType, "/", m_inlineKind);
+    out.print(m_bytecodeIndex, ": ", m_kind, "/", m_jitType, "/", m_inlineKind);
 }
 
 ExitProfile::ExitProfile() { }
@@ -75,7 +75,7 @@ bool ExitProfile::add(CodeBlock* owner, const FrequentExitSite& site)
     return true;
 }
 
-Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex)
+Vector<FrequentExitSite> ExitProfile::exitSitesFor(BytecodeIndex bytecodeIndex)
 {
     Vector<FrequentExitSite> result;
     
@@ -83,7 +83,7 @@ Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex)
         return result;
     
     for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) {
-        if (m_frequentExitSites->at(i).bytecodeOffset() == bytecodeIndex)
+        if (m_frequentExitSites->at(i).bytecodeIndex() == bytecodeIndex)
             result.append(m_frequentExitSites->at(i));
     }
     
index 7f6019f..f8077bc 100644
@@ -39,7 +39,7 @@ namespace JSC { namespace DFG {
 class FrequentExitSite {
 public:
     FrequentExitSite()
-        : m_bytecodeOffset(0) // 0 = empty value
+        : m_bytecodeIndex(BytecodeIndex(0))
         , m_kind(ExitKindUnset)
         , m_jitType(ExitFromAnything)
         , m_inlineKind(ExitFromAnyInlineKind)
@@ -47,15 +47,15 @@ public:
     }
     
     FrequentExitSite(WTF::HashTableDeletedValueType)
-        : m_bytecodeOffset(1) // 1 = deleted value
+        : m_bytecodeIndex(WTF::HashTableDeletedValue)
         , m_kind(ExitKindUnset)
         , m_jitType(ExitFromAnything)
         , m_inlineKind(ExitFromAnyInlineKind)
     {
     }
     
-    explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind, ExitingJITType jitType = ExitFromAnything, ExitingInlineKind inlineKind = ExitFromAnyInlineKind)
-        : m_bytecodeOffset(bytecodeOffset)
+    explicit FrequentExitSite(BytecodeIndex bytecodeIndex, ExitKind kind, ExitingJITType jitType = ExitFromAnything, ExitingInlineKind inlineKind = ExitFromAnyInlineKind)
+        : m_bytecodeIndex(bytecodeIndex)
         , m_kind(kind)
         , m_jitType(jitType)
         , m_inlineKind(inlineKind)
@@ -63,14 +63,14 @@ public:
         if (m_kind == ArgumentsEscaped) {
             // Count this one globally. It doesn't matter where in the code block the arguments escaped;
             // the fact that they did is not associated with any particular instruction.
-            m_bytecodeOffset = 0;
+            m_bytecodeIndex = BytecodeIndex(0);
         }
     }
     
     // Use this constructor if you wish for the exit site to be counted globally within its
     // code block.
     explicit FrequentExitSite(ExitKind kind, ExitingJITType jitType = ExitFromAnything, ExitingInlineKind inlineKind = ExitFromAnyInlineKind)
-        : m_bytecodeOffset(0)
+        : m_bytecodeIndex(BytecodeIndex(0))
         , m_kind(kind)
         , m_jitType(jitType)
         , m_inlineKind(inlineKind)
@@ -84,7 +84,7 @@ public:
     
     bool operator==(const FrequentExitSite& other) const
     {
-        return m_bytecodeOffset == other.m_bytecodeOffset
+        return m_bytecodeIndex == other.m_bytecodeIndex
             && m_kind == other.m_kind
             && m_jitType == other.m_jitType
             && m_inlineKind == other.m_inlineKind;
@@ -92,7 +92,7 @@ public:
     
     bool subsumes(const FrequentExitSite& other) const
     {
-        if (m_bytecodeOffset != other.m_bytecodeOffset)
+        if (m_bytecodeIndex != other.m_bytecodeIndex)
             return false;
         if (m_kind != other.m_kind)
             return false;
@@ -107,10 +107,10 @@ public:
     
     unsigned hash() const
     {
-        return WTF::intHash(m_bytecodeOffset) + m_kind + static_cast<unsigned>(m_jitType) * 7 + static_cast<unsigned>(m_inlineKind) * 11;
+        return m_bytecodeIndex.hash() + m_kind + static_cast<unsigned>(m_jitType) * 7 + static_cast<unsigned>(m_inlineKind) * 11;
     }
     
-    unsigned bytecodeOffset() const { return m_bytecodeOffset; }
+    BytecodeIndex bytecodeIndex() const { return m_bytecodeIndex; }
     ExitKind kind() const { return m_kind; }
     ExitingJITType jitType() const { return m_jitType; }
     ExitingInlineKind inlineKind() const { return m_inlineKind; }
@@ -131,13 +131,13 @@ public:
 
     bool isHashTableDeletedValue() const
     {
-        return m_kind == ExitKindUnset && m_bytecodeOffset;
+        return m_kind == ExitKindUnset && m_bytecodeIndex.isHashTableDeletedValue();
     }
     
     void dump(PrintStream& out) const;
 
 private:
-    unsigned m_bytecodeOffset;
+    BytecodeIndex m_bytecodeIndex;
     ExitKind m_kind;
     ExitingJITType m_jitType;
     ExitingInlineKind m_inlineKind;
@@ -183,7 +183,7 @@ public:
     
     // Get the frequent exit sites for a bytecode index. This is O(n), and is
     // meant to only be used from debugging/profiling code.
-    Vector<FrequentExitSite> exitSitesFor(unsigned bytecodeIndex);
+    Vector<FrequentExitSite> exitSitesFor(BytecodeIndex);
     
     // This is O(n) and should be called on less-frequently executed code paths
     // in the compiler. It should be strictly cheaper than building a
@@ -222,7 +222,7 @@ public:
         return hasExitSite(FrequentExitSite(kind));
     }
     
-    bool hasExitSite(unsigned bytecodeIndex, ExitKind kind) const
+    bool hasExitSite(BytecodeIndex bytecodeIndex, ExitKind kind) const
     {
         return hasExitSite(FrequentExitSite(bytecodeIndex, kind));
     }
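FrequentExitSite keeps two sentinels: BytecodeIndex(0) marks a site counted globally across its code block (as with ArgumentsEscaped), and the BytecodeIndex hash-table deleted value replaces the old magic offset 1 for deleted entries. Its hash simply mixes the index hash with the enum fields; a sketch of that combining, with simplified enums standing in for the JSC ones:

#include <cstdint>
#include <functional>
#include <iostream>

// Simplified stand-ins for JSC's ExitKind / ExitingJITType / ExitingInlineKind.
enum ExitKind : unsigned { ExitKindUnset, BadCache, ArgumentsEscaped };
enum ExitingJITType : unsigned { ExitFromAnything, ExitFromBaseline };
enum ExitingInlineKind : unsigned { ExitFromAnyInlineKind, ExitFromNotInlined };

// Mirrors the shape of FrequentExitSite::hash(): the index hash replaces the old
// WTF::intHash(m_bytecodeOffset), and small prime multipliers keep the enum
// fields from colliding with one another.
unsigned exitSiteHash(uint32_t bytecodeIndexBits, ExitKind kind, ExitingJITType jitType, ExitingInlineKind inlineKind)
{
    unsigned indexHash = static_cast<unsigned>(std::hash<uint32_t>()(bytecodeIndexBits));
    return indexHash + kind + static_cast<unsigned>(jitType) * 7 + static_cast<unsigned>(inlineKind) * 11;
}

int main()
{
    std::cout << exitSiteHash(42, BadCache, ExitFromAnything, ExitFromAnyInlineKind) << "\n";
}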
index aba8494..bf22102 100644
@@ -38,7 +38,7 @@ DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock)
 {
 }
 
-DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITType rootJITType, unsigned callerBytecodeIndex)
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITType rootJITType, BytecodeIndex callerBytecodeIndex)
     : m_codeBlock(codeBlock->vm(), codeBlock)
     , m_rootCodeBlock(codeBlock->vm(), rootCodeBlock)
     , m_rootJITType(rootJITType)
@@ -56,7 +56,7 @@ void DeferredSourceDump::dump()
     dataLog(*m_codeBlock);
 
     if (isInlinedFrame)
-        dataLog(" at ", CodeBlockWithJITType(*m_rootCodeBlock, m_rootJITType), " ", "bc#", m_callerBytecodeIndex);
+        dataLog(" at ", CodeBlockWithJITType(*m_rootCodeBlock, m_rootJITType), " ", m_callerBytecodeIndex);
 
     dataLog("\n'''");
     m_codeBlock->dumpSource();
index b5a7bf2..e8d2ca8 100644
@@ -36,7 +36,7 @@ class DeferredSourceDump {
     WTF_MAKE_FAST_ALLOCATED;
 public:
     DeferredSourceDump(CodeBlock*);
-    DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITType rootJITType, unsigned callerBytecodeIndex);
+    DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITType rootJITType, BytecodeIndex callerBytecodeIndex);
 
     void dump();
 
@@ -44,7 +44,7 @@ private:
     Strong<CodeBlock> m_codeBlock;
     Strong<CodeBlock> m_rootCodeBlock;
     JITType m_rootJITType;
-    unsigned m_callerBytecodeIndex { UINT_MAX };
+    BytecodeIndex m_callerBytecodeIndex;
 };
 
 } // namespace JSC
index 073ce27..9a88437 100644
@@ -31,17 +31,18 @@ namespace JSC {
 
 class BytecodeLivenessAnalysis;
 
-typedef HashMap<unsigned, FastBitVector, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeToBitmapMap;
+typedef HashMap<BytecodeIndex, FastBitVector> BytecodeToBitmapMap;
 
 class FullBytecodeLiveness {
     WTF_MAKE_FAST_ALLOCATED;
 public:
-    const FastBitVector& getLiveness(unsigned bytecodeIndex) const
+    const FastBitVector& getLiveness(BytecodeIndex bytecodeIndex) const
     {
-        return m_map[bytecodeIndex];
+        // FIXME: What should this do when we have checkpoints?
+        return m_map[bytecodeIndex.offset()];
     }
     
-    bool operandIsLive(int operand, unsigned bytecodeIndex) const
+    bool operandIsLive(int operand, BytecodeIndex bytecodeIndex) const
     {
         return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex), operand);
     }
index beeb0ed..622b3e6 100644
@@ -52,11 +52,11 @@ bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
     return appendICStatusVariant(m_variants, variant);
 }
 
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, BytecodeIndex bytecodeIndex, UniquedStringImpl* uid)
 {
     VM& vm = profiledBlock->vm();
     
-    auto instruction = profiledBlock->instructions().at(bytecodeIndex);
+    auto instruction = profiledBlock->instructions().at(bytecodeIndex.offset());
 
     StructureID structureID;
     switch (instruction->opcodeID()) {
@@ -102,7 +102,7 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned
     return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
 }
 
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid, ExitFlag didExit, CallLinkStatus::ExitSiteData callExitSiteData)
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex, UniquedStringImpl* uid, ExitFlag didExit, CallLinkStatus::ExitSiteData callExitSiteData)
 {
     ConcurrentJSLocker locker(profiledBlock->m_lock);
 
@@ -130,7 +130,7 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& m
 #if ENABLE(DFG_JIT)
 GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    unsigned bytecodeIndex = codeOrigin.bytecodeIndex();
+    BytecodeIndex bytecodeIndex = codeOrigin.bytecodeIndex();
     GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
         locker, profiledBlock, stubInfo, uid,
         CallLinkStatus::computeExitSiteData(profiledBlock, bytecodeIndex));
@@ -303,7 +303,7 @@ GetByIdStatus GetByIdStatus::computeFor(
     CodeBlock* profiledBlock, ICStatusMap& baselineMap,
     ICStatusContextStack& icContextStack, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    unsigned bytecodeIndex = codeOrigin.bytecodeIndex();
+    BytecodeIndex bytecodeIndex = codeOrigin.bytecodeIndex();
     CallLinkStatus::ExitSiteData callExitSiteData = CallLinkStatus::computeExitSiteData(profiledBlock, bytecodeIndex);
     ExitFlag didExit = hasBadCacheExitSite(profiledBlock, bytecodeIndex);
     
index 383d586..1cc0e22 100644
@@ -103,7 +103,7 @@ public:
         m_variants.append(variant);
     }
     
-    static GetByIdStatus computeFor(CodeBlock*, ICStatusMap&, unsigned bytecodeIndex, UniquedStringImpl* uid, ExitFlag, CallLinkStatus::ExitSiteData);
+    static GetByIdStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex, UniquedStringImpl* uid, ExitFlag, CallLinkStatus::ExitSiteData);
     static GetByIdStatus computeFor(const StructureSet&, UniquedStringImpl* uid);
     
     static GetByIdStatus computeFor(CodeBlock* baselineBlock, ICStatusMap& baselineMap, ICStatusContextStack& dfgContextStack, CodeOrigin, UniquedStringImpl* uid);
@@ -153,7 +153,7 @@ private:
         const ConcurrentJSLocker&, CodeBlock* profiledBlock, StructureStubInfo*,
         UniquedStringImpl* uid, CallLinkStatus::ExitSiteData);
 #endif
-    static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    static GetByIdStatus computeFromLLInt(CodeBlock*, BytecodeIndex, UniquedStringImpl* uid);
     
     bool appendVariant(const GetByIdVariant&);
     
index add2eb0..8beeaba 100644
@@ -32,7 +32,7 @@
 
 namespace JSC {
 
-ExitFlag hasBadCacheExitSite(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+ExitFlag hasBadCacheExitSite(CodeBlock* profiledBlock, BytecodeIndex bytecodeIndex)
 {
 #if ENABLE(DFG_JIT)
     UnlinkedCodeBlock* unlinkedCodeBlock = profiledBlock->unlinkedCodeBlock();
index 077cbe1..9820ad9 100644
@@ -71,7 +71,7 @@ void filterICStatusVariants(VariantVectorType& variants, const StructureSet& set
         });
 }
 
-ExitFlag hasBadCacheExitSite(CodeBlock* profiledBlock, unsigned bytecodeIndex);
+ExitFlag hasBadCacheExitSite(CodeBlock* profiledBlock, BytecodeIndex);
 
 } // namespace JSC
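
[Editorial note: the Get/In/Put/InstanceOf status hunks above and below are the same mechanical migration from `unsigned bytecodeIndex` to `BytecodeIndex`. For orientation, this is roughly the interface those call sites lean on, inferred from usage in this patch (construction from a raw offset, offset(), hash(), hash-table sentinels, comparisons). It is a sketch, not the real BytecodeIndex.h; the sentinel encodings are assumptions.]

    // Sketch only: inferred from call sites in this patch, not the actual header.
    class BytecodeIndex {
    public:
        BytecodeIndex() = default;                        // default-constructed: invalid
        explicit BytecodeIndex(uint32_t offset) : m_offset(offset) { }
        BytecodeIndex(WTF::HashTableDeletedValueType) : m_offset(deletedValue) { }

        uint32_t offset() const { return m_offset; }
        unsigned hash() const { return WTF::intHash(m_offset); }

        explicit operator bool() const { return m_offset != invalidValue; }
        bool isHashTableDeletedValue() const { return m_offset == deletedValue; }

        bool operator==(BytecodeIndex other) const { return m_offset == other.m_offset; }
        bool operator<(BytecodeIndex other) const { return m_offset < other.m_offset; }

    private:
        static constexpr uint32_t invalidValue = UINT32_MAX;     // assumed encoding
        static constexpr uint32_t deletedValue = UINT32_MAX - 1; // assumed encoding
        uint32_t m_offset { invalidValue };
    };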
 
index b4588b5..9237053 100644
@@ -43,7 +43,7 @@ bool InByIdStatus::appendVariant(const InByIdVariant& variant)
 }
 
 #if ENABLE(JIT)
-InByIdStatus InByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid, ExitFlag didExit)
+InByIdStatus InByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex, UniquedStringImpl* uid, ExitFlag didExit)
 {
     ConcurrentJSLocker locker(profiledBlock->m_lock);
 
@@ -64,7 +64,7 @@ InByIdStatus InByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map
     return result;
 }
 
-InByIdStatus InByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
+InByIdStatus InByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex, UniquedStringImpl* uid)
 {
     return computeFor(profiledBlock, map, bytecodeIndex, uid, hasBadCacheExitSite(profiledBlock, bytecodeIndex));
 }
@@ -73,7 +73,7 @@ InByIdStatus InByIdStatus::computeFor(
     CodeBlock* profiledBlock, ICStatusMap& baselineMap,
     ICStatusContextStack& contextStack, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    unsigned bytecodeIndex = codeOrigin.bytecodeIndex();
+    BytecodeIndex bytecodeIndex = codeOrigin.bytecodeIndex();
     ExitFlag didExit = hasBadCacheExitSite(profiledBlock, bytecodeIndex);
     
     for (ICStatusContext* context : contextStack) {
index 8aeb760..eb65aea 100644
@@ -80,8 +80,8 @@ public:
         RELEASE_ASSERT_NOT_REACHED();
     }
     
-    static InByIdStatus computeFor(CodeBlock*, ICStatusMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
-    static InByIdStatus computeFor(CodeBlock*, ICStatusMap&, unsigned bytecodeIndex, UniquedStringImpl* uid, ExitFlag);
+    static InByIdStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex, UniquedStringImpl* uid);
+    static InByIdStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex, UniquedStringImpl* uid, ExitFlag);
     static InByIdStatus computeFor(CodeBlock* baselineBlock, ICStatusMap& baselineMap, ICStatusContextStack& contextStack, CodeOrigin, UniquedStringImpl* uid);
 
 #if ENABLE(DFG_JIT)
index e5361b6..3e3e65f 100644
@@ -69,7 +69,7 @@ void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) cons
     out.print(briefFunctionInformation(), ":<", RawPointer(baselineCodeBlock.get()));
     if (isStrictMode())
         out.print(" (StrictMode)");
-    out.print(", bc#", directCaller.bytecodeIndex(), ", ", static_cast<Kind>(kind));
+    out.print(", ", directCaller.bytecodeIndex(), ", ", static_cast<Kind>(kind));
     if (isClosureCall)
         out.print(", closure call");
     else
index d3ba9c9..ed00e69 100644
@@ -40,7 +40,7 @@ void InstanceOfStatus::appendVariant(const InstanceOfVariant& variant)
 }
 
 InstanceOfStatus InstanceOfStatus::computeFor(
-    CodeBlock* codeBlock, ICStatusMap& infoMap, unsigned bytecodeIndex)
+    CodeBlock* codeBlock, ICStatusMap& infoMap, BytecodeIndex bytecodeIndex)
 {
     ConcurrentJSLocker locker(codeBlock->m_lock);
     
index f0e20d9..6933260 100644
@@ -79,7 +79,7 @@ public:
         RELEASE_ASSERT_NOT_REACHED();
     }
     
-    static InstanceOfStatus computeFor(CodeBlock*, ICStatusMap&, unsigned bytecodeIndex);
+    static InstanceOfStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex);
     
 #if ENABLE(DFG_JIT)
     static InstanceOfStatus computeForStubInfo(const ConcurrentJSLocker&, StructureStubInfo*);
index 99b5a5a..5a6eca6 100644
@@ -77,10 +77,8 @@ private:
             return BaseRef { m_instructions, m_index + ptr()->size() };
         }
 
-        inline Offset offset() const
-        {
-            return m_index;
-        }
+        inline Offset offset() const { return m_index; }
+        inline BytecodeIndex index() const { return BytecodeIndex(offset()); }
 
         bool isValid() const
         {
@@ -152,6 +150,7 @@ public:
         return iterator { m_instructions, m_instructions.size() };
     }
 
+    inline const Ref at(BytecodeIndex index) const { return at(index.offset()); }
     inline const Ref at(Offset offset) const
     {
         ASSERT(offset < m_instructions.size());
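
[Editorial note: the new at(BytecodeIndex) overload and the iterator's index() accessor are thin shims over the raw Offset. A usage sketch, assuming `instructions` is an instruction stream and using a hypothetical offset value:]

    auto viaIndex  = instructions.at(BytecodeIndex(8));
    auto viaOffset = instructions.at(8u);
    ASSERT(viaIndex.offset() == viaOffset.offset());
    ASSERT(viaIndex.index() == BytecodeIndex(8));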
index 01a3ee7..bfb94b3 100644
@@ -39,19 +39,18 @@ class ScriptExecutable;
 class LazyOperandValueProfileKey {
 public:
     LazyOperandValueProfileKey()
-        : m_bytecodeOffset(0) // 0 = empty value
-        , m_operand(VirtualRegister()) // not a valid operand index in our current scheme
+        : m_operand(VirtualRegister()) // not a valid operand index in our current scheme
     {
     }
     
     LazyOperandValueProfileKey(WTF::HashTableDeletedValueType)
-        : m_bytecodeOffset(1) // 1 = deleted value
+        : m_bytecodeIndex(WTF::HashTableDeletedValue)
         , m_operand(VirtualRegister()) // not a valid operand index in our current scheme
     {
     }
     
-    LazyOperandValueProfileKey(unsigned bytecodeOffset, VirtualRegister operand)
-        : m_bytecodeOffset(bytecodeOffset)
+    LazyOperandValueProfileKey(BytecodeIndex bytecodeIndex, VirtualRegister operand)
+        : m_bytecodeIndex(bytecodeIndex)
         , m_operand(operand)
     {
         ASSERT(m_operand.isValid());
@@ -64,19 +63,19 @@ public:
     
     bool operator==(const LazyOperandValueProfileKey& other) const
     {
-        return m_bytecodeOffset == other.m_bytecodeOffset
+        return m_bytecodeIndex == other.m_bytecodeIndex
             && m_operand == other.m_operand;
     }
     
     unsigned hash() const
     {
-        return WTF::intHash(m_bytecodeOffset) + m_operand.offset();
+        return m_bytecodeIndex.hash() + m_operand.offset();
     }
     
-    unsigned bytecodeOffset() const
+    BytecodeIndex bytecodeIndex() const
     {
         ASSERT(!!*this);
-        return m_bytecodeOffset;
+        return m_bytecodeIndex;
     }
 
     VirtualRegister operand() const
@@ -87,10 +86,10 @@ public:
     
     bool isHashTableDeletedValue() const
     {
-        return !m_operand.isValid() && m_bytecodeOffset;
+        return !m_operand.isValid() && m_bytecodeIndex.isHashTableDeletedValue();
     }
 private: 
-    unsigned m_bytecodeOffset;
+    BytecodeIndex m_bytecodeIndex;
     VirtualRegister m_operand;
 };
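
[Editorial note: the key's empty and deleted states previously overloaded raw offsets 0 and 1; they now ride on BytecodeIndex's own invalid and deleted encodings. A sketch of the three key states after this change, with hypothetical index and operand values:]

    LazyOperandValueProfileKey emptyKey;                                   // invalid index, invalid operand
    LazyOperandValueProfileKey deletedKey { WTF::HashTableDeletedValue };  // deleted-index sentinel
    LazyOperandValueProfileKey liveKey { BytecodeIndex(42), virtualRegisterForLocal(0) };

    ASSERT(!emptyKey.isHashTableDeletedValue());
    ASSERT(deletedKey.isHashTableDeletedValue());
    ASSERT(liveKey.bytecodeIndex() == BytecodeIndex(42));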
 
index 7e3cc94..7104d01 100644
@@ -41,7 +41,7 @@ MethodOfGettingAValueProfile MethodOfGettingAValueProfile::fromLazyOperand(
     MethodOfGettingAValueProfile result;
     result.m_kind = LazyOperand;
     result.u.lazyOperand.codeBlock = codeBlock;
-    result.u.lazyOperand.bytecodeOffset = key.bytecodeOffset();
+    result.u.lazyOperand.bytecodeOffset = key.bytecodeIndex();
     result.u.lazyOperand.operand = key.operand().offset();
     return result;
 }
index b5e84b1..cfb8347 100644
@@ -31,6 +31,7 @@
 // these #if's will disappear...
 #if ENABLE(DFG_JIT)
 
+#include "BytecodeIndex.h"
 #include "GPRInfo.h"
 
 namespace JSC {
@@ -83,12 +84,16 @@ private:
     };
     
     Kind m_kind;
-    union {
+    union Data {
+        Data()
+            : profile(nullptr)
+        { }
+
         ValueProfile* profile;
         ArithProfile* arithProfile;
         struct {
             CodeBlock* codeBlock;
-            unsigned bytecodeOffset;
+            BytecodeIndex bytecodeOffset;
             int operand;
         } lazyOperand;
     } u;
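
[Editorial note: the anonymous union had to become a named union with a user-provided constructor. Once lazyOperand contains a BytecodeIndex (whose default constructor is, presumably, non-trivial), C++ deletes the union's implicit default constructor. A minimal standalone illustration of the rule:]

    struct S { S() { } };   // stand-in for a member with a non-trivial default ctor

    union Bad {
        int i;
        S s;
    };
    // Bad b;               // error: Bad's default constructor is implicitly deleted

    union Good {
        Good() : i(0) { }   // user-provided: explicitly picks an active member
        int i;
        S s;
    };
    Good g;                 // OK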
index efde373..3250be5 100644
@@ -47,11 +47,11 @@ bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
     return appendICStatusVariant(m_variants, variant);
 }
 
-PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
+PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, BytecodeIndex bytecodeIndex, UniquedStringImpl* uid)
 {
     VM& vm = profiledBlock->vm();
     
-    auto instruction = profiledBlock->instructions().at(bytecodeIndex);
+    auto instruction = profiledBlock->instructions().at(bytecodeIndex.offset());
     auto bytecode = instruction->as<OpPutById>();
     auto& metadata = bytecode.metadata(profiledBlock);
 
@@ -92,7 +92,7 @@ PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned
 }
 
 #if ENABLE(JIT)
-PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid, ExitFlag didExit, CallLinkStatus::ExitSiteData callExitSiteData)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex, UniquedStringImpl* uid, ExitFlag didExit, CallLinkStatus::ExitSiteData callExitSiteData)
 {
     ConcurrentJSLocker locker(profiledBlock->m_lock);
     
@@ -237,7 +237,7 @@ PutByIdStatus PutByIdStatus::computeForStubInfo(
 
 PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, ICStatusMap& baselineMap, ICStatusContextStack& contextStack, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    unsigned bytecodeIndex = codeOrigin.bytecodeIndex();
+    BytecodeIndex bytecodeIndex = codeOrigin.bytecodeIndex();
     CallLinkStatus::ExitSiteData callExitSiteData = CallLinkStatus::computeExitSiteData(baselineBlock, bytecodeIndex);
     ExitFlag didExit = hasBadCacheExitSite(baselineBlock, bytecodeIndex);
 
index 92b1b1a..b0dd9f1 100644
@@ -93,7 +93,7 @@ public:
         m_variants.append(variant);
     }
     
-    static PutByIdStatus computeFor(CodeBlock*, ICStatusMap&, unsigned bytecodeIndex, UniquedStringImpl* uid, ExitFlag, CallLinkStatus::ExitSiteData);
+    static PutByIdStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex, UniquedStringImpl* uid, ExitFlag, CallLinkStatus::ExitSiteData);
     static PutByIdStatus computeFor(JSGlobalObject*, const StructureSet&, UniquedStringImpl* uid, bool isDirect);
     
     static PutByIdStatus computeFor(CodeBlock* baselineBlock, ICStatusMap& baselineMap, ICStatusContextStack& contextStack, CodeOrigin, UniquedStringImpl* uid);
@@ -131,7 +131,7 @@ private:
         const ConcurrentJSLocker&, CodeBlock*, StructureStubInfo*, UniquedStringImpl* uid,
         CallLinkStatus::ExitSiteData);
 #endif
-    static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    static PutByIdStatus computeFromLLInt(CodeBlock*, BytecodeIndex, UniquedStringImpl* uid);
     
     bool appendVariant(const PutByIdVariant&);
     
index 58264b1..d7b3b64 100644
@@ -40,8 +40,7 @@ static constexpr bool verbose = false;
 }
 
 StructureStubInfo::StructureStubInfo(AccessType accessType)
-    : callSiteIndex(UINT_MAX)
-    , accessType(accessType)
+    : accessType(accessType)
     , cacheType(CacheType::Unset)
     , countdown(1) // For a totally clear stub, we'll patch it after the first execution.
     , repatchCount(0)
index b2716bf..234fa57 100644
@@ -111,15 +111,15 @@ size_t UnlinkedCodeBlock::estimatedSize(JSCell* cell, VM& vm)
     return Base::estimatedSize(cell, vm) + extraSize;
 }
 
-int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
+int UnlinkedCodeBlock::lineNumberForBytecodeIndex(BytecodeIndex bytecodeIndex)
 {
-    ASSERT(bytecodeOffset < instructions().size());
+    ASSERT(bytecodeIndex.offset() < instructions().size());
     int divot { 0 };
     int startOffset { 0 };
     int endOffset { 0 };
     unsigned line { 0 };
     unsigned column { 0 };
-    expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+    expressionRangeForBytecodeIndex(bytecodeIndex, divot, startOffset, endOffset, line, column);
     return line;
 }
 
@@ -179,10 +179,10 @@ void UnlinkedCodeBlock::dumpExpressionRangeInfo()
 }
 #endif
 
-void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset,
+void UnlinkedCodeBlock::expressionRangeForBytecodeIndex(BytecodeIndex bytecodeIndex,
     int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
 {
-    ASSERT(bytecodeOffset < instructions().size());
+    ASSERT(bytecodeIndex.offset() < instructions().size());
 
     if (!m_expressionInfo.size()) {
         startOffset = 0;
@@ -199,7 +199,7 @@ void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset
     int high = expressionInfo.size();
     while (low < high) {
         int mid = low + (high - low) / 2;
-        if (expressionInfo[mid].instructionOffset <= bytecodeOffset)
+        if (expressionInfo[mid].instructionOffset <= bytecodeIndex.offset())
             low = mid + 1;
         else
             high = mid;
@@ -325,9 +325,9 @@ const InstructionStream& UnlinkedCodeBlock::instructions() const
     return *m_instructions;
 }
 
-UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
+UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForBytecodeIndex(BytecodeIndex bytecodeIndex, RequiredHandler requiredHandler)
 {
-    return handlerForIndex(bytecodeOffset, requiredHandler);
+    return handlerForIndex(bytecodeIndex.offset(), requiredHandler);
 }
 
 UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
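
[Editorial note: this is the canonicalization at the API boundary: the ...ForBytecodeIndex entry points take a BytecodeIndex, unwrap it once with offset(), and the raw-offset helper does the actual lookup. A usage sketch, where `unlinkedCodeBlock` is a hypothetical UnlinkedCodeBlock* and the offset is made up:]

    BytecodeIndex index(16);
    UnlinkedHandlerInfo* viaIndexAPI = unlinkedCodeBlock->handlerForBytecodeIndex(index);
    UnlinkedHandlerInfo* viaRawAPI = unlinkedCodeBlock->handlerForIndex(index.offset());
    ASSERT(viaIndexAPI == viaRawAPI);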
index 0002c3e..1ba907f 100644
@@ -217,7 +217,7 @@ public:
     unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
     unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
 
-    UnlinkedHandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+    UnlinkedHandlerInfo* handlerForBytecodeIndex(BytecodeIndex, RequiredHandler = RequiredHandler::AnyHandler);
     UnlinkedHandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
 
     bool isBuiltinFunction() const { return m_isBuiltinFunction; }
@@ -279,9 +279,9 @@ public:
 
     bool hasRareData() const { return m_rareData.get(); }
 
-    int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
+    int lineNumberForBytecodeIndex(BytecodeIndex);
 
-    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
+    void expressionRangeForBytecodeIndex(BytecodeIndex, int& divot,
         int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
 
     bool typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot);
index 05b09be..bbb8a4b 100644
@@ -161,19 +161,19 @@ struct ValueProfile : public ValueProfileWithLogNumberOfBuckets<0> {
 // This is a mini value profile to catch pathologies. It is a counter that gets
 // incremented when we take the slow path on any instruction.
 struct RareCaseProfile {
-    RareCaseProfile(int bytecodeOffset)
-        : m_bytecodeOffset(bytecodeOffset)
+    RareCaseProfile(BytecodeIndex bytecodeIndex)
+        : m_bytecodeIndex(bytecodeIndex)
         , m_counter(0)
     {
     }
     
-    int m_bytecodeOffset;
+    BytecodeIndex m_bytecodeIndex;
     uint32_t m_counter;
 };
 
-inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile)
+inline BytecodeIndex getRareCaseProfileBytecodeIndex(RareCaseProfile* rareCaseProfile)
 {
-    return rareCaseProfile->m_bytecodeOffset;
+    return rareCaseProfile->m_bytecodeIndex;
 }
 
 struct ValueProfileAndOperand : public ValueProfile {
index e7914aa..154bd3c 100644
@@ -5049,7 +5049,7 @@ void ForInContext::finalize(BytecodeGenerator& generator, UnlinkedCodeBlock* cod
         OpcodeID opcodeID = instruction->opcodeID();
 
         ASSERT(opcodeID != op_enter);
-        computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction.ptr(), [&] (VirtualRegister operand) {
+        computeDefsForBytecodeIndex(codeBlock, opcodeID, instruction.ptr(), [&] (VirtualRegister operand) {
             if (local()->virtualRegister() == operand)
                 invalidate();
         });
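
[Editorial note: computeDefsForBytecodeIndex keeps its predecessor's callback shape: the caller supplies the opcode and instruction pointer and receives each defined operand. A usage sketch under the same codeBlock/instruction variables as the hunk above; collecting locals via isLocal() is an assumption for illustration:]

    Vector<VirtualRegister> definedLocals;
    computeDefsForBytecodeIndex(codeBlock, instruction->opcodeID(), instruction.ptr(),
        [&] (VirtualRegister operand) {
            if (operand.isLocal())
                definedLocals.append(operand);
        });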
index 2ae0b07..2ce72e8 100644
@@ -299,9 +299,9 @@ TextPosition DebuggerCallFrame::currentPosition(VM& vm)
 
     if (isTailDeleted()) {
         CodeBlock* codeBlock = m_shadowChickenFrame.codeBlock;
-        if (Optional<unsigned> bytecodeOffset = codeBlock->bytecodeOffsetFromCallSiteIndex(m_shadowChickenFrame.callSiteIndex)) {
-            return TextPosition(OrdinalNumber::fromOneBasedInt(codeBlock->lineNumberForBytecodeOffset(*bytecodeOffset)),
-                OrdinalNumber::fromOneBasedInt(codeBlock->columnNumberForBytecodeOffset(*bytecodeOffset)));
+        if (Optional<BytecodeIndex> bytecodeIndex = codeBlock->bytecodeIndexFromCallSiteIndex(m_shadowChickenFrame.callSiteIndex)) {
+            return TextPosition(OrdinalNumber::fromOneBasedInt(codeBlock->lineNumberForBytecodeIndex(*bytecodeIndex)),
+                OrdinalNumber::fromOneBasedInt(codeBlock->columnNumberForBytecodeIndex(*bytecodeIndex)));
         }
     }
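
[Editorial note: mapping a call-site index back now yields Optional<BytecodeIndex> rather than Optional<unsigned>, so the wrapper flows straight into the line/column helpers. Shape sketch, with `callSiteIndex` as a placeholder:]

    if (Optional<BytecodeIndex> index = codeBlock->bytecodeIndexFromCallSiteIndex(callSiteIndex)) {
        auto line = codeBlock->lineNumberForBytecodeIndex(*index);
        auto column = codeBlock->columnNumberForBytecodeIndex(*index);
        // ... build a TextPosition as in the hunk above ...
    }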
 
index 1ddc62e..8cb0962 100644
@@ -32,8 +32,7 @@
 
 namespace JSC { namespace DFG {
 
-BasicBlock::BasicBlock(
-    unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals, float executionCount)
+BasicBlock::BasicBlock(BytecodeIndex bytecodeBegin, unsigned numArguments, unsigned numLocals, float executionCount)
     : bytecodeBegin(bytecodeBegin)
     , index(NoBlock)
     , cfaStructureClobberStateAtHead(StructuresAreWatched)
index 4c58c9c..26a3067 100644
@@ -46,7 +46,7 @@ typedef Vector<Node*, 8> BlockNodeList;
 
 struct BasicBlock : RefCounted<BasicBlock> {
     BasicBlock(
-        unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals,
+        BytecodeIndex bytecodeBegin, unsigned numArguments, unsigned numLocals,
         float executionCount);
     ~BasicBlock();
     
@@ -178,7 +178,7 @@ struct BasicBlock : RefCounted<BasicBlock> {
     
     // This value is used internally for block linking and OSR entry. It is mostly meaningless
     // for other purposes due to inlining.
-    unsigned bytecodeBegin;
+    BytecodeIndex bytecodeBegin;
     
     BlockIndex index;
 
@@ -259,14 +259,14 @@ private:
 
 typedef Vector<BasicBlock*> BlockList;
     
-static inline unsigned getBytecodeBeginForBlock(BasicBlock** basicBlock)
+static inline BytecodeIndex getBytecodeBeginForBlock(BasicBlock** basicBlock)
 {
     return (*basicBlock)->bytecodeBegin;
 }
 
-static inline BasicBlock* blockForBytecodeOffset(Vector<BasicBlock*>& linkingTargets, unsigned bytecodeBegin)
+static inline BasicBlock* blockForBytecodeIndex(Vector<BasicBlock*>& linkingTargets, BytecodeIndex bytecodeBegin)
 {
-    return *binarySearch<BasicBlock*, unsigned>(linkingTargets, linkingTargets.size(), bytecodeBegin, getBytecodeBeginForBlock);
+    return *binarySearch<BasicBlock*, BytecodeIndex>(linkingTargets, linkingTargets.size(), bytecodeBegin, getBytecodeBeginForBlock);
 }
 
 } } // namespace JSC::DFG
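
[Editorial note: because BytecodeIndex carries relational operators, the generic WTF searches keep working after the template-argument swap above. A lookup sketch with hypothetical data; `linkingTargets` is assumed sorted by bytecodeBegin, as the parser maintains:]

    BasicBlock** hit = tryBinarySearch<BasicBlock*, BytecodeIndex>(
        linkingTargets, linkingTargets.size(), BytecodeIndex(12), getBytecodeBeginForBlock);
    if (hit)
        ASSERT((*hit)->bytecodeBegin == BytecodeIndex(12));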
index fbc2db4..7508e21 100644
@@ -51,11 +51,7 @@ void BlockInsertionSet::insert(size_t index, Ref<BasicBlock>&& block)
 
 BasicBlock* BlockInsertionSet::insert(size_t index, float executionCount)
 {
-    Ref<BasicBlock> block = adoptRef(*new BasicBlock(
-        UINT_MAX,
-        m_graph.block(0)->variablesAtHead.numberOfArguments(),
-        m_graph.block(0)->variablesAtHead.numberOfLocals(),
-        executionCount));
+    Ref<BasicBlock> block = adoptRef(*new BasicBlock(BytecodeIndex(), m_graph.block(0)->variablesAtHead.numberOfArguments(), m_graph.block(0)->variablesAtHead.numberOfLocals(), executionCount));
     block->isReachable = true;
     auto* result = block.ptr();
     insert(index, WTFMove(block));
index 661013a..223445c 100644
@@ -148,10 +148,10 @@ private:
     // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
     // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
     // than to move the right index all the way to the treatment of op_ret.
-    BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
+    BasicBlock* allocateTargetableBlock(BytecodeIndex);
     BasicBlock* allocateUntargetableBlock();
     // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction
-    void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
+    void makeBlockTargetable(BasicBlock*, BytecodeIndex);
     void addJumpTo(BasicBlock*);
     void addJumpTo(unsigned bytecodeIndex);
     // Handle calls. This resolves issues surrounding inlining and intrinsics.
@@ -174,8 +174,8 @@ private:
     bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
     unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
     enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
-    CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
-    CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
+    CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
+    CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
     template<typename ChecksFunctor>
     void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
@@ -567,7 +567,7 @@ private:
     {
         origin.walkUpInlineStack(
             [&] (CodeOrigin origin) {
-                unsigned bytecodeIndex = origin.bytecodeIndex();
+                BytecodeIndex bytecodeIndex = origin.bytecodeIndex();
                 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
                 flushImpl(inlineCallFrame, addFlushDirect);
 
@@ -642,11 +642,11 @@ private:
     
     void flushIfTerminal(SwitchData& data)
     {
-        if (data.fallThrough.bytecodeIndex() > m_currentIndex)
+        if (data.fallThrough.bytecodeIndex() > m_currentIndex.offset())
             return;
         
         for (unsigned i = data.cases.size(); i--;) {
-            if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
+            if (data.cases[i].target.bytecodeIndex() > m_currentIndex.offset())
                 return;
         }
         
@@ -709,7 +709,7 @@ private:
     {
         // We assume that branches originating from bytecode always have a fall-through. We
         // use this assumption to avoid checking for the creation of terminal blocks.
-        ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
+        ASSERT((taken > m_currentIndex.offset()) || (notTaken > m_currentIndex.offset()));
         BranchData* data = m_graph.m_branchData.add();
         *data = BranchData::withBytecodeIndices(taken, notTaken);
         return data;
@@ -834,14 +834,14 @@ private:
         return objectNode;
     }
     
-    SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
+    SpeculatedType getPredictionWithoutOSRExit(BytecodeIndex bytecodeIndex)
     {
         auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
         {
             SpeculatedType prediction;
             {
                 ConcurrentJSLocker locker(codeBlock->m_lock);
-                prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
+                prediction = codeBlock->valueProfilePredictionForBytecodeIndex(locker, codeOrigin.bytecodeIndex());
             }
             auto* fuzzerAgent = m_vm->fuzzerAgent();
             if (UNLIKELY(fuzzerAgent))
@@ -860,7 +860,7 @@ private:
         // chain and use its prediction. If we only have
         // inlined tail call frames, we use SpecFullTop
         // to avoid a spurious OSR exit.
-        auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
+        auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex.offset());
         OpcodeID opcodeID = instruction->opcodeID();
 
         switch (opcodeID) {
@@ -894,7 +894,7 @@ private:
         return SpecNone;
     }
 
-    SpeculatedType getPrediction(unsigned bytecodeIndex)
+    SpeculatedType getPrediction(BytecodeIndex bytecodeIndex)
     {
         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
 
@@ -920,7 +920,7 @@ private:
     ArrayMode getArrayMode(Array::Action action)
     {
         CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
-        ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
+        ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeIndex(m_currentInstruction));
         return getArrayMode(*profile, action);
     }
 
@@ -943,7 +943,7 @@ private:
             return node;
 
         {
-            ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
+            ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeIndex(m_currentIndex);
             if (arithProfile) {
                 switch (node->op()) {
                 case ArithAdd:
@@ -956,7 +956,7 @@ private:
                     if (arithProfile->didObserveBigInt())
                         node->mergeFlags(NodeMayHaveBigIntResult);
                     break;
-                
+
                 case ValueMul:
                 case ArithMul: {
                     if (arithProfile->didObserveInt52Overflow())
@@ -987,7 +987,7 @@ private:
                         node->mergeFlags(NodeMayHaveBigIntResult);
                     break;
                 }
-                
+
                 default:
                     break;
                 }
@@ -1028,13 +1028,13 @@ private:
         // care about when the outcome of the division is not an integer, which
         // is what the special fast case counter tells us.
         
-        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
+        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialArithFastCase(m_currentIndex))
             return node;
         
         // FIXME: It might be possible to make this more granular.
         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
         
-        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
+        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeIndex(m_currentIndex);
         if (arithProfile->didObserveBigInt())
             node->mergeFlags(NodeMayHaveBigIntResult);
 
@@ -1061,7 +1061,7 @@ private:
     // The current block being generated.
     BasicBlock* m_currentBlock;
     // The bytecode index of the current instruction being generated.
-    unsigned m_currentIndex;
+    BytecodeIndex m_currentIndex;
     // The semantic origin of the current node if different from the current Index.
     CodeOrigin m_currentSemanticOrigin;
     // True if it's OK to OSR exit right now.
@@ -1191,14 +1191,14 @@ private:
     bool m_hasAnyForceOSRExits { false };
 };
 
-BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
+BasicBlock* ByteCodeParser::allocateTargetableBlock(BytecodeIndex bytecodeIndex)
 {
-    ASSERT(bytecodeIndex != UINT_MAX);
+    ASSERT(bytecodeIndex);
     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
     BasicBlock* blockPtr = block.ptr();
     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
     if (m_inlineStackTop->m_blockLinkingTargets.size())
-        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
+        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin.offset() < bytecodeIndex.offset());
     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
     m_graph.appendBlock(WTFMove(block));
     return blockPtr;
@@ -1206,19 +1206,19 @@ BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
 
 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
 {
-    Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
+    Ref<BasicBlock> block = adoptRef(*new BasicBlock(BytecodeIndex(), m_numArguments, m_numLocals, 1));
     BasicBlock* blockPtr = block.ptr();
     m_graph.appendBlock(WTFMove(block));
     return blockPtr;
 }
 
-void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
+void ByteCodeParser::makeBlockTargetable(BasicBlock* block, BytecodeIndex bytecodeIndex)
 {
-    RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
+    RELEASE_ASSERT(!block->bytecodeBegin);
     block->bytecodeBegin = bytecodeIndex;
     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
     if (m_inlineStackTop->m_blockLinkingTargets.size())
-        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
+        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin.offset() < bytecodeIndex.offset());
     m_inlineStackTop->m_blockLinkingTargets.append(block);
 }
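
[Editorial note: the same sentinel migration appears several times in this patch: StructureStubInfo drops its UINT_MAX-seeded callSiteIndex initializer (presumably its default now encodes invalid), BlockInsertionSet passes BytecodeIndex() instead of UINT_MAX, and here an untargetable block's bytecodeBegin starts invalid and is tested through operator bool. Minimal sketch, with a made-up offset:]

    BytecodeIndex begin;          // default-constructed: invalid, boolean false
    ASSERT(!begin);               // "not yet targetable", no UINT_MAX magic
    begin = BytecodeIndex(24);    // hypothetical offset: now targetable
    ASSERT(begin);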
 
@@ -1278,7 +1278,7 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall(
 
         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
         auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
-            argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
+            argumentCountIncludingThis, BytecodeIndex(m_currentIndex.offset() + instructionSize), op, kind, prediction);
         if (optimizationResult == CallOptimizationResult::OptimizedToJump)
             return Terminal;
         if (optimizationResult == CallOptimizationResult::Inlined) {
@@ -1457,17 +1457,17 @@ bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant c
             setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
 
         // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
-        unsigned oldIndex = m_currentIndex;
+        BytecodeIndex oldIndex = m_currentIndex;
         auto oldStackTop = m_inlineStackTop;
         m_inlineStackTop = stackEntry;
-        m_currentIndex = opcodeLengths[op_enter];
+        m_currentIndex = BytecodeIndex(opcodeLengths[op_enter]);
         m_exitOK = true;
         processSetLocalQueue();
         m_currentIndex = oldIndex;
         m_inlineStackTop = oldStackTop;
         m_exitOK = false;
 
-        BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
+        BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, BytecodeIndex>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), BytecodeIndex(opcodeLengths[op_enter]), getBytecodeBeginForBlock);
         RELEASE_ASSERT(entryBlockPtr);
         addJumpTo(*entryBlockPtr);
         return true;
@@ -1625,8 +1625,8 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, Ca
         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
 
     // This is where the actual inlining really happens.
-    unsigned oldIndex = m_currentIndex;
-    m_currentIndex = 0;
+    BytecodeIndex oldIndex = m_currentIndex;
+    m_currentIndex = BytecodeIndex(0);
 
     switch (kind) {
     case InlineCallFrame::GetterCall:
@@ -1761,7 +1761,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, Ca
     m_currentInstruction = savedCurrentInstruction;
 }
 
-ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
+ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
 {
     VERBOSE_LOG("    Considering callee ", callee, "\n");
 
@@ -1789,7 +1789,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c
         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
         inliningBalance--;
         if (continuationBlock) {
-            m_currentIndex = nextOffset;
+            m_currentIndex = nextIndex;
             m_exitOK = true;
             processSetLocalQueue();
             addJumpTo(continuationBlock);
@@ -1972,7 +1972,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
     Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
     int registerOffset, VirtualRegister thisArgument,
     int argumentCountIncludingThis,
-    unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
+    BytecodeIndex nextIndex, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
 {
     VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
     
@@ -1985,7 +1985,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
         return handleCallVariant(
             callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
-            argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
+            argumentCountIncludingThis, nextIndex, kind, prediction, inliningBalance, nullptr, true);
     }
 
     // We need to create some kind of switch over callee. For now we only do this if we believe that
@@ -2061,9 +2061,9 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
     
     VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
 
-    unsigned oldOffset = m_currentIndex;
+    BytecodeIndex oldIndex = m_currentIndex;
     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
-        m_currentIndex = oldOffset;
+        m_currentIndex = oldIndex;
         BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
         m_currentBlock = calleeEntryBlock;
         prepareToParseBlock();
@@ -2075,7 +2075,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
         
         auto inliningResult = handleCallVariant(
             myCallTargetNode, result, callLinkStatus[i], registerOffset,
-            thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
+            thisArgument, argumentCountIncludingThis, nextIndex, kind, prediction,
             inliningBalance, continuationBlock, false);
         
         if (inliningResult == CallOptimizationResult::DidNothing) {
@@ -2104,7 +2104,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
 
     // Slow path block
     m_currentBlock = allocateUntargetableBlock();
-    m_currentIndex = oldOffset;
+    m_currentIndex = oldIndex;
     m_exitOK = true;
     data.fallThrough = BranchTarget(m_currentBlock);
     prepareToParseBlock();
@@ -2124,7 +2124,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
         VERBOSE_LOG("couldTakeSlowPath was false\n");
     }
 
-    m_currentIndex = nextOffset;
+    m_currentIndex = nextIndex;
     m_exitOK = true; // Origin changed, so it's fine to exit again.
     processSetLocalQueue();
 
@@ -2136,7 +2136,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
 
     prepareToParseBlock();
     
-    m_currentIndex = oldOffset;
+    m_currentIndex = oldIndex;
     m_currentBlock = continuationBlock;
     m_exitOK = true;
     
@@ -4693,14 +4693,14 @@ static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutI
 // Doesn't allow using `continue`.
 #define NEXT_OPCODE(name) \
     if (true) { \
-        m_currentIndex += currentInstruction->size(); \
+        m_currentIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size()); \
         goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \
     } else \
         WTF_CONCAT(NEXT_OPCODE_, __LINE__): \
     continue
 
 #define LAST_OPCODE_LINKED(name) do { \
-        m_currentIndex += currentInstruction->size(); \
+        m_currentIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size()); \
         m_exitOK = false; \
         return; \
     } while (false)
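
[Editorial note: both macros now re-wrap rather than increment. BytecodeIndex exposes no arithmetic, so stepping to the next instruction goes through offset() and back. Expressed as a hypothetical helper, which the patch itself inlines in the macros above:]

    // Hypothetical convenience, not in the patch: advance past one instruction.
    static BytecodeIndex nextIndex(BytecodeIndex current, const Instruction* instruction)
    {
        return BytecodeIndex(current.offset() + instruction->size());
    }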
@@ -4724,7 +4724,7 @@ static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutI
 void ByteCodeParser::parseBlock(unsigned limit)
 {
     auto& instructions = m_inlineStackTop->m_codeBlock->instructions();
-    unsigned blockBegin = m_currentIndex;
+    BytecodeIndex blockBegin = m_currentIndex;
 
     // If we are the first basic block, introduce markers for arguments. This allows
     // us to track if a use of an argument may use the actual argument passed, as
@@ -4768,7 +4768,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
         processSetLocalQueue();
         
         // Don't extend over jump destinations.
-        if (m_currentIndex == limit) {
+        if (m_currentIndex.offset() == limit) {
             // Ordinarily we want to plant a jump. But refuse to do this if the block is
             // empty. This is a special case for inlining, which might otherwise create
             // some empty blocks in some cases. When parseBlock() returns with an empty
@@ -4778,7 +4778,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             // to be true.
 
             if (!m_currentBlock->isEmpty())
-                addJumpTo(m_currentIndex);
+                addJumpTo(m_currentIndex.offset());
             return;
         }
         
@@ -5803,7 +5803,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             ASSERT(!m_currentBlock->terminal());
             auto bytecode = currentInstruction->as<OpJmp>();
             int relativeOffset = jumpTarget(bytecode.m_targetLabel);
-            addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+            addToGraph(Jump, OpInfo(m_currentIndex.offset() + relativeOffset));
             if (relativeOffset <= 0)
                 flushForTerminal();
             LAST_OPCODE(op_jmp);
@@ -5813,7 +5813,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpJtrue>();
             unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
             Node* condition = get(bytecode.m_condition);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jtrue);
         }
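
[Editorial note: note the asymmetry in these jump cases: targets and deltas remain raw unsigned values, so only m_currentIndex is wrapped, and it is unwrapped once per target computation. Spelled out with names from the op_jtrue case above; a sketch, not a replacement for it:]

    unsigned takenOffset = m_currentIndex.offset() + relativeOffset;              // raw delta applied to raw offset
    unsigned notTakenOffset = m_currentIndex.offset() + currentInstruction->size();
    addToGraph(Branch, OpInfo(branchData(takenOffset, notTakenOffset)), condition);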
 
@@ -5821,7 +5821,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             auto bytecode = currentInstruction->as<OpJfalse>();
             unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
             Node* condition = get(bytecode.m_condition);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jfalse);
         }
 
@@ -5831,7 +5831,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* value = get(bytecode.m_value);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
             Node* condition = addToGraph(CompareEq, value, nullConstant);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jeq_null);
         }
 
@@ -5841,7 +5841,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* value = get(bytecode.m_value);
             Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
             Node* condition = addToGraph(CompareEq, value, nullConstant);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jneq_null);
         }
 
@@ -5850,7 +5850,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
             Node* value = get(bytecode.m_value);
             Node* condition = addToGraph(IsUndefinedOrNull, value);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jundefined_or_null);
         }
 
@@ -5859,7 +5859,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
             Node* value = get(bytecode.m_value);
             Node* condition = addToGraph(IsUndefinedOrNull, value);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jnundefined_or_null);
         }
 
@@ -5869,7 +5869,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLess, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jless);
         }
 
@@ -5879,7 +5879,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLessEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jlesseq);
         }
 
@@ -5889,7 +5889,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreater, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jgreater);
         }
 
@@ -5899,7 +5899,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jgreatereq);
         }
 
@@ -5909,7 +5909,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jeq);
         }
 
@@ -5919,7 +5919,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareStrictEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jstricteq);
         }
 
@@ -5929,7 +5929,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLess, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jnless);
         }
 
@@ -5939,7 +5939,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareLessEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jnlesseq);
         }
 
@@ -5949,7 +5949,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreater, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jngreater);
         }
 
@@ -5959,7 +5959,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jngreatereq);
         }
 
@@ -5969,7 +5969,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jneq);
         }
 
@@ -5979,7 +5979,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareStrictEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
             LAST_OPCODE(op_jnstricteq);
         }
 
@@ -5989,7 +5989,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareBelow, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jbelow);
         }
 
@@ -5999,7 +5999,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* op1 = get(bytecode.m_lhs);
             Node* op2 = get(bytecode.m_rhs);
             Node* condition = addToGraph(CompareBelowEq, op1, op2);
-            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+            addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
             LAST_OPCODE(op_jbeloweq);
         }
 
@@ -6008,12 +6008,12 @@ void ByteCodeParser::parseBlock(unsigned limit)
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchImm;
             data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
-            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
+            data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
                 if (!table.branchOffsets[i])
                     continue;
-                unsigned target = m_currentIndex + table.branchOffsets[i];
+                unsigned target = m_currentIndex.offset() + table.branchOffsets[i];
                 if (target == data.fallThrough.bytecodeIndex())
                     continue;
                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
@@ -6028,12 +6028,12 @@ void ByteCodeParser::parseBlock(unsigned limit)
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchChar;
             data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
-            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
+            data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
                 if (!table.branchOffsets[i])
                     continue;
-                unsigned target = m_currentIndex + table.branchOffsets[i];
+                unsigned target = m_currentIndex.offset() + table.branchOffsets[i];
                 if (target == data.fallThrough.bytecodeIndex())
                     continue;
                 data.cases.append(
@@ -6049,12 +6049,12 @@ void ByteCodeParser::parseBlock(unsigned limit)
             SwitchData& data = *m_graph.m_switchData.add();
             data.kind = SwitchString;
             data.switchTableIndex = bytecode.m_tableIndex;
-            data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
+            data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
             StringJumpTable::StringOffsetTable::iterator iter;
             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
-                unsigned target = m_currentIndex + iter->value.branchOffset;
+                unsigned target = m_currentIndex.offset() + iter->value.branchOffset;
                 if (target == data.fallThrough.bytecodeIndex())
                     continue;
                 data.cases.append(
@@ -6079,7 +6079,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             if (m_inlineStackTop->m_returnValue.isValid())
                 setDirect(m_inlineStackTop->m_returnValue, get(bytecode.m_value), ImmediateSetWithFlush);
 
-            if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
+            if (!m_inlineStackTop->m_continuationBlock && m_currentIndex.offset() + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
                 // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one.
                 // It is untargetable, because we do not know the appropriate index.
                 // If this block turns out to be a jump target, parseCodeBlock will fix its bytecodeIndex before putting it in m_blockLinkingTargets
@@ -6207,7 +6207,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 ArgumentsVector& entrypointArguments = addResult.iterator->value;
                 entrypointArguments.resize(m_numArguments);
 
-                unsigned exitBytecodeIndex = m_currentIndex + currentInstruction->size();
+                BytecodeIndex exitBytecodeIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size());
 
                 for (unsigned argument = 0; argument < argumentPredictions.size(); ++argument) {
                     VariableAccessData* variable = newVariableAccessData(virtualRegisterForArgument(argument));
@@ -6316,7 +6316,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
             Node* child = get(bytecode.m_value);
             if (bytecode.metadata(codeBlock).m_hasJumped) {
                 Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child);
-                addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+                addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
                 LAST_OPCODE(op_jneq_ptr);
             }
             addToGraph(CheckCell, OpInfo(frozenPointer), child);
@@ -7135,21 +7135,21 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleT
     
     switch (node->op()) {
     case Jump:
-        node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
+        node->targetBlock() = blockForBytecodeIndex(possibleTargets, BytecodeIndex(node->targetBytecodeOffsetDuringParsing()));
         break;
         
     case Branch: {
         BranchData* data = node->branchData();
-        data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
-        data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
+        data->taken.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->takenBytecodeIndex()));
+        data->notTaken.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->notTakenBytecodeIndex()));
         break;
     }
         
     case Switch: {
         SwitchData* data = node->switchData();
         for (unsigned i = node->switchData()->cases.size(); i--;)
-            data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
-        data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
+            data->cases[i].target.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->cases[i].target.bytecodeIndex()));
+        data->fallThrough.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->fallThrough.bytecodeIndex()));
         break;
     }
         
@@ -7323,7 +7323,7 @@ void ByteCodeParser::parseCodeBlock()
     for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
         // The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
         unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
-        ASSERT(m_currentIndex < limit);
+        ASSERT(m_currentIndex.offset() < limit);
 
         // Loop until we reach the current limit (i.e. next jump target).
         do {
@@ -7346,23 +7346,23 @@ void ByteCodeParser::parseCodeBlock()
             parseBlock(limit);
 
             // We should not have gone beyond the limit.
-            ASSERT(m_currentIndex <= limit);
+            ASSERT(m_currentIndex.offset() <= limit);
 
             if (m_currentBlock->isEmpty()) {
                 // This case only happens if the last instruction was an inlined call with early returns
                 // or polymorphic (creating an empty continuation block),
                 // and then we hit the limit before putting anything in the continuation block.
-                ASSERT(m_currentIndex == limit);
+                ASSERT(m_currentIndex.offset() == limit);
                 makeBlockTargetable(m_currentBlock, m_currentIndex);
             } else {
-                ASSERT(m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()));
+                ASSERT(m_currentBlock->terminal() || (m_currentIndex.offset() == codeBlock->instructions().size() && inlineCallFrame()));
                 m_currentBlock = nullptr;
             }
-        } while (m_currentIndex < limit);
+        } while (m_currentIndex.offset() < limit);
     }
 
     // Should have reached the end of the instructions.
-    ASSERT(m_currentIndex == codeBlock->instructions().size());
+    ASSERT(m_currentIndex.offset() == codeBlock->instructions().size());
     
     VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n");
 }
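
The parser hunks above exercise a small, consistent BytecodeIndex surface: explicit construction from an unsigned byte offset, offset(), the raw-bits pair asBits()/fromBits(), and a truthiness test for the unset state. A minimal sketch of a wrapper with that surface, assuming the empty state is a reserved sentinel payload rather than zero (BytecodeIndex(0) must stay truthy, since it names the op_enter entrypoint); this is illustrative, not the verbatim header:

    #include <cstdint>
    #include <limits>

    class BytecodeIndex {
    public:
        BytecodeIndex() = default;
        explicit BytecodeIndex(uint32_t offset) : m_bits(offset) { }

        uint32_t offset() const { return m_bits; }

        // Raw-bits view, used when the index is marshalled through a JIT
        // operation as a 32-bit immediate (see triggerOSREntryNow below).
        uint32_t asBits() const { return m_bits; }
        static BytecodeIndex fromBits(uint32_t bits)
        {
            BytecodeIndex result;
            result.m_bits = bits;
            return result;
        }

        // Default-constructed means "no index"; an explicit 0 stays truthy.
        explicit operator bool() const { return m_bits != invalidBits; }

    private:
        static constexpr uint32_t invalidBits = std::numeric_limits<uint32_t>::max();
        uint32_t m_bits { invalidBits };
    };
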
@@ -7525,7 +7525,7 @@ void ByteCodeParser::handleCreateInternalFieldObject(const ClassInfo* classInfo,
 void ByteCodeParser::parse()
 {
     // Set during construction.
-    ASSERT(!m_currentIndex);
+    ASSERT(!m_currentIndex.offset());
     
     VERBOSE_LOG("Parsing ", *m_codeBlock, "\n");
     
index 7c80750..7987b87 100644
@@ -56,7 +56,7 @@ CallSiteIndex CommonData::addCodeOrigin(CodeOrigin codeOrigin)
         codeOrigins.append(codeOrigin);
     unsigned index = codeOrigins.size() - 1;
     ASSERT(codeOrigins[index] == codeOrigin);
-    return CallSiteIndex(index);
+    return CallSiteIndex(BytecodeIndex(index));
 }
 
 CallSiteIndex CommonData::addUniqueCallSiteIndex(CodeOrigin codeOrigin)
@@ -64,13 +64,13 @@ CallSiteIndex CommonData::addUniqueCallSiteIndex(CodeOrigin codeOrigin)
     codeOrigins.append(codeOrigin);
     unsigned index = codeOrigins.size() - 1;
     ASSERT(codeOrigins[index] == codeOrigin);
-    return CallSiteIndex(index);
+    return CallSiteIndex(BytecodeIndex(index));
 }
 
 CallSiteIndex CommonData::lastCallSite() const
 {
     RELEASE_ASSERT(codeOrigins.size());
-    return CallSiteIndex(codeOrigins.size() - 1);
+    return CallSiteIndex(BytecodeIndex(codeOrigins.size() - 1));
 }
 
 DisposableCallSiteIndex CommonData::addDisposableCallSiteIndex(CodeOrigin codeOrigin)
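
The three call sites above now wrap the code-origin position before constructing a CallSiteIndex, which implies its constructor became explicit over BytecodeIndex. A hedged sketch of that shape (the real header is outside this excerpt):

    struct CallSiteIndex {
        CallSiteIndex() = default;
        explicit CallSiteIndex(BytecodeIndex index) : m_bits(index.asBits()) { }

        uint32_t bits() const { return m_bits; }

    private:
        uint32_t m_bits { 0 };
    };
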
index 4b2167f..2ed830f 100644
@@ -94,14 +94,14 @@ public:
     void installVMTrapBreakpoints(CodeBlock* owner);
     bool isVMTrapBreakpoint(void* address);
 
-    CatchEntrypointData* catchOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
+    CatchEntrypointData* catchOSREntryDataForBytecodeIndex(BytecodeIndex bytecodeIndex)
     {
-        return tryBinarySearch<CatchEntrypointData, unsigned>(
+        return tryBinarySearch<CatchEntrypointData, BytecodeIndex>(
             catchEntrypoints, catchEntrypoints.size(), bytecodeIndex,
             [] (const CatchEntrypointData* item) { return item->bytecodeIndex; });
     }
 
-    void appendCatchEntrypoint(unsigned bytecodeIndex, MacroAssemblerCodePtr<ExceptionHandlerPtrTag> machineCode, Vector<FlushFormat>&& argumentFormats)
+    void appendCatchEntrypoint(BytecodeIndex bytecodeIndex, MacroAssemblerCodePtr<ExceptionHandlerPtrTag> machineCode, Vector<FlushFormat>&& argumentFormats)
     {
         catchEntrypoints.append(CatchEntrypointData { machineCode,  WTFMove(argumentFormats), bytecodeIndex });
     }
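
tryBinarySearch is instantiated here with BytecodeIndex as the key type, so the class must be totally ordered, and catchEntrypoints must be kept sorted by that key (finalizeCatchEntrypoints, called in the FTL compile path below, presumably performs the sort). A sketch of the comparisons, assuming they simply delegate to the underlying bits:

    inline bool operator==(BytecodeIndex a, BytecodeIndex b) { return a.asBits() == b.asBits(); }
    inline bool operator<(BytecodeIndex a, BytecodeIndex b) { return a.asBits() < b.asBits(); }
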
index db69f1b..17b3707 100644
@@ -70,7 +70,7 @@ static FunctionWhitelist& ensureGlobalDFGWhitelist()
 
 static CompilationResult compileImpl(
     VM& vm, CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationMode mode,
-    unsigned osrEntryBytecodeIndex, const Operands<Optional<JSValue>>& mustHandleValues,
+    BytecodeIndex osrEntryBytecodeIndex, const Operands<Optional<JSValue>>& mustHandleValues,
     Ref<DeferredCompilationCallback>&& callback)
 {
     if (!Options::bytecodeRangeToDFGCompile().isInRange(codeBlock->instructionsSize())
@@ -116,7 +116,7 @@ static CompilationResult compileImpl(
 }
 #else // ENABLE(DFG_JIT)
 static CompilationResult compileImpl(
-    VM&, CodeBlock*, CodeBlock*, CompilationMode, unsigned, const Operands<Optional<JSValue>>&,
+    VM&, CodeBlock*, CodeBlock*, CompilationMode, BytecodeIndex, const Operands<Optional<JSValue>>&,
     Ref<DeferredCompilationCallback>&&)
 {
     return CompilationFailed;
@@ -125,7 +125,7 @@ static CompilationResult compileImpl(
 
 CompilationResult compile(
     VM& vm, CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationMode mode,
-    unsigned osrEntryBytecodeIndex, const Operands<Optional<JSValue>>& mustHandleValues,
+    BytecodeIndex osrEntryBytecodeIndex, const Operands<Optional<JSValue>>& mustHandleValues,
     Ref<DeferredCompilationCallback>&& callback)
 {
     CompilationResult result = compileImpl(
index 01f74ee..c7ba7f6 100644
@@ -42,7 +42,7 @@ JS_EXPORT_PRIVATE unsigned getNumCompilations();
 // compile. Even if we do a synchronous compile, we call the callback with the result.
 CompilationResult compile(
     VM&, CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationMode,
-    unsigned osrEntryBytecodeIndex, const Operands<Optional<JSValue>>& mustHandleValues,
+    BytecodeIndex osrEntryBytecodeIndex, const Operands<Optional<JSValue>>& mustHandleValues,
     Ref<DeferredCompilationCallback>&&);
 
 } } // namespace JSC::DFG
index 9d5b7d9..adbf502 100644
@@ -404,7 +404,7 @@ void Graph::dump(PrintStream& out, const char* prefixStr, Node* node, DumpContex
     if (clobbersExitState(*this, node))
         out.print(comma, "ClobbersExit");
     if (node->origin.isSet()) {
-        out.print(comma, "bc#", node->origin.semantic.bytecodeIndex());
+        out.print(comma, node->origin.semantic.bytecodeIndex());
         if (node->origin.semantic != node->origin.forExit && node->origin.forExit.isSet())
             out.print(comma, "exit: ", node->origin.forExit);
     }
@@ -1649,10 +1649,10 @@ MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* curren
             }
 
             if (node->hasHeapPrediction())
-                return &profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex());
+                return &profiledBlock->valueProfileForBytecodeIndex(node->origin.semantic.bytecodeIndex());
 
             if (profiledBlock->hasBaselineJITProfiling()) {
-                if (ArithProfile* result = profiledBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex()))
+                if (ArithProfile* result = profiledBlock->arithProfileForBytecodeIndex(node->origin.semantic.bytecodeIndex()))
                     return result;
             }
         }
@@ -1750,12 +1750,12 @@ bool Graph::willCatchExceptionInMachineFrame(CodeOrigin codeOrigin, CodeOrigin&
     if (!m_hasExceptionHandlers)
         return false;
 
-    unsigned bytecodeIndexToCheck = codeOrigin.bytecodeIndex();
+    BytecodeIndex bytecodeIndexToCheck = codeOrigin.bytecodeIndex();
     while (1) {
         InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame();
         CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
-        if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) {
-            opCatchOriginOut = CodeOrigin(handler->target, inlineCallFrame);
+        if (HandlerInfo* handler = codeBlock->handlerForBytecodeIndex(bytecodeIndexToCheck)) {
+            opCatchOriginOut = CodeOrigin(BytecodeIndex(handler->target), inlineCallFrame);
             catchHandlerOut = handler;
             return true;
         }
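
The print sites in this file drop their explicit "bc#" prefix, so the prefix presumably moves into the class's own printer, along the lines of:

    void BytecodeIndex::dump(WTF::PrintStream& out) const
    {
        out.print("bc#", offset());
    }
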
index 4a708aa..6c64d47 100644
@@ -1094,7 +1094,7 @@ public:
 
     // This maps an entrypoint index to a particular op_catch bytecode offset. By convention,
     // it'll never have zero as a key because we use zero to mean the op_enter entrypoint.
-    HashMap<unsigned, unsigned> m_entrypointIndexToCatchBytecodeOffset;
+    HashMap<unsigned, BytecodeIndex> m_entrypointIndexToCatchBytecodeIndex;
 
     HashSet<String> m_localStrings;
     HashMap<const StringImpl*, String> m_copiedStrings;
index c5bcd4e..7e2f9ea 100644
@@ -214,7 +214,7 @@ void JITCode::clearOSREntryBlockAndResetThresholds(CodeBlock *dfgCodeBlock)
 { 
     ASSERT(m_osrEntryBlock);
 
-    unsigned osrEntryBytecode = m_osrEntryBlock->jitCode()->ftlForOSREntry()->bytecodeIndex();
+    BytecodeIndex osrEntryBytecode = m_osrEntryBlock->jitCode()->ftlForOSREntry()->bytecodeIndex();
     m_osrEntryBlock.clear();
     osrEntryRetry = 0;
     tierUpEntryTriggers.set(osrEntryBytecode, JITCode::TriggerReason::DontTrigger);
index bb8f50a..a00002b 100644
@@ -54,7 +54,7 @@ public:
     CommonData* dfgCommon() override;
     JITCode* dfg() override;
     
-    OSREntryData* appendOSREntryData(unsigned bytecodeIndex, CodeLocationLabel<OSREntryPtrTag> machineCode)
+    OSREntryData* appendOSREntryData(BytecodeIndex bytecodeIndex, CodeLocationLabel<OSREntryPtrTag> machineCode)
     {
         DFG::OSREntryData entry;
         entry.m_bytecodeIndex = bytecodeIndex;
@@ -63,9 +63,9 @@ public:
         return &osrEntry.last();
     }
     
-    OSREntryData* osrEntryDataForBytecodeIndex(unsigned bytecodeIndex)
+    OSREntryData* osrEntryDataForBytecodeIndex(BytecodeIndex bytecodeIndex)
     {
-        return tryBinarySearch<OSREntryData, unsigned>(
+        return tryBinarySearch<OSREntryData, BytecodeIndex>(
             osrEntry, osrEntry.size(), bytecodeIndex,
             getOSREntryDataBytecodeIndex);
     }
@@ -151,10 +151,10 @@ public:
     //
     // The key may not always be a target for OSR Entry but the list in the value is guaranteed
     // to be usable for OSR Entry.
-    HashMap<unsigned, Vector<unsigned>> tierUpInLoopHierarchy;
+    HashMap<BytecodeIndex, Vector<BytecodeIndex>> tierUpInLoopHierarchy;
 
     // Map each bytecode of CheckTierUpAndOSREnter to its stream index.
-    HashMap<unsigned, unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> bytecodeIndexToStreamIndex;
+    HashMap<BytecodeIndex, unsigned> bytecodeIndexToStreamIndex;
 
     enum class TriggerReason : uint8_t {
         DontTrigger,
@@ -165,7 +165,7 @@ public:
     // Map each bytecode of CheckTierUpAndOSREnter to its trigger forcing OSR Entry.
     // This can never be modified after it has been initialized since the addresses of the triggers
     // are used by the JIT.
-    HashMap<unsigned, TriggerReason> tierUpEntryTriggers;
+    HashMap<BytecodeIndex, TriggerReason> tierUpEntryTriggers;
 
     WriteBarrier<CodeBlock> m_osrEntryBlock;
     unsigned osrEntryRetry;
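
These maps change from HashMap<unsigned, ...> with explicit UnsignedWithZeroKeyHashTraits to plain HashMap<BytecodeIndex, ...>, which only compiles if WTF has default hash and trait specializations for the new class. A sketch of the usual WTF pattern (the exact names are assumed from convention, not quoted from the patch):

    namespace WTF {

    template<> struct DefaultHash<JSC::BytecodeIndex> {
        // Assumed to hash the raw bits and honor the empty/deleted sentinels.
        typedef JSC::BytecodeIndexHash Hash;
    };

    template<> struct HashTraits<JSC::BytecodeIndex> : SimpleClassHashTraits<JSC::BytecodeIndex> {
        // The empty value is the all-ones sentinel, not zero, so that
        // BytecodeIndex(0) remains a usable key.
        static const bool emptyValueIsZero = false;
    };

    } // namespace WTF
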
index 01063ce..92740a6 100644
@@ -60,7 +60,7 @@ JITCompiler::JITCompiler(Graph& dfg)
         m_disassembler = makeUnique<Disassembler>(dfg);
 #if ENABLE(FTL_JIT)
     m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy());
-    for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes())
+    for (BytecodeIndex tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes())
         m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
 #endif
 }
@@ -374,7 +374,7 @@ void JITCompiler::compile()
     // we need to call out to a helper function to throw the StackOverflowError.
     stackOverflow.link(this);
 
-    emitStoreCodeOrigin(CodeOrigin(0));
+    emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));
 
     if (maxFrameExtentForSlowPathCall)
         addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
@@ -450,7 +450,7 @@ void JITCompiler::compileFunction()
     // we need to call out to a helper function to throw the StackOverflowError.
     stackOverflow.link(this);
 
-    emitStoreCodeOrigin(CodeOrigin(0));
+    emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));
 
     if (maxFrameExtentForSlowPathCall)
         addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
@@ -471,14 +471,14 @@ void JITCompiler::compileFunction()
 
         load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCount), GPRInfo::regT1);
         branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
-        emitStoreCodeOrigin(CodeOrigin(0));
+        emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
         m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0, m_codeBlock->globalObject());
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
         branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
-        emitStoreCodeOrigin(CodeOrigin(0));
+        emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));
         move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
         callArityFixup = nearCall();
         jump(fromArityCheck);
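
CodeOrigin(0) becomes CodeOrigin(BytecodeIndex(0)) throughout this file, i.e. the origin now carries the wrapper type instead of a bare unsigned. A rough sketch of the relevant constructor (the real class packs its fields more compactly):

    class CodeOrigin {
    public:
        CodeOrigin() = default;
        explicit CodeOrigin(BytecodeIndex bytecodeIndex, InlineCallFrame* inlineCallFrame = nullptr)
            : m_bytecodeIndex(bytecodeIndex)
            , m_inlineCallFrame(inlineCallFrame)
        { }

        BytecodeIndex bytecodeIndex() const { return m_bytecodeIndex; }
        InlineCallFrame* inlineCallFrame() const { return m_inlineCallFrame; }

    private:
        BytecodeIndex m_bytecodeIndex;
        InlineCallFrame* m_inlineCallFrame { nullptr };
    };
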
index 08dfb8d..6a139d2 100644
@@ -98,7 +98,7 @@ public:
     // Methods to set labels for the disassembler.
     void setStartOfCode()
     {
-        m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), CodeOrigin(0, nullptr));
+        m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), CodeOrigin(BytecodeIndex(0)));
         if (LIKELY(!m_disassembler))
             return;
         m_disassembler->setStartOfCode(labelIgnoringWatchpoints());
index 9eb46ca..42f68ad 100644
@@ -125,17 +125,17 @@ public:
             if (origin == cachedCodeOrigin)
                 return cachedHandlerResult;
 
-            unsigned bytecodeIndexToCheck = origin.bytecodeIndex();
+            BytecodeIndex bytecodeIndexToCheck = origin.bytecodeIndex();
 
             cachedCodeOrigin = origin;
 
             while (1) {
                 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
                 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
-                if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) {
+                if (HandlerInfo* handler = codeBlock->handlerForBytecodeIndex(bytecodeIndexToCheck)) {
                     liveAtCatchHead.clearAll();
 
-                    unsigned catchBytecodeIndex = handler->target;
+                    BytecodeIndex catchBytecodeIndex = BytecodeIndex(handler->target);
                     m_graph.forAllLocalsLiveInBytecode(CodeOrigin(catchBytecodeIndex, inlineCallFrame), [&] (VirtualRegister operand) {
                         liveAtCatchHead[operand.toLocal()] = true;
                     });
index d939d30..462d682 100644
@@ -42,7 +42,7 @@ namespace JSC { namespace DFG {
 
 void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
 {
-    out.print("bc#", m_bytecodeIndex, ", machine code = ", RawPointer(m_machineCode.executableAddress()));
+    out.print(m_bytecodeIndex, ", machine code = ", RawPointer(m_machineCode.executableAddress()));
     out.print(", stack rules = [");
     
     auto printOperand = [&] (VirtualRegister reg) {
@@ -92,7 +92,7 @@ void OSREntryData::dump(PrintStream& out) const
 }
 
 SUPPRESS_ASAN
-void* prepareOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, unsigned bytecodeIndex)
+void* prepareOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, BytecodeIndex bytecodeIndex)
 {
     ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
     ASSERT(codeBlock->alternative());
@@ -106,7 +106,7 @@ void* prepareOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, unsign
     if (Options::verboseOSR()) {
         dataLog(
             "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
-            " from bc#", bytecodeIndex, "\n");
+            " from ", bytecodeIndex, "\n");
     }
     
     sanitizeStackForVM(vm);
@@ -329,8 +329,8 @@ void* prepareOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, unsign
     return scratch;
 }
 
-MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, unsigned bytecodeIndex)
-{ 
+MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, BytecodeIndex bytecodeIndex)
+{
     ASSERT(codeBlock->jitType() == JITType::DFGJIT || codeBlock->jitType() == JITType::FTLJIT);
     ASSERT(codeBlock->jitCode()->dfgCommon()->isStillValid);
 
@@ -380,7 +380,7 @@ MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(VM& vm, CallF
     if (UNLIKELY(!vm.ensureStackCapacityFor(&callFrame->registers()[virtualRegisterForLocal(frameSizeForCheck).offset()])))
         return nullptr;
 
-    auto instruction = callFrame->codeBlock()->instructions().at(callFrame->bytecodeOffset());
+    auto instruction = callFrame->codeBlock()->instructions().at(callFrame->bytecodeIndex());
     ASSERT(instruction->is<OpCatch>());
     ValueProfileAndOperandBuffer* buffer = instruction->as<OpCatch>().metadata(callFrame).m_buffer;
     JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer());
index d81677a..cf96d71 100644
@@ -53,7 +53,7 @@ struct OSREntryReshuffling {
 };
 
 struct OSREntryData {
-    unsigned m_bytecodeIndex;
+    BytecodeIndex m_bytecodeIndex;
     CodeLocationLabel<OSREntryPtrTag> m_machineCode;
     Operands<AbstractValue> m_expectedValues;
     // Use bitvectors here because they tend to only require one word.
@@ -66,7 +66,7 @@ struct OSREntryData {
     void dump(PrintStream&) const;
 };
 
-inline unsigned getOSREntryDataBytecodeIndex(OSREntryData* osrEntryData)
+inline BytecodeIndex getOSREntryDataBytecodeIndex(OSREntryData* osrEntryData)
 {
     return osrEntryData->m_bytecodeIndex;
 }
@@ -76,17 +76,17 @@ struct CatchEntrypointData {
     // are of the expected type before entering at a catch block.
     MacroAssemblerCodePtr<ExceptionHandlerPtrTag> machineCode;
     Vector<FlushFormat> argumentFormats;
-    unsigned bytecodeIndex;
+    BytecodeIndex bytecodeIndex;
 };
 
 // Returns a pointer to a data buffer that the OSR entry thunk will recognize and
 // parse. If this returns null, it means 
-void* prepareOSREntry(VM&, CallFrame*, CodeBlock*, unsigned bytecodeIndex);
+void* prepareOSREntry(VM&, CallFrame*, CodeBlock*, BytecodeIndex);
 
 // If null is returned, we can't OSR enter. If it's not null, it's the PC to jump to.
-MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(VM&, CallFrame*, CodeBlock*, unsigned bytecodeIndex);
+MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(VM&, CallFrame*, CodeBlock*, BytecodeIndex);
 #else
-inline MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareOSREntry(VM&, CallFrame*, CodeBlock*, unsigned) { return nullptr; }
+inline MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareOSREntry(VM&, CallFrame*, CodeBlock*, BytecodeIndex) { return nullptr; }
 #endif
 
 } } // namespace JSC::DFG
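
With these signatures, OSR-entry callers construct the wrapper explicitly instead of passing a bare offset. A hypothetical call site (loopHeaderOffset is an assumed local, not taken from the patch):

    // loopHeaderOffset: unsigned byte offset of an op_loop_hint we want to enter at.
    if (void* entry = DFG::prepareOSREntry(vm, callFrame, codeBlock, BytecodeIndex(loopHeaderOffset))) {
        // Jump into the optimized code at the prepared entry point.
    }
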
index 9e6cc6f..dc76bcb 100644
@@ -50,10 +50,10 @@ public:
         RELEASE_ASSERT(m_graph.m_plan.mode() == FTLForOSREntryMode);
         RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);
 
-        unsigned bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex();
+        BytecodeIndex bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex();
         RELEASE_ASSERT(bytecodeIndex);
-        RELEASE_ASSERT(bytecodeIndex != UINT_MAX);
-        
+        RELEASE_ASSERT(bytecodeIndex.offset());
+
         // Needed by createPreHeader().
         m_graph.ensureCPSDominators();
         
@@ -92,7 +92,7 @@ public:
         BasicBlock* newRoot = insertionSet.insert(0, 1);
 
         // We'd really like to use an unset origin, but ThreadedCPS won't allow that.
-        NodeOrigin origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), false);
+        NodeOrigin origin = NodeOrigin(CodeOrigin(BytecodeIndex(0)), CodeOrigin(BytecodeIndex(0)), false);
         
         Vector<Node*> locals(baseline->numCalleeLocals());
         for (int local = 0; local < baseline->numCalleeLocals(); ++local) {
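
The pair of assertions rewritten in this phase is not redundant: the truthiness check verifies that an OSR-entry index was supplied at all, while offset() != 0 verifies it is not the op_enter entrypoint at offset zero. Concretely:

    BytecodeIndex unset;            // !unset: no OSR entry was requested
    BytecodeIndex enter(0);         // truthy, but offset() == 0, i.e. op_enter
    BytecodeIndex loopHead(0x54);   // truthy with a nonzero offset (0x54 is an arbitrary example)
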
index ac6b9cd..d0e0a5f 100644
@@ -405,7 +405,7 @@ void OSRExit::executeOSRExit(Context& context)
         bool exitToLLInt = Options::forceOSRExitToLLInt() || codeBlockForExit->jitType() == JITType::InterpreterThunk;
         void* jumpTarget;
         if (exitToLLInt) {
-            unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
+            BytecodeIndex bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
             const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeOffset).ptr();
             MacroAssemblerCodePtr<JSEntryPtrTag> destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);
             jumpTarget = destination.executableAddress();    
@@ -786,7 +786,7 @@ static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselin
             callerFrame = frame.get<void*>(CallFrame::callerFrameOffset());
         } else {
             CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
-            unsigned callBytecodeIndex = trueCaller->bytecodeIndex();
+            BytecodeIndex callBytecodeIndex = trueCaller->bytecodeIndex();
             void* jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind, callerIsLLInt);
 
             if (trueCaller->inlineCallFrame())
@@ -881,13 +881,13 @@ static void adjustAndJumpToTarget(Context& context, VM& vm, CodeBlock* codeBlock
 
     if (exitState->isJumpToLLInt) {
         CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
-        unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
-        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeOffset).ptr();
+        BytecodeIndex bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
+        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeIndex).ptr();
 
         context.gpr(LLInt::Registers::metadataTableGPR) = bitwise_cast<uintptr_t>(codeBlockForExit->metadataTable());
 #if USE(JSVALUE64)
         context.gpr(LLInt::Registers::pbGPR) = bitwise_cast<uintptr_t>(codeBlockForExit->instructionsRawPointer());
-        context.gpr(LLInt::Registers::pcGPR) = static_cast<uintptr_t>(exit.m_codeOrigin.bytecodeIndex());
+        context.gpr(LLInt::Registers::pcGPR) = static_cast<uintptr_t>(exit.m_codeOrigin.bytecodeIndex().offset());
 #else
         context.gpr(LLInt::Registers::pcGPR) = bitwise_cast<uintptr_t>(&currentInstruction);
 #endif
@@ -905,10 +905,10 @@ static void printOSRExit(Context& context, uint32_t osrExitIndex, const OSRExit&
     CodeBlock* codeBlock = callFrame->codeBlock();
     CodeBlock* alternative = codeBlock->alternative();
     ExitKind kind = exit.m_kind;
-    unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
+    BytecodeIndex bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
 
     dataLog("Speculation failure in ", *codeBlock);
-    dataLog(" @ exit #", osrExitIndex, " (bc#", bytecodeOffset, ", ", exitKindToString(kind), ") with ");
+    dataLog(" @ exit #", osrExitIndex, " (", bytecodeOffset, ", ", exitKindToString(kind), ") with ");
     if (alternative) {
         dataLog(
             "executeCounter = ", alternative->jitExecuteCounter(),
@@ -1113,7 +1113,7 @@ void OSRExit::compileExit(CCallHelpers& jit, VM& vm, const OSRExit& exit, const
         SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
         debugInfo->codeBlock = jit.codeBlock();
         debugInfo->kind = exit.m_kind;
-        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
+        debugInfo->bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
 
         jit.debugCall(vm, debugOperationPrintSpeculationFailure, debugInfo);
     }
@@ -1670,7 +1670,7 @@ void JIT_OPERATION OSRExit::debugOperationPrintSpeculationFailure(CallFrame* cal
     CodeBlock* codeBlock = debugInfo->codeBlock;
     CodeBlock* alternative = codeBlock->alternative();
     dataLog("Speculation failure in ", *codeBlock);
-    dataLog(" @ exit #", vm.osrExitIndex, " (bc#", debugInfo->bytecodeOffset, ", ", exitKindToString(debugInfo->kind), ") with ");
+    dataLog(" @ exit #", vm.osrExitIndex, " (", debugInfo->bytecodeIndex, ", ", exitKindToString(debugInfo->kind), ") with ");
     if (alternative) {
         dataLog(
             "executeCounter = ", alternative->jitExecuteCounter(),
index 25704d2..d1fc5f1 100644
@@ -176,7 +176,7 @@ struct SpeculationFailureDebugInfo {
     WTF_MAKE_STRUCT_FAST_ALLOCATED;
     CodeBlock* codeBlock;
     ExitKind kind;
-    unsigned bytecodeOffset;
+    BytecodeIndex bytecodeIndex;
 };
 
 } } // namespace JSC::DFG
index 22de9e6..54877b9 100644
@@ -138,7 +138,7 @@ void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
     doneAdjusting.link(&jit);
 }
 
-void* callerReturnPC(CodeBlock* baselineCodeBlockForCaller, unsigned callBytecodeIndex, InlineCallFrame::Kind trueCallerCallKind, bool& callerIsLLInt)
+void* callerReturnPC(CodeBlock* baselineCodeBlockForCaller, BytecodeIndex callBytecodeIndex, InlineCallFrame::Kind trueCallerCallKind, bool& callerIsLLInt)
 {
     callerIsLLInt = Options::forceOSRExitToLLInt() || baselineCodeBlockForCaller->jitType() == JITType::InterpreterThunk;
 
@@ -263,7 +263,7 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
             callerFrameGPR = GPRInfo::regT3;
         } else {
             CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
-            unsigned callBytecodeIndex = trueCaller->bytecodeIndex();
+            auto callBytecodeIndex = trueCaller->bytecodeIndex();
             void* jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind, callerIsLLInt);
 
             if (trueCaller->inlineCallFrame()) {
@@ -382,8 +382,8 @@ void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
     void* jumpTarget;
     bool exitToLLInt = Options::forceOSRExitToLLInt() || codeBlockForExit->jitType() == JITType::InterpreterThunk;
     if (exitToLLInt) {
-        unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
-        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeOffset).ptr();
+        auto bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
+        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeIndex).ptr();
         MacroAssemblerCodePtr<JSEntryPtrTag> destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);
 
         if (exit.isExceptionHandler()) {
@@ -394,7 +394,7 @@ void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
         jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->metadataTable()), LLInt::Registers::metadataTableGPR);
 #if USE(JSVALUE64)
         jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->instructionsRawPointer()), LLInt::Registers::pbGPR);
-        jit.move(CCallHelpers::TrustedImm32(bytecodeOffset), LLInt::Registers::pcGPR);
+        jit.move(CCallHelpers::TrustedImm32(bytecodeIndex.offset()), LLInt::Registers::pcGPR);
 #else
         jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), LLInt::Registers::pcGPR);
 #endif
index 0caeb00..4fc996f 100644
@@ -39,7 +39,7 @@ namespace JSC { namespace DFG {
 void handleExitCounts(CCallHelpers&, const OSRExitBase&);
 void reifyInlinedCallFrames(CCallHelpers&, const OSRExitBase&);
 void adjustAndJumpToTarget(VM&, CCallHelpers&, const OSRExitBase&);
-void* callerReturnPC(CodeBlock* baselineCodeBlockForCaller, unsigned callBytecodeOffset, InlineCallFrame::Kind callerKind, bool& callerIsLLInt);
+void* callerReturnPC(CodeBlock* baselineCodeBlockForCaller, BytecodeIndex callBytecodeIndex, InlineCallFrame::Kind callerKind, bool& callerIsLLInt);
 CCallHelpers::Address calleeSaveSlot(InlineCallFrame*, CodeBlock* baselineCodeBlock, GPRReg calleeSave);
 
 template <typename JITCodeType>
index d622eb7..6aee554 100644
@@ -3583,7 +3583,7 @@ static void triggerFTLReplacementCompile(VM& vm, CodeBlock* codeBlock, JITCode*
     CODEBLOCK_LOG_EVENT(codeBlock, "triggerFTLReplacement", ());
     // We need to compile the code.
     compile(
-        vm, codeBlock->newReplacement(), codeBlock, FTLMode, UINT_MAX,
+        vm, codeBlock->newReplacement(), codeBlock, FTLMode, BytecodeIndex(),
         Operands<Optional<JSValue>>(), ToFTLDeferredCompilationCallback::create());
 
     // If we reached here, the counter has not be reset. Do that now.
@@ -3637,7 +3637,7 @@ void JIT_OPERATION triggerTierUpNow(VM* vmPointer)
     }
 }
 
-static char* tierUpCommon(VM& vm, CallFrame* callFrame, unsigned originBytecodeIndex, bool canOSREnterHere)
+static char* tierUpCommon(VM& vm, CallFrame* callFrame, BytecodeIndex originBytecodeIndex, bool canOSREnterHere)
 {
     CodeBlock* codeBlock = callFrame->codeBlock();
 
@@ -3690,7 +3690,7 @@ static char* tierUpCommon(VM& vm, CallFrame* callFrame, unsigned originBytecodeI
                 if (Options::verboseOSR())
                     dataLog("OSR entry: From ", RawPointer(jitCode), " got entry block ", RawPointer(entryBlock), "\n");
                 if (void* address = FTL::prepareOSREntry(vm, callFrame, codeBlock, entryBlock, originBytecodeIndex, streamIndex)) {
-                    CODEBLOCK_LOG_EVENT(entryBlock, "osrEntry", ("at bc#", originBytecodeIndex));
+                    CODEBLOCK_LOG_EVENT(entryBlock, "osrEntry", ("at ", originBytecodeIndex));
                     return retagCodePtr<char*>(address, JSEntryPtrTag, bitwise_cast<PtrTag>(callFrame));
                 }
             }
@@ -3775,7 +3775,7 @@ static char* tierUpCommon(VM& vm, CallFrame* callFrame, unsigned originBytecodeI
             // executing to compile. We start with trying to compile outer loops since we believe outer loop
             // compilations reveal the best opportunities for optimizing code.
             for (auto iter = tierUpHierarchyEntry->value.rbegin(), end = tierUpHierarchyEntry->value.rend(); iter != end; ++iter) {
-                unsigned osrEntryCandidate = *iter;
+                BytecodeIndex osrEntryCandidate = *iter;
 
                 if (jitCode->tierUpEntryTriggers.get(osrEntryCandidate) == JITCode::TriggerReason::StartCompilation) {
                     // This means that we already asked this loop to compile. If we've reached here, it
@@ -3787,7 +3787,7 @@ static char* tierUpCommon(VM& vm, CallFrame* callFrame, unsigned originBytecodeI
                 // This is where we ask the outer to loop to immediately compile itself if program
                 // control reaches it.
                 if (Options::verboseOSR())
-                    dataLog("Inner-loop bc#", originBytecodeIndex, " in ", *codeBlock, " setting parent loop bc#", osrEntryCandidate, "'s trigger and backing off.\n");
+                    dataLog("Inner-loop ", originBytecodeIndex, " in ", *codeBlock, " setting parent loop ", osrEntryCandidate, "'s trigger and backing off.\n");
                 jitCode->tierUpEntryTriggers.set(osrEntryCandidate, JITCode::TriggerReason::StartCompilation);
                 return true;
             }
@@ -3837,7 +3837,7 @@ static char* tierUpCommon(VM& vm, CallFrame* callFrame, unsigned originBytecodeI
         return nullptr;
     }
     
-    CODEBLOCK_LOG_EVENT(jitCode->osrEntryBlock(), "osrEntry", ("at bc#", originBytecodeIndex));
+    CODEBLOCK_LOG_EVENT(jitCode->osrEntryBlock(), "osrEntry", ("at ", originBytecodeIndex));
     // It's possible that the for-entry compile already succeeded. In that case OSR
     // entry will succeed unless we ran out of stack. It's not clear what we should do.
     // We signal to try again after a while if that happens.
@@ -3850,13 +3850,14 @@ static char* tierUpCommon(VM& vm, CallFrame* callFrame, unsigned originBytecodeI
     return retagCodePtr<char*>(address, JSEntryPtrTag, bitwise_cast<PtrTag>(callFrame));
 }
 
-void JIT_OPERATION triggerTierUpNowInLoop(VM* vmPointer, unsigned bytecodeIndex)
+void JIT_OPERATION triggerTierUpNowInLoop(VM* vmPointer, unsigned bytecodeIndexBits)
 {
     VM& vm = *vmPointer;
     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
     NativeCallFrameTracer tracer(vm, callFrame);
     DeferGCForAWhile deferGC(vm.heap);
     CodeBlock* codeBlock = callFrame->codeBlock();
+    BytecodeIndex bytecodeIndex = BytecodeIndex::fromBits(bytecodeIndexBits);
 
     sanitizeStackForVM(vm);
 
@@ -3885,13 +3886,14 @@ void JIT_OPERATION triggerTierUpNowInLoop(VM* vmPointer, unsigned bytecodeIndex)
     }
 }
 
-char* JIT_OPERATION triggerOSREntryNow(VM* vmPointer, unsigned bytecodeIndex)
+char* JIT_OPERATION triggerOSREntryNow(VM* vmPointer, unsigned bytecodeIndexBits)
 {
     VM& vm = *vmPointer;
     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
     NativeCallFrameTracer tracer(vm, callFrame);
     DeferGCForAWhile deferGC(vm.heap);
     CodeBlock* codeBlock = callFrame->codeBlock();
+    BytecodeIndex bytecodeIndex = BytecodeIndex::fromBits(bytecodeIndexBits);
 
     sanitizeStackForVM(vm);
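
These entry points receive the index as raw bits from the JIT (the TrustedImm32(bytecodeIndex.asBits()) call sites appear in the SpeculativeJIT hunks below) and reconstruct it with fromBits, so the two conversions must be exact inverses:

    BytecodeIndex original(42);                         // some bytecode index
    uint32_t bits = original.asBits();                  // marshalled as a 32-bit immediate
    BytecodeIndex roundTripped = BytecodeIndex::fromBits(bits);
    ASSERT(roundTripped == original);                   // fromBits must invert asBits
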
 
index 45796ff..94f4c82 100644
@@ -304,8 +304,8 @@ double JIT_OPERATION operationRandom(JSGlobalObject*);
 
 #if ENABLE(FTL_JIT)
 void JIT_OPERATION triggerTierUpNow(VM*) WTF_INTERNAL;
-void JIT_OPERATION triggerTierUpNowInLoop(VM*, unsigned bytecodeIndex) WTF_INTERNAL;
-char* JIT_OPERATION triggerOSREntryNow(VM*, unsigned bytecodeIndex) WTF_INTERNAL;
+void JIT_OPERATION triggerTierUpNowInLoop(VM*, unsigned bytecodeIndexBits) WTF_INTERNAL;
+char* JIT_OPERATION triggerOSREntryNow(VM*, unsigned bytecodeIndexBits) WTF_INTERNAL;
 #endif // ENABLE(FTL_JIT)
 
 } // extern "C"
index 3000c31..d41a3d4 100644
@@ -134,7 +134,7 @@ Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
 } // anonymous namespace
 
 Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
-    CompilationMode mode, unsigned osrEntryBytecodeIndex,
+    CompilationMode mode, BytecodeIndex osrEntryBytecodeIndex,
     const Operands<Optional<JSValue>>& mustHandleValues)
     : m_mode(mode)
     , m_vm(&passedCodeBlock->vm())
@@ -240,9 +240,9 @@ Plan::CompilationPath Plan::compileInThreadImpl()
 {
     cleanMustHandleValuesIfNecessary();
 
-    if (verboseCompilationEnabled(m_mode) && m_osrEntryBytecodeIndex != UINT_MAX) {
+    if (verboseCompilationEnabled(m_mode) && m_osrEntryBytecodeIndex) {
         dataLog("\n");
-        dataLog("Compiler must handle OSR entry from bc#", m_osrEntryBytecodeIndex, " with values: ", m_mustHandleValues, "\n");
+        dataLog("Compiler must handle OSR entry from ", m_osrEntryBytecodeIndex, " with values: ", m_mustHandleValues, "\n");
         dataLog("\n");
     }
 
@@ -725,7 +725,7 @@ void Plan::cleanMustHandleValuesIfNecessary()
         return;
 
     CodeBlock* alternative = m_codeBlock->alternative();
-    FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeOffset(alternative, m_osrEntryBytecodeIndex);
+    FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeIndex(alternative, m_osrEntryBytecodeIndex);
 
     for (unsigned local = m_mustHandleValues.numberOfLocals(); local--;) {
         if (!liveness[local])
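
Plan previously used UINT_MAX as its "not an OSR-entry compile" marker; the boolean tests above rely on the default-constructed BytecodeIndex playing that role instead. For example:

    BytecodeIndex osrEntryIndex;              // empty: a whole-function (non-OSR) compile
    if (!osrEntryIndex) {
        // Nothing to clean in mustHandleValues.
    }
    osrEntryIndex = BytecodeIndex(0x40);      // hypothetical loop header to enter at
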
index ceda040..a6acb25 100644
@@ -56,7 +56,7 @@ class Plan : public ThreadSafeRefCounted<Plan> {
 public:
     Plan(
         CodeBlock* codeBlockToCompile, CodeBlock* profiledDFGCodeBlock,
-        CompilationMode, unsigned osrEntryBytecodeIndex,
+        CompilationMode, BytecodeIndex osrEntryBytecodeIndex,
         const Operands<Optional<JSValue>>& mustHandleValues);
     ~Plan();
 
@@ -87,7 +87,7 @@ public:
 
     bool isFTL() const { return DFG::isFTL(m_mode); }
     CompilationMode mode() const { return m_mode; }
-    unsigned osrEntryBytecodeIndex() const { return m_osrEntryBytecodeIndex; }
+    BytecodeIndex osrEntryBytecodeIndex() const { return m_osrEntryBytecodeIndex; }
     const Operands<Optional<JSValue>>& mustHandleValues() const { return m_mustHandleValues; }
     ThreadData* threadData() const { return m_threadData; }
     Profiler::Compilation* compilation() const { return m_compilation.get(); }
@@ -106,8 +106,8 @@ public:
     bool willTryToTierUp() const { return m_willTryToTierUp; }
     void setWillTryToTierUp(bool willTryToTierUp) { m_willTryToTierUp = willTryToTierUp; }
 
-    HashMap<unsigned, Vector<unsigned>>& tierUpInLoopHierarchy() { return m_tierUpInLoopHierarchy; }
-    Vector<unsigned>& tierUpAndOSREnterBytecodes() { return m_tierUpAndOSREnterBytecodes; }
+    HashMap<BytecodeIndex, Vector<BytecodeIndex>>& tierUpInLoopHierarchy() { return m_tierUpInLoopHierarchy; }
+    Vector<BytecodeIndex>& tierUpAndOSREnterBytecodes() { return m_tierUpAndOSREnterBytecodes; }
 
     enum Stage { Preparing, Compiling, Ready, Cancelled };
     Stage stage() const { return m_stage; }
@@ -143,7 +143,7 @@ private:
 
     bool m_willTryToTierUp { false };
 
-    const unsigned m_osrEntryBytecodeIndex;
+    const BytecodeIndex m_osrEntryBytecodeIndex;
 
     ThreadData* m_threadData;
 
@@ -159,8 +159,8 @@ private:
     DesiredGlobalProperties m_globalProperties;
     RecordedStatuses m_recordedStatuses;
 
-    HashMap<unsigned, Vector<unsigned>> m_tierUpInLoopHierarchy;
-    Vector<unsigned> m_tierUpAndOSREnterBytecodes;
+    HashMap<BytecodeIndex, Vector<BytecodeIndex>> m_tierUpInLoopHierarchy;
+    Vector<BytecodeIndex> m_tierUpAndOSREnterBytecodes;
 
     Stage m_stage;
 
index fb9e470..436e5d3 100644
@@ -97,14 +97,14 @@ public:
 
                 if (oldRoot->isCatchEntrypoint) {
                     ASSERT(!!entrypointIndex);
-                    m_graph.m_entrypointIndexToCatchBytecodeOffset.add(entrypointIndex, oldRoot->bytecodeBegin);
+                    m_graph.m_entrypointIndexToCatchBytecodeIndex.add(entrypointIndex, oldRoot->bytecodeBegin);
                 }
             }
 
             RELEASE_ASSERT(entrySwitchData->cases[0] == m_graph.block(0)); // We strongly assume the normal call entrypoint is the first item in the list.
 
             const bool exitOK = false;
-            NodeOrigin origin { CodeOrigin(0), CodeOrigin(0), exitOK };
+            NodeOrigin origin { CodeOrigin(BytecodeIndex(0)), CodeOrigin(BytecodeIndex(0)), exitOK };
             newRoot->appendNode(
                 m_graph, SpecNone, EntrySwitch, origin, OpInfo(entrySwitchData));
 
index 3daec43..4741614 100644
@@ -1862,13 +1862,8 @@ void SpeculativeJIT::compileCurrentBlock()
         
         ASSERT(m_currentNode->shouldGenerate());
         
-        if (verboseCompilationEnabled()) {
-            dataLogF(
-                "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
-                (int)m_currentNode->index(),
-                m_currentNode->origin.semantic.bytecodeIndex(), m_jit.debugOffset());
-            dataLog("\n");
-        }
+        if (verboseCompilationEnabled())
+            dataLogLn("SpeculativeJIT generating Node @", (int)m_currentNode->index(), " (", m_currentNode->origin.semantic.bytecodeIndex().offset(), ") at JIT offset 0x", m_jit.debugOffset());
 
         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
             m_jit.jitReleaseAssertNoException(m_jit.vm());
@@ -1905,7 +1900,7 @@ void SpeculativeJIT::compileCurrentBlock()
 void SpeculativeJIT::checkArgumentTypes()
 {
     ASSERT(!m_currentNode);
-    m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
+    m_origin = NodeOrigin(CodeOrigin(BytecodeIndex(0)), CodeOrigin(BytecodeIndex(0)), true);
 
     auto& arguments = m_jit.graph().m_rootToArguments.find(m_jit.graph().block(0))->value;
     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
@@ -3998,8 +3993,8 @@ void SpeculativeJIT::compileValueAdd(Node* node)
 #endif
 
     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
-    unsigned bytecodeIndex = node->origin.semantic.bytecodeIndex();
-    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
+    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
     auto repatchingFunction = operationValueAddOptimize;
     auto nonRepatchingFunction = operationValueAdd;
@@ -4022,8 +4017,8 @@ void SpeculativeJIT::compileValueSub(Node* node)
 #endif
 
         CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
-        unsigned bytecodeIndex = node->origin.semantic.bytecodeIndex();
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+        BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         JITSubIC* subIC = m_jit.codeBlock()->addJITSubIC(arithProfile);
         auto repatchingFunction = operationValueSubOptimize;
         auto nonRepatchingFunction = operationValueSub;
@@ -4616,8 +4611,8 @@ void SpeculativeJIT::compileArithSub(Node* node)
 void SpeculativeJIT::compileValueNegate(Node* node)
 {
     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
-    unsigned bytecodeIndex = node->origin.semantic.bytecodeIndex();
-    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
+    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
     JITNegIC* negIC = m_jit.codeBlock()->addJITNegIC(arithProfile);
     auto repatchingFunction = operationArithNegateOptimize;
     auto nonRepatchingFunction = operationArithNegate;
@@ -4839,8 +4834,8 @@ void SpeculativeJIT::compileValueMul(Node* node)
 #endif
 
     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
-    unsigned bytecodeIndex = node->origin.semantic.bytecodeIndex();
-    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
+    ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
     JITMulIC* mulIC = m_jit.codeBlock()->addJITMulIC(arithProfile);
     auto repatchingFunction = operationValueMulOptimize;
     auto nonRepatchingFunction = operationValueMul;
@@ -10731,7 +10726,7 @@ void SpeculativeJIT::emitSwitchCharStringJump(Node* node, SwitchData* data, GPRR
             MacroAssembler::Address(scratch, StringImpl::lengthMemoryOffset()),
             TrustedImm32(1)),
         data->fallThrough.block);
-
+    
     m_jit.loadPtr(MacroAssembler::Address(scratch, StringImpl::dataOffset()), value);
     
     JITCompiler::Jump is8Bit = m_jit.branchTest32(
index 064de71..f7579fc 100644
@@ -5079,13 +5079,13 @@ void SpeculativeJIT::compile(Node* node)
 
         Vector<SilentRegisterSavePlan> savePlans;
         silentSpillAllRegistersImpl(false, savePlans, InvalidGPRReg);
-        unsigned bytecodeIndex = node->origin.semantic.bytecodeIndex();
+        BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
 
         addSlowPathGeneratorLambda([=]() {
             callTierUp.link(&m_jit);
 
             silentSpill(savePlans);
-            callOperation(triggerTierUpNowInLoop, &vm(), TrustedImm32(bytecodeIndex));
+            callOperation(triggerTierUpNowInLoop, &vm(), TrustedImm32(bytecodeIndex.asBits()));
             silentFill(savePlans);
 
             m_jit.jump().linkTo(toNextOperation, &m_jit);
@@ -5113,7 +5113,7 @@ void SpeculativeJIT::compile(Node* node)
         GPRTemporary temp(this);
         GPRReg tempGPR = temp.gpr();
 
-        unsigned bytecodeIndex = node->origin.semantic.bytecodeIndex();
+        BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
         auto triggerIterator = m_jit.jitCode()->tierUpEntryTriggers.find(bytecodeIndex);
         DFG_ASSERT(m_jit.graph(), node, triggerIterator != m_jit.jitCode()->tierUpEntryTriggers.end());
         JITCode::TriggerReason* forceEntryTrigger = &(m_jit.jitCode()->tierUpEntryTriggers.find(bytecodeIndex)->value);
@@ -5138,7 +5138,7 @@ void SpeculativeJIT::compile(Node* node)
             overflowedCounter.link(&m_jit);
 
             silentSpill(savePlans);
-            callOperation(triggerOSREntryNow, tempGPR, &vm(), TrustedImm32(bytecodeIndex));
+            callOperation(triggerOSREntryNow, tempGPR, &vm(), TrustedImm32(bytecodeIndex.asBits()));
 
             if (savePlans.isEmpty())
                 m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR).linkTo(toNextOperation, &m_jit);
index 1badc57..f5d73c1 100644
@@ -85,9 +85,9 @@ public:
         
         m_graph.ensureCPSNaturalLoops();
         CPSNaturalLoops& naturalLoops = *m_graph.m_cpsNaturalLoops;
-        HashMap<const NaturalLoop*, unsigned> naturalLoopToLoopHint = buildNaturalLoopToLoopHintMap(naturalLoops);
+        HashMap<const NaturalLoop*, BytecodeIndex> naturalLoopToLoopHint = buildNaturalLoopToLoopHintMap(naturalLoops);
 
-        HashMap<unsigned, LoopHintDescriptor> tierUpHierarchy;
+        HashMap<BytecodeIndex, LoopHintDescriptor> tierUpHierarchy;
 
         InsertionSet insertionSet(m_graph);
         for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
@@ -108,7 +108,7 @@ public:
                     tierUpType = CheckTierUpInLoop;
                 insertionSet.insertNode(nodeIndex + 1, SpecNone, tierUpType, origin);
 
-                unsigned bytecodeIndex = origin.semantic.bytecodeIndex();
+                auto bytecodeIndex = origin.semantic.bytecodeIndex();
                 if (canOSREnter)
                     m_graph.m_plan.tierUpAndOSREnterBytecodes().append(bytecodeIndex);
 
@@ -138,8 +138,8 @@ public:
 
         // Add all the candidates that can be OSR Entered.
         for (auto entry : tierUpHierarchy) {
-            Vector<unsigned> tierUpCandidates;
-            for (unsigned bytecodeIndex : entry.value.osrEntryCandidates) {
+            Vector<BytecodeIndex> tierUpCandidates;
+            for (BytecodeIndex bytecodeIndex : entry.value.osrEntryCandidates) {
                 auto descriptorIt = tierUpHierarchy.find(bytecodeIndex);
                 if (descriptorIt != tierUpHierarchy.end()
                     && descriptorIt->value.canOSREnter)
@@ -160,7 +160,7 @@ public:
 private:
 #if ENABLE(FTL_JIT)
     struct LoopHintDescriptor {
-        Vector<unsigned> osrEntryCandidates;
+        Vector<BytecodeIndex> osrEntryCandidates;
         bool canOSREnter;
     };
 
@@ -183,9 +183,9 @@ private:
         return true;
     }
 
-    HashMap<const NaturalLoop*, unsigned> buildNaturalLoopToLoopHintMap(const CPSNaturalLoops& naturalLoops)
+    HashMap<const NaturalLoop*, BytecodeIndex> buildNaturalLoopToLoopHintMap(const CPSNaturalLoops& naturalLoops)
     {
-        HashMap<const NaturalLoop*, unsigned> naturalLoopsToLoopHint;
+        HashMap<const NaturalLoop*, BytecodeIndex> naturalLoopsToLoopHint;
 
         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
@@ -194,7 +194,7 @@ private:
                     continue;
 
                 if (const NaturalLoop* loop = naturalLoops.innerMostLoopOf(block)) {
-                    unsigned bytecodeIndex = node->origin.semantic.bytecodeIndex();
+                    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
                     naturalLoopsToLoopHint.add(loop, bytecodeIndex);
                 }
                 break;
index 8db6dbe..a42949c 100644
@@ -75,7 +75,7 @@ void ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete(
     switch (result) {
     case CompilationSuccessful: {
         jitCode->setOSREntryBlock(codeBlock->vm(), profiledDFGCodeBlock, codeBlock);
-        unsigned osrEntryBytecode = codeBlock->jitCode()->ftlForOSREntry()->bytecodeIndex();
+        BytecodeIndex osrEntryBytecode = codeBlock->jitCode()->ftlForOSREntry()->bytecodeIndex();
         jitCode->tierUpEntryTriggers.set(osrEntryBytecode, JITCode::TriggerReason::CompilationDone);
         break;
     }
index 8c04191..ef85b42 100644
@@ -780,7 +780,7 @@ private:
 
         auto& dominators = m_graph.ensureSSADominators();
 
-        for (unsigned entrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys())
+        for (unsigned entrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeIndex.keys())
             VALIDATE((), entrypointIndex > 0); // By convention, 0 is the entrypoint index for the op_enter entrypoint, which can not be in a catch.
 
         for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
index 4d4b1eb..de326be 100644
@@ -157,12 +157,12 @@ void compile(State& state, Safepoint::Result& safepointResult)
     state.generatedFunction = label;
     state.jitCode->initializeB3Byproducts(state.proc->releaseByproducts());
 
-    for (auto pair : state.graph.m_entrypointIndexToCatchBytecodeOffset) {
-        unsigned catchBytecodeOffset = pair.value;
+    for (auto pair : state.graph.m_entrypointIndexToCatchBytecodeIndex) {
+        BytecodeIndex catchBytecodeIndex = pair.value;
         unsigned entrypointIndex = pair.key;
         Vector<FlushFormat> argumentFormats = state.graph.m_argumentFormats[entrypointIndex];
         state.jitCode->common.appendCatchEntrypoint(
-            catchBytecodeOffset, state.finalizer->b3CodeLinkBuffer->locationOf<ExceptionHandlerPtrTag>(state.proc->entrypointLabel(entrypointIndex)), WTFMove(argumentFormats));
+            catchBytecodeIndex, state.finalizer->b3CodeLinkBuffer->locationOf<ExceptionHandlerPtrTag>(state.proc->entrypointLabel(entrypointIndex)), WTFMove(argumentFormats));
     }
     state.jitCode->common.finalizeCatchEntrypoints();
 
index af2cd11..2689d3a 100644
@@ -48,8 +48,8 @@ public:
     void initializeEntryBuffer(VM&, unsigned numCalleeLocals);
     ScratchBuffer* entryBuffer() const { return m_entryBuffer; }
     
-    void setBytecodeIndex(unsigned value) { m_bytecodeIndex = value; }
-    unsigned bytecodeIndex() const { return m_bytecodeIndex; }
+    void setBytecodeIndex(BytecodeIndex value) { m_bytecodeIndex = value; }
+    BytecodeIndex bytecodeIndex() const { return m_bytecodeIndex; }
     
     void countEntryFailure() { m_entryFailureCount++; }
     unsigned entryFailureCount() const { return m_entryFailureCount; }
@@ -58,7 +58,7 @@ public:
     
 private:
     ScratchBuffer* m_entryBuffer; // Only for OSR entry code blocks.
-    unsigned m_bytecodeIndex;
+    BytecodeIndex m_bytecodeIndex;
     unsigned m_entryFailureCount;
 };
 
index 7ef4179..8a00d96 100644
@@ -211,7 +211,7 @@ public:
                     jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
                 });
 
-            for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
+            for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeIndex.keys()) {
                 RELEASE_ASSERT(catchEntrypointIndex != 0);
                 m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
             }
@@ -280,7 +280,7 @@ public:
         unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
         MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
         PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
-        CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
+        CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(BytecodeIndex(0)));
         stackOverflowHandler->appendSomeRegister(m_callFrame);
         stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
         stackOverflowHandler->numGPScratchRegisters = 1;
@@ -351,7 +351,7 @@ public:
             }
 
             m_node = nullptr;
-            m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
+            m_origin = NodeOrigin(CodeOrigin(BytecodeIndex(0)), CodeOrigin(BytecodeIndex(0)), true);
 
             // Check Arguments.
             availabilityMap().clear();
@@ -2110,8 +2110,8 @@ private:
         }
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+        BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         auto repatchingFunction = operationValueAddOptimize;
         auto nonRepatchingFunction = operationValueAdd;
         compileBinaryMathIC<JITAddGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
@@ -2131,8 +2131,8 @@ private:
         }
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+        BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         auto repatchingFunction = operationValueSubOptimize;
         auto nonRepatchingFunction = operationValueSub;
         compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
@@ -2152,8 +2152,8 @@ private:
         }
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+        BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         auto repatchingFunction = operationValueMulOptimize;
         auto nonRepatchingFunction = operationValueMul;
         compileBinaryMathIC<JITMulGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
@@ -2415,8 +2415,8 @@ private:
             }
 
             CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-            unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-            ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+            BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+            ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
             auto repatchingFunction = operationValueSubOptimize;
             auto nonRepatchingFunction = operationValueSub;
             compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
@@ -3093,8 +3093,8 @@ private:
     {
         DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
+        BytecodeIndex bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeIndex(bytecodeIndex);
         auto repatchingFunction = operationArithNegateOptimize;
         auto nonRepatchingFunction = operationArithNegate;
         compileUnaryMathIC<JITNegGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
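
The three-line profile lookup above repeats at every math-IC site in this file. A consolidation is sketched below; it is not part of the patch, just a restatement of the recurring pattern using the same accessors:

    // Hypothetical helper: fetch the baseline ArithProfile for the node being lowered.
    ArithProfile* arithProfileForCurrentNode()
    {
        CodeOrigin semanticOrigin = m_node->origin.semantic;
        CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(semanticOrigin);
        return baselineCodeBlock->arithProfileForBytecodeIndex(semanticOrigin.bytecodeIndex());
    }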
index c175737..9331771 100644
@@ -41,7 +41,7 @@ namespace JSC { namespace FTL {
 SUPPRESS_ASAN
 void* prepareOSREntry(
     VM& vm, CallFrame* callFrame, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
-    unsigned bytecodeIndex, unsigned streamIndex)
+    BytecodeIndex bytecodeIndex, unsigned streamIndex)
 {
     CodeBlock* baseline = dfgCodeBlock->baselineVersion();
     ExecutableBase* executable = dfgCodeBlock->ownerExecutable();
@@ -55,7 +55,7 @@ void* prepareOSREntry(
     
     if (Options::verboseOSR()) {
         dataLog(
-            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
+            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at ",
             bytecodeIndex, ".\n");
     }
     
@@ -64,7 +64,7 @@ void* prepareOSREntry(
 
     if (bytecodeIndex != entryCode->bytecodeIndex()) {
         if (Options::verboseOSR())
-            dataLog("    OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
+            dataLog("    OSR failed because we don't have an entrypoint for ", bytecodeIndex, "; ours is for ", entryCode->bytecodeIndex(), "\n");
         return 0;
     }
     
@@ -79,7 +79,7 @@ void* prepareOSREntry(
         Optional<JSValue> reconstructedValue = values.argument(argument);
         if ((reconstructedValue && valueOnStack == reconstructedValue.value()) || !argument)
             continue;
-        dataLog("Mismatch between reconstructed values and the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at bc#", bytecodeIndex, ":\n");
+        dataLog("Mismatch between reconstructed values and the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at ", bytecodeIndex, ":\n");
         dataLog("    Value on stack: ", valueOnStack, "\n");
         dataLog("    Reconstructed value: ", reconstructedValue, "\n");
         RELEASE_ASSERT_NOT_REACHED();
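
The log lines above drop their hand-written "bc#" prefixes because BytecodeIndex now prints itself (BytecodeIndex::dump in the new BytecodeIndex.cpp, per the change list). A plausible shape for that printer, assuming it keeps the traditional notation:

    // Sketch only: the real definition lives in bytecode/BytecodeIndex.cpp.
    void BytecodeIndex::dump(WTF::PrintStream& out) const
    {
        out.print("bc#", offset());
    }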
index 3d123fb..64ecf75 100644
@@ -36,7 +36,7 @@ class VM;
 namespace FTL {
 
 void* prepareOSREntry(
-    VM&, CallFrame*, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock, unsigned bytecodeIndex,
+    VM&, CallFrame*, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock, BytecodeIndex,
     unsigned streamIndex);
 
 } } // namespace JSC::FTL
index 5232058..fff0adf 100644
@@ -98,12 +98,12 @@ SUPPRESS_ASAN unsigned CallFrame::unsafeCallSiteAsRawBits() const
 
 CallSiteIndex CallFrame::callSiteIndex() const
 {
-    return CallSiteIndex(callSiteAsRawBits());
+    return CallSiteIndex(BytecodeIndex(callSiteAsRawBits()));
 }
 
 SUPPRESS_ASAN CallSiteIndex CallFrame::unsafeCallSiteIndex() const
 {
-    return CallSiteIndex(unsafeCallSiteAsRawBits());
+    return CallSiteIndex(BytecodeIndex(unsafeCallSiteAsRawBits()));
 }
 
 #if USE(JSVALUE32_64)
@@ -114,7 +114,7 @@ const Instruction* CallFrame::currentVPC() const
 
 void CallFrame::setCurrentVPC(const Instruction* vpc)
 {
-    CallSiteIndex callSite(vpc);
+    CallSiteIndex callSite(codeBlock()->bytecodeIndex(vpc));
     this[CallFrameSlot::argumentCount].tag() = callSite.bits();
 }
 
@@ -134,7 +134,7 @@ const Instruction* CallFrame::currentVPC() const
 
 void CallFrame::setCurrentVPC(const Instruction* vpc)
 {
-    CallSiteIndex callSite(codeBlock()->bytecodeOffset(vpc));
+    CallSiteIndex callSite(codeBlock()->bytecodeIndex(vpc));
     this[CallFrameSlot::argumentCount].tag() = static_cast<int32_t>(callSite.bits());
 }
 
@@ -147,11 +147,11 @@ unsigned CallFrame::callSiteBitsAsBytecodeOffset() const
 
 #endif
     
-unsigned CallFrame::bytecodeOffset()
+BytecodeIndex CallFrame::bytecodeIndex()
 {
     ASSERT(!callee().isWasm());
     if (!codeBlock())
-        return 0;
+        return BytecodeIndex(0);
 #if ENABLE(DFG_JIT)
     if (callSiteBitsAreCodeOriginIndex()) {
         ASSERT(codeBlock());
@@ -164,13 +164,13 @@ unsigned CallFrame::bytecodeOffset()
     }
 #endif
     ASSERT(callSiteBitsAreBytecodeOffset());
-    return callSiteBitsAsBytecodeOffset();
+    return BytecodeIndex(callSiteBitsAsBytecodeOffset());
 }
 
 CodeOrigin CallFrame::codeOrigin()
 {
     if (!codeBlock())
-        return CodeOrigin(0);
+        return CodeOrigin(BytecodeIndex(0));
 #if ENABLE(DFG_JIT)
     if (callSiteBitsAreCodeOriginIndex()) {
         CallSiteIndex index = callSiteIndex();
@@ -178,7 +178,7 @@ CodeOrigin CallFrame::codeOrigin()
         return codeBlock()->codeOrigin(index);
     }
 #endif
-    return CodeOrigin(callSiteBitsAsBytecodeOffset());
+    return CodeOrigin(BytecodeIndex(callSiteBitsAsBytecodeOffset()));
 }
 
 Register* CallFrame::topOfFrameInternal()
@@ -302,7 +302,7 @@ String CallFrame::friendlyFunctionName()
 void CallFrame::dump(PrintStream& out)
 {
     if (CodeBlock* codeBlock = this->codeBlock()) {
-        out.print(codeBlock->inferredName(), "#", codeBlock->hashAsStringIfPossible(), " [", codeBlock->jitType(), " bc#", bytecodeOffset(), "]");
+        out.print(codeBlock->inferredName(), "#", codeBlock->hashAsStringIfPossible(), " [", codeBlock->jitType(), " ", bytecodeIndex(), "]");
 
         out.print("(");
         thisValue().dumpForBacktrace(out);
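
callSiteIndex() above wraps the stored tag word back into the class. Since BytecodeIndex in this patch is a thin wrapper over the byte offset, the raw bits survive the round trip; that identity is an assumption made explicit here:

    // Round trip of the ArgumentCount tag word through the new types.
    uint32_t raw = callFrame->callSiteAsRawBits();
    BytecodeIndex index = BytecodeIndex(raw); // equivalent to BytecodeIndex::fromBits(raw) while packing is identity
    CallSiteIndex site(index);
    ASSERT(site.bits() == raw);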
index b7b33e0..080f7c4 100644
@@ -45,22 +45,19 @@ namespace JSC  {
     public:
         CallSiteIndex() = default;
         
-        explicit CallSiteIndex(uint32_t bits)
-            : m_bits(bits)
+        explicit CallSiteIndex(BytecodeIndex bytecodeIndex)
+            : m_bytecodeIndex(bytecodeIndex)
         { }
-#if USE(JSVALUE32_64)
-        explicit CallSiteIndex(const Instruction* instruction)
-            : m_bits(bitwise_cast<uint32_t>(instruction))
-        { }
-#endif
 
-        explicit operator bool() const { return m_bits != UINT_MAX; }
-        bool operator==(const CallSiteIndex& other) const { return m_bits == other.m_bits; }
-        
-        inline uint32_t bits() const { return m_bits; }
+        explicit operator bool() const { return !!m_bytecodeIndex; }
+        bool operator==(const CallSiteIndex& other) const { return m_bytecodeIndex == other.m_bytecodeIndex; }
+
+        uint32_t bits() const { return m_bytecodeIndex.asBits(); }
+
+        BytecodeIndex bytecodeIndex() const { return m_bytecodeIndex; }
 
     private:
-        uint32_t m_bits { UINT_MAX };
+        BytecodeIndex m_bytecodeIndex;
     };
 
     class DisposableCallSiteIndex : public CallSiteIndex {
@@ -68,7 +65,7 @@ namespace JSC  {
         DisposableCallSiteIndex() = default;
 
         explicit DisposableCallSiteIndex(uint32_t bits)
-            : CallSiteIndex(bits)
+            : CallSiteIndex(BytecodeIndex::fromBits(bits))
         {
         }
 
@@ -179,10 +176,10 @@ namespace JSC  {
         // also return 0 if the call frame has no notion of bytecode offsets (for
         // example if it's native code).
         // https://bugs.webkit.org/show_bug.cgi?id=121754
-        unsigned bytecodeOffset();
+        BytecodeIndex bytecodeIndex();
         
         // This will get you a CodeOrigin. It will always succeed. May return
-        // CodeOrigin(0) if we're in native code.
+        // CodeOrigin(BytecodeIndex(0)) if we're in native code.
         JS_EXPORT_PRIVATE CodeOrigin codeOrigin();
 
         Register* topOfFrame()
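
One subtlety in the CallSiteIndex rewrite above: the old empty state was m_bits == UINT_MAX, while the new operator bool delegates to the wrapped index. The two behave the same only if a default-constructed BytecodeIndex is the single falsy value, which its deletedValue/isHashTableDeletedValue machinery implies; under that assumption:

    CallSiteIndex empty;                    // no call site: falsy
    ASSERT(!empty);
    CallSiteIndex first{BytecodeIndex(0)};  // bytecode offset 0 is a real site: truthy
    ASSERT(!!first && !(empty == first));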
index e43db29..ff81597 100644
@@ -410,7 +410,7 @@ public:
                 m_results.append(StackFrame(visitor->wasmFunctionIndexOrName()));
             } else if (!!visitor->codeBlock() && !visitor->codeBlock()->unlinkedCodeBlock()->isBuiltinFunction()) {
                 m_results.append(
-                    StackFrame(m_vm, m_owner, visitor->callee().asCell(), visitor->codeBlock(), visitor->bytecodeOffset()));
+                    StackFrame(m_vm, m_owner, visitor->callee().asCell(), visitor->codeBlock(), visitor->bytecodeIndex()));
             } else {
                 m_results.append(
                     StackFrame(m_vm, m_owner, visitor->callee().asCell()));
@@ -480,7 +480,7 @@ ALWAYS_INLINE static HandlerInfo* findExceptionHandler(StackVisitor& visitor, Co
     if (JITCode::isOptimizingJIT(codeBlock->jitType()))
         exceptionHandlerIndex = callFrame->callSiteIndex().bits();
     else
-        exceptionHandlerIndex = callFrame->bytecodeOffset();
+        exceptionHandlerIndex = callFrame->bytecodeIndex().offset();
 
     return codeBlock->handlerForIndex(exceptionHandlerIndex, requiredHandler);
 }
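
The unwrap above is deliberate: optimizing tiers key the handler table by call-site-index bits, while baseline tiers key it by the plain byte offset. Restated in isolation, using only calls visible in this function:

    // Which 32-bit key the handler table gets, per tier.
    unsigned handlerKey(CallFrame* callFrame, CodeBlock* codeBlock)
    {
        if (JITCode::isOptimizingJIT(codeBlock->jitType()))
            return callFrame->callSiteIndex().bits();
        return callFrame->bytecodeIndex().offset();
    }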
index 864041f..87ac433 100644
@@ -329,7 +329,7 @@ void ShadowChicken::update(VM& vm, CallFrame* callFrame)
             bool isTailDeleted = false;
             JSScope* scope = nullptr;
             CodeBlock* codeBlock = callFrame->codeBlock();
-            JSValue scopeValue = callFrame->bytecodeOffset() && codeBlock && codeBlock->scopeRegister().isValid()
+            JSValue scopeValue = callFrame->bytecodeIndex() && codeBlock && codeBlock->scopeRegister().isValid()
                 ? callFrame->registers()[codeBlock->scopeRegister().offset()].jsValue()
                 : jsUndefined();
             if (!scopeValue.isUndefined() && codeBlock->wasCompiledWithDebuggingOpcodes()) {
index 80c8f68..c3172f7 100644
@@ -169,7 +169,7 @@ void StackVisitor::readNonInlinedFrame(CallFrame* callFrame, CodeOrigin* codeOri
     if (callFrame->isAnyWasmCallee()) {
         m_frame.m_isWasmFrame = true;
         m_frame.m_codeBlock = nullptr;
-        m_frame.m_bytecodeOffset = 0;
+        m_frame.m_bytecodeIndex = BytecodeIndex();
 #if ENABLE(WEBASSEMBLY)
         CalleeBits bits = callFrame->callee();
         if (bits.isWasm())
@@ -177,9 +177,9 @@ void StackVisitor::readNonInlinedFrame(CallFrame* callFrame, CodeOrigin* codeOri
 #endif
     } else {
         m_frame.m_codeBlock = callFrame->codeBlock();
-        m_frame.m_bytecodeOffset = !m_frame.codeBlock() ? 0
+        m_frame.m_bytecodeIndex = !m_frame.codeBlock() ? BytecodeIndex(0)
             : codeOrigin ? codeOrigin->bytecodeIndex()
-            : callFrame->bytecodeOffset();
+            : callFrame->bytecodeIndex();
 
     }
 
@@ -213,7 +213,7 @@ void StackVisitor::readInlinedFrame(CallFrame* callFrame, CodeOrigin* codeOrigin
         else
             m_frame.m_argumentCountIncludingThis = inlineCallFrame->argumentCountIncludingThis;
         m_frame.m_codeBlock = inlineCallFrame->baselineCodeBlock.get();
-        m_frame.m_bytecodeOffset = codeOrigin->bytecodeIndex();
+        m_frame.m_bytecodeIndex = codeOrigin->bytecodeIndex();
 
         JSFunction* callee = inlineCallFrame->calleeForCallFrame(callFrame);
         m_frame.m_callee = callee;
@@ -423,7 +423,7 @@ void StackVisitor::Frame::computeLineAndColumn(unsigned& line, unsigned& column)
 void StackVisitor::Frame::retrieveExpressionInfo(int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
 {
     CodeBlock* codeBlock = this->codeBlock();
-    codeBlock->unlinkedCodeBlock()->expressionRangeForBytecodeOffset(bytecodeOffset(), divot, startOffset, endOffset, line, column);
+    codeBlock->unlinkedCodeBlock()->expressionRangeForBytecodeIndex(bytecodeIndex(), divot, startOffset, endOffset, line, column);
     divot += codeBlock->sourceOffset();
 }
 
@@ -485,8 +485,8 @@ void StackVisitor::Frame::dump(PrintStream& out, Indenter indent, WTF::Function<
             indent++;
 
             if (callFrame->callSiteBitsAreBytecodeOffset()) {
-                unsigned bytecodeOffset = callFrame->bytecodeOffset();
-                out.print(indent, "bytecodeOffset: ", bytecodeOffset, " of ", codeBlock->instructions().size(), "\n");
+                BytecodeIndex bytecodeIndex = callFrame->bytecodeIndex();
+                out.print(indent, bytecodeIndex, " of ", codeBlock->instructions().size(), "\n");
 #if ENABLE(DFG_JIT)
             } else {
                 out.print(indent, "hasCodeOrigins: ", codeBlock->hasCodeOrigins(), "\n");
index 63ad408..d35761f 100644
@@ -25,6 +25,7 @@
 
 #pragma once
 
+#include "BytecodeIndex.h"
 #include "CalleeBits.h"
 #include "WasmIndexOrName.h"
 #include <wtf/Function.h>
@@ -64,7 +65,7 @@ public:
         CallFrame* callerFrame() const { return m_callerFrame; }
         CalleeBits callee() const { return m_callee; }
         CodeBlock* codeBlock() const { return m_codeBlock; }
-        unsigned bytecodeOffset() const { return m_bytecodeOffset; }
+        BytecodeIndex bytecodeIndex() const { return m_bytecodeIndex; }
         InlineCallFrame* inlineCallFrame() const {
 #if ENABLE(DFG_JIT)
             return m_inlineCallFrame;
@@ -120,7 +121,7 @@ public:
         CodeBlock* m_codeBlock;
         size_t m_index;
         size_t m_argumentCountIncludingThis;
-        unsigned m_bytecodeOffset;
+        BytecodeIndex m_bytecodeIndex;
         bool m_callerIsEntryFrame : 1;
         bool m_isWasmFrame : 1;
         Wasm::IndexOrName m_wasmFunctionIndexOrName;
index d44897d..1ff8b47 100644
@@ -74,16 +74,15 @@ void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr<CFu
         newCalleeFunction.retagged<OperationPtrTag>());
 }
 
-JIT::JIT(VM& vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
+JIT::JIT(VM& vm, CodeBlock* codeBlock, BytecodeIndex loopOSREntryBytecodeIndex)
     : JSInterfaceJIT(&vm, codeBlock)
     , m_interpreter(vm.interpreter)
     , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
-    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
     , m_pcToCodeOriginMapBuilder(vm)
     , m_canBeOptimized(false)
     , m_shouldEmitProfiling(false)
     , m_shouldUseIndexMasking(Options::enableSpectreMitigations())
-    , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset)
+    , m_loopOSREntryBytecodeIndex(loopOSREntryBytecodeIndex)
 {
 }
 
@@ -100,11 +99,11 @@ void JIT::emitEnterOptimizationCheck()
     JumpList skipOptimize;
     
     skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
-    ASSERT(!m_bytecodeOffset);
+    ASSERT(!m_bytecodeIndex.offset());
 
     copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);
 
-    callOperation(operationOptimize, &vm(), m_bytecodeOffset);
+    callOperation(operationOptimize, &vm(), m_bytecodeIndex.asBits());
     skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
     farJump(returnValueGPR, GPRInfo::callFrameRegister);
     skipOptimize.link(this);
@@ -138,12 +137,12 @@ void JIT::assertStackPointerOffset()
 }
 
 #define NEXT_OPCODE(name) \
-    m_bytecodeOffset += currentInstruction->size(); \
+    m_bytecodeIndex = BytecodeIndex(m_bytecodeIndex.offset() + currentInstruction->size()); \
     break;
 
 #define DEFINE_SLOW_OP(name) \
     case op_##name: { \
-        if (m_bytecodeOffset >= startBytecodeOffset) { \
+        if (m_bytecodeIndex >= startBytecodeIndex) { \
             JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
             slowPathCall.call(); \
         } \
@@ -152,7 +151,7 @@ void JIT::assertStackPointerOffset()
 
 #define DEFINE_OP(name) \
     case name: { \
-        if (m_bytecodeOffset >= startBytecodeOffset) { \
+        if (m_bytecodeIndex >= startBytecodeIndex) { \
             emit_##name(currentInstruction); \
         } \
         NEXT_OPCODE(name); \
@@ -192,8 +191,8 @@ void JIT::privateCompileMainPass()
     m_callLinkInfoIndex = 0;
 
     VM& vm = m_codeBlock->vm();
-    unsigned startBytecodeOffset = 0;
-    if (m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
+    BytecodeIndex startBytecodeIndex(0);
+    if (m_loopOSREntryBytecodeIndex && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
         // We can only do this optimization because we execute ProgramCodeBlock's exactly once.
         // This optimization would be invalid otherwise. When the LLInt determines it wants to
         // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
         // was executing at when it kicked off our compilation. We only need to compile code for
         // anything reachable from that bytecode offset.
@@ -202,28 +201,28 @@ void JIT::privateCompileMainPass()
 
         // We only bother building the bytecode graph if it could save time and executable
         // memory. We pick an arbitrary offset where we deem this is profitable.
-        if (m_loopOSREntryBytecodeOffset >= 200) {
+        if (m_loopOSREntryBytecodeIndex.offset() >= 200) {
             // As a simplification, we don't find all bytecode ranges that are unreachable.
             // Instead, we just find the minimum bytecode offset that is reachable, and
             // compile code from that bytecode offset onwards.
 
             BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions());
-            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeOffset);
+            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeIndex.offset());
             RELEASE_ASSERT(block);
 
             GraphNodeWorklist<BytecodeBasicBlock*> worklist;
-            startBytecodeOffset = UINT_MAX;
+            startBytecodeIndex = BytecodeIndex();
             worklist.push(block);
 
             while (BytecodeBasicBlock* block = worklist.pop()) {
-                startBytecodeOffset = std::min(startBytecodeOffset, block->leaderOffset());
+                startBytecodeIndex = BytecodeIndex(std::min(startBytecodeIndex.offset(), block->leaderOffset()));
                 worklist.pushAll(block->successors());
 
                 // Also add catch blocks for bytecodes that throw.
                 if (m_codeBlock->numberOfExceptionHandlers()) {
                     for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
                         auto instruction = instructions.at(bytecodeOffset);
-                        if (auto* handler = m_codeBlock->handlerForBytecodeOffset(bytecodeOffset))
+                        if (auto* handler = m_codeBlock->handlerForBytecodeIndex(BytecodeIndex(bytecodeOffset)))
                             worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target));
 
                         bytecodeOffset += instruction->size();
@@ -233,29 +232,29 @@ void JIT::privateCompileMainPass()
         }
     }
 
-    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
-        if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) {
+    for (m_bytecodeIndex = BytecodeIndex(0); m_bytecodeIndex.offset() < instructionCount; ) {
+        if (m_bytecodeIndex == startBytecodeIndex && startBytecodeIndex.offset() > 0) {
             // We've proven all bytecode instructions up until here are unreachable.
             // Let's ensure that by crashing if it's ever hit.
             breakpoint();
         }
 
         if (m_disassembler)
-            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
-        const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr();
-        ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
+            m_disassembler->setForBytecodeMainPath(m_bytecodeIndex.offset(), label());
+        const Instruction* currentInstruction = instructions.at(m_bytecodeIndex).ptr();
+        ASSERT(currentInstruction->size());
 
-        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
+        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeIndex));
 
 #if ENABLE(OPCODE_SAMPLING)
-        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
+        if (m_bytecodeIndex.offset() > 0) // Avoid the overhead of sampling op_enter twice.
             sampleInstruction(currentInstruction);
 #endif
 
-        m_labels[m_bytecodeOffset] = label();
+        m_labels[m_bytecodeIndex.offset()] = label();
 
         if (JITInternal::verbose)
-            dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+            dataLogLn("Old JIT emitting code for ", m_bytecodeIndex, " at offset ", (long)debugOffset());
 
         OpcodeID opcodeID = currentInstruction->opcodeID();
 
@@ -263,13 +262,13 @@ void JIT::privateCompileMainPass()
             add64(
                 TrustedImm32(1),
                 AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
-                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
+                    m_compilation->bytecodes(), m_bytecodeIndex)))->address()));
         }
         
         if (Options::eagerlyUpdateTopCallFrame())
             updateTopCallFrame();
 
-        unsigned bytecodeOffset = m_bytecodeOffset;
+        unsigned bytecodeOffset = m_bytecodeIndex.offset();
 #if ENABLE(MASM_PROBE)
         if (UNLIKELY(Options::traceBaselineJITExecution())) {
             CodeBlock* codeBlock = m_codeBlock;
@@ -472,7 +471,7 @@ void JIT::privateCompileMainPass()
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
-    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
+    m_bytecodeIndex = BytecodeIndex();
 #endif
 }
 
@@ -495,28 +494,28 @@ void JIT::privateCompileSlowCases()
     m_callLinkInfoIndex = 0;
     
     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
-        m_bytecodeOffset = iter->to;
+        m_bytecodeIndex = iter->to;
 
-        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
+        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeIndex));
 
-        unsigned firstTo = m_bytecodeOffset;
+        BytecodeIndex firstTo = m_bytecodeIndex;
 
-        const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeOffset).ptr();
+        const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeIndex).ptr();
         
         RareCaseProfile* rareCaseProfile = 0;
         if (shouldEmitProfiling())
-            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
+            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeIndex);
 
         if (JITInternal::verbose)
-            dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+            dataLogLn("Old JIT emitting slow code for ", m_bytecodeIndex, " at offset ", (long)debugOffset());
 
         if (m_disassembler)
-            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());
+            m_disassembler->setForBytecodeSlowPath(m_bytecodeIndex.offset(), label());
 
 #if ENABLE(MASM_PROBE)
         if (UNLIKELY(Options::traceBaselineJITExecution())) {
             OpcodeID opcodeID = currentInstruction->opcodeID();
-            unsigned bytecodeOffset = m_bytecodeOffset;
+            unsigned bytecodeOffset = m_bytecodeIndex.offset();
             CodeBlock* codeBlock = m_codeBlock;
             probe([=] (Probe::Context& ctx) {
                 dataLogLn("JIT [", bytecodeOffset, "] SLOW ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
@@ -623,7 +622,7 @@ void JIT::privateCompileSlowCases()
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
-    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
+    m_bytecodeIndex = BytecodeIndex();
 #endif
 }
 
@@ -674,7 +673,7 @@ void JIT::compileWithoutLinking(JITCompilationEffort effort)
         m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
     }
     
-    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));
+    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(BytecodeIndex(0)));
 
     Label entryLabel(this);
     if (m_disassembler)
@@ -711,7 +710,7 @@ void JIT::compileWithoutLinking(JITCompilationEffort effort)
     emitMaterializeTagCheckRegisters();
 
     if (m_codeBlock->codeType() == FunctionCode) {
-        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
+        ASSERT(!m_bytecodeIndex);
         if (shouldEmitProfiling()) {
             for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                 // If this is a constructor, then we want to put in a dummy profiling site (to
@@ -741,7 +740,7 @@ void JIT::compileWithoutLinking(JITCompilationEffort effort)
     m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
 
     stackOverflow.link(this);
-    m_bytecodeOffset = 0;
+    m_bytecodeIndex = BytecodeIndex(0);
     if (maxFrameExtentForSlowPathCall)
         addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
     callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
@@ -757,7 +756,7 @@ void JIT::compileWithoutLinking(JITCompilationEffort effort)
         load32(payloadFor(CallFrameSlot::argumentCount), regT1);
         branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
 
-        m_bytecodeOffset = 0;
+        m_bytecodeIndex = BytecodeIndex(0);
 
         if (maxFrameExtentForSlowPathCall)
             addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
@@ -769,7 +768,7 @@ void JIT::compileWithoutLinking(JITCompilationEffort effort)
         emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>());
 
 #if !ASSERT_DISABLED
-        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
+        m_bytecodeIndex = BytecodeIndex(); // Reset this, in order to guard its use with ASSERTs.
 #endif
 
         jump(beginLabel);
@@ -809,7 +808,7 @@ CompilationResult JIT::link()
 
     // Translate vPC offsets into addresses in JIT generated code, for switch tables.
     for (auto& record : m_switches) {
-        unsigned bytecodeOffset = record.bytecodeOffset;
+        unsigned bytecodeOffset = record.bytecodeIndex.offset();
 
         if (record.type != SwitchRecord::String) {
             ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); 
@@ -894,7 +893,7 @@ CompilationResult JIT::link()
     JITCodeMap jitCodeMap;
     for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
         if (m_labels[bytecodeOffset].isSet())
-            jitCodeMap.append(bytecodeOffset, patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
+            jitCodeMap.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
     }
     jitCodeMap.finish();
     m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap));
@@ -949,7 +948,7 @@ void JIT::privateCompileExceptionHandlers()
         // operationLookupExceptionHandlerFromCallerFrame is passed one argument, the VM*.
         move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
         prepareCallOperation(vm());
-        m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(operationLookupExceptionHandlerFromCallerFrame)));
+        m_calls.append(CallRecord(call(OperationPtrTag), BytecodeIndex(), FunctionPtr<OperationPtrTag>(operationLookupExceptionHandlerFromCallerFrame)));
         jumpToExceptionHandler(vm());
     }
 
@@ -962,7 +961,7 @@ void JIT::privateCompileExceptionHandlers()
         // operationLookupExceptionHandler is passed one argument, the VM*.
         move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
         prepareCallOperation(vm());
-        m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(operationLookupExceptionHandler)));
+        m_calls.append(CallRecord(call(OperationPtrTag), BytecodeIndex(), FunctionPtr<OperationPtrTag>(operationLookupExceptionHandler)));
         jumpToExceptionHandler(vm());
     }
 }
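
Since BytecodeIndex carries no arithmetic of its own, the main pass advances by unwrapping to the offset and rewrapping, as the NEXT_OPCODE macro above does. The same step as a function, assuming instruction sizes remain byte-denominated:

    // Mirrors NEXT_OPCODE: the index one past the current instruction.
    static BytecodeIndex next(BytecodeIndex index, const Instruction* instruction)
    {
        return BytecodeIndex(index.offset() + instruction->size());
    }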
index 19779a3..d45fc03 100644
@@ -70,16 +70,16 @@ namespace JSC {
 
     struct CallRecord {
         MacroAssembler::Call from;
-        unsigned bytecodeOffset;
+        BytecodeIndex bytecodeIndex;
         FunctionPtr<OperationPtrTag> callee;
 
         CallRecord()
         {
         }
 
-        CallRecord(MacroAssembler::Call from, unsigned bytecodeOffset, FunctionPtr<OperationPtrTag> callee)
+        CallRecord(MacroAssembler::Call from, BytecodeIndex bytecodeIndex, FunctionPtr<OperationPtrTag> callee)
             : from(from)
-            , bytecodeOffset(bytecodeOffset)
+            , bytecodeIndex(bytecodeIndex)
             , callee(callee)
         {
         }
@@ -98,9 +98,9 @@ namespace JSC {
 
     struct SlowCaseEntry {
         MacroAssembler::Jump from;
-        unsigned to;
+        BytecodeIndex to;
         
-        SlowCaseEntry(MacroAssembler::Jump f, unsigned t)
+        SlowCaseEntry(MacroAssembler::Jump f, BytecodeIndex t)
             : from(f)
             , to(t)
         {
@@ -121,20 +121,20 @@ namespace JSC {
             StringJumpTable* stringJumpTable;
         } jumpTable;
 
-        unsigned bytecodeOffset;
+        BytecodeIndex bytecodeIndex;
         unsigned defaultOffset;
 
-        SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset, Type type)
+        SwitchRecord(SimpleJumpTable* jumpTable, BytecodeIndex bytecodeIndex, unsigned defaultOffset, Type type)
             : type(type)
-            , bytecodeOffset(bytecodeOffset)
+            , bytecodeIndex(bytecodeIndex)
             , defaultOffset(defaultOffset)
         {
             this->jumpTable.simpleJumpTable = jumpTable;
         }
 
-        SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset)
+        SwitchRecord(StringJumpTable* jumpTable, BytecodeIndex bytecodeIndex, unsigned defaultOffset)
             : type(String)
-            , bytecodeOffset(bytecodeOffset)
+            , bytecodeIndex(bytecodeIndex)
             , defaultOffset(defaultOffset)
         {
             this->jumpTable.stringJumpTable = jumpTable;
@@ -144,7 +144,7 @@ namespace JSC {
     struct ByValCompilationInfo {
         ByValCompilationInfo() { }
         
-        ByValCompilationInfo(ByValInfo* byValInfo, unsigned bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget)
+        ByValCompilationInfo(ByValInfo* byValInfo, BytecodeIndex bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget)
             : byValInfo(byValInfo)
             , bytecodeIndex(bytecodeIndex)
             , notIndexJump(notIndexJump)
@@ -157,7 +157,7 @@ namespace JSC {
         }
 
         ByValInfo* byValInfo;
-        unsigned bytecodeIndex;
+        BytecodeIndex bytecodeIndex;
         MacroAssembler::PatchableJump notIndexJump;
         MacroAssembler::PatchableJump badTypeJump;
         JITArrayMode arrayMode;
@@ -192,7 +192,7 @@ namespace JSC {
         static constexpr int patchPutByIdDefaultOffset = 256;
 
     public:
-        JIT(VM&, CodeBlock* = 0, unsigned loopOSREntryBytecodeOffset = 0);
+        JIT(VM&, CodeBlock* = nullptr, BytecodeIndex loopOSREntryBytecodeIndex = BytecodeIndex(0));
         ~JIT();
 
         VM& vm() { return *JSInterfaceJIT::vm(); }
@@ -202,7 +202,7 @@ namespace JSC {
 
         void doMainThreadPreparationBeforeCompile();
         
-        static CompilationResult compile(VM& vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned bytecodeOffset = 0)
+        static CompilationResult compile(VM& vm, CodeBlock* codeBlock, JITCompilationEffort effort, BytecodeIndex bytecodeOffset = BytecodeIndex(0))
         {
             return JIT(vm, codeBlock, bytecodeOffset).privateCompile(effort);
         }
@@ -210,28 +210,28 @@ namespace JSC {
         static void compileGetByVal(const ConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
         {
             JIT jit(vm, codeBlock);
-            jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+            jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
             jit.privateCompileGetByVal(locker, byValInfo, returnAddress, arrayMode);
         }
 
         static void compileGetByValWithCachedId(VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
         {
             JIT jit(vm, codeBlock);
-            jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+            jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
             jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName);
         }
 
         static void compilePutByVal(const ConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
         {
             JIT jit(vm, codeBlock);
-            jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+            jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
             jit.privateCompilePutByVal<OpPutByVal>(locker, byValInfo, returnAddress, arrayMode);
         }
         
         static void compileDirectPutByVal(const ConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
         {
             JIT jit(vm, codeBlock);
-            jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+            jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
             jit.privateCompilePutByVal<OpPutByValDirect>(locker, byValInfo, returnAddress, arrayMode);
         }
 
@@ -239,14 +239,14 @@ namespace JSC {
         static void compilePutByValWithCachedId(VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
         {
             JIT jit(vm, codeBlock);
-            jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+            jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
             jit.privateCompilePutByValWithCachedId<Op>(byValInfo, returnAddress, putKind, propertyName);
         }
 
         static void compileHasIndexedProperty(VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
         {
             JIT jit(vm, codeBlock);
-            jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+            jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
             jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode);
         }
 
@@ -277,7 +277,7 @@ namespace JSC {
         Call appendCall(const FunctionPtr<CFunctionPtrTag> function)
         {
             Call functionCall = call(OperationPtrTag);
-            m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.retagged<OperationPtrTag>()));
+            m_calls.append(CallRecord(functionCall, m_bytecodeIndex, function.retagged<OperationPtrTag>()));
             return functionCall;
         }
 
@@ -754,12 +754,12 @@ namespace JSC {
             ++iter;
         }
         void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int virtualRegisterIndex);
-        void linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases,
-            Vector<SlowCaseEntry>::iterator&, unsigned bytecodeOffset);
+        void linkAllSlowCasesForBytecodeIndex(Vector<SlowCaseEntry>& slowCases,
+            Vector<SlowCaseEntry>::iterator&, BytecodeIndex bytecodeIndex);
 
         void linkAllSlowCases(Vector<SlowCaseEntry>::iterator& iter)
         {
-            linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+            linkAllSlowCasesForBytecodeIndex(m_slowCases, iter, m_bytecodeIndex);
         }
 
         MacroAssembler::Call appendCallWithExceptionCheck(const FunctionPtr<CFunctionPtrTag>);
@@ -934,7 +934,7 @@ namespace JSC {
         Vector<CallCompilationInfo> m_callCompilationInfo;
         Vector<JumpTable> m_jmpTable;
 
-        unsigned m_bytecodeOffset;
+        BytecodeIndex m_bytecodeIndex;
         Vector<SlowCaseEntry> m_slowCases;
         Vector<SwitchRecord> m_switches;
 
@@ -968,7 +968,7 @@ namespace JSC {
         bool m_canBeOptimizedOrInlined;
         bool m_shouldEmitProfiling;
         bool m_shouldUseIndexMasking;
-        unsigned m_loopOSREntryBytecodeOffset { 0 };
+        BytecodeIndex m_loopOSREntryBytecodeIndex;
     };
 
 } // namespace JSC
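
CallRecord, SlowCaseEntry, and SwitchRecord above all hold the typed index now, and since BytecodeIndex ships hashing support (BytecodeIndexHash plus deletedValue/isHashTableDeletedValue in BytecodeIndex.h), it can also key hash tables directly. A sketch, on the assumption that BytecodeIndex.h also wires up the matching WTF::HashTraits:

    // Hypothetical side table keyed by the class rather than a raw offset.
    WTF::HashMap<BytecodeIndex, unsigned, BytecodeIndexHash> slowCaseCounts;
    slowCaseCounts.add(BytecodeIndex(8), 1);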
index 78a3a41..bf2306b 100644
@@ -155,7 +155,7 @@ void JIT::compileCallEvalSlowCase(const Instruction* instruction, Vector<SlowCas
 
     auto bytecode = instruction->as<OpCallEval>();
     CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
-    info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
+    info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeIndex), regT0);
 
     int registerOffset = -bytecode.m_argv;
 
@@ -226,8 +226,8 @@ void JIT::compileOpCall(const Instruction* instruction, unsigned callLinkInfoInd
     compileSetupFrame(bytecode, info);
 
     // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
-    uint32_t bytecodeOffset = m_codeBlock->bytecodeOffset(instruction);
-    uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
+    auto bytecodeIndex = m_codeBlock->bytecodeIndex(instruction);
+    uint32_t locationBits = CallSiteIndex(bytecodeIndex).bits();
     store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
 
     emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
@@ -242,7 +242,7 @@ void JIT::compileOpCall(const Instruction* instruction, unsigned callLinkInfoInd
     addSlowCase(slowCase);
 
     ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
-    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
+    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeIndex), regT0);
     m_callCompilationInfo.append(CallCompilationInfo());
     m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
     m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
index e1308f3..e075724 100644
@@ -38,31 +38,31 @@ private:
     struct Entry {
         Entry() { }
 
-        Entry(unsigned bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation)
+        Entry(BytecodeIndex bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation)
             : m_bytecodeIndex(bytecodeIndex)
             , m_codeLocation(codeLocation)
         { }
 
-        inline unsigned bytecodeIndex() const { return m_bytecodeIndex; }
+        inline BytecodeIndex bytecodeIndex() const { return m_bytecodeIndex; }
         inline CodeLocationLabel<JSEntryPtrTag> codeLocation() { return m_codeLocation; }
 
     private:
-        unsigned m_bytecodeIndex;
+        BytecodeIndex m_bytecodeIndex;
         CodeLocationLabel<JSEntryPtrTag> m_codeLocation;
     };
 
 public:
-    void append(unsigned bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation)
+    void append(BytecodeIndex bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation)
     {
         m_entries.append({ bytecodeIndex, codeLocation });
     }
 
     void finish() { m_entries.shrinkToFit(); }
 
-    CodeLocationLabel<JSEntryPtrTag> find(unsigned bytecodeIndex) const
+    CodeLocationLabel<JSEntryPtrTag> find(BytecodeIndex bytecodeIndex) const
     {
         auto* entry =
-            binarySearch<Entry, unsigned>(m_entries,
+            binarySearch<Entry, BytecodeIndex>(m_entries,
                 m_entries.size(), bytecodeIndex, [] (Entry* entry) {
                     return entry->bytecodeIndex();
                 });
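
Entries are appended in ascending index order by the loop in JIT::link() above, which is what lets find() binary-search. A usage sketch; entryLabel and loopLabel stand in for labels the LinkBuffer would resolve:

    JITCodeMap map;
    map.append(BytecodeIndex(0), entryLabel);  // must be appended in increasing order
    map.append(BytecodeIndex(12), loopLabel);
    map.finish();
    CodeLocationLabel<JSEntryPtrTag> target = map.find(BytecodeIndex(12));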
index 20ed37f..75f9e25 100644
@@ -118,7 +118,7 @@ Vector<JITDisassembler::DumpedOp> JITDisassembler::dumpVectorForInstructions(Lin
         }
         out.reset();
         result.append(DumpedOp());
-        result.last().index = i;
+        result.last().bytecodeIndex = BytecodeIndex(i);
         out.print(prefix);
         m_codeBlock->dumpBytecode(out, i);
         for (unsigned nextIndex = i + 1; ; nextIndex++) {
@@ -154,7 +154,7 @@ void JITDisassembler::reportInstructions(Profiler::Compilation* compilation, Lin
     for (unsigned i = 0; i < dumpedOps.size(); ++i) {
         compilation->addDescription(
             Profiler::CompiledBytecode(
-                Profiler::OriginStack(Profiler::Origin(compilation->bytecodes(), dumpedOps[i].index)),
+                Profiler::OriginStack(Profiler::Origin(compilation->bytecodes(), dumpedOps[i].bytecodeIndex)),
                 dumpedOps[i].disassembly));
     }
 }
index 5e32286..5a6276a 100644
@@ -67,7 +67,7 @@ private:
     MacroAssembler::Label firstSlowLabel();
     
     struct DumpedOp {
-        unsigned index;
+        BytecodeIndex bytecodeIndex;
         CString disassembly;
     };
     Vector<DumpedOp> dumpVectorForInstructions(LinkBuffer&, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel);
index 3959b8d..1080046 100644
@@ -109,29 +109,23 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,
 
 ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr<NoPtrTag> target)
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
     Call nakedCall = nearCall();
-    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, FunctionPtr<OperationPtrTag>(target.retagged<OperationPtrTag>())));
+    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, FunctionPtr<OperationPtrTag>(target.retagged<OperationPtrTag>())));
     return nakedCall;
 }
 
 ALWAYS_INLINE JIT::Call JIT::emitNakedTailCall(CodePtr<NoPtrTag> target)
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
     Call nakedCall = nearTailCall();
-    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, FunctionPtr<OperationPtrTag>(target.retagged<OperationPtrTag>())));
+    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, FunctionPtr<OperationPtrTag>(target.retagged<OperationPtrTag>())));
     return nakedCall;
 }
 
 ALWAYS_INLINE void JIT::updateTopCallFrame()
 {
-    ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
-#if USE(JSVALUE32_64)
-    const Instruction* instruction = m_codeBlock->instructions().at(m_bytecodeOffset).ptr();
-    uint32_t locationBits = CallSiteIndex(instruction).bits();
-#else
-    uint32_t locationBits = CallSiteIndex(m_bytecodeOffset).bits();
-#endif
+    uint32_t locationBits = CallSiteIndex(m_bytecodeIndex).bits();
     store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCount));
     
     // FIXME: It's not clear that this is needed. JITOperations tend to update the top call frame on
@@ -196,45 +190,45 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
         linkSlowCase(iter);
 }
 
-ALWAYS_INLINE void JIT::linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases, Vector<SlowCaseEntry>::iterator& iter, unsigned bytecodeOffset)
+ALWAYS_INLINE void JIT::linkAllSlowCasesForBytecodeIndex(Vector<SlowCaseEntry>& slowCases, Vector<SlowCaseEntry>::iterator& iter, BytecodeIndex bytecodeIndex)
 {
-    while (iter != slowCases.end() && iter->to == bytecodeOffset)
+    while (iter != slowCases.end() && iter->to == bytecodeIndex)
         linkSlowCase(iter);
 }
 
 ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
 
-    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
+    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
 }
 
 ALWAYS_INLINE void JIT::addSlowCase(const JumpList& jumpList)
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
 
     for (const Jump& jump : jumpList.jumps())
-        m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
+        m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
 }
 
 ALWAYS_INLINE void JIT::addSlowCase()
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
     
     Jump emptyJump; // Doing it this way to make Windows happy.
-    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
+    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeIndex));
 }
 
 ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
 
-    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
+    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex.offset() + relativeOffset));
 }
 
 ALWAYS_INLINE void JIT::addJump(const JumpList& jumpList, int relativeOffset)
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
 
     for (auto& jump : jumpList.jumps())
         addJump(jump, relativeOffset);
@@ -242,9 +236,9 @@ ALWAYS_INLINE void JIT::addJump(const JumpList& jumpList, int relativeOffset)
 
 ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
 
-    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
+    jump.linkTo(m_labels[m_bytecodeIndex.offset() + relativeOffset], this);
 }
 
 #if ENABLE(SAMPLING_FLAGS)
@@ -573,7 +567,7 @@ ALWAYS_INLINE bool JIT::getOperandConstantInt(int op1, int op2, int& op, int32_t
 // get arg puts an arg from the SF register array into a h/w register
 ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
 {
-    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
 
     if (m_codeBlock->isConstantRegisterIndex(src)) {
         JSValue value = m_codeBlock->getConstant(src);
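
Every former "m_bytecodeOffset != std::numeric_limits<unsigned>::max()" sentinel check above collapses into the index's own boolean conversion, because the passes reset m_bytecodeIndex to the default-constructed value when no instruction is being compiled (see the #ifndef NDEBUG blocks in JIT.cpp). The idiom in two lines:

    m_bytecodeIndex = BytecodeIndex();  // between passes: falsy, so the ASSERTs fire
    m_bytecodeIndex = BytecodeIndex(0); // inside a pass: a real, truthy index again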
index f3ab59a..0fc4069 100644
@@ -169,7 +169,7 @@ void JIT::emit_op_instanceof(const Instruction* currentInstruction)
     emitJumpSlowCaseIfNotJSCell(regT1, proto);
 
     JITInstanceOfGenerator gen(
-        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset),
+        m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex),
         RegisterSet::stubUnavailableRegisters(),
         regT0, // result
         regT2, // value
@@ -720,9 +720,9 @@ void JIT::emit_op_catch(const Instruction* currentInstruction)
     auto& metadata = bytecode.metadata(m_codeBlock);
     ValueProfileAndOperandBuffer* buffer = metadata.m_buffer;
     if (buffer || !shouldEmitProfiling())
-        callOperation(operationTryOSREnterAtCatch, &vm(), m_bytecodeOffset);
+        callOperation(operationTryOSREnterAtCatch, &vm(), m_bytecodeIndex.asBits());
     else
-        callOperation(operationTryOSREnterAtCatchAndValueProfile, &vm(), m_bytecodeOffset);
+        callOperation(operationTryOSREnterAtCatchAndValueProfile, &vm(), m_bytecodeIndex.asBits());
     auto skipOSREntry = branchTestPtr(Zero, returnValueGPR);
     emitRestoreCalleeSaves();
     farJump(returnValueGPR, ExceptionHandlerPtrTag);
@@ -760,7 +760,7 @@ void JIT::emit_op_switch_imm(const Instruction* currentInstruction)
 
     // create jump table for switch destinations, track this switch statement.
     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
-    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
     jumpTable->ensureCTITable();
 
     emitGetVirtualRegister(scrutinee, regT0);
@@ -777,7 +777,7 @@ void JIT::emit_op_switch_char(const Instruction* currentInstruction)
 
     // create jump table for switch destinations, track this switch statement.
     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
-    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
     jumpTable->ensureCTITable();
 
     emitGetVirtualRegister(scrutinee, regT0);
@@ -794,7 +794,7 @@ void JIT::emit_op_switch_string(const Instruction* currentInstruction)
 
     // create jump table for switch destinations, track this switch statement.
     StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
-    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
 
     emitGetVirtualRegister(scrutinee, regT0);
     callOperation(operationSwitchStringWithUnknownKeyType, TrustedImmPtr(m_codeBlock->globalObject()), regT0, tableIndex);
@@ -1036,7 +1036,7 @@ void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<Sl
 
         copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);
 
-        callOperation(operationOptimize, &vm(), m_bytecodeOffset);
+        callOperation(operationOptimize, &vm(), m_bytecodeIndex.asBits());
         Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
         if (!ASSERT_DISABLED) {
             Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
@@ -1301,7 +1301,7 @@ void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction)
 
     Label nextHotPath = label();
     
-    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
+    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeIndex, PatchableJump(), badType, mode, profile, done, nextHotPath));
 }
 
 void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1504,7 +1504,7 @@ void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction)
     ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
     emitGetVirtualRegister(bytecode.m_thisValue.offset(), regT2);
     emitGetVirtualRegister(bytecode.m_scope.offset(), regT3);
-    logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeOffset));
+    logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeIndex));
 }
 
 #endif // USE(JSVALUE64)
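
Calls like operationOptimize and operationTryOSREnterAtCatch above take the index as a raw 32-bit word (asBits()), since C-call arguments are machine words. The callee presumably rebuilds the class with the documented inverse, fromBits(); a hypothetical operation showing the convention:

    // Hypothetical callee: the JIT passed m_bytecodeIndex.asBits() as uint32_t.
    void exampleOperation(VM* vm, uint32_t bytecodeIndexBits)
    {
        UNUSED_PARAM(vm);
        BytecodeIndex index = BytecodeIndex::fromBits(bytecodeIndexBits);
        dataLogLn("OSR requested at ", index);
    }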
index 87349ee..eb51e4e 100644
@@ -159,7 +159,7 @@ void JIT::emit_op_instanceof(const Instruction* currentInstruction)
     emitJumpSlowCaseIfNotJSCell(proto);
     
     JITInstanceOfGenerator gen(
-        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset),
+        m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex),
        RegisterSet::stubUnavailableRegisters(),