LinkBuffer should not keep a reference to the MacroAssembler
https://bugs.webkit.org/show_bug.cgi?id=134668
Reviewed by Geoffrey Garen.
Source/JavaScriptCore:
In FTL, the LinkBuffer can outlive the MacroAssembler that was used for code generation.
When that happens, the pointer m_assembler points to released memory. That was not causing
issues because the member is not used after linking, but it was not particularly
future-proof.
This patch refactors LinkBuffer to avoid any lifetime risk. The MacroAssembler is now passed
as a reference; it is used for linking, but no pointer to it is ever stored in the LinkBuffer.
While fixing the call sites to use a reference, I also discovered LinkBuffer.h was included
everywhere. I refactored some #includes to avoid that.
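A minimal standalone sketch of the idea (not the real JSC classes; Assembler and
Linked are made-up names for illustration): the assembler is borrowed by reference
only while linking, and no pointer to it is retained, so the linked object can
safely outlive the assembler.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Assembler {
        std::vector<unsigned char> buffer; // unlinked code produced during codegen
    };

    class Linked {
    public:
        // The assembler is only needed during linking; it is taken by reference
        // and never stored, so there is no member left to dangle.
        explicit Linked(Assembler& assembler)
            : m_code(assembler.buffer) // copy the bytes we need up front
        {
            link(assembler);
        }

        std::size_t codeSize() const { return m_code.size(); }

    private:
        void link(Assembler&) { /* patch jumps, relocate, etc. */ }

        std::vector<unsigned char> m_code; // owned copy; no m_assembler pointer
    };

    int main()
    {
        Linked* linked = nullptr;
        {
            Assembler assembler;
            assembler.buffer = { 0x90, 0x90, 0xC3 };
            linked = new Linked(assembler);
        } // the assembler is gone here; a stored pointer would now dangle
        std::printf("linked code size: %zu\n", linked->codeSize());
        delete linked;
        return 0;
    }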
* assembler/LinkBuffer.cpp:
(JSC::LinkBuffer::copyCompactAndLinkCode):
(JSC::LinkBuffer::linkCode):
* assembler/LinkBuffer.h:
(JSC::LinkBuffer::LinkBuffer):
* bytecode/Watchpoint.cpp:
* dfg/DFGDisassembler.cpp:
* dfg/DFGDisassembler.h:
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::link):
(JSC::DFG::JITCompiler::linkFunction):
* dfg/DFGOSRExitCompiler.cpp:
* dfg/DFGPlan.cpp:
* dfg/DFGThunks.cpp:
(JSC::DFG::osrExitGenerationThunkGenerator):
(JSC::DFG::osrEntryThunkGenerator):
* ftl/FTLCompile.cpp:
(JSC::FTL::generateICFastPath):
(JSC::FTL::fixFunctionBasedOnStackMaps):
* ftl/FTLJSCall.cpp:
* ftl/FTLJSCall.h:
* ftl/FTLLink.cpp:
(JSC::FTL::link):
* ftl/FTLLowerDFGToLLVM.cpp:
* ftl/FTLOSRExitCompiler.cpp:
(JSC::FTL::compileStub):
* ftl/FTLThunks.cpp:
(JSC::FTL::osrExitGenerationThunkGenerator):
(JSC::FTL::slowPathCallThunkGenerator):
* jit/ArityCheckFailReturnThunks.cpp:
(JSC::ArityCheckFailReturnThunks::returnPCsFor):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):
* jit/JITCall.cpp:
(JSC::JIT::privateCompileClosureCall):
* jit/JITCall32_64.cpp:
(JSC::JIT::privateCompileClosureCall):
* jit/JITDisassembler.cpp:
* jit/JITDisassembler.h:
* jit/JITOpcodes.cpp:
* jit/JITPropertyAccess.cpp:
(JSC::JIT::stringGetByValStubGenerator):
(JSC::JIT::privateCompileGetByVal):
(JSC::JIT::privateCompilePutByVal):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::stringGetByValStubGenerator):
* jit/RegisterPreservationWrapperGenerator.cpp:
(JSC::generateRegisterPreservationWrapper):
(JSC::registerRestorationThunkGenerator):
* jit/Repatch.cpp:
(JSC::generateByIdStub):
(JSC::tryCacheGetByID):
(JSC::emitPutReplaceStub):
(JSC::emitPutTransitionStub):
(JSC::tryRepatchIn):
(JSC::linkClosureCall):
* jit/SpecializedThunkJIT.h:
(JSC::SpecializedThunkJIT::finalize):
* jit/ThunkGenerators.cpp:
(JSC::throwExceptionFromCallSlowPathGenerator):
(JSC::linkForThunkGenerator):
(JSC::linkClosureCallForThunkGenerator):
(JSC::virtualForThunkGenerator):
(JSC::nativeForGenerator):
(JSC::arityFixup):
* llint/LLIntThunks.cpp:
(JSC::LLInt::generateThunkWithJumpTo):
* yarr/YarrJIT.cpp:
(JSC::Yarr::YarrGenerator::compile):
Source/WebCore:
* cssjit/SelectorCompiler.cpp:
(WebCore::SelectorCompiler::SelectorCodeGenerator::compile):
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@170876 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+2014-07-07 Benjamin Poulain <benjamin@webkit.org>
+
+ LinkBuffer should not keep a reference to the MacroAssembler
+ https://bugs.webkit.org/show_bug.cgi?id=134668
+
+ Reviewed by Geoffrey Garen.
+
+ In FTL, the LinkBuffer can outlive the MacroAssembler that was used for code generation.
+ When that happens, the pointer m_assembler points to released memory. That was not causing
+ issues because the member is not used after linking, but it was not particularly
+ future-proof.
+
+ This patch refactors LinkBuffer to avoid any lifetime risk. The MacroAssembler is now passed
+ as a reference; it is used for linking, but no pointer to it is ever stored in the LinkBuffer.
+
+ While fixing the call sites to use a reference, I also discovered LinkBuffer.h was included
+ everywhere. I refactored some #includes to avoid that.
+
+ * assembler/LinkBuffer.cpp:
+ (JSC::LinkBuffer::copyCompactAndLinkCode):
+ (JSC::LinkBuffer::linkCode):
+ * assembler/LinkBuffer.h:
+ (JSC::LinkBuffer::LinkBuffer):
+ * bytecode/Watchpoint.cpp:
+ * dfg/DFGDisassembler.cpp:
+ * dfg/DFGDisassembler.h:
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::link):
+ (JSC::DFG::JITCompiler::linkFunction):
+ * dfg/DFGOSRExitCompiler.cpp:
+ * dfg/DFGPlan.cpp:
+ * dfg/DFGThunks.cpp:
+ (JSC::DFG::osrExitGenerationThunkGenerator):
+ (JSC::DFG::osrEntryThunkGenerator):
+ * ftl/FTLCompile.cpp:
+ (JSC::FTL::generateICFastPath):
+ (JSC::FTL::fixFunctionBasedOnStackMaps):
+ * ftl/FTLJSCall.cpp:
+ * ftl/FTLJSCall.h:
+ * ftl/FTLLink.cpp:
+ (JSC::FTL::link):
+ * ftl/FTLLowerDFGToLLVM.cpp:
+ * ftl/FTLOSRExitCompiler.cpp:
+ (JSC::FTL::compileStub):
+ * ftl/FTLThunks.cpp:
+ (JSC::FTL::osrExitGenerationThunkGenerator):
+ (JSC::FTL::slowPathCallThunkGenerator):
+ * jit/ArityCheckFailReturnThunks.cpp:
+ (JSC::ArityCheckFailReturnThunks::returnPCsFor):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompile):
+ * jit/JITCall.cpp:
+ (JSC::JIT::privateCompileClosureCall):
+ * jit/JITCall32_64.cpp:
+ (JSC::JIT::privateCompileClosureCall):
+ * jit/JITDisassembler.cpp:
+ * jit/JITDisassembler.h:
+ * jit/JITOpcodes.cpp:
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::stringGetByValStubGenerator):
+ (JSC::JIT::privateCompileGetByVal):
+ (JSC::JIT::privateCompilePutByVal):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::stringGetByValStubGenerator):
+ * jit/RegisterPreservationWrapperGenerator.cpp:
+ (JSC::generateRegisterPreservationWrapper):
+ (JSC::registerRestorationThunkGenerator):
+ * jit/Repatch.cpp:
+ (JSC::generateByIdStub):
+ (JSC::tryCacheGetByID):
+ (JSC::emitPutReplaceStub):
+ (JSC::emitPutTransitionStub):
+ (JSC::tryRepatchIn):
+ (JSC::linkClosureCall):
+ * jit/SpecializedThunkJIT.h:
+ (JSC::SpecializedThunkJIT::finalize):
+ * jit/ThunkGenerators.cpp:
+ (JSC::throwExceptionFromCallSlowPathGenerator):
+ (JSC::linkForThunkGenerator):
+ (JSC::linkClosureCallForThunkGenerator):
+ (JSC::virtualForThunkGenerator):
+ (JSC::nativeForGenerator):
+ (JSC::arityFixup):
+ * llint/LLIntThunks.cpp:
+ (JSC::LLInt::generateThunkWithJumpTo):
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::YarrGenerator::compile):
+
2014-07-07 Andreas Kling <akling@apple.com>
Fast path for jsStringWithCache() when asked for the same string repeatedly.
#if ENABLE(BRANCH_COMPACTION)
template <typename InstructionType>
-void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
{
- m_initialSize = m_assembler->m_assembler.codeSize();
+ m_initialSize = macroAssembler.m_assembler.codeSize();
allocate(m_initialSize, ownerUID, effort);
if (didFailToAllocate())
return;
- uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
+ uint8_t* inData = (uint8_t*)macroAssembler.unlinkedCode();
uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
int readPtr = 0;
int writePtr = 0;
- Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink();
unsigned jumpCount = jumpsToLink.size();
for (unsigned i = 0; i < jumpCount; ++i) {
int offset = readPtr - writePtr;
ASSERT(!(writePtr % 2));
while (copySource != copyEnd)
*copyDst++ = *copySource++;
- m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
+ macroAssembler.recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
readPtr += regionSize;
writePtr += regionSize;
else
target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
- JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
+ JumpLinkType jumpLinkType = macroAssembler.computeJumpType(jumpsToLink[i], outData + writePtr, target);
// Compact branch if we can...
- if (m_assembler->canCompact(jumpsToLink[i].type())) {
+ if (macroAssembler.canCompact(jumpsToLink[i].type())) {
// Step back in the write stream
- int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ int32_t delta = macroAssembler.jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
if (delta) {
writePtr -= delta;
- m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ macroAssembler.recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
}
}
jumpsToLink[i].setFrom(writePtr);
}
// Copy everything after the last jump
memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
- m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
+ macroAssembler.recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
for (unsigned i = 0; i < jumpCount; ++i) {
uint8_t* location = outData + jumpsToLink[i].from();
uint8_t* target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
- m_assembler->link(jumpsToLink[i], location, target);
+ macroAssembler.link(jumpsToLink[i], location, target);
}
jumpsToLink.clear();
#endif
-void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
{
#if !ENABLE(BRANCH_COMPACTION)
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
- m_assembler->m_assembler.buffer().flushConstantPool(false);
+ macroAssembler.m_assembler.buffer().flushConstantPool(false);
#endif
- AssemblerBuffer& buffer = m_assembler->m_assembler.buffer();
+ AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer();
allocate(buffer.codeSize(), ownerUID, effort);
if (!m_didAllocate)
return;
ASSERT(m_code);
#if CPU(ARM_TRADITIONAL)
- m_assembler->m_assembler.prepareExecutableCopy(m_code);
+ macroAssembler.m_assembler.prepareExecutableCopy(m_code);
#endif
memcpy(m_code, buffer.data(), buffer.codeSize());
#if CPU(MIPS)
- m_assembler->m_assembler.relocateJumps(buffer.data(), m_code);
+ macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code);
#endif
#elif CPU(ARM_THUMB2)
copyCompactAndLinkCode<uint16_t>(ownerUID, effort);
#endif
public:
- LinkBuffer(VM& vm, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
: m_size(0)
#if ENABLE(BRANCH_COMPACTION)
, m_initialSize(0)
#endif
, m_didAllocate(false)
, m_code(0)
- , m_storage(masm->m_assembler.buffer().storage())
- , m_assembler(masm)
+ , m_storage(macroAssembler.m_assembler.buffer().storage())
, m_vm(&vm)
#ifndef NDEBUG
, m_completed(false)
#endif
{
- linkCode(ownerUID, effort);
+ linkCode(macroAssembler, ownerUID, effort);
}
- LinkBuffer(VM& vm, MacroAssembler* masm, void* code, size_t size)
+ LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* code, size_t size)
: m_size(size)
#if ENABLE(BRANCH_COMPACTION)
, m_initialSize(0)
#endif
, m_didAllocate(false)
, m_code(code)
- , m_storage(masm->m_assembler.buffer().storage())
- , m_assembler(masm)
+ , m_storage(macroAssembler.m_assembler.buffer().storage())
, m_vm(&vm)
#ifndef NDEBUG
, m_completed(false)
#endif
{
- linkCode(0, JITCompilationCanFail);
+ linkCode(macroAssembler, 0, JITCompilationCanFail);
}
~LinkBuffer()
void allocate(size_t initialSize, void* ownerUID, JITCompilationEffort);
void shrink(size_t newSize);
- JS_EXPORT_PRIVATE void linkCode(void* ownerUID, JITCompilationEffort);
+ JS_EXPORT_PRIVATE void linkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
#if ENABLE(BRANCH_COMPACTION)
template <typename InstructionType>
- void copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort);
+ void copyCompactAndLinkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
#endif
void performFinalization();
bool m_didAllocate;
void* m_code;
RefPtr<AssemblerData> m_storage;
- MacroAssembler* m_assembler;
VM* m_vm;
#ifndef NDEBUG
bool m_completed;
#include "config.h"
#include "Watchpoint.h"
-#include "LinkBuffer.h"
#include <wtf/CompilationThread.h>
#include <wtf/PassRefPtr.h>
#include "DFGGraph.h"
#include "DFGJITCode.h"
#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include "ProfilerDatabase.h"
#include <wtf/StdLibExtras.h>
#include "DFGCommon.h"
#include "DumpContext.h"
-#include "LinkBuffer.h"
#include "MacroAssembler.h"
-#include "ProfilerDatabase.h"
+#include "ProfilerCompilation.h"
#include <wtf/HashMap.h>
#include <wtf/StringPrintStream.h>
#include <wtf/Vector.h>
-namespace JSC { namespace DFG {
+namespace JSC {
+
+class LinkBuffer;
+
+namespace DFG {
class Graph;
void JITCompiler::link()
{
- OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
+ OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, *this, m_codeBlock, JITCompilationCanFail));
if (linkBuffer->didFailToAllocate()) {
m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
return;
void JITCompiler::linkFunction()
{
// === Link ===
- OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
+ OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, *this, m_codeBlock, JITCompilationCanFail));
if (linkBuffer->didFailToAllocate()) {
m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
return;
exitCompiler.compileExit(exit, operands, recovery);
- LinkBuffer patchBuffer(*vm, &jit, codeBlock);
+ LinkBuffer patchBuffer(*vm, jit, codeBlock);
exit.m_code = FINALIZE_CODE_IF(
shouldShowDisassembly() || Options::verboseOSR(),
patchBuffer,
#include "DFGVirtualRegisterAllocationPhase.h"
#include "DFGWatchpointCollectionPhase.h"
#include "Debugger.h"
-#include "OperandsInlines.h"
#include "JSCInlines.h"
+#include "OperandsInlines.h"
+#include "ProfilerDatabase.h"
#include <wtf/CurrentTime.h>
#if ENABLE(FTL_JIT)
jit.jump(MacroAssembler::AbsoluteAddress(&vm->osrExitJumpDestination));
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
patchBuffer.link(functionCall, compileOSRExit);
ok.link(&jit);
jit.jump(GPRInfo::regT1);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("DFG OSR entry thunk"));
}
char* startOfIC =
bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
- LinkBuffer linkBuffer(vm, &fastPathJIT, startOfIC, sizeOfIC);
+ LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
// Note: we could handle the !isValid() case. We just don't appear to have a
// reason to do so, yet.
RELEASE_ASSERT(linkBuffer.isValid());
checkJIT.jump(exceptionContinueArg1Set);
OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(
- vm, &checkJIT, codeBlock, JITCompilationMustSucceed));
+ vm, checkJIT, codeBlock, JITCompilationMustSucceed));
linkBuffer->link(call, FunctionPtr(lookupExceptionHandler));
state.finalizer->handleExceptionsLinkBuffer = linkBuffer.release();
RELEASE_ASSERT(didSeeUnwindInfo);
OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(
- vm, &exitThunkGenerator, codeBlock, JITCompilationMustSucceed));
+ vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed));
RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
state.finalizer->sideCodeLinkBuffer = adoptPtr(
- new LinkBuffer(vm, &slowPathJIT, codeBlock, JITCompilationMustSucceed));
+ new LinkBuffer(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed));
state.finalizer->sideCodeLinkBuffer->link(
exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
- LinkBuffer linkBuffer(vm, &fastPathJIT, startOfIC, sizeOfCall());
+ LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
if (!linkBuffer.isValid()) {
dataLog("Failed to insert inline cache for call because we thought the size would be ", sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
RELEASE_ASSERT_NOT_REACHED();
#if ENABLE(FTL_JIT)
#include "DFGNode.h"
+#include "LinkBuffer.h"
namespace JSC { namespace FTL {
#include "CCallHelpers.h"
#include "CallLinkInfo.h"
#include "CodeOrigin.h"
-#include "LinkBuffer.h"
namespace JSC {
+class LinkBuffer;
+
namespace DFG {
struct Node;
}
jit.emitFunctionEpilogue();
mainPathJumps.append(jit.jump());
- linkBuffer = adoptPtr(new LinkBuffer(vm, &jit, codeBlock, JITCompilationMustSucceed));
+ linkBuffer = adoptPtr(new LinkBuffer(vm, jit, codeBlock, JITCompilationMustSucceed));
linkBuffer->link(callArityCheck, codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
linkBuffer->link(callArityFixup, FunctionPtr((vm.getCTIStub(arityFixup)).code().executableAddress()));
linkBuffer->link(mainPathJumps, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction)));
jit.emitFunctionEpilogue();
CCallHelpers::Jump mainPathJump = jit.jump();
- linkBuffer = adoptPtr(new LinkBuffer(vm, &jit, codeBlock, JITCompilationMustSucceed));
+ linkBuffer = adoptPtr(new LinkBuffer(vm, jit, codeBlock, JITCompilationMustSucceed));
linkBuffer->link(mainPathJump, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction)));
state.jitCode->initializeAddressForCall(linkBuffer->locationOf(start));
#include "FTLOutput.h"
#include "FTLThunks.h"
#include "FTLWeightedTarget.h"
-#include "LinkBuffer.h"
#include "OperandsInlines.h"
#include "JSCInlines.h"
#include "VirtualRegister.h"
adjustAndJumpToTarget(jit, exit);
- LinkBuffer patchBuffer(*vm, &jit, codeBlock);
+ LinkBuffer patchBuffer(*vm, jit, codeBlock);
exit.m_code = FINALIZE_CODE_IF(
shouldShowDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
patchBuffer,
jit.ret();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
patchBuffer.link(functionCall, compileFTLOSRExit);
return FINALIZE_CODE(patchBuffer, ("FTL OSR exit generation thunk"));
}
jit.ret();
- LinkBuffer patchBuffer(vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(vm, jit, GLOBAL_THUNK_ID);
patchBuffer.link(call, FunctionPtr(key.callTarget()));
return FINALIZE_CODE(patchBuffer, ("FTL slow path call thunk for %s", toCString(key).data()));
}
jit.jump(GPRInfo::regT2);
}
- LinkBuffer linkBuffer(vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer linkBuffer(vm, jit, GLOBAL_THUNK_ID);
unsigned returnPCsSize = numExpectedArgumentsIncludingThis / stackAlignmentRegisters() + 1;
std::unique_ptr<CodeLocationLabel[]> returnPCs =
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
+#include "ProfilerDatabase.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
if (m_disassembler)
m_disassembler->setEndOfCode(label());
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
if (patchBuffer.didFailToAllocate())
return CompilationFailed;
#include "JSFunction.h"
#include "Interpreter.h"
#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
restoreReturnAddressBeforeReturn(regT2);
Jump slow = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
#include "JSArray.h"
#include "JSFunction.h"
#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
restoreReturnAddressBeforeReturn(regT2);
Jump slow = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
#include "CodeBlockWithJITType.h"
#include "JIT.h"
#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include <wtf/StringPrintStream.h>
namespace JSC {
#if ENABLE(JIT)
-#include "LinkBuffer.h"
#include "MacroAssembler.h"
-#include "ProfilerDatabase.h"
#include <wtf/Vector.h>
+#include <wtf/text/CString.h>
namespace JSC {
class CodeBlock;
+class LinkBuffer;
+
+namespace Profiler {
+class Compilation;
+}
class JITDisassembler {
WTF_MAKE_FAST_ALLOCATED;
#include "JSCell.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
-#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "SlowPathCall.h"
#include "VirtualRegister.h"
jumpToExceptionHandler();
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
patchBuffer.link(nativeCall, FunctionPtr(func));
return FINALIZE_CODE(patchBuffer, ("JIT CTI native call"));
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
Jump done = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
Jump done = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
jit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR0);
AssemblyHelpers::Jump jump = jit.jump();
- LinkBuffer linkBuffer(vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer linkBuffer(vm, jit, GLOBAL_THUNK_ID);
linkBuffer.link(jump, CodeLocationLabel(target));
if (Options::verboseFTLToJSThunk())
{
AssemblyHelpers jit(vm, 0);
generateRegisterRestoration(jit);
- LinkBuffer linkBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer linkBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(linkBuffer, ("Register restoration thunk"));
}
}
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+ LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock());
linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
if (kind == CallCustomGetter || kind == CallCustomSetter) {
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
- LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
+ LinkBuffer patchBuffer(*vm, stubJit, codeBlock);
linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
MacroAssembler::Jump success = stubJit.jump();
- LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
+ LinkBuffer patchBuffer(*vm, stubJit, codeBlock);
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
patchBuffer.link(failure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
failure = badStructure;
}
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+ LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock());
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
patchBuffer.link(failure, failureLabel);
successInSlowPath = stubJit.jump();
}
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+ LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock());
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
if (allocator.didReuseRegisters())
patchBuffer.link(failure, failureLabel);
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+ LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock());
linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4);
AssemblyHelpers::Jump slow = stubJit.jump();
- LinkBuffer patchBuffer(*vm, &stubJit, callerCodeBlock);
+ LinkBuffer patchBuffer(*vm, stubJit, callerCodeBlock);
patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
if (JITCode::isOptimizingJIT(callerCodeBlock->jitType()))
MacroAssemblerCodeRef finalize(MacroAssemblerCodePtr fallback, const char* thunkKind)
{
- LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
patchBuffer.link(m_failures, CodeLocationLabel(fallback));
for (unsigned i = 0; i < m_calls.size(); i++)
patchBuffer.link(m_calls[i].first, m_calls[i].second);
jit.call(GPRInfo::nonArgGPR0);
jit.jumpToExceptionHandler();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
slowPathFor(jit, vm, operationLinkFor(kind, registers));
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
patchBuffer,
("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
slowPathFor(jit, vm, operationLinkClosureCallFor(registers));
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("Link closure call %s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
slowPathFor(jit, vm, operationVirtualFor(kind, registers));
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
patchBuffer,
("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
jit.jumpToExceptionHandler();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}
jit.ret();
#endif
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}
jit.move(JSInterfaceJIT::TrustedImmPtr(bitwise_cast<void*>(target)), JSInterfaceJIT::regT0);
jit.jump(JSInterfaceJIT::regT0);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("LLInt %s prologue thunk", thunkKind));
}
backtrack();
// Link & finalize the code.
- LinkBuffer linkBuffer(*vm, this, REGEXP_CODE_ID);
+ LinkBuffer linkBuffer(*vm, *this, REGEXP_CODE_ID);
m_backtrackingState.linkDataLabels(linkBuffer);
if (compileMode == MatchOnly) {
+2014-07-07 Benjamin Poulain <benjamin@webkit.org>
+
+ LinkBuffer should not keep a reference to the MacroAssembler
+ https://bugs.webkit.org/show_bug.cgi?id=134668
+
+ Reviewed by Geoffrey Garen.
+
+ * cssjit/SelectorCompiler.cpp:
+ (WebCore::SelectorCompiler::SelectorCodeGenerator::compile):
+
2014-07-07 Zalan Bujtas <zalan@apple.com>
Subpixel rendering: Inline box decoration rounds to integral.
return SelectorCompilationStatus::CannotCompile;
}
- JSC::LinkBuffer linkBuffer(*vm, &m_assembler, CSS_CODE_ID);
+ JSC::LinkBuffer linkBuffer(*vm, m_assembler, CSS_CODE_ID);
for (unsigned i = 0; i < m_functionCalls.size(); i++)
linkBuffer.link(m_functionCalls[i].first, m_functionCalls[i].second);