[JSC] Use Function / ScopedLambda / RecursableLambda instead of std::function
https://bugs.webkit.org/show_bug.cgi?id=187472
Reviewed by Mark Lam.
Source/JavaScriptCore:
std::function allocates memory from the standard malloc instead of bmalloc. Instead of
using it, we should use WTF::{Function,ScopedLambda,RecursableLambda}.

This patch replaces std::function with the above WTF function types.
If the function's lifetime is bounded by the caller's stack frame, we can use ScopedLambda,
which is really efficient. Otherwise, we should use WTF::Function.
For recursive use cases, we can use RecursableLambda.
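As an illustrative sketch only (not part of this patch; the helper names scheduleTask,
countMatches, and example are hypothetical), the three WTF types are chosen roughly like this:

    #include <wtf/Function.h>
    #include <wtf/RecursableLambda.h>
    #include <wtf/ScopedLambda.h>

    // The callback escapes the caller's stack frame (stored and run later):
    // the callee must own it, so it takes a WTF::Function by rvalue reference.
    static void scheduleTask(WTF::Function<void()>&& task)
    {
        // A real callee would WTFMove(task) into a queue; this sketch just runs it.
        task();
    }

    // The callback is only invoked before the callee returns: the lambda can stay on
    // the caller's stack, so the callee takes a const ScopedLambda& (no allocation).
    static int countMatches(const WTF::ScopedLambda<bool(int)>& predicate)
    {
        int count = 0;
        for (int i = 0; i < 10; ++i) {
            if (predicate(i))
                ++count;
        }
        return count;
    }

    void example()
    {
        scheduleTask([] { /* deferred work */ });

        // scopedLambda<Signature>() wraps the stack-allocated lambda without heap allocation.
        int evens = countMatches(WTF::scopedLambda<bool(int)>([](int value) {
            return !(value % 2);
        }));

        // recursableLambda() passes the lambda to itself as its first argument, so it
        // can recurse without the heap-allocating std::function indirection.
        auto factorial = WTF::recursableLambda([](auto self, int n) -> int {
            return n <= 1 ? 1 : n * self(n - 1);
        });
        int result = factorial(5);
        (void)evens;
        (void)result;
    }

ScopedLambda avoids allocation entirely because the wrapper lives on the caller's stack,
while WTF::Function owns its callable via bmalloc, which is why escaping callbacks take it by &&.
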
* assembler/MacroAssembler.cpp:
(JSC::stdFunctionCallback):
(JSC::MacroAssembler::probe):
* assembler/MacroAssembler.h:
* b3/air/AirDisassembler.cpp:
(JSC::B3::Air::Disassembler::dump):
* b3/air/AirDisassembler.h:
* bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::BytecodeGenerator):
(JSC::BytecodeGenerator::initializeDefaultParameterValuesAndSetupFunctionScopeStack):
(JSC::BytecodeGenerator::emitEnumeration):
* bytecompiler/BytecodeGenerator.h:
* bytecompiler/NodesCodegen.cpp:
(JSC::ArrayNode::emitBytecode):
(JSC::ApplyFunctionCallDotNode::emitBytecode):
(JSC::ForOfNode::emitBytecode):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::addSlowPathGeneratorLambda):
(JSC::DFG::SpeculativeJIT::compileMathIC):
* dfg/DFGSpeculativeJIT.h:
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGValidate.cpp:
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* heap/HeapSnapshotBuilder.cpp:
(JSC::HeapSnapshotBuilder::json):
* heap/HeapSnapshotBuilder.h:
* interpreter/StackVisitor.cpp:
(JSC::StackVisitor::Frame::dump const):
* interpreter/StackVisitor.h:
* runtime/PromiseDeferredTimer.h:
* runtime/VM.cpp:
(JSC::VM::whenIdle):
(JSC::enableProfilerWithRespectToCount):
(JSC::disableProfilerWithRespectToCount):
* runtime/VM.h:
* runtime/VMEntryScope.cpp:
(JSC::VMEntryScope::addDidPopListener):
* runtime/VMEntryScope.h:
* tools/HeapVerifier.cpp:
(JSC::HeapVerifier::verifyCellList):
(JSC::HeapVerifier::validateCell):
(JSC::HeapVerifier::validateJSCell):
* tools/HeapVerifier.h:
Source/WTF:
* wtf/ScopedLambda.h:
(WTF::ScopedLambda<ResultType):
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@234082 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+2018-07-21 Yusuke Suzuki <utatane.tea@gmail.com>
+
+ [JSC] Use Function / ScopedLambda / RecursableLambda instead of std::function
+ https://bugs.webkit.org/show_bug.cgi?id=187472
+
+ Reviewed by Mark Lam.
+
+ std::function allocates memory from the standard malloc instead of bmalloc. Instead of
+ using it, we should use WTF::{Function,ScopedLambda,RecursableLambda}.
+
+ This patch replaces std::function with the above WTF function types.
+ If the function's lifetime is bounded by the caller's stack frame, we can use ScopedLambda,
+ which is really efficient. Otherwise, we should use WTF::Function.
+ For recursive use cases, we can use RecursableLambda.
+
+ * assembler/MacroAssembler.cpp:
+ (JSC::stdFunctionCallback):
+ (JSC::MacroAssembler::probe):
+ * assembler/MacroAssembler.h:
+ * b3/air/AirDisassembler.cpp:
+ (JSC::B3::Air::Disassembler::dump):
+ * b3/air/AirDisassembler.h:
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::BytecodeGenerator):
+ (JSC::BytecodeGenerator::initializeDefaultParameterValuesAndSetupFunctionScopeStack):
+ (JSC::BytecodeGenerator::emitEnumeration):
+ * bytecompiler/BytecodeGenerator.h:
+ * bytecompiler/NodesCodegen.cpp:
+ (JSC::ArrayNode::emitBytecode):
+ (JSC::ApplyFunctionCallDotNode::emitBytecode):
+ (JSC::ForOfNode::emitBytecode):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::addSlowPathGeneratorLambda):
+ (JSC::DFG::SpeculativeJIT::compileMathIC):
+ * dfg/DFGSpeculativeJIT.h:
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGValidate.cpp:
+ * ftl/FTLCompile.cpp:
+ (JSC::FTL::compile):
+ * heap/HeapSnapshotBuilder.cpp:
+ (JSC::HeapSnapshotBuilder::json):
+ * heap/HeapSnapshotBuilder.h:
+ * interpreter/StackVisitor.cpp:
+ (JSC::StackVisitor::Frame::dump const):
+ * interpreter/StackVisitor.h:
+ * runtime/PromiseDeferredTimer.h:
+ * runtime/VM.cpp:
+ (JSC::VM::whenIdle):
+ (JSC::enableProfilerWithRespectToCount):
+ (JSC::disableProfilerWithRespectToCount):
+ * runtime/VM.h:
+ * runtime/VMEntryScope.cpp:
+ (JSC::VMEntryScope::addDidPopListener):
+ * runtime/VMEntryScope.h:
+ * tools/HeapVerifier.cpp:
+ (JSC::HeapVerifier::verifyCellList):
+ (JSC::HeapVerifier::validateCell):
+ (JSC::HeapVerifier::validateJSCell):
+ * tools/HeapVerifier.h:
+
2018-07-20 Michael Saboff <msaboff@apple.com>
DFG AbstractInterpreter: CheckArray filters array modes for DirectArguments/ScopedArguments using only NonArray
#if ENABLE(MASM_PROBE)
static void stdFunctionCallback(Probe::Context& context)
{
- auto func = context.arg<const std::function<void(Probe::Context&)>*>();
+ auto func = context.arg<const Function<void(Probe::Context&)>*>();
(*func)(context);
}
-void MacroAssembler::probe(std::function<void(Probe::Context&)> func)
+void MacroAssembler::probe(Function<void(Probe::Context&)> func)
{
- probe(stdFunctionCallback, new std::function<void(Probe::Context&)>(func));
+ probe(stdFunctionCallback, new Function<void(Probe::Context&)>(WTFMove(func)));
}
#endif // ENABLE(MASM_PROBE)
// MacroAssembler.
void probe(Probe::Function, void* arg);
- JS_EXPORT_PRIVATE void probe(std::function<void(Probe::Context&)>);
+ JS_EXPORT_PRIVATE void probe(Function<void(Probe::Context&)>);
// Let's you print from your JIT generated code.
// See comments in MacroAssemblerPrinter.h for examples of how to use this.
RELEASE_ASSERT(addResult.isNewEntry);
}
-void Disassembler::dump(Code& code, PrintStream& out, LinkBuffer& linkBuffer, const char* airPrefix, const char* asmPrefix, std::function<void(Inst&)> doToEachInst)
+void Disassembler::dump(Code& code, PrintStream& out, LinkBuffer& linkBuffer, const char* airPrefix, const char* asmPrefix, const ScopedLambda<void(Inst&)>& doToEachInst)
{
auto dumpAsmRange = [&] (CCallHelpers::Label startLabel, CCallHelpers::Label endLabel) {
RELEASE_ASSERT(startLabel.isSet());
void startBlock(BasicBlock*, CCallHelpers&);
void addInst(Inst*, CCallHelpers::Label, CCallHelpers::Label);
- void dump(Code&, PrintStream&, LinkBuffer&, const char* airPrefix, const char* asmPrefix, std::function<void(Inst&)> doToEachInst);
+ void dump(Code&, PrintStream&, LinkBuffer&, const char* airPrefix, const char* asmPrefix, const ScopedLambda<void(Inst&)>& doToEachInst);
private:
HashMap<Inst*, std::pair<CCallHelpers::Label, CCallHelpers::Label>> m_instToRange;
if (shouldCaptureAllOfTheThings)
functionNode->varDeclarations().markAllVariablesAsCaptured();
- auto captures = [&] (UniquedStringImpl* uid) -> bool {
+ auto captures = scopedLambda<bool (UniquedStringImpl*)>([&] (UniquedStringImpl* uid) -> bool {
if (!shouldCaptureSomeOfTheThings)
return false;
if (needsArguments && uid == propertyNames().arguments.impl()) {
return true;
}
return functionNode->captures(uid);
- };
+ });
auto varKind = [&] (UniquedStringImpl* uid) -> VarKind {
return captures(uid) ? VarKind::Scope : VarKind::Stack;
};
void BytecodeGenerator::initializeDefaultParameterValuesAndSetupFunctionScopeStack(
FunctionParameters& parameters, bool isSimpleParameterList, FunctionNode* functionNode, SymbolTable* functionSymbolTable,
- int symbolTableConstantIndex, const std::function<bool (UniquedStringImpl*)>& captures, bool shouldCreateArgumentsVariableInParameterScope)
+ int symbolTableConstantIndex, const ScopedLambda<bool (UniquedStringImpl*)>& captures, bool shouldCreateArgumentsVariableInParameterScope)
{
Vector<std::pair<Identifier, RefPtr<RegisterID>>> valuesToMoveIntoVars;
ASSERT(!(isSimpleParameterList && shouldCreateArgumentsVariableInParameterScope));
return false;
}
-void BytecodeGenerator::emitEnumeration(ThrowableExpressionData* node, ExpressionNode* subjectNode, const std::function<void(BytecodeGenerator&, RegisterID*)>& callBack, ForOfNode* forLoopNode, RegisterID* forLoopSymbolTable)
+void BytecodeGenerator::emitEnumeration(ThrowableExpressionData* node, ExpressionNode* subjectNode, const ScopedLambda<void(BytecodeGenerator&, RegisterID*)>& callBack, ForOfNode* forLoopNode, RegisterID* forLoopSymbolTable)
{
bool isForAwait = forLoopNode ? forLoopNode->isForAwait() : false;
ASSERT(!isForAwait || (isForAwait && isAsyncFunctionParseMode(parseMode())));
void emitCallDefineProperty(RegisterID* newObj, RegisterID* propertyNameRegister,
RegisterID* valueRegister, RegisterID* getterRegister, RegisterID* setterRegister, unsigned options, const JSTextPosition&);
- void emitEnumeration(ThrowableExpressionData* enumerationNode, ExpressionNode* subjectNode, const std::function<void(BytecodeGenerator&, RegisterID*)>& callBack, ForOfNode* = nullptr, RegisterID* forLoopSymbolTable = nullptr);
+ void emitEnumeration(ThrowableExpressionData* enumerationNode, ExpressionNode* subjectNode, const ScopedLambda<void(BytecodeGenerator&, RegisterID*)>& callBack, ForOfNode* = nullptr, RegisterID* forLoopSymbolTable = nullptr);
RegisterID* emitGetTemplateObject(RegisterID* dst, TaggedTemplateNode*);
RegisterID* emitGetGlobalPrivate(RegisterID* dst, const Identifier& property);
void initializeParameters(FunctionParameters&);
void initializeVarLexicalEnvironment(int symbolTableConstantIndex, SymbolTable* functionSymbolTable, bool hasCapturedVariables);
- void initializeDefaultParameterValuesAndSetupFunctionScopeStack(FunctionParameters&, bool isSimpleParameterList, FunctionNode*, SymbolTable*, int symbolTableConstantIndex, const std::function<bool (UniquedStringImpl*)>& captures, bool shouldCreateArgumentsVariableInParameterScope);
+ void initializeDefaultParameterValuesAndSetupFunctionScopeStack(FunctionParameters&, bool isSimpleParameterList, FunctionNode*, SymbolTable*, int symbolTableConstantIndex, const ScopedLambda<bool (UniquedStringImpl*)>& captures, bool shouldCreateArgumentsVariableInParameterScope);
void initializeArrowFunctionContextScopeIfNeeded(SymbolTable* functionSymbolTable = nullptr, bool canReuseLexicalEnvironment = false);
bool needsDerivedConstructorInArrowFunctionLexicalEnvironment();
handleSpread:
RefPtr<RegisterID> index = generator.emitLoad(generator.newTemporary(), jsNumber(length));
- auto spreader = [array, index](BytecodeGenerator& generator, RegisterID* value)
+ auto spreader = scopedLambda<void(BytecodeGenerator&, RegisterID*)>([array, index](BytecodeGenerator& generator, RegisterID* value)
{
generator.emitDirectPutByVal(array.get(), index.get(), value);
generator.emitInc(index.get());
- };
+ });
for (; n; n = n->next()) {
if (n->elision())
generator.emitBinaryOp(op_add, index.get(), index.get(), generator.emitLoad(0, jsNumber(n->elision())), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32()));
RefPtr<RegisterID> thisRegister = generator.emitLoad(generator.newTemporary(), jsUndefined());
RefPtr<RegisterID> argumentsRegister = generator.emitLoad(generator.newTemporary(), jsUndefined());
- auto extractor = [&thisRegister, &argumentsRegister, &index](BytecodeGenerator& generator, RegisterID* value)
+ auto extractor = scopedLambda<void(BytecodeGenerator&, RegisterID*)>([&thisRegister, &argumentsRegister, &index](BytecodeGenerator& generator, RegisterID* value)
{
Ref<Label> haveThis = generator.newLabel();
Ref<Label> end = generator.newLabel();
generator.move(argumentsRegister.get(), value);
generator.emitLoad(index.get(), jsNumber(2));
generator.emitLabel(end.get());
- };
+ });
generator.emitEnumeration(this, spread->expression(), extractor);
generator.emitCallVarargsInTailPosition(returnValue.get(), realFunction.get(), thisRegister.get(), argumentsRegister.get(), generator.newTemporary(), 0, divot(), divotStart(), divotEnd(), DebuggableCall::Yes);
} else if (m_args->m_listNode->m_next) {
RegisterID* forLoopSymbolTable = nullptr;
generator.pushLexicalScope(this, BytecodeGenerator::TDZCheckOptimization::Optimize, BytecodeGenerator::NestedScopeType::IsNested, &forLoopSymbolTable);
- auto extractor = [this, dst](BytecodeGenerator& generator, RegisterID* value)
+ auto extractor = scopedLambda<void(BytecodeGenerator&, RegisterID*)>([this, dst](BytecodeGenerator& generator, RegisterID* value)
{
if (m_lexpr->isResolveNode()) {
const Identifier& ident = static_cast<ResolveNode*>(m_lexpr)->identifier();
}
generator.emitProfileControlFlow(m_statement->startOffset());
generator.emitNode(dst, m_statement);
- };
+ });
generator.emitEnumeration(this, m_expr, extractor, this, forLoopSymbolTable);
generator.popLexicalScope(this);
generator.emitProfileControlFlow(m_statement->endOffset() + (m_statement->isBlock() ? 1 : 0));
m_slowPathGenerators.append(WTFMove(slowPathGenerator));
}
-void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
+void SpeculativeJIT::addSlowPathGeneratorLambda(Function<void()>&& lambda)
{
- m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
+ m_slowPathLambdas.append(SlowPathLambda{ WTFMove(lambda), m_currentNode, static_cast<unsigned>(m_stream->size()) });
}
void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
auto done = m_jit.label();
- addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
+ addSlowPathGeneratorLambda([=, savePlans = WTFMove(savePlans)] () {
addICGenerationState->slowPathJumps.link(&m_jit);
addICGenerationState->slowPathStart = m_jit.label();
#if ENABLE(MATH_IC_STATS)
auto done = m_jit.label();
- addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
+ addSlowPathGeneratorLambda([=, savePlans = WTFMove(savePlans)] () {
icGenerationState->slowPathJumps.link(&m_jit);
icGenerationState->slowPathStart = m_jit.label();
#if ENABLE(MATH_IC_STATS)
GeneratedOperandType checkGeneratedTypeForToInt32(Node*);
void addSlowPathGenerator(std::unique_ptr<SlowPathGenerator>);
- void addSlowPathGenerator(std::function<void()>);
+ void addSlowPathGeneratorLambda(Function<void()>&&);
void runSlowPathGenerators(PCToCodeOriginMapBuilder&);
void compile(Node*);
Vector<std::unique_ptr<SlowPathGenerator>, 8> m_slowPathGenerators;
struct SlowPathLambda {
- std::function<void()> generator;
+ Function<void()> generator;
Node* currentNode;
unsigned streamIndex;
};
silentSpillAllRegistersImpl(false, savePlans, InvalidGPRReg);
unsigned bytecodeIndex = node->origin.semantic.bytecodeIndex;
- addSlowPathGenerator([=]() {
+ addSlowPathGeneratorLambda([=]() {
callTierUp.link(&m_jit);
silentSpill(savePlans);
unsigned streamIndex = m_stream->size();
m_jit.jitCode()->bytecodeIndexToStreamIndex.add(bytecodeIndex, streamIndex);
- addSlowPathGenerator([=]() {
+ addSlowPathGeneratorLambda([=]() {
forceOSREntry.link(&m_jit);
overflowedCounter.link(&m_jit);
// Validate clobbered states.
struct DefLambdaAdaptor {
- std::function<void(PureValue)> pureValue;
- std::function<void(HeapLocation, LazyNode)> locationAndNode;
+ Function<void(PureValue)> pureValue;
+ Function<void(HeapLocation, LazyNode)> locationAndNode;
void operator()(PureValue value) const
{
#include "LinkBuffer.h"
#include "PCToCodeOriginMap.h"
#include "ScratchRegisterAllocator.h"
-#include <wtf/Function.h>
+#include <wtf/RecursableLambda.h>
namespace JSC { namespace FTL {
printDFGNode(bitwise_cast<Node*>(value->origin().data()));
HashSet<B3::Value*> localPrintedValues;
- WTF::Function<void(B3::Value*)> printValueRecursive = [&] (B3::Value* value) {
+ auto printValueRecursive = recursableLambda([&] (auto self, B3::Value* value) -> void {
if (printedValues.contains(value) || localPrintedValues.contains(value))
return;
localPrintedValues.add(value);
for (unsigned i = 0; i < value->numChildren(); i++)
- printValueRecursive(value->child(i));
+ self(value->child(i));
out.print(b3Prefix);
value->deepDump(state.proc.get(), out);
out.print("\n");
- };
+ });
printValueRecursive(currentB3Value);
printedValues.add(value);
};
- auto forEachInst = [&] (B3::Air::Inst& inst) {
+ auto forEachInst = scopedLambda<void(B3::Air::Inst&)>([&] (B3::Air::Inst& inst) {
printB3Value(inst.origin);
- };
+ });
disassembler->dump(state.proc->code(), out, linkBuffer, airPrefix, asmPrefix, forEachInst);
linkBuffer.didAlreadyDisassemble();
return json([] (const HeapSnapshotNode&) { return true; });
}
-String HeapSnapshotBuilder::json(std::function<bool (const HeapSnapshotNode&)> allowNodeCallback)
+String HeapSnapshotBuilder::json(Function<bool (const HeapSnapshotNode&)> allowNodeCallback)
{
VM& vm = m_profiler.vm();
DeferGCForAWhile deferGC(vm.heap);
void appendIndexEdge(JSCell* from, JSCell* to, uint32_t index);
String json();
- String json(std::function<bool (const HeapSnapshotNode&)> allowNodeCallback);
+ String json(Function<bool (const HeapSnapshotNode&)> allowNodeCallback);
private:
// Finalized snapshots are not modified during building. So searching them
dump(out, indent, [] (PrintStream&) { });
}
-void StackVisitor::Frame::dump(PrintStream& out, Indenter indent, std::function<void(PrintStream&)> prefix) const
+void StackVisitor::Frame::dump(PrintStream& out, Indenter indent, WTF::Function<void(PrintStream&)> prefix) const
{
if (!this->callFrame()) {
out.print(indent, "frame 0x0\n");
#include "CalleeBits.h"
#include "VMEntryRecord.h"
#include "WasmIndexOrName.h"
-#include <functional>
+#include <wtf/Function.h>
#include <wtf/Indenter.h>
#include <wtf/text/WTFString.h>
CallFrame* callFrame() const { return m_callFrame; }
void dump(PrintStream&, Indenter = Indenter()) const;
- void dump(PrintStream&, Indenter, std::function<void(PrintStream&)> prefix) const;
+ void dump(PrintStream&, Indenter, WTF::Function<void(PrintStream&)> prefix) const;
private:
Frame() { }
// JSPromiseDeferred should handle canceling when the promise is resolved or rejected.
bool cancelPendingPromise(JSPromiseDeferred*);
- typedef std::function<void()> Task;
+ using Task = Function<void()>;
void scheduleWorkSoon(JSPromiseDeferred*, Task&&);
void stopRunningTasks() { m_runTasks = false; }
dateInstanceCache.reset();
}
-void VM::whenIdle(std::function<void()> callback)
+void VM::whenIdle(Function<void()>&& callback)
{
if (!entryScope) {
callback();
return;
}
- entryScope->addDidPopListener(callback);
+ entryScope->addDidPopListener(WTFMove(callback));
}
void VM::deleteAllLinkedCode(DeleteAllCodeEffort effort)
watchpointSet->fireAll(*this, "Impure property added");
}
-static bool enableProfilerWithRespectToCount(unsigned& counter, std::function<void()> doEnableWork)
+template<typename Func>
+static bool enableProfilerWithRespectToCount(unsigned& counter, const Func& doEnableWork)
{
bool needsToRecompile = false;
if (!counter) {
return needsToRecompile;
}
-static bool disableProfilerWithRespectToCount(unsigned& counter, std::function<void()> doDisableWork)
+template<typename Func>
+static bool disableProfilerWithRespectToCount(unsigned& counter, const Func& doDisableWork)
{
RELEASE_ASSERT(counter > 0);
bool needsToRecompile = false;
JSLock& apiLock() { return *m_apiLock; }
CodeCache* codeCache() { return m_codeCache.get(); }
- JS_EXPORT_PRIVATE void whenIdle(std::function<void()>);
+ JS_EXPORT_PRIVATE void whenIdle(Function<void()>&&);
JS_EXPORT_PRIVATE void deleteAllCode(DeleteAllCodeEffort);
JS_EXPORT_PRIVATE void deleteAllLinkedCode(DeleteAllCodeEffort);
vm.clearLastException();
}
-void VMEntryScope::addDidPopListener(std::function<void ()> listener)
+void VMEntryScope::addDidPopListener(Function<void ()>&& listener)
{
- m_didPopListeners.append(listener);
+ m_didPopListeners.append(WTFMove(listener));
}
VMEntryScope::~VMEntryScope()
VM& vm() const { return m_vm; }
JSGlobalObject* globalObject() const { return m_globalObject; }
- void addDidPopListener(std::function<void ()>);
+ void addDidPopListener(Function<void ()>&&);
private:
VM& m_vm;
JSGlobalObject* m_globalObject;
- Vector<std::function<void ()>> m_didPopListeners;
+ Vector<Function<void ()>> m_didPopListeners;
};
} // namespace JSC
auto& liveCells = list.cells();
bool listNamePrinted = false;
- auto printHeaderIfNeeded = [&] () {
+ auto printHeaderIfNeeded = scopedLambda<void()>([&] () {
if (listNamePrinted)
return;
dataLog(" @ phase ", phaseName(phase), ": FAILED in cell list '", list.name(), "' (size ", liveCells.size(), ")\n");
listNamePrinted = true;
m_didPrintLogs = true;
- };
+ });
bool success = true;
for (size_t i = 0; i < liveCells.size(); i++) {
bool HeapVerifier::validateCell(HeapCell* cell, VM* expectedVM)
{
- auto printNothing = [] () { };
+ auto printNothing = scopedLambda<void()>([] () { });
if (cell->isZapped()) {
dataLog(" cell ", RawPointer(cell), " is ZAPPED\n");
return validateJSCell(expectedVM, jsCell, nullptr, nullptr, printNothing);
}
-bool HeapVerifier::validateJSCell(VM* expectedVM, JSCell* cell, CellProfile* profile, CellList* list, std::function<void()> printHeaderIfNeeded, const char* prefix)
+bool HeapVerifier::validateJSCell(VM* expectedVM, JSCell* cell, CellProfile* profile, CellList* list, const ScopedLambda<void()>& printHeaderIfNeeded, const char* prefix)
{
- auto printHeaderAndCell = [cell, profile, printHeaderIfNeeded, prefix] () {
+ auto printHeaderAndCell = [cell, profile, &printHeaderIfNeeded, prefix] () {
printHeaderIfNeeded();
dataLog(prefix, "cell ", RawPointer(cell));
if (profile)
#include "CellList.h"
#include "Heap.h"
#include <wtf/MonotonicTime.h>
+#include <wtf/ScopedLambda.h>
#include <wtf/UniqueArray.h>
namespace JSC {
CellList* cellListForGathering(Phase);
bool verifyCellList(Phase, CellList&);
- static bool validateJSCell(VM* expectedVM, JSCell*, CellProfile*, CellList*, std::function<void()> printHeaderIfNeeded, const char* prefix = "");
+ static bool validateJSCell(VM* expectedVM, JSCell*, CellProfile*, CellList*, const ScopedLambda<void()>& printHeaderIfNeeded, const char* prefix = "");
void printVerificationHeader();
+2018-07-21 Yusuke Suzuki <utatane.tea@gmail.com>
+
+ [JSC] Use Function / ScopedLambda / RecursableLambda instead of std::function
+ https://bugs.webkit.org/show_bug.cgi?id=187472
+
+ Reviewed by Mark Lam.
+
+ * wtf/ScopedLambda.h:
+ (WTF::ScopedLambda<ResultType):
+
2018-07-18 Michael Catanzaro <mcatanzaro@igalia.com>
Switch CMake ports back to C++ 14
#ifndef ScopedLambda_h
#define ScopedLambda_h
+#include <wtf/ForbidHeapAllocation.h>
+
namespace WTF {
// You can use ScopedLambda to efficiently pass lambdas without allocating memory or requiring
template<typename FunctionType> class ScopedLambda;
template<typename ResultType, typename... ArgumentTypes>
class ScopedLambda<ResultType (ArgumentTypes...)> {
+ WTF_FORBID_HEAP_ALLOCATION;
public:
ScopedLambda(ResultType (*impl)(void* arg, ArgumentTypes...) = nullptr, void* arg = nullptr)
: m_impl(impl)