2 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGDOMJITPatchpointParams.h"
38 #include "DFGMayExit.h"
39 #include "DFGOSRExitFuzz.h"
40 #include "DFGSaneStringGetByValSlowPathGenerator.h"
41 #include "DFGSlowPathGenerator.h"
42 #include "DOMJITPatchpoint.h"
43 #include "DirectArguments.h"
44 #include "JITAddGenerator.h"
45 #include "JITBitAndGenerator.h"
46 #include "JITBitOrGenerator.h"
47 #include "JITBitXorGenerator.h"
48 #include "JITDivGenerator.h"
49 #include "JITLeftShiftGenerator.h"
50 #include "JITMulGenerator.h"
51 #include "JITRightShiftGenerator.h"
52 #include "JITSubGenerator.h"
53 #include "JSAsyncFunction.h"
54 #include "JSCInlines.h"
55 #include "JSEnvironmentRecord.h"
56 #include "JSFixedArray.h"
57 #include "JSGeneratorFunction.h"
58 #include "JSLexicalEnvironment.h"
59 #include "LinkBuffer.h"
60 #include "RegExpConstructor.h"
61 #include "ScopedArguments.h"
62 #include "ScratchRegisterAllocator.h"
63 #include <wtf/BitVector.h>
65 #include <wtf/MathExtras.h>
67 namespace JSC { namespace DFG {
// Constructor: binds the speculative code generator to the JITCompiler,
// sizing per-virtual-register generation info from the graph's frame
// register count and wiring up the abstract state/interpreter plus the
// variable-event stream and minified graph used for OSR-exit state
// reconstruction.
// NOTE(review): this listing is incomplete (gaps in the embedded original
// line numbers) — several initializers and the constructor body are not
// visible here; verify against the full file.
69 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
73 , m_lastGeneratedNode(LastNodeType)
75 , m_generationInfo(m_jit.graph().frameRegisterCount())
76 , m_state(m_jit.graph())
77 , m_interpreter(m_jit.graph(), m_state)
78 , m_stream(&jit.jitCode()->variableEventStream)
79 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
// Destructor. NOTE(review): body not visible in this truncated listing.
83 SpeculativeJIT::~SpeculativeJIT()
// Emits inline code that allocates a JSFinalObject plus (optionally) its
// butterfly storage for the given structure, falling back to
// operationNewRawObject on the slow path. resultGPR receives the object,
// storageGPR the butterfly (or null when no storage is needed). Elements
// in [numElements, vectorLength) are pre-filled with holes (PNaN for
// double shapes, the empty JSValue otherwise).
// NOTE(review): the embedded line numbering has gaps — conditional
// compilation guards (e.g. the JSVALUE64/JSVALUE32_64 split around the
// hole-filling loops), some braces and the storage-offset computation are
// missing from this view; confirm against the full source.
87 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
89 IndexingType indexingType = structure->indexingType();
90 bool hasIndexingHeader = hasIndexedProperties(indexingType);
92 unsigned inlineCapacity = structure->inlineCapacity();
93 unsigned outOfLineCapacity = structure->outOfLineCapacity();
95 GPRTemporary scratch(this);
96 GPRTemporary scratch2(this);
97 GPRReg scratchGPR = scratch.gpr();
98 GPRReg scratch2GPR = scratch2.gpr();
// Round the vector length up to the allocator-friendly size.
100 ASSERT(vectorLength >= numElements);
101 vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
103 JITCompiler::JumpList slowCases;
106 if (hasIndexingHeader)
107 size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
108 size += outOfLineCapacity * sizeof(JSValue);
110 m_jit.move(TrustedImmPtr(0), storageGPR);
// Fast-path butterfly allocation out of the auxiliary space, when an
// allocator exists for this size class.
113 if (MarkedAllocator* allocator = m_jit.vm()->auxiliarySpace.allocatorFor(size)) {
114 m_jit.move(TrustedImmPtr(allocator), scratchGPR);
115 m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
118 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
121 if (hasIndexingHeader)
122 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
124 slowCases.append(m_jit.jump());
// Fast-path allocation of the object cell itself.
127 size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
128 MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorFor(allocationSize);
130 m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
131 emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
132 m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
134 slowCases.append(m_jit.jump());
136 // I want a slow path that also loads out the storage pointer, and that's
137 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
138 // of work for a very small piece of functionality. :-/
139 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
140 slowCases, this, operationNewRawObject, resultGPR, storageGPR,
141 structure, vectorLength));
// Fill unused vector slots with the hole value so the GC never sees
// uninitialized butterfly memory.
143 if (numElements < vectorLength) {
145 if (hasDouble(structure->indexingType()))
146 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR)
148 m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
149 for (unsigned i = numElements; i < vectorLength; ++i)
150 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
// 32-bit variant: store tag and payload halves separately.
152 EncodedValueDescriptor value;
153 if (hasDouble(structure->indexingType()))
154 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
156 value.asInt64 = JSValue::encode(JSValue());
157 for (unsigned i = numElements; i < vectorLength; ++i) {
158 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
159 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
164 if (hasIndexingHeader)
165 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
167 m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
// Fence so concurrent markers observe a fully initialized object.
169 m_jit.mutatorFence();
// Loads the argument count for the given (possibly inlined) call frame
// into lengthGPR. For a non-varargs inline frame the count is a
// compile-time constant; otherwise it is loaded from the frame's
// argument-count slot. When includeThis is false, `this` is excluded
// (constant case subtracts via `- !includeThis`; the runtime subtract of
// 1 presumably sits under the same condition, but the guard line is
// missing from this truncated listing — verify).
172 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
174 if (inlineCallFrame && !inlineCallFrame->isVarargs())
175 m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
177 VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
178 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
180 m_jit.sub32(TrustedImm32(1), lengthGPR);
// Convenience overload: forwards to the InlineCallFrame* variant using the
// origin's inline call frame.
184 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
186 emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
// Materializes the callee for the given code origin into calleeGPR:
// loaded from the frame for closure calls, a weak constant pointer for
// non-closure inline calls, or the machine frame's callee slot otherwise.
// NOTE(review): the loadPtr wrapping line 194 and closing braces are
// missing from this truncated listing.
189 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
191 if (origin.inlineCallFrame) {
192 if (origin.inlineCallFrame->isClosureCall) {
194 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
198 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
202 m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
// Computes the address of the first argument for the given origin's frame
// into startGPR (offset from the call frame register). NOTE(review): the
// addPtr/TrustedImm32 lines preceding the offset expression are missing
// from this truncated listing.
205 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
209 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
210 GPRInfo::callFrameRegister, startGPR);
// OSR-exit fuzzing support: when enabled via Options, emits code that
// bumps the global fuzz-check counter and returns a Jump that fires when
// the configured "at"/"atOrAfter" threshold is met, forcing an exit.
// Returns an unset Jump when fuzzing is disabled for this code block.
213 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
215 if (!Options::useOSRExitFuzz()
216 || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
217 || !doOSRExitFuzzing())
218 return MacroAssembler::Jump();
220 MacroAssembler::Jump result;
// regT0 is preserved around the counter update.
222 m_jit.pushToSave(GPRInfo::regT0);
223 m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
224 m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
225 m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
226 unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
227 unsigned at = Options::fireOSRExitFuzzAt();
228 if (at || atOrAfter) {
230 MacroAssembler::RelationalCondition condition;
// atOrAfter: fire for every count >= threshold (ok when Below).
232 threshold = atOrAfter;
233 condition = MacroAssembler::Below;
// at: fire exactly when count == threshold (ok when NotEqual).
236 condition = MacroAssembler::NotEqual;
238 MacroAssembler::Jump ok = m_jit.branch32(
239 condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
240 m_jit.popToRestore(GPRInfo::regT0);
241 result = m_jit.jump();
244 m_jit.popToRestore(GPRInfo::regT0);
// Registers an OSR exit taken when jumpToFail fires (plus the fuzz jump,
// when fuzzing is active), recording the value source and a way to get a
// value profile for the failing node.
249 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
253 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
254 if (fuzzJump.isSet()) {
255 JITCompiler::JumpList jumpsToFail;
256 jumpsToFail.append(fuzzJump);
257 jumpsToFail.append(jumpToFail);
258 m_jit.appendExitInfo(jumpsToFail);
260 m_jit.appendExitInfo(jumpToFail);
261 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
// JumpList variant of speculationCheck: same exit bookkeeping, merging the
// optional fuzz jump into the caller's jump list.
264 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
268 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
269 if (fuzzJump.isSet()) {
270 JITCompiler::JumpList myJumpsToFail;
271 myJumpsToFail.append(jumpsToFail);
272 myJumpsToFail.append(fuzzJump);
273 m_jit.appendExitInfo(myJumpsToFail);
275 m_jit.appendExitInfo(jumpsToFail);
276 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
// Registers an OSR exit whose jump is patched in later via the returned
// placeholder (indexed by position in the osrExit vector). The early
// empty-placeholder return at line 282 presumably sits under a
// !m_compileOkay-style guard missing from this truncated listing — verify.
279 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
282 return OSRExitJumpPlaceholder();
283 unsigned index = m_jit.jitCode()->osrExit.size();
284 m_jit.appendExitInfo();
285 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
286 return OSRExitJumpPlaceholder(index);
// Edge convenience overload: forwards to the Node* placeholder variant.
289 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
291 return speculationCheck(kind, jsValueSource, nodeUse.node());
// Edge convenience overload: forwards to the Node*/Jump variant.
294 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
296 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
// Edge convenience overload: forwards to the Node*/JumpList variant.
299 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
301 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
// speculationCheck variant that attaches a SpeculationRecovery (undoing a
// speculatively-performed operation) to the registered OSR exit.
304 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
308 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
309 m_jit.appendExitInfo(jumpToFail);
310 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
// Edge convenience overload: forwards to the Node*/recovery variant.
313 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
315 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
// Emits an invalidation point: an UncountableInvalidation OSR exit whose
// replacement source is a watchpoint label, so the code can be jumped out
// of when a watchpoint fires.
318 void SpeculativeJIT::emitInvalidationPoint(Node* node)
322 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
323 m_jit.jitCode()->appendOSRExit(OSRExit(
324 UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
325 this, m_stream->size()));
326 info.m_replacementSource = m_jit.watchpointLabel();
327 ASSERT(info.m_replacementSource.isSet());
// Marks compilation as failed and emits an abort for a node that should
// never be reached at runtime.
331 void SpeculativeJIT::unreachable(Node* node)
333 m_compileOkay = false;
334 m_jit.abortWithReason(DFGUnreachableNode, node->op());
// Emits an unconditional OSR exit and bails out of compiling the rest of
// this block (m_compileOkay = false).
337 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
341 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
342 m_compileOkay = false;
343 if (verboseCompilationEnabled())
344 dataLog("Bailing compilation.\n");
// Edge convenience overload: forwards to the Node* variant.
347 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
349 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
// Type check: narrows the abstract-interpreter's proven type for the edge
// to typesPassedThrough and registers an OSR exit on jumpToFail.
352 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
354 ASSERT(needsTypeCheck(edge, typesPassedThrough));
355 m_interpreter.filter(edge, typesPassedThrough);
356 speculationCheck(exitKind, source, edge.node(), jumpToFail);
// Returns the set of registers currently in use by the register banks,
// merged with the registers stubs cannot use. NOTE(review): the result
// declaration, the set() calls inside the loops, and the return are
// missing from this truncated listing.
359 RegisterSet SpeculativeJIT::usedRegisters()
363 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
364 GPRReg gpr = GPRInfo::toRegister(i);
365 if (m_gprs.isInUse(gpr))
368 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
369 FPRReg fpr = FPRInfo::toRegister(i);
370 if (m_fprs.isInUse(fpr))
374 result.merge(RegisterSet::stubUnavailableRegisters());
// Takes ownership of a slow-path generator; it is emitted later by
// runSlowPathGenerators().
379 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
381 m_slowPathGenerators.append(WTFMove(slowPathGenerator));
// Lambda form: records the current node and stream index so the lambda can
// be replayed with the right OSR-exit context during slow-path emission.
384 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
386 m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
// Emits all queued slow paths (generator objects first, then lambdas),
// restoring each lambda's node/stream-index context and recording
// PC-to-code-origin mappings for each.
389 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
391 for (auto& slowPathGenerator : m_slowPathGenerators) {
392 pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
393 slowPathGenerator->generate(this);
395 for (auto& slowPathLambda : m_slowPathLambdas) {
396 Node* currentNode = slowPathLambda.currentNode;
397 m_currentNode = currentNode;
398 m_outOfLineStreamIndex = slowPathLambda.streamIndex;
399 pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
400 slowPathLambda.generator();
401 m_outOfLineStreamIndex = std::nullopt;
// Resets all per-virtual-register generation info and both register banks
// to their default (empty) state.
405 void SpeculativeJIT::clearGenerationInfo()
407 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
408 m_generationInfo[i] = GenerationInfo();
409 m_gprs = RegisterBank<GPRInfo>();
410 m_fprs = RegisterBank<FPRInfo>();
// Builds a SilentRegisterSavePlan for a GPR: picks the spill action (how
// to save the value across a call, if a spill is needed) and the fill
// action (how to restore it afterwards — reload, or rematerialize from a
// constant) based on the value's DataFormat and spill format.
// NOTE(review): this truncated listing drops many lines — the
// #if USE(JSVALUE64)/#else/#endif guards, several `else` branches, and
// closing braces (the embedded original numbering has gaps throughout).
// The code below is preserved byte-for-byte; consult the full file before
// drawing conclusions about branch structure.
413 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
415 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
416 Node* node = info.node();
417 DataFormat registerFormat = info.registerFormat();
418 ASSERT(registerFormat != DataFormatNone);
419 ASSERT(registerFormat != DataFormatDouble);
421 SilentSpillAction spillAction;
422 SilentFillAction fillAction;
// --- Choose the spill action ---
424 if (!info.needsSpill())
425 spillAction = DoNothingForSpill;
428 ASSERT(info.gpr() == source);
429 if (registerFormat == DataFormatInt32)
430 spillAction = Store32Payload;
431 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
432 spillAction = StorePtr;
433 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
434 spillAction = Store64;
436 ASSERT(registerFormat & DataFormatJS);
437 spillAction = Store64;
439 #elif USE(JSVALUE32_64)
440 if (registerFormat & DataFormatJS) {
441 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
442 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
444 ASSERT(info.gpr() == source);
445 spillAction = Store32Payload;
// --- Choose the fill action ---
450 if (registerFormat == DataFormatInt32) {
451 ASSERT(info.gpr() == source);
452 ASSERT(isJSInt32(info.registerFormat()));
453 if (node->hasConstant()) {
454 ASSERT(node->isInt32Constant());
455 fillAction = SetInt32Constant;
457 fillAction = Load32Payload;
458 } else if (registerFormat == DataFormatBoolean) {
460 RELEASE_ASSERT_NOT_REACHED();
461 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
462 fillAction = DoNothingForFill;
464 #elif USE(JSVALUE32_64)
465 ASSERT(info.gpr() == source);
466 if (node->hasConstant()) {
467 ASSERT(node->isBooleanConstant());
468 fillAction = SetBooleanConstant;
470 fillAction = Load32Payload;
472 } else if (registerFormat == DataFormatCell) {
473 ASSERT(info.gpr() == source);
474 if (node->hasConstant()) {
475 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
476 node->asCell(); // To get the assertion.
477 fillAction = SetCellConstant;
480 fillAction = LoadPtr;
482 fillAction = Load32Payload;
485 } else if (registerFormat == DataFormatStorage) {
486 ASSERT(info.gpr() == source);
487 fillAction = LoadPtr;
488 } else if (registerFormat == DataFormatInt52) {
489 if (node->hasConstant())
490 fillAction = SetInt52Constant;
491 else if (info.spillFormat() == DataFormatInt52)
493 else if (info.spillFormat() == DataFormatStrictInt52)
494 fillAction = Load64ShiftInt52Left;
495 else if (info.spillFormat() == DataFormatNone)
498 RELEASE_ASSERT_NOT_REACHED();
499 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
500 fillAction = Load64; // Make GCC happy.
503 } else if (registerFormat == DataFormatStrictInt52) {
504 if (node->hasConstant())
505 fillAction = SetStrictInt52Constant;
506 else if (info.spillFormat() == DataFormatInt52)
507 fillAction = Load64ShiftInt52Right;
508 else if (info.spillFormat() == DataFormatStrictInt52)
510 else if (info.spillFormat() == DataFormatNone)
513 RELEASE_ASSERT_NOT_REACHED();
514 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
515 fillAction = Load64; // Make GCC happy.
// Remaining case: full JSValue formats.
519 ASSERT(registerFormat & DataFormatJS);
521 ASSERT(info.gpr() == source);
522 if (node->hasConstant()) {
523 if (node->isCellConstant())
524 fillAction = SetTrustedJSConstant;
526 fillAction = SetJSConstant;
527 } else if (info.spillFormat() == DataFormatInt32) {
528 ASSERT(registerFormat == DataFormatJSInt32);
529 fillAction = Load32PayloadBoxInt;
// 32-bit: the tag and payload registers are refilled independently.
533 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
534 if (node->hasConstant())
535 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
536 else if (info.payloadGPR() == source)
537 fillAction = Load32Payload;
538 else { // Fill the Tag
539 switch (info.spillFormat()) {
540 case DataFormatInt32:
541 ASSERT(registerFormat == DataFormatJSInt32);
542 fillAction = SetInt32Tag;
545 ASSERT(registerFormat == DataFormatJSCell);
546 fillAction = SetCellTag;
548 case DataFormatBoolean:
549 ASSERT(registerFormat == DataFormatJSBoolean);
550 fillAction = SetBooleanTag;
553 fillAction = Load32Tag;
560 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Builds a SilentRegisterSavePlan for an FPR holding a double: store the
// double if a spill is needed; refill from the constant or reload from the
// stack slot afterwards. NOTE(review): the JSVALUE64/#elif guards and some
// else branches are missing from this truncated listing.
563 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
565 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
566 Node* node = info.node();
567 ASSERT(info.registerFormat() == DataFormatDouble);
569 SilentSpillAction spillAction;
570 SilentFillAction fillAction;
572 if (!info.needsSpill())
573 spillAction = DoNothingForSpill;
575 ASSERT(!node->hasConstant());
576 ASSERT(info.spillFormat() == DataFormatNone);
577 ASSERT(info.fpr() == source);
578 spillAction = StoreDouble;
582 if (node->hasConstant()) {
583 node->asNumber(); // To get the assertion.
584 fillAction = SetDoubleConstant;
586 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
587 fillAction = LoadDouble;
589 #elif USE(JSVALUE32_64)
590 ASSERT(info.registerFormat() == DataFormatDouble);
591 if (node->hasConstant()) {
592 node->asNumber(); // To get the assertion.
593 fillAction = SetDoubleConstant;
595 fillAction = LoadDouble;
598 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Executes a plan's spill action: stores the register (tag/payload half,
// pointer, 64-bit value, or double) to the node's stack slot.
// NOTE(review): case labels for several actions are missing from this
// truncated listing (numbering gaps); bodies below are byte-identical.
601 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
603 switch (plan.spillAction()) {
604 case DoNothingForSpill:
607 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
610 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
613 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
617 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
621 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
624 RELEASE_ASSERT_NOT_REACHED();
// Executes a plan's fill action after a call: either rematerializes a
// constant into the register (possibly via the canTrample scratch GPR) or
// reloads the value from its stack slot, with Int52 shift/sign-extension
// variants. NOTE(review): many case labels, `break`s and the
// JSVALUE64/JSVALUE32_64 guards are missing from this truncated listing;
// the statements below are preserved byte-for-byte.
628 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
630 #if USE(JSVALUE32_64)
631 UNUSED_PARAM(canTrample);
633 switch (plan.fillAction()) {
634 case DoNothingForFill:
636 case SetInt32Constant:
637 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
640 case SetInt52Constant:
641 m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
643 case SetStrictInt52Constant:
644 m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
646 #endif // USE(JSVALUE64)
647 case SetBooleanConstant:
648 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
650 case SetCellConstant:
651 ASSERT(plan.node()->constant()->value().isCell());
652 m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
655 case SetTrustedJSConstant:
656 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
659 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
// 64-bit double constants go through the scratch GPR.
661 case SetDoubleConstant:
662 m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
663 m_jit.move64ToDouble(canTrample, plan.fpr());
665 case Load32PayloadBoxInt:
666 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
667 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
669 case Load32PayloadConvertToInt52:
670 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
671 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
672 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
674 case Load32PayloadSignExtend:
675 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
676 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
// 32-bit tag/payload constant fills.
679 case SetJSConstantTag:
680 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
682 case SetJSConstantPayload:
683 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
686 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
689 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
692 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
694 case SetDoubleConstant:
695 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
// Plain reloads from the stack slot.
699 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
702 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
705 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
709 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
711 case Load64ShiftInt52Right:
712 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
713 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
715 case Load64ShiftInt52Left:
716 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
717 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
721 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
724 RELEASE_ASSERT_NOT_REACHED();
// Given tempGPR holding the cell's indexing type byte, returns a Jump that
// fires when the value does NOT match the wanted indexing shape for the
// given array class (masking in the IsArray bit where the class requires
// or forbids it). tempGPR is clobbered.
728 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
730 switch (arrayMode.arrayClass()) {
731 case Array::OriginalArray: {
733 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
734 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
// Array class: must be an array with the wanted shape.
740 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
741 return m_jit.branch32(
742 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
744 case Array::NonArray:
745 case Array::OriginalNonArray:
746 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
747 return m_jit.branch32(
748 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
750 case Array::PossiblyArray:
751 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
752 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
755 RELEASE_ASSERT_NOT_REACHED();
756 return JITCompiler::Jump();
// JumpList variant: dispatches on the array mode's type. Simple shapes
// forward to the single-shape overload; (SlowPut)ArrayStorage emits the
// IsArray check (when required) plus a range check over
// [ArrayStorageShape, SlowPutArrayStorageShape]. NOTE(review): some
// result.append(... wrappers, case labels and braces are missing from
// this truncated listing.
759 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
761 JITCompiler::JumpList result;
763 switch (arrayMode.type()) {
765 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
768 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
770 case Array::Contiguous:
771 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
773 case Array::Undecided:
774 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
776 case Array::ArrayStorage:
777 case Array::SlowPutArrayStorage: {
778 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
780 if (arrayMode.isJSArray()) {
781 if (arrayMode.isSlowPut()) {
784 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
785 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
786 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
789 MacroAssembler::Above, tempGPR,
790 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
793 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
795 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
798 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
799 if (arrayMode.isSlowPut()) {
800 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
803 MacroAssembler::Above, tempGPR,
804 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
808 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Compiles a CheckArray node: verifies at runtime that child1's cell has
// the array mode the node expects (by indexing type for JSArray shapes,
// by cell type for Direct/ScopedArguments and typed arrays), OSR-exiting
// on mismatch. Skips the check entirely when abstract interpretation
// already proves it. NOTE(review): case labels, `return`s/`break`s and the
// #endif/brace lines are missing from this truncated listing; the
// fall-through to the expectedClassInfo structure check at the bottom
// should be confirmed against the full source.
819 void SpeculativeJIT::checkArray(Node* node)
821 ASSERT(node->arrayMode().isSpecific());
822 ASSERT(!node->arrayMode().doesConversion());
824 SpeculateCellOperand base(this, node->child1());
825 GPRReg baseReg = base.gpr();
// Nothing to check when the abstract state already proves the mode.
827 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
828 noResult(m_currentNode);
832 const ClassInfo* expectedClassInfo = 0;
834 switch (node->arrayMode().type()) {
835 case Array::AnyTypedArray:
837 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
841 case Array::Contiguous:
842 case Array::Undecided:
843 case Array::ArrayStorage:
844 case Array::SlowPutArrayStorage: {
845 GPRTemporary temp(this);
846 GPRReg tempGPR = temp.gpr();
847 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
849 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
850 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
852 noResult(m_currentNode);
855 case Array::DirectArguments:
856 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
857 noResult(m_currentNode);
859 case Array::ScopedArguments:
860 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
861 noResult(m_currentNode);
864 speculateCellTypeWithoutTypeFiltering(
865 node->child1(), baseReg,
866 typeForTypedArrayType(node->arrayMode().typedArrayType()));
867 noResult(m_currentNode);
// Fallback: compare the structure's ClassInfo against the expected one.
871 RELEASE_ASSERT(expectedClassInfo);
873 GPRTemporary temp(this);
874 GPRTemporary temp2(this);
875 m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
877 BadType, JSValueSource::unboxedCell(baseReg), node,
879 MacroAssembler::NotEqual,
880 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
881 TrustedImmPtr(expectedClassInfo)));
883 noResult(m_currentNode);
// Compiles Arrayify/ArrayifyToStructure: fast-path check that the base
// already has the wanted structure (ArrayifyToStructure) or indexing
// shape (Arrayify), with an ArrayifySlowPathGenerator performing the
// actual conversion on mismatch. structureGPR is only allocated for the
// non-ArrayifyToStructure case.
886 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
888 ASSERT(node->arrayMode().doesConversion());
890 GPRTemporary temp(this);
891 GPRTemporary structure;
892 GPRReg tempGPR = temp.gpr();
893 GPRReg structureGPR = InvalidGPRReg;
895 if (node->op() != ArrayifyToStructure) {
896 GPRTemporary realStructure(this);
897 structure.adopt(realStructure);
898 structureGPR = structure.gpr();
901 // We can skip all that comes next if we already have array storage.
902 MacroAssembler::JumpList slowPath;
904 if (node->op() == ArrayifyToStructure) {
905 slowPath.append(m_jit.branchWeakStructure(
906 JITCompiler::NotEqual,
907 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
// NOTE(review): the structure operand, the else branch's load8 wrapper and
// closing braces are missing from this truncated listing.
911 MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
913 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
916 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
917 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
919 noResult(m_currentNode);
// Node-level entry point for arrayify: binds the base cell (and the
// optional child2 index) to registers and forwards to the register-level
// overload; InvalidGPRReg marks "no property register".
922 void SpeculativeJIT::arrayify(Node* node)
924 ASSERT(node->arrayMode().isSpecific());
926 SpeculateCellOperand base(this, node->child1());
928 if (!node->child2()) {
929 arrayify(node, base.gpr(), InvalidGPRReg);
933 SpeculateInt32Operand property(this, node->child2());
935 arrayify(node, base.gpr(), property.gpr());
// Fills the edge's value into a GPR in Storage format: reloads a spilled
// storage pointer, returns the already-filled GPR, or falls back to
// filling the value as a cell. NOTE(review): the return statements for the
// DataFormatNone/DataFormatStorage fast paths and the default-case label
// are missing from this truncated listing.
938 GPRReg SpeculativeJIT::fillStorage(Edge edge)
940 VirtualRegister virtualRegister = edge->virtualRegister();
941 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
943 switch (info.registerFormat()) {
944 case DataFormatNone: {
945 if (info.spillFormat() == DataFormatStorage) {
946 GPRReg gpr = allocate();
947 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
948 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
949 info.fillStorage(*m_stream, gpr);
953 // Must be a cell; fill it as a cell and then return the pointer.
954 return fillSpeculateCell(edge);
957 case DataFormatStorage: {
958 GPRReg gpr = info.gpr();
964 return fillSpeculateCell(edge);
// Marks all of a node's children as used (releasing their registers when
// their use count hits zero): iterates the var-args children when present,
// otherwise walks child1..child3 with early-out asserts.
// NOTE(review): the use(childN)/return lines between the child blocks are
// missing from this truncated listing.
968 void SpeculativeJIT::useChildren(Node* node)
970 if (node->flags() & NodeHasVarArgs) {
971 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
972 if (!!m_jit.graph().m_varArgChildren[childIdx])
973 use(m_jit.graph().m_varArgChildren[childIdx]);
976 Edge child1 = node->child1();
978 ASSERT(!node->child2() && !node->child3());
983 Edge child2 = node->child2();
985 ASSERT(!node->child3());
990 Edge child3 = node->child3();
// Compiles TryGetById via the IC machinery (AccessType::TryGet): the
// CellUse path feeds the base cell directly; the untyped path adds a
// not-cell branch that the IC treats as its slow case. Results are
// produced as a generic JSValue. NOTE(review): case labels (CellUse /
// UntypedUse), base.use()/flushRegisters() lines and braces are missing
// from this truncated listing.
997 void SpeculativeJIT::compileTryGetById(Node* node)
999 switch (node->child1().useKind()) {
1001 SpeculateCellOperand base(this, node->child1());
1002 JSValueRegsTemporary result(this, Reuse, base);
1004 JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
1005 JSValueRegs resultRegs = result.regs();
1009 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1011 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1016 JSValueOperand base(this, node->child1());
1017 JSValueRegsTemporary result(this, Reuse, base);
1019 JSValueRegs baseRegs = base.jsValueRegs();
1020 JSValueRegs resultRegs = result.regs();
1024 JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1026 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1028 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1033 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
// Compiles the `in` operator. When the property is a constant atomic
// string, emits a patchable inline cache (StructureStubInfo +
// operationInOptimize slow path, registered via addIn); otherwise falls
// back to a generic operationGenericIn call. Produces a blessed boolean.
// NOTE(review): several lines are missing from this truncated listing
// (flushRegisters, the #endif of the JSVALUE32_64 patch block, callOperation
// argument lines, braces); statements below are byte-identical.
1038 void SpeculativeJIT::compileIn(Node* node)
1040 SpeculateCellOperand base(this, node->child1());
1041 GPRReg baseGPR = base.gpr();
1043 if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1044 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1045 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1047 GPRTemporary result(this);
1048 GPRReg resultGPR = result.gpr();
1050 use(node->child2());
1052 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1053 MacroAssembler::Label done = m_jit.label();
1055 // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1056 // we can cast it to const AtomicStringImpl* safely.
1057 auto slowPath = slowPathCall(
1058 jump.m_jump, this, operationInOptimize,
1059 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1060 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
// Fill in the stub's patch info so the IC can repatch registers later.
1062 stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1063 stubInfo->codeOrigin = node->origin.semantic;
1064 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1065 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1066 stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1067 #if USE(JSVALUE32_64)
1068 stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1069 stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1070 stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1072 stubInfo->patch.usedRegisters = usedRegisters();
1074 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1075 addSlowPathGenerator(WTFMove(slowPath));
1079 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: call out to operationGenericIn with the key value.
1084 JSValueOperand key(this, node->child2());
1085 JSValueRegs regs = key.jsValueRegs();
1087 GPRFlushedCallResult result(this);
1088 GPRReg resultGPR = result.gpr();
1095 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1097 m_jit.exceptionCheck();
1098 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles "delete base.id" by calling the runtime operationDeleteById;
// the raw (unboxed) boolean success flag lands in resultGPR.
1101 void SpeculativeJIT::compileDeleteById(Node* node)
1103 JSValueOperand value(this, node->child1());
1104 GPRFlushedCallResult result(this);
1106 JSValueRegs valueRegs = value.jsValueRegs();
1107 GPRReg resultGPR = result.gpr();
1112 callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
// The operation can throw (e.g. strict-mode delete failure).
1113 m_jit.exceptionCheck();
1115 unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles "delete base[key]" via the runtime operationDeleteByVal;
// mirrors compileDeleteById but passes the key as a JSValue.
1118 void SpeculativeJIT::compileDeleteByVal(Node* node)
1120 JSValueOperand base(this, node->child1());
1121 JSValueOperand key(this, node->child2());
1122 GPRFlushedCallResult result(this);
1124 JSValueRegs baseRegs = base.jsValueRegs();
1125 JSValueRegs keyRegs = key.jsValueRegs();
1126 GPRReg resultGPR = result.gpr();
1132 callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1133 m_jit.exceptionCheck();
1135 unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles a generic (non-speculative) comparison. If the immediately
// following node is a Branch consuming this compare (peephole detection),
// the compare and branch are fused and the block cursor is advanced past
// the branch; otherwise a plain boolean-producing compare is emitted.
// NOTE(review): the return statements are elided in this excerpt.
1138 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1140 unsigned branchIndexInBlock = detectPeepHoleBranch();
1141 if (branchIndexInBlock != UINT_MAX) {
1142 Node* branchNode = m_block->at(branchIndexInBlock);
// Fusion is only valid when nothing else references the compare result.
1144 ASSERT(node->adjustedRefCount() == 1);
1146 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
// Skip directly to the branch node; it has been consumed by the fusion.
1148 m_indexInBlock = branchIndexInBlock;
1149 m_currentNode = branchNode;
1154 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
// Strict-equality analogue of nonSpeculativeCompare: fuses with a
// following Branch when possible, otherwise emits a boolean result.
// |invert| flips the sense of the comparison.
1159 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1161 unsigned branchIndexInBlock = detectPeepHoleBranch();
1162 if (branchIndexInBlock != UINT_MAX) {
1163 Node* branchNode = m_block->at(branchIndexInBlock);
1165 ASSERT(node->adjustedRefCount() == 1);
1167 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
// Advance past the fused branch node.
1169 m_indexInBlock = branchIndexInBlock;
1170 m_currentNode = branchNode;
1175 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Maps a DataFormat enum value to a short debug string via table lookup.
// NOTE(review): the string table entries are elided in this excerpt.
1180 static const char* dataFormatString(DataFormat format)
1182 // These values correspond to the DataFormat enum.
1183 const char* strings[] = {
1201 return strings[format];
// Debug dump of the allocator state: GPR/FPR banks and the per-virtual-
// register GenerationInfo table, wrapped in <label>...</label> markers.
1204 void SpeculativeJIT::dump(const char* label)
1207 dataLogF("<%s>\n", label);
1209 dataLogF(" gprs:\n");
1211 dataLogF(" fprs:\n");
1213 dataLogF(" VirtualRegisters:\n");
1214 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1215 GenerationInfo& info = m_generationInfo[i];
// Alive entries print their register/spill formats; dead ones print
// a placeholder.
1217 dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()))
;
1219 dataLogF(" % 3d:[__][__]", i);
1220 if (info.registerFormat() == DataFormatDouble)
1221 dataLogF(":fpr%d\n", info.fpr());
1222 else if (info.registerFormat() != DataFormatNone
1223 #if USE(JSVALUE32_64)
// On 32-bit, full JS formats occupy a register pair, handled elsewhere.
1224 && !(info.registerFormat() & DataFormatJS)
1227 ASSERT(info.gpr() != InvalidGPRReg);
1228 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1233 dataLogF("</%s>\n", label);
// GPRTemporary constructors: RAII wrappers around the speculative JIT's
// GPR allocator. Default-constructed instances hold no register.
1236 GPRTemporary::GPRTemporary()
1238 , m_gpr(InvalidGPRReg)
// Allocate any free GPR.
1242 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1244 , m_gpr(InvalidGPRReg)
1246 m_gpr = m_jit->allocate();
// Allocate a specific GPR (e.g. one required by a calling convention).
1249 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1251 , m_gpr(InvalidGPRReg)
1253 m_gpr = m_jit->allocate(specific);
1256 #if USE(JSVALUE32_64)
// 32-bit: reuse one half (tag or payload word) of a JSValueOperand when
// the operand is not a double and its node's register can be reused;
// otherwise fall back to a fresh allocation.
1257 GPRTemporary::GPRTemporary(
1258 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1260 , m_gpr(InvalidGPRReg)
1262 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1263 m_gpr = m_jit->reuse(op1.gpr(which));
1265 m_gpr = m_jit->allocate();
1267 #endif // USE(JSVALUE32_64)
// JSValueRegsTemporary: owns the register(s) needed to hold one JSValue —
// a single GPR on 64-bit, a tag/payload GPR pair on 32-bit. The elided
// preprocessor lines in this excerpt select between the two layouts.
1269 JSValueRegsTemporary::JSValueRegsTemporary() { }
1271 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
// 64-bit reuse variant: the result word choice is irrelevant since one
// GPR holds the whole value.
1282 template<typename T>
1283 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1284 : m_gpr(jit, Reuse, operand)
// 32-bit reuse variant: reuse the operand's register for whichever word
// (payload or tag) the caller asked for, allocate the other fresh.
1288 template<typename T>
1289 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1291 if (resultWord == PayloadWord) {
1292 m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1293 m_tagGPR = GPRTemporary(jit);
1295 m_payloadGPR = GPRTemporary(jit);
1296 m_tagGPR = GPRTemporary(jit, Reuse, operand);
// JSValueOperand reuse: 64-bit takes the operand's single GPR...
1302 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1304 m_gpr = GPRTemporary(jit, Reuse, operand);
// ...32-bit reuses both words only if the operand's node allows reuse,
// otherwise allocates a fresh pair.
1307 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1309 if (jit->canReuse(operand.node())) {
1310 m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1311 m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1313 m_payloadGPR = GPRTemporary(jit);
1314 m_tagGPR = GPRTemporary(jit);
1319 JSValueRegsTemporary::~JSValueRegsTemporary() { }
// Expose the held register(s) as a JSValueRegs view.
1321 JSValueRegs JSValueRegsTemporary::regs()
1324 return JSValueRegs(m_gpr.gpr());
1326 return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
// Transfers register ownership from |other| into this (previously empty)
// temporary; |other| is left holding no register.
1330 void GPRTemporary::adopt(GPRTemporary& other)
1333 ASSERT(m_gpr == InvalidGPRReg);
1334 ASSERT(other.m_jit);
1335 ASSERT(other.m_gpr != InvalidGPRReg);
1336 m_jit = other.m_jit;
1337 m_gpr = other.m_gpr;
1339 other.m_gpr = InvalidGPRReg;
// FPRTemporary: RAII wrapper around the FPR allocator, with move support
// and reuse-from-operand constructors that avoid fresh allocations when
// an operand's register can be taken over.
1342 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1344 ASSERT(other.m_jit);
1345 ASSERT(other.m_fpr != InvalidFPRReg);
1346 m_jit = other.m_jit;
1347 m_fpr = other.m_fpr;
// Null out the source so its destructor does not release the register.
1349 other.m_jit = nullptr;
1352 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1354 , m_fpr(InvalidFPRReg)
1356 m_fpr = m_jit->fprAllocate();
// Reuse op1's FPR when its node permits; otherwise allocate fresh.
1359 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1361 , m_fpr(InvalidFPRReg)
1363 if (m_jit->canReuse(op1.node()))
1364 m_fpr = m_jit->reuse(op1.fpr());
1366 m_fpr = m_jit->fprAllocate();
// Two-operand variant: prefer reusing either operand; the third case
// covers both operands being the same node in the same register.
1369 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1371 , m_fpr(InvalidFPRReg)
1373 if (m_jit->canReuse(op1.node()))
1374 m_fpr = m_jit->reuse(op1.fpr());
1375 else if (m_jit->canReuse(op2.node()))
1376 m_fpr = m_jit->reuse(op2.fpr());
1377 else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1378 m_fpr = m_jit->reuse(op1.fpr());
1380 m_fpr = m_jit->fprAllocate();
1383 #if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand may hold an unboxed double in an FPR.
1384 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1386 , m_fpr(InvalidFPRReg)
1388 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1389 m_fpr = m_jit->reuse(op1.fpr());
1391 m_fpr = m_jit->fprAllocate();
// Fused double compare+branch. If the taken block is the fall-through
// successor, the condition is inverted and targets swapped so the
// emitted branch jumps only on the (new) taken edge.
1395 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1397 BasicBlock* taken = branchNode->branchData()->taken.block;
1398 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1400 if (taken == nextBlock()) {
1401 condition = MacroAssembler::invert(condition);
1402 std::swap(taken, notTaken);
1405 SpeculateDoubleOperand op1(this, node->child1());
1406 SpeculateDoubleOperand op2(this, node->child2());
1408 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-identity compare+branch. Both children are speculated to
// be objects; equality is then a raw pointer comparison.
1412 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1414 BasicBlock* taken = branchNode->branchData()->taken.block;
1415 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1417 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// Invert + swap when the taken block is the fall-through successor.
1419 if (taken == nextBlock()) {
1420 condition = MacroAssembler::NotEqual;
1421 BasicBlock* tmp = taken;
1426 SpeculateCellOperand op1(this, node->child1());
1427 SpeculateCellOperand op2(this, node->child2());
1429 GPRReg op1GPR = op1.gpr();
1430 GPRReg op2GPR = op2.gpr();
// Fast case: the masquerades-as-undefined watchpoint is intact, so an
// is-object check per operand suffices (only when abstract state says
// the operand might not be an object).
1432 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1433 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1435 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1437 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1439 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
// Slow case: also speculate that neither operand has the
// MasqueradesAsUndefined type-info flag set.
1442 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1444 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1445 m_jit.branchIfNotObject(op1GPR));
1447 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1449 MacroAssembler::NonZero,
1450 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1451 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1453 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1455 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1456 m_jit.branchIfNotObject(op2GPR));
1458 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1460 MacroAssembler::NonZero,
1461 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1462 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Identity compare on the cell pointers decides the branch.
1465 branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean compare+branch, with constant folding of an int32
// constant on either side into an immediate operand.
1469 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1471 BasicBlock* taken = branchNode->branchData()->taken.block;
1472 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1474 // The branch instruction will branch to the taken block.
1475 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1476 if (taken == nextBlock()) {
1477 condition = JITCompiler::invert(condition);
1478 BasicBlock* tmp = taken;
1483 if (node->child1()->isInt32Constant()) {
1484 int32_t imm = node->child1()->asInt32();
1485 SpeculateBooleanOperand op2(this, node->child2());
1486 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1487 } else if (node->child2()->isInt32Constant()) {
1488 SpeculateBooleanOperand op1(this, node->child1());
1489 int32_t imm = node->child2()->asInt32();
1490 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
// Neither side constant: compare the two boolean registers directly.
1492 SpeculateBooleanOperand op1(this, node->child1());
1493 SpeculateBooleanOperand op2(this, node->child2());
1494 branch32(condition, op1.gpr(), op2.gpr(), taken);
// Compiles ToLowerCase. Fast path: scan a resolved, 8-bit, ASCII-only
// string; if no character needs lowercasing, the input string itself is
// the result. Anything else (rope, 16-bit, non-ASCII, or an uppercase
// A–Z found at some index) falls to operationToLowerCase, which receives
// the index reached so far.
1500 void SpeculativeJIT::compileToLowerCase(Node* node)
1502 ASSERT(node->op() == ToLowerCase);
1503 SpeculateCellOperand string(this, node->child1());
1504 GPRTemporary temp(this);
1505 GPRTemporary index(this);
1506 GPRTemporary charReg(this);
1507 GPRTemporary length(this);
1509 GPRReg stringGPR = string.gpr();
1510 GPRReg tempGPR = temp.gpr();
1511 GPRReg indexGPR = index.gpr();
1512 GPRReg charGPR = charReg.gpr();
1513 GPRReg lengthGPR = length.gpr();
1515 speculateString(node->child1(), stringGPR);
1517 CCallHelpers::JumpList slowPath;
1519 m_jit.move(TrustedImmPtr(0), indexGPR);
// A null value impl means the string is an unresolved rope: slow path.
1521 m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1522 slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
// Only 8-bit (Latin-1) backing stores are handled inline.
1524 slowPath.append(m_jit.branchTest32(
1525 MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1526 MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1527 m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1528 m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
// Per-character loop: bail on non-ASCII (>0x7F) or on uppercase A–Z
// (after sub32('A'), values 0..'Z'-'A' are exactly the uppercase range).
1530 auto loopStart = m_jit.label();
1531 auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1532 m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1533 slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1534 m_jit.sub32(TrustedImm32('A'), charGPR);
1535 slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1537 m_jit.add32(TrustedImm32(1), indexGPR);
1538 m_jit.jump().linkTo(loopStart, &m_jit);
// Slow path: call out with the current index; result comes back in
// lengthGPR (reused as the result register).
1540 slowPath.link(&m_jit);
1541 silentSpillAllRegisters(lengthGPR);
1542 callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1543 silentFillAllRegisters(lengthGPR);
1544 m_jit.exceptionCheck();
1545 auto done = m_jit.jump();
// Fast-path success: the original string is already lowercase.
1547 loopDone.link(&m_jit);
1548 m_jit.move(stringGPR, lengthGPR);
1551 cellResult(lengthGPR, node);
// Fused int32 compare+branch; structurally identical to the boolean
// variant but speculates Int32 on the operands.
1554 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1556 BasicBlock* taken = branchNode->branchData()->taken.block;
1557 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1559 // The branch instruction will branch to the taken block.
1560 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1561 if (taken == nextBlock()) {
1562 condition = JITCompiler::invert(condition);
1563 BasicBlock* tmp = taken;
// Fold an int32 constant on either side into an immediate.
1568 if (node->child1()->isInt32Constant()) {
1569 int32_t imm = node->child1()->asInt32();
1570 SpeculateInt32Operand op2(this, node->child2());
1571 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1572 } else if (node->child2()->isInt32Constant()) {
1573 SpeculateInt32Operand op1(this, node->child1());
1574 int32_t imm = node->child2()->asInt32();
1575 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1577 SpeculateInt32Operand op1(this, node->child1());
1578 SpeculateInt32Operand op2(this, node->child2());
1579 branch32(condition, op1.gpr(), op2.gpr(), taken);
1585 // Returns true if the compare is fused with a subsequent branch.
// Dispatcher for compare+branch fusion: picks the specialized peephole
// routine based on the compare's binary use kinds (Int32, Int52 on
// 64-bit, Double, and for CompareEq also Boolean/Symbol/Object and the
// null-or-undefined forms), falling back to the generic non-speculative
// peephole branch otherwise.
1586 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1588 // Fused compare & branch.
1589 unsigned branchIndexInBlock = detectPeepHoleBranch();
1590 if (branchIndexInBlock != UINT_MAX) {
1591 Node* branchNode = m_block->at(branchIndexInBlock);
1593 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1594 // so can be no intervening nodes to also reference the compare.
1595 ASSERT(node->adjustedRefCount() == 1);
1597 if (node->isBinaryUseKind(Int32Use))
1598 compilePeepHoleInt32Branch(node, branchNode, condition);
1600 else if (node->isBinaryUseKind(Int52RepUse))
1601 compilePeepHoleInt52Branch(node, branchNode, condition);
1602 #endif // USE(JSVALUE64)
1603 else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1604 // Use non-peephole comparison, for now.
1606 } else if (node->isBinaryUseKind(DoubleRepUse))
1607 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1608 else if (node->op() == CompareEq) {
1609 if (node->isBinaryUseKind(BooleanUse))
1610 compilePeepHoleBooleanBranch(node, branchNode, condition);
1611 else if (node->isBinaryUseKind(SymbolUse))
1612 compilePeepHoleSymbolEquality(node, branchNode);
1613 else if (node->isBinaryUseKind(ObjectUse))
1614 compilePeepHoleObjectEquality(node, branchNode);
// Mixed object/object-or-other: the object operand goes first.
1615 else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1616 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1617 else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1618 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
// If one side is provably null/undefined, test only the other side.
1619 else if (!needsTypeCheck(node->child1(), SpecOther))
1620 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1621 else if (!needsTypeCheck(node->child2(), SpecOther))
1622 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1624 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1628 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// Children are consumed here, and the cursor advances past the fused
// branch node.
1632 use(node->child1());
1633 use(node->child2());
1634 m_indexInBlock = branchIndexInBlock;
1635 m_currentNode = branchNode;
// Records in the variable-event stream that |node|'s value became live
// for OSR purposes; nodes without a virtual register are skipped.
1641 void SpeculativeJIT::noticeOSRBirth(Node* node)
1643 if (!node->hasVirtualRegister())
1646 VirtualRegister virtualRegister = node->virtualRegister();
1647 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1649 info.noticeOSRBirth(*m_stream, node, virtualRegister);
// Emits a MovHint variable event mapping child1's value to the node's
// unlinked local, after noting the child's OSR birth.
1652 void SpeculativeJIT::compileMovHint(Node* node)
1654 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1656 Node* child = node->child1().node();
1657 noticeOSRBirth(child);
1659 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Abandons code generation for the current block: plants an abort with
// the given reason and clears per-node generation state. m_compileOkay
// is reset to true so compilation of subsequent blocks can proceed.
1662 void SpeculativeJIT::bail(AbortReason reason)
1664 if (verboseCompilationEnabled())
1665 dataLog("Bailing compilation.\n");
1666 m_compileOkay = true;
1667 m_jit.abortWithReason(reason, m_lastGeneratedNode);
1668 clearGenerationInfo();
// Generates machine code for m_block: emits the block head label, seeds
// the variable-event stream from variablesAtHead, then walks each node —
// running the abstract interpreter alongside code generation and bailing
// if either the abstract state or codegen becomes invalid.
1671 void SpeculativeJIT::compileCurrentBlock()
1673 ASSERT(m_compileOkay);
1678 ASSERT(m_block->isReachable);
1680 m_jit.blockHeads()[m_block->index] = m_jit.label();
1682 if (!m_block->intersectionOfCFAHasVisited) {
1683 // Don't generate code for basic blocks that are unreachable according to CFA.
1684 // But to be sure that nobody has generated a jump to this block, drop in a
1686 m_jit.abortWithReason(DFGUnreachableBasicBlock);
1690 m_stream->appendAndLog(VariableEvent::reset());
// Sanity checks on frame/tag invariants at block entry (debug builds).
1692 m_jit.jitAssertHasValidCallFrame();
1693 m_jit.jitAssertTagsInPlace();
1694 m_jit.jitAssertArgumentCountSane();
1697 m_state.beginBasicBlock(m_block);
// Record a SetLocal event for each live variable at block head so the
// OSR machinery knows where values live.
1699 for (size_t i = m_block->variablesAtHead.size(); i--;) {
1700 int operand = m_block->variablesAtHead.operandForIndex(i);
1701 Node* node = m_block->variablesAtHead[i];
1703 continue; // No need to record dead SetLocal's.
1705 VariableAccessData* variable = node->variableAccessData();
1707 if (!node->refCount())
1708 continue; // No need to record dead SetLocal's.
1709 format = dataFormatFor(variable->flushFormat());
1710 m_stream->appendAndLog(
1711 VariableEvent::setLocal(
1712 VirtualRegister(operand),
1713 variable->machineLocal(),
1717 m_origin = NodeOrigin();
// Main per-node generation loop.
1719 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1720 m_currentNode = m_block->at(m_indexInBlock);
1722 // We may have hit a contradiction that the CFA was aware of but that the JIT
1723 // didn't cause directly.
1724 if (!m_state.isValid()) {
1725 bail(DFGBailedAtTopOfBlock);
1729 m_interpreter.startExecuting();
1730 m_interpreter.executeKnownEdgeTypes(m_currentNode);
1731 m_jit.setForNode(m_currentNode);
1732 m_origin = m_currentNode->origin;
// Under validation, exitOK is tightened to nodes that may actually exit.
1733 if (validationEnabled())
1734 m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1735 m_lastGeneratedNode = m_currentNode->op();
1737 ASSERT(m_currentNode->shouldGenerate());
1739 if (verboseCompilationEnabled()) {
1741 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1742 (int)m_currentNode->index(),
1743 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1747 if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1748 m_jit.jitReleaseAssertNoException();
1750 m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1752 compile(m_currentNode);
1754 if (belongsInMinifiedGraph(m_currentNode->op()))
1755 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1757 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1758 m_jit.clearRegisterAllocationOffsets();
1761 if (!m_compileOkay) {
1762 bail(DFGBailedAtEndOfNode);
1766 // Make sure that the abstract state is rematerialized for the next node.
1767 m_interpreter.executeEffects(m_indexInBlock);
1770 // Perform the most basic verification that children have been used correctly.
1771 if (!ASSERT_DISABLED) {
1772 for (auto& info : m_generationInfo)
1773 RELEASE_ASSERT(!info.alive());
1777 // If we are making type predictions about our arguments then
1778 // we need to check that they are correct on function entry.
// Emits speculation checks on each argument slot according to its flush
// format. The two case blocks below are the 64-bit (tag bits in the
// value) and 32-bit (separate tag word) encodings; the #if lines
// separating them are elided in this excerpt.
1779 void SpeculativeJIT::checkArgumentTypes()
1781 ASSERT(!m_currentNode);
1782 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1784 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1785 Node* node = m_jit.graph().m_arguments[i];
1787 // The argument is dead. We don't do any checks for such arguments.
1791 ASSERT(node->op() == SetArgument);
1792 ASSERT(node->shouldGenerate());
1794 VariableAccessData* variableAccessData = node->variableAccessData();
1795 FlushFormat format = variableAccessData->flushFormat();
// FlushedJSValue needs no check: any JSValue is acceptable.
1797 if (format == FlushedJSValue)
1800 VirtualRegister virtualRegister = variableAccessData->local();
1802 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// 64-bit checks: int32 means >= tagTypeNumber; boolean means the value
// xored with ValueFalse has only its low bit potentially set; cell
// means no tag-mask bits set.
1806 case FlushedInt32: {
1807 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1810 case FlushedBoolean: {
1811 GPRTemporary temp(this);
1812 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1813 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1814 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1818 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1822 RELEASE_ASSERT_NOT_REACHED();
// 32-bit checks: compare the tag word against the expected tag.
1827 case FlushedInt32: {
1828 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1831 case FlushedBoolean: {
1832 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1836 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1840 RELEASE_ASSERT_NOT_REACHED();
1846 m_origin = NodeOrigin();
// Top-level driver: checks argument types once, then compiles every
// basic block of the graph in index order.
1849 bool SpeculativeJIT::compile()
1851 checkArgumentTypes();
1853 ASSERT(!m_currentNode);
1854 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1855 m_jit.setForBlockIndex(blockIndex);
1856 m_block = m_jit.graph().block(blockIndex);
1857 compileCurrentBlock();
// Collects the head label of every OSR-target block into m_osrEntryHeads,
// in block-index order (consumed later by linkOSREntries).
1863 void SpeculativeJIT::createOSREntries()
1865 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1866 BasicBlock* block = m_jit.graph().block(blockIndex);
1869 if (!block->isOSRTarget)
1872 // Currently we don't have OSR entry trampolines. We could add them
1874 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// Finalizes OSR entry points: walks OSR-target blocks in the same order
// createOSREntries used, pairing each with its recorded head label, and
// optionally dumps the resulting entry data.
1878 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1880 unsigned osrEntryIndex = 0;
1881 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1882 BasicBlock* block = m_jit.graph().block(blockIndex);
1885 if (!block->isOSRTarget)
1887 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
// Every collected head must have been consumed.
1889 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1891 if (verboseCompilationEnabled()) {
1892 DumpContext dumpContext;
1893 dataLog("OSR Entries:\n");
1894 for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1895 dataLog(" ", inContext(entryData, &dumpContext), "\n");
1896 if (!dumpContext.isEmpty())
1897 dumpContext.dump(WTF::dataFile());
// Emits a polling-traps check: tests the VM's needTrapHandling flag and
// jumps to a slow-path call of operationHandleTraps when it is set.
1901 void SpeculativeJIT::compileCheckTraps(Node*)
1903 ASSERT(Options::usePollingTraps());
// The temporary only reserves a scratch/result register for the slow path.
1904 GPRTemporary unused(this);
1905 GPRReg unusedGPR = unused.gpr();
1907 JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
1908 JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
1910 addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
// Stores a double into a contiguous-double butterfly. Speculates the
// value is a full real number (no NaN/impure), handles in-bounds vs.
// length-growing stores, and routes out-of-bounds stores to the
// appropriate strict/non-strict runtime operation.
1913 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1915 Edge child3 = m_jit.graph().varArgChild(node, 2);
1916 Edge child4 = m_jit.graph().varArgChild(node, 3);
1918 ArrayMode arrayMode = node->arrayMode();
1920 GPRReg baseReg = base.gpr();
1921 GPRReg propertyReg = property.gpr();
1923 SpeculateDoubleOperand value(this, child3);
1925 FPRReg valueReg = value.fpr();
// NaN check: DoubleNotEqualOrUnordered against itself fails the
// SpecFullRealNumber speculation.
1928 JSValueRegs(), child3, SpecFullRealNumber,
1930 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1935 StorageOperand storage(this, child4);
1936 GPRReg storageReg = storage.gpr();
// PutByValAlias: bounds already proven, store directly.
1938 if (node->op() == PutByValAlias) {
1939 // Store the value to the array.
1940 GPRReg propertyReg = property.gpr();
1941 FPRReg valueReg = value.fpr();
1942 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1944 noResult(m_currentNode);
1948 GPRTemporary temporary;
1949 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1951 MacroAssembler::Jump slowCase;
// In-bounds mode exits on any index >= publicLength; otherwise the
// store may grow publicLength up to vectorLength, with indices beyond
// vectorLength going to the slow case.
1953 if (arrayMode.isInBounds()) {
1955 OutOfBounds, JSValueRegs(), 0,
1956 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1958 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1960 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1962 if (!arrayMode.isOutOfBounds())
1963 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Grow publicLength to index + 1.
1965 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1966 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1968 inBounds.link(&m_jit);
1971 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
// Out-of-bounds stores call the strict or non-strict runtime helper.
1978 if (arrayMode.isOutOfBounds()) {
1979 addSlowPathGenerator(
1982 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1983 NoResult, baseReg, propertyReg, valueReg));
1986 noResult(m_currentNode, UseChildrenCalledExplicitly);
// Compiles String.charCodeAt for an already-resolved string: bounds-
// checks the index, then loads one byte (8-bit store) or one 16-bit
// unit into the result register.
1989 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1991 SpeculateCellOperand string(this, node->child1());
1992 SpeculateStrictInt32Operand index(this, node->child2());
1993 StorageOperand storage(this, node->child3());
1995 GPRReg stringReg = string.gpr();
1996 GPRReg indexReg = index.gpr();
1997 GPRReg storageReg = storage.gpr();
1999 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2001 // unsigned comparison so we can filter out negative indices and indices that are too large
2002 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2004 GPRTemporary scratch(this);
2005 GPRReg scratchReg = scratch.gpr();
// scratchReg first holds the StringImpl* (for the is-8-bit flag test),
// then is overwritten with the character code.
2007 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2009 // Load the character into scratchReg
2010 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2012 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2013 JITCompiler::Jump cont8Bit = m_jit.jump();
2015 is16Bit.link(&m_jit);
2017 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2019 cont8Bit.link(&m_jit);
2021 int32Result(scratchReg, m_currentNode);
// Compiles string[index]: loads the character, maps codes < 0x100 to the
// VM's preallocated single-character strings, and handles big characters
// and out-of-bounds access via slow paths. When the string prototype
// chain is sane, out-of-bounds reads can return undefined directly via
// SaneStringGetByValSlowPathGenerator instead of a generic lookup.
// The elided #if/#else lines select 64-bit vs. 32-bit result plumbing.
2024 void SpeculativeJIT::compileGetByValOnString(Node* node)
2026 SpeculateCellOperand base(this, node->child1());
2027 SpeculateStrictInt32Operand property(this, node->child2());
2028 StorageOperand storage(this, node->child3());
2029 GPRReg baseReg = base.gpr();
2030 GPRReg propertyReg = property.gpr();
2031 GPRReg storageReg = storage.gpr();
2033 GPRTemporary scratch(this);
2034 GPRReg scratchReg = scratch.gpr();
2035 #if USE(JSVALUE32_64)
// 32-bit needs a tag register, but only when out-of-bounds results
// (which may be non-cell) are possible.
2036 GPRTemporary resultTag;
2037 GPRReg resultTagReg = InvalidGPRReg;
2038 if (node->arrayMode().isOutOfBounds()) {
2039 GPRTemporary realResultTag(this);
2040 resultTag.adopt(realResultTag);
2041 resultTagReg = resultTag.gpr();
2045 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2047 // unsigned comparison so we can filter out negative indices and indices that are too large
2048 JITCompiler::Jump outOfBounds = m_jit.branch32(
2049 MacroAssembler::AboveOrEqual, propertyReg,
2050 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2051 if (node->arrayMode().isInBounds())
2052 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2054 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2056 // Load the character into scratchReg
2057 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2059 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2060 JITCompiler::Jump cont8Bit = m_jit.jump();
2062 is16Bit.link(&m_jit);
2064 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// 16-bit characters >= 0x100 cannot use the single-character table.
2066 JITCompiler::Jump bigCharacter =
2067 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2069 // 8 bit string values don't need the isASCII check.
2070 cont8Bit.link(&m_jit);
// Index into vm->smallStrings.singleCharacterStrings(): shift by
// pointer size (2 on 32-bit, 3 on 64-bit), add the table base, load.
2072 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2073 m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2074 m_jit.loadPtr(scratchReg, scratchReg);
2076 addSlowPathGenerator(
2078 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2080 if (node->arrayMode().isOutOfBounds()) {
2081 #if USE(JSVALUE32_64)
2082 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2085 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2086 bool prototypeChainIsSane = false;
2087 if (globalObject->stringPrototypeChainIsSane()) {
2088 // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2089 // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2090 // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2091 // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2092 // indexed properties either.
2093 // https://bugs.webkit.org/show_bug.cgi?id=144668
2094 m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2095 m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2096 prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
// Re-check after registering watchpoints: only then is sanity durable.
2098 if (prototypeChainIsSane) {
2099 m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2100 m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2103 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2104 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2106 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2107 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2108 baseReg, propertyReg));
// Insane chain: full generic out-of-bounds lookup.
2112 addSlowPathGenerator(
2114 outOfBounds, this, operationGetByValStringInt,
2115 scratchReg, baseReg, propertyReg));
2117 addSlowPathGenerator(
2119 outOfBounds, this, operationGetByValStringInt,
2120 JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2125 jsValueResult(scratchReg, m_currentNode);
2127 jsValueResult(resultTagReg, scratchReg, m_currentNode);
// In-bounds mode: result is always a cell (a JSString).
2130 cellResult(scratchReg, m_currentNode);
// Compiles String.fromCharCode(x) for a single argument.
// Fast path (Int32Use): chars <= 0xff are looked up in the VM's
// pre-allocated single-character string table; everything else (or a
// missing table entry) goes to the operationStringFromCharCode slow path.
// NOTE(review): the extraction this was taken from has dropped lines
// (braces and, by the duplicated resultRegs declarations, an
// #if USE(JSVALUE64)/#else pair) — confirm structure against upstream.
2133 void SpeculativeJIT::compileFromCharCode(Node* node)
2135 Edge& child = node->child1();
2136 if (child.useKind() == UntypedUse) {
// Untyped argument: no speculation possible, so call straight out to the
// generic C++ operation and check for an exception afterwards.
2137 JSValueOperand opr(this, child);
2138 JSValueRegs oprRegs = opr.jsValueRegs();
// 64-bit: one GPR holds the whole JSValue result.
2140 GPRTemporary result(this);
2141 JSValueRegs resultRegs = JSValueRegs(result.gpr());
// 32-bit: tag and payload live in separate GPRs.
2143 GPRTemporary resultTag(this);
2144 GPRTemporary resultPayload(this);
2145 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2148 callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2149 m_jit.exceptionCheck();
2151 jsValueResult(resultRegs, node);
// Int32 speculation: char codes 0..0xfe can hit the small-strings cache.
2155 SpeculateStrictInt32Operand property(this, child);
2156 GPRReg propertyReg = property.gpr();
2157 GPRTemporary smallStrings(this);
2158 GPRTemporary scratch(this);
2159 GPRReg scratchReg = scratch.gpr();
2160 GPRReg smallStringsReg = smallStrings.gpr();
2162 JITCompiler::JumpList slowCases;
// Codes >= 0xff fall through to the slow path.
2163 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2164 m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2165 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null table entry means the cached string wasn't materialized yet.
2167 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2168 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2169 cellResult(scratchReg, m_currentNode);
// Classifies the current register format of a node's value so the ToInt32
// compiler can pick a code path: already an integer, a boxed JSValue that
// needs conversion, or a type that makes the conversion a guaranteed
// speculation failure (boolean/cell formats terminate speculative execution).
2172 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2174 VirtualRegister virtualRegister = node->virtualRegister();
2175 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2177 switch (info.registerFormat()) {
2178 case DataFormatStorage:
// Storage pointers are never operands of ToInt32.
2179 RELEASE_ASSERT_NOT_REACHED();
2181 case DataFormatBoolean:
2182 case DataFormatCell:
// These formats can never convert; kill this speculative path.
2183 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2184 return GeneratedOperandTypeUnknown;
2186 case DataFormatNone:
2187 case DataFormatJSCell:
2189 case DataFormatJSBoolean:
2190 case DataFormatJSDouble:
// Boxed (or not-yet-generated) values: caller must unbox/convert.
2191 return GeneratedOperandJSValue;
2193 case DataFormatJSInt32:
2194 case DataFormatInt32:
// Already an int32; caller can just reuse it.
2195 return GeneratedOperandInteger;
2198 RELEASE_ASSERT_NOT_REACHED();
2199 return GeneratedOperandTypeUnknown;
// Compiles ValueToInt32: converts the child value to an int32 following the
// ECMAScript ToInt32 semantics for the speculated type of the child.
// NOTE(review): the extraction has dropped structural lines here —
// #if USE(JSVALUE64)/#else/#endif guards (the branch64/tagTypeNumberRegister
// sequence is 64-bit, the tagGPR/Int32Tag sequence is 32-bit), case labels,
// and braces. Confirm against upstream before relying on exact structure.
2203 void SpeculativeJIT::compileValueToInt32(Node* node)
2205 switch (node->child1().useKind()) {
// Int52: truncation is just a zero-extend of the low 32 bits.
2208 SpeculateStrictInt52Operand op1(this, node->child1());
2209 GPRTemporary result(this, Reuse, op1);
2210 GPRReg op1GPR = op1.gpr();
2211 GPRReg resultGPR = result.gpr();
2212 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2213 int32Result(resultGPR, node, DataFormatInt32);
2216 #endif // USE(JSVALUE64)
2218 case DoubleRepUse: {
// Double: try a hardware truncation; fall back to the C helper when the
// truncation fails (out of int32 range, NaN, etc.).
2219 GPRTemporary result(this);
2220 SpeculateDoubleOperand op1(this, node->child1());
2221 FPRReg fpr = op1.fpr();
2222 GPRReg gpr = result.gpr();
2223 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
// ToInt32 on a number cannot throw, hence CheckNotNeeded.
2225 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2226 hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2228 int32Result(gpr, node);
// Boxed-value cases: dispatch on how the value is currently materialized.
2234 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2235 case GeneratedOperandInteger: {
2236 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2237 GPRTemporary result(this, Reuse, op1);
2238 m_jit.move(op1.gpr(), result.gpr());
2239 int32Result(result.gpr(), node, op1.format());
2242 case GeneratedOperandJSValue: {
2243 GPRTemporary result(this);
// --- 64-bit boxed-value path (uses tagTypeNumberRegister encoding) ---
2245 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2247 GPRReg gpr = op1.gpr();
2248 GPRReg resultGpr = result.gpr();
2249 FPRTemporary tempFpr(this);
2250 FPRReg fpr = tempFpr.fpr();
// Values >= tagTypeNumber are boxed int32s in the JSVALUE64 encoding.
2252 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2253 JITCompiler::JumpList converted;
2255 if (node->child1().useKind() == NumberUse) {
2257 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2259 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2261 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2264 JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2266 // It's not a cell: so true turns into 1 and all else turns into 0.
2267 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2268 converted.append(m_jit.jump());
2270 isNumber.link(&m_jit);
2273 // First, if we get here we have a double encoded as a JSValue
2274 unboxDouble(gpr, resultGpr, fpr);
// Full ToInt32 on a double needs the C helper.
2276 silentSpillAllRegisters(resultGpr);
2277 callOperation(operationToInt32, resultGpr, fpr);
2278 silentFillAllRegisters(resultGpr);
2280 converted.append(m_jit.jump());
2282 isInteger.link(&m_jit);
2283 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2285 converted.link(&m_jit);
// --- 32-bit boxed-value path (separate tag/payload registers) ---
2287 Node* childNode = node->child1().node();
2288 VirtualRegister virtualRegister = childNode->virtualRegister();
2289 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2291 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2293 GPRReg payloadGPR = op1.payloadGPR();
2294 GPRReg resultGpr = result.gpr();
2296 JITCompiler::JumpList converted;
2298 if (info.registerFormat() == DataFormatJSInt32)
2299 m_jit.move(payloadGPR, resultGpr);
2301 GPRReg tagGPR = op1.tagGPR();
2302 FPRTemporary tempFpr(this);
2303 FPRReg fpr = tempFpr.fpr();
2304 FPRTemporary scratch(this);
2306 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2308 if (node->child1().useKind() == NumberUse) {
2310 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2312 MacroAssembler::AboveOrEqual, tagGPR,
2313 TrustedImm32(JSValue::LowestTag)));
2315 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2318 op1.jsValueRegs(), node->child1(), ~SpecCell,
2319 m_jit.branchIfCell(op1.jsValueRegs()));
2321 // It's not a cell: so true turns into 1 and all else turns into 0.
2322 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
// Undefined/null convert to 0.
2323 m_jit.move(TrustedImm32(0), resultGpr);
2324 converted.append(m_jit.jump());
2326 isBoolean.link(&m_jit);
// Boolean payload is already 0 or 1.
2327 m_jit.move(payloadGPR, resultGpr);
2328 converted.append(m_jit.jump());
2330 isNumber.link(&m_jit);
2333 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2335 silentSpillAllRegisters(resultGpr);
2336 callOperation(operationToInt32, resultGpr, fpr);
2337 silentFillAllRegisters(resultGpr);
2339 converted.append(m_jit.jump());
2341 isInteger.link(&m_jit);
2342 m_jit.move(payloadGPR, resultGpr);
2344 converted.link(&m_jit);
2347 int32Result(resultGpr, node);
2350 case GeneratedOperandTypeUnknown:
// Only reachable after terminateSpeculativeExecution() above.
2351 RELEASE_ASSERT(!m_compileOkay);
2354 RELEASE_ASSERT_NOT_REACHED();
2359 ASSERT(!m_compileOkay);
// Compiles UInt32ToNumber: reinterprets an int32 register as an unsigned
// 32-bit value. If the arith mode tolerates "overflow" (the value having
// its sign bit set), produce an Int52 or a double; otherwise speculate that
// the value is non-negative and keep it as int32.
// NOTE(review): braces/#if lines are missing from this extract; the
// enableInt52() arm is presumably JSVALUE64-only — confirm upstream.
2364 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2366 if (doesOverflow(node->arithMode())) {
2367 if (enableInt52()) {
// Int52 can represent all uint32 values exactly: just zero-extend.
2368 SpeculateInt32Operand op1(this, node->child1());
2369 GPRTemporary result(this, Reuse, op1);
2370 m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2371 strictInt52Result(result.gpr(), node);
// No Int52: convert to double and fix up negative inputs by adding 2^32.
2374 SpeculateInt32Operand op1(this, node->child1());
2375 FPRTemporary result(this);
2377 GPRReg inputGPR = op1.gpr();
2378 FPRReg outputFPR = result.fpr();
2380 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
// convertInt32ToDouble treated the bits as signed; a negative input means
// the true unsigned value is (signed value) + 2^32.
2382 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2383 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2384 positive.link(&m_jit);
2386 doubleResult(outputFPR, node);
// Checked mode: a set sign bit is a speculation failure (OSR exit).
2390 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2392 SpeculateInt32Operand op1(this, node->child1());
2393 GPRTemporary result(this);
2395 m_jit.move(op1.gpr(), result.gpr());
2397 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2399 int32Result(result.gpr(), node, op1.format());
// Compiles DoubleAsInt32: converts a double to int32 with an OSR exit if
// the conversion is lossy (out of range, fractional, or -0 when the arith
// mode requires negative-zero detection).
2402 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2404 SpeculateDoubleOperand op1(this, node->child1());
2405 FPRTemporary scratch(this);
2406 GPRTemporary result(this);
2408 FPRReg valueFPR = op1.fpr();
2409 FPRReg scratchFPR = scratch.fpr();
2410 GPRReg resultGPR = result.gpr();
2412 JITCompiler::JumpList failureCases;
// This node only exists in checked-overflow form.
2413 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2414 m_jit.branchConvertDoubleToInt32(
2415 valueFPR, resultGPR, failureCases, scratchFPR,
2416 shouldCheckNegativeZero(node->arithMode()));
// Any failed conversion triggers an Overflow OSR exit.
2417 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2419 int32Result(resultGPR, node);
// Compiles DoubleRep: produces an unboxed double from the child value.
// RealNumberUse speculates "int32 or non-NaN-impure double"; the
// NumberUse/NotCellUse path handles booleans/undefined/null as well
// (NotCellUse), mapping them per ToNumber (false->0, true->1, null->0,
// undefined->NaN); Int52RepUse is a plain int64->double convert.
// NOTE(review): this extract is missing #if USE(JSVALUE64)/#else/#endif
// lines, case labels, and braces (e.g. both the tempGPR and tempFPR unbox
// sequences appear back to back) — confirm structure against upstream.
2422 void SpeculativeJIT::compileDoubleRep(Node* node)
2424 switch (node->child1().useKind()) {
2425 case RealNumberUse: {
2426 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2427 FPRTemporary result(this);
2429 JSValueRegs op1Regs = op1.jsValueRegs();
2430 FPRReg resultFPR = result.fpr();
// 64-bit unbox: decode double without asserting it is one yet.
2433 GPRTemporary temp(this);
2434 GPRReg tempGPR = temp.gpr();
2435 m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
// 32-bit unbox: reassemble the double from tag/payload.
2437 FPRTemporary temp(this);
2438 FPRReg tempFPR = temp.fpr();
2439 unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
// A self-equal result is a genuine (non-NaN) double: done. NaN here means
// the value wasn't a double at all, so it must be an int32 — type-check
// and convert.
2442 JITCompiler::Jump done = m_jit.branchDouble(
2443 JITCompiler::DoubleEqual, resultFPR, resultFPR);
2446 op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2447 m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2451 doubleResult(resultFPR, node);
2457 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
// If abstract interpretation proved int32, a straight convert suffices.
2459 SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2460 if (isInt32Speculation(possibleTypes)) {
2461 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2462 FPRTemporary result(this);
2463 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2464 doubleResult(result.fpr(), node);
2468 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2469 FPRTemporary result(this);
// --- 64-bit general path ---
2472 GPRTemporary temp(this);
2474 GPRReg op1GPR = op1.gpr();
2475 GPRReg tempGPR = temp.gpr();
2476 FPRReg resultFPR = result.fpr();
2477 JITCompiler::JumpList done;
2479 JITCompiler::Jump isInteger = m_jit.branch64(
2480 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2482 if (node->child1().useKind() == NotCellUse) {
2483 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2484 JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
// null and false both become +0.
2486 static const double zero = 0;
2487 m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2489 JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2490 done.append(isNull);
// Anything else that isn't a boolean must be a cell: speculation failure.
2492 DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2493 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2495 JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2496 static const double one = 1;
2497 m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2498 done.append(m_jit.jump());
2499 done.append(isFalse);
2501 isUndefined.link(&m_jit);
// undefined converts to NaN.
2502 static const double NaN = PNaN;
2503 m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2504 done.append(m_jit.jump());
2506 isNumber.link(&m_jit);
2507 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2509 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2510 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2513 unboxDouble(op1GPR, tempGPR, resultFPR);
2514 done.append(m_jit.jump());
2516 isInteger.link(&m_jit);
2517 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2519 #else // USE(JSVALUE64) -> this is the 32_64 case
2520 FPRTemporary temp(this);
2522 GPRReg op1TagGPR = op1.tagGPR();
2523 GPRReg op1PayloadGPR = op1.payloadGPR();
2524 FPRReg tempFPR = temp.fpr();
2525 FPRReg resultFPR = result.fpr();
2526 JITCompiler::JumpList done;
2528 JITCompiler::Jump isInteger = m_jit.branch32(
2529 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2531 if (node->child1().useKind() == NotCellUse) {
2532 JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2533 JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2535 static const double zero = 0;
2536 m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2538 JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2539 done.append(isNull);
2541 DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2543 JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2544 static const double one = 1;
2545 m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2546 done.append(m_jit.jump());
2547 done.append(isFalse);
2549 isUndefined.link(&m_jit);
2550 static const double NaN = PNaN;
2551 m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2552 done.append(m_jit.jump());
2554 isNumber.link(&m_jit);
2555 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2557 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2558 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2561 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2562 done.append(m_jit.jump());
2564 isInteger.link(&m_jit);
2565 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2567 #endif // USE(JSVALUE64)
2569 doubleResult(resultFPR, node);
// Int52 -> double: a single int64-to-double convert.
2575 SpeculateStrictInt52Operand value(this, node->child1());
2576 FPRTemporary result(this);
2578 GPRReg valueGPR = value.gpr();
2579 FPRReg resultFPR = result.fpr();
2581 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2583 doubleResult(resultFPR, node);
2586 #endif // USE(JSVALUE64)
2589 RELEASE_ASSERT_NOT_REACHED();
// Compiles ValueRep: boxes an unboxed representation (double or Int52)
// back into a JSValue.
2594 void SpeculativeJIT::compileValueRep(Node* node)
2596 switch (node->child1().useKind()) {
2597 case DoubleRepUse: {
2598 SpeculateDoubleOperand value(this, node->child1());
2599 JSValueRegsTemporary result(this);
2601 FPRReg valueFPR = value.fpr();
2602 JSValueRegs resultRegs = result.regs();
2604 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2605 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2606 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2607 // local was purified.
// Impure NaNs must be canonicalized before boxing, or the bit pattern
// could masquerade as a tagged value.
2608 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2609 m_jit.purifyNaN(valueFPR);
2611 boxDouble(valueFPR, resultRegs);
2613 jsValueResult(resultRegs, node);
// Int52 -> JSValue (JSVALUE64-only arm; #if guard not visible in this
// extract).
2619 SpeculateStrictInt52Operand value(this, node->child1());
2620 GPRTemporary result(this);
2622 GPRReg valueGPR = value.gpr();
2623 GPRReg resultGPR = result.gpr();
2625 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2627 jsValueResult(resultGPR, node);
2630 #endif // USE(JSVALUE64)
2633 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the [0, 255] byte range (used for Uint8ClampedArray
// stores). NOTE(review): the body (original lines 2639-2647) is missing
// from this extract — restore from upstream; presumably it also handles
// NaN and rounding, matching compileClampDoubleToByte below.
2638 static double clampDoubleToByte(double d)
// Emits code clamping the int32 in `result` to [0, 255] in place:
// values <= 0xff (unsigned compare, so non-negative in-range values) pass
// through; values > 0xff become 255; negative values become 0.
// NOTE(review): the `tooBig.link(&jit)` / `clamped` link lines are not
// visible in this extract (dropped lines 2654/2656) — confirm upstream.
2648 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
// Unsigned BelowOrEqual: catches exactly the already-in-range values.
2650 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2651 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
// Not in range and not greater than 0xff => negative: clamp to 0.
2652 jit.xorPtr(result, result);
2653 MacroAssembler::Jump clamped = jit.jump();
2655 jit.move(JITCompiler::TrustedImm32(255), result);
2657 inBounds.link(&jit);
// Emits code clamping the double in `source` to a byte in `result`:
// NaN and values <= 0 become 0, values > 255 become 255, otherwise the
// value is rounded (by adding 0.5 then truncating) to an integer.
// NOTE(review): `tooBig.link` / `zeroed.link` lines are not visible in
// this extract (dropped lines around 2681-2687) — confirm upstream.
2660 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2662 // Unordered compare so we pick up NaN
2663 static const double zero = 0;
2664 static const double byteMax = 255;
2665 static const double half = 0.5;
2666 jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
// DoubleLessThanOrEqualOrUnordered routes both negatives and NaN to zero.
2667 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2668 jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2669 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2671 jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2672 // FIXME: This should probably just use a floating point round!
2673 // https://bugs.webkit.org/show_bug.cgi?id=72054
// Round-half-up: add 0.5 then truncate toward zero.
2674 jit.addDouble(source, scratch);
2675 jit.truncateDoubleToInt32(scratch, result);
2676 MacroAssembler::Jump truncatedInt = jit.jump();
2678 tooSmall.link(&jit);
2679 jit.xorPtr(result, result);
2680 MacroAssembler::Jump zeroed = jit.jump();
2683 jit.move(JITCompiler::TrustedImm32(255), result);
2685 truncatedInt.link(&jit);
// Returns the jump taken when a typed-array index is out of bounds, or an
// unset Jump when the bounds check can be elided (PutByValAlias, or a
// foldable view with a constant in-range index).
// NOTE(review): the `if (view) {` line guarding the foldable-view branch is
// not visible in this extract (dropped line 2696) — confirm upstream.
2690 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2692 if (node->op() == PutByValAlias)
2693 return JITCompiler::Jump();
2694 JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2695 m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
// With a foldable view the length is a compile-time constant.
2697 uint32_t length = view->length();
2698 Node* indexNode = m_jit.graph().child(node, 1).node();
// Constant in-range index: no check needed at all.
2699 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2700 return JITCompiler::Jump();
2701 return m_jit.branch32(
2702 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
// General case: compare against the view's length loaded from memory.
2704 return m_jit.branch32(
2705 MacroAssembler::AboveOrEqual, indexGPR,
2706 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits the typed-array bounds check as an OSR-exiting speculation check
// (when jumpForTypedArrayOutOfBounds didn't elide it).
2709 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2711 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2714 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// On the out-of-bounds path, checks whether the typed array was neutered
// (detached). In-bounds array modes turn out-of-bounds into an OSR exit;
// otherwise a wasteful-mode view with a null vector (the neutered state)
// triggers an Uncountable speculation failure. Returns the jump that the
// in-bounds fall-through path should link past this check.
2717 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2719 JITCompiler::Jump done;
2720 if (outOfBounds.isSet()) {
2721 done = m_jit.jump();
2722 if (node->arrayMode().isInBounds())
2723 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2725 outOfBounds.link(&m_jit);
// Only WastefulTypedArray views can be neutered out from under us.
2727 JITCompiler::Jump notWasteful = m_jit.branch32(
2728 MacroAssembler::NotEqual,
2729 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2730 TrustedImm32(WastefulTypedArray));
// A null vector pointer means the backing buffer was detached.
2732 JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2733 MacroAssembler::Zero,
2734 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2735 speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2736 notWasteful.link(&m_jit);
// Compiles GetByVal on an integer typed array: bounds check, then a load
// sized/signed per the element type. Uint32 results that may exceed
// INT32_MAX are handled as checked-int32, Int52 (AnyInt), or double with a
// 2^32 correction. NOTE(review): case labels and #if USE(JSVALUE64) guards
// around the AnyInt arm are missing from this extract — confirm upstream.
2742 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2744 ASSERT(isInt(type));
2746 SpeculateCellOperand base(this, node->child1());
2747 SpeculateStrictInt32Operand property(this, node->child2());
2748 StorageOperand storage(this, node->child3());
2750 GPRReg baseReg = base.gpr();
2751 GPRReg propertyReg = property.gpr();
2752 GPRReg storageReg = storage.gpr();
2754 GPRTemporary result(this);
2755 GPRReg resultReg = result.gpr();
2757 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2759 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
// Element loads: signed variants sign-extend to 32 bits, unsigned zero-fill.
2760 switch (elementSize(type)) {
2763 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2765 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2769 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2771 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2774 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Sub-32-bit or signed loads always fit in an int32.
2779 if (elementSize(type) < 4 || isSigned(type)) {
2780 int32Result(resultReg, node);
// Uint32: the high bit may be set, so plain int32 doesn't fit.
2784 ASSERT(elementSize(type) == 4 && !isSigned(type));
2785 if (node->shouldSpeculateInt32()) {
// Speculate the value is <= INT32_MAX; OSR exit otherwise.
2786 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2787 int32Result(resultReg, node);
2792 if (node->shouldSpeculateAnyInt()) {
2793 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2794 strictInt52Result(resultReg, node);
// Fallback: produce a double, adding 2^32 for "negative" (high-bit) values
// since convertInt32ToDouble treated the bits as signed.
2799 FPRTemporary fresult(this);
2800 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2801 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2802 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2803 positive.link(&m_jit);
2804 doubleResult(fresult.fpr(), node);
// Compiles PutByVal into an integer typed array. Materializes the value
// into a GPR according to its use kind (constant, Int32, Int52, or
// DoubleRep — clamping for Uint8Clamped), then stores with the element
// size, with out-of-bounds handled by the neuter check and any
// unrepresentable doubles routed to a generic put slow path.
// NOTE(review): this extract is missing many structural lines (case
// labels, braces, #if USE(JSVALUE64)/#else/#endif around the Int52 arm,
// the boxDouble tag handling, and the slow-path selection) — confirm every
// structural detail against upstream before editing.
2807 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2809 ASSERT(isInt(type));
2811 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2812 GPRReg storageReg = storage.gpr();
2814 Edge valueUse = m_jit.graph().varArgChild(node, 2);
// 32-bit needs extra tag registers for the boxed slow-path arguments.
2817 #if USE(JSVALUE32_64)
2818 GPRTemporary propertyTag;
2819 GPRTemporary valueTag;
2822 GPRReg valueGPR = InvalidGPRReg;
2823 #if USE(JSVALUE32_64)
2824 GPRReg propertyTagGPR = InvalidGPRReg;
2825 GPRReg valueTagGPR = InvalidGPRReg;
2828 JITCompiler::JumpList slowPathCases;
// A constant is usable directly only if its speculated type already
// satisfies the edge's use kind.
2830 bool isAppropriateConstant = false;
2831 if (valueUse->isConstant()) {
2832 JSValue jsValue = valueUse->asJSValue();
2833 SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2834 SpeculatedType actualType = speculationFromValue(jsValue);
2835 isAppropriateConstant = (expectedType | actualType) == expectedType;
2838 if (isAppropriateConstant) {
2839 JSValue jsValue = valueUse->asJSValue();
2840 if (!jsValue.isNumber()) {
// Non-number constant can never satisfy this put: bail out of this path.
2841 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2845 double d = jsValue.asNumber();
2846 if (isClamped(type)) {
2847 ASSERT(elementSize(type) == 1);
2848 d = clampDoubleToByte(d);
2850 GPRTemporary scratch(this);
2851 GPRReg scratchReg = scratch.gpr();
2852 m_jit.move(Imm32(toInt32(d)), scratchReg);
2853 value.adopt(scratch);
2854 valueGPR = scratchReg;
2856 switch (valueUse.useKind()) {
// Int32 value: optionally clamp to byte range, then store as-is.
2858 SpeculateInt32Operand valueOp(this, valueUse);
2859 GPRTemporary scratch(this);
2860 GPRReg scratchReg = scratch.gpr();
2861 m_jit.move(valueOp.gpr(), scratchReg);
2862 if (isClamped(type)) {
2863 ASSERT(elementSize(type) == 1);
2864 compileClampIntegerToByte(m_jit, scratchReg);
2866 value.adopt(scratch);
2867 valueGPR = scratchReg;
// Int52 value (JSVALUE64 only): clamp with 64-bit compares when needed.
2873 SpeculateStrictInt52Operand valueOp(this, valueUse);
2874 GPRTemporary scratch(this);
2875 GPRReg scratchReg = scratch.gpr();
2876 m_jit.move(valueOp.gpr(), scratchReg);
2877 if (isClamped(type)) {
2878 ASSERT(elementSize(type) == 1);
2879 MacroAssembler::Jump inBounds = m_jit.branch64(
2880 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2881 MacroAssembler::Jump tooBig = m_jit.branch64(
2882 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2883 m_jit.move(TrustedImm32(0), scratchReg);
2884 MacroAssembler::Jump clamped = m_jit.jump();
2885 tooBig.link(&m_jit);
2886 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2887 clamped.link(&m_jit);
2888 inBounds.link(&m_jit);
2890 value.adopt(scratch);
2891 valueGPR = scratchReg;
2894 #endif // USE(JSVALUE64)
2896 case DoubleRepUse: {
2897 if (isClamped(type)) {
2898 ASSERT(elementSize(type) == 1);
2899 SpeculateDoubleOperand valueOp(this, valueUse);
2900 GPRTemporary result(this);
2901 FPRTemporary floatScratch(this);
2902 FPRReg fpr = valueOp.fpr();
2903 GPRReg gpr = result.gpr();
2904 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2905 value.adopt(result);
// Non-clamped double: truncate if possible; a failed truncation boxes the
// value and takes the generic slow path below.
2908 #if USE(JSVALUE32_64)
2909 GPRTemporary realPropertyTag(this);
2910 propertyTag.adopt(realPropertyTag);
2911 propertyTagGPR = propertyTag.gpr();
2913 GPRTemporary realValueTag(this);
2914 valueTag.adopt(realValueTag);
2915 valueTagGPR = valueTag.gpr();
2917 SpeculateDoubleOperand valueOp(this, valueUse);
2918 GPRTemporary result(this);
2919 FPRReg fpr = valueOp.fpr();
2920 GPRReg gpr = result.gpr();
// NaN stores as 0 (typed-array ToInteger semantics for NaN).
2921 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2922 m_jit.xorPtr(gpr, gpr);
2923 MacroAssembler::JumpList fixed(m_jit.jump());
2924 notNaN.link(&m_jit);
2926 fixed.append(m_jit.branchTruncateDoubleToInt32(
2927 fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
// Truncation failed: box the property/value for the generic C call.
2930 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2931 boxDouble(fpr, gpr);
2933 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2934 boxDouble(fpr, valueTagGPR, gpr);
2936 slowPathCases.append(m_jit.jump());
2939 value.adopt(result);
2946 RELEASE_ASSERT_NOT_REACHED();
// Register-allocation sanity: the value must not alias its co-operands.
2951 ASSERT_UNUSED(valueGPR, valueGPR != property);
2952 ASSERT(valueGPR != base);
2953 ASSERT(valueGPR != storageReg);
2954 JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2956 switch (elementSize(type)) {
2958 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2961 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2964 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2970 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
// Doubles that wouldn't truncate go through the full generic put.
2974 if (!slowPathCases.empty()) {
2976 if (node->op() == PutByValDirect) {
2977 addSlowPathGenerator(slowPathCall(
2978 slowPathCases, this,
2979 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2980 NoResult, base, property, valueGPR));
2982 addSlowPathGenerator(slowPathCall(
2983 slowPathCases, this,
2984 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2985 NoResult, base, property, valueGPR));
2987 #else // not USE(JSVALUE64)
2988 if (node->op() == PutByValDirect) {
2989 addSlowPathGenerator(slowPathCall(
2990 slowPathCases, this,
2991 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
2992 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2994 addSlowPathGenerator(slowPathCall(
2995 slowPathCases, this,
2996 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
2997 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
// Compiles GetByVal on a float typed array: bounds check, then a 4-byte
// float load (widened to double) or an 8-byte double load.
3004 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3006 ASSERT(isFloat(type));
3008 SpeculateCellOperand base(this, node->child1());
3009 SpeculateStrictInt32Operand property(this, node->child2());
3010 StorageOperand storage(this, node->child3());
3012 GPRReg baseReg = base.gpr();
3013 GPRReg propertyReg = property.gpr();
3014 GPRReg storageReg = storage.gpr();
3016 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3018 FPRTemporary result(this);
3019 FPRReg resultReg = result.fpr();
3020 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3021 switch (elementSize(type)) {
// Float32: load then widen, since DFG doubles are the only FP rep.
3023 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3024 m_jit.convertFloatToDouble(resultReg, resultReg);
3027 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3031 RELEASE_ASSERT_NOT_REACHED();
3034 doubleResult(resultReg, node);
// Compiles PutByVal into a float typed array: narrow to float for 4-byte
// elements, store, and run the neuter-if-out-of-bounds check.
3037 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3039 ASSERT(isFloat(type));
3041 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3042 GPRReg storageReg = storage.gpr();
3044 Edge baseUse = m_jit.graph().varArgChild(node, 0);
3045 Edge valueUse = m_jit.graph().varArgChild(node, 2);
3047 SpeculateDoubleOperand valueOp(this, valueUse);
3048 FPRTemporary scratch(this);
3049 FPRReg valueFPR = valueOp.fpr();
3050 FPRReg scratchFPR = scratch.fpr();
3052 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3054 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3056 switch (elementSize(type)) {
// Float32: convert through a scratch so the source double is preserved.
3058 m_jit.moveDouble(valueFPR, scratchFPR);
3059 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3060 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3064 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3067 RELEASE_ASSERT_NOT_REACHED();
3070 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
// Emits the prototype-chain walk for `value instanceof prototype` when the
// value is known to be an object. Proxy objects bail to
// operationDefaultHasInstance. Produces a boxed boolean in scratchReg.
// NOTE(review): #if USE(JSVALUE64)/#else guard lines between the 64-bit
// (boxed-boolean) and 32-bit (raw 0/1) result writes are missing from this
// extract — confirm upstream.
3076 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3078 // Check that prototype is an object.
3079 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3081 // Initialize scratchReg with the value being checked.
3082 m_jit.move(valueReg, scratchReg);
3084 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3085 MacroAssembler::Label loop(&m_jit);
// Proxies have custom hasInstance: punt to the default C implementation.
3086 MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3087 MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3088 m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
3089 m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3090 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
// Loop while the next prototype is still a cell (64-bit) / non-null
// (32-bit); chain end falls through to the "false" result.
3092 m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3094 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3097 // No match - result is false.
3099 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3101 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3103 MacroAssembler::JumpList doneJumps;
3104 doneJumps.append(m_jit.jump());
3106 performDefaultHasInstance.link(&m_jit);
3107 silentSpillAllRegisters(scratchReg);
3108 callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg);
3109 silentFillAllRegisters(scratchReg);
3110 m_jit.exceptionCheck();
// Box the raw 0/1 from the C call into a JS boolean (64-bit encoding).
3112 m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3114 doneJumps.append(m_jit.jump());
3116 isInstance.link(&m_jit);
3118 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3120 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3123 doneJumps.link(&m_jit);
3126 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
// Speculates that the cell in child1 has ALL of the type-info flag bits named by
// node->typeInfoOperand() set; OSR-exits with BadTypeInfoFlags if the branchTest8
// sees zero (i.e. any requested flag is missing).
3128 SpeculateCellOperand base(this, node->child1());
3130 GPRReg baseGPR = base.gpr();
3132 speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3137 void SpeculativeJIT::compileParseInt(Node* node)
// Compiles ParseInt by flushing and calling one of four runtime operations,
// chosen by (a) whether a radix operand (child2) is present and (b) whether the
// input (child1) is an arbitrary JSValue (UntypedUse) or a speculated String.
// NOTE(review): the paired resultRegs declarations and paired callOperation
// lines below are presumably the #if USE(JSVALUE64)/#else arms of stripped
// preprocessor conditionals — confirm against the original file.
3139 RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
// Result registers: one GPR on 64-bit, tag+payload pair on 32-bit (stripped #if).
3141 GPRFlushedCallResult resultPayload(this);
3142 GPRReg resultPayloadGPR = resultPayload.gpr();
3144 JSValueRegs resultRegs(resultPayloadGPR);
3146 GPRFlushedCallResult2 resultTag(this);
3147 GPRReg resultTagGPR = resultTag.gpr();
3148 JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3151 if (node->child2()) {
// parseInt(value, radix) — explicit radix path.
3152 SpeculateInt32Operand radix(this, node->child2());
3153 GPRReg radixGPR = radix.gpr();
3154 if (node->child1().useKind() == UntypedUse) {
3155 JSValueOperand value(this, node->child1());
// Two call forms for the two value representations (stripped #if).
3159 callOperation(operationParseIntGeneric, resultRegs.gpr(), value.gpr(), radixGPR);
3161 callOperation(operationParseIntGeneric, resultRegs, value.jsValueRegs(), radixGPR);
// The operation can throw (e.g. via toString on the argument).
3163 m_jit.exceptionCheck();
// StringUse arm: speculate the cell is a JSString, then call the string variant.
3165 SpeculateCellOperand value(this, node->child1());
3166 GPRReg valueGPR = value.gpr();
3167 speculateString(node->child1(), valueGPR);
3171 callOperation(operationParseIntString, resultRegs.gpr(), valueGPR, radixGPR);
3173 callOperation(operationParseIntString, resultRegs, valueGPR, radixGPR);
3175 m_jit.exceptionCheck();
// No radix operand — parseInt(value) with default radix handling in the runtime.
3178 if (node->child1().useKind() == UntypedUse) {
3179 JSValueOperand value(this, node->child1());
3183 callOperation(operationParseIntNoRadixGeneric, resultRegs.gpr(), value.jsValueRegs());
3185 callOperation(operationParseIntNoRadixGeneric, resultRegs, value.jsValueRegs());
3187 m_jit.exceptionCheck();
3189 SpeculateCellOperand value(this, node->child1());
3190 GPRReg valueGPR = value.gpr();
3191 speculateString(node->child1(), valueGPR);
3194 callOperation(operationParseIntStringNoRadix, resultRegs, valueGPR);
3195 m_jit.exceptionCheck();
// Publish the JSValue result for this node.
3199 jsValueResult(resultRegs, node);
3202 void SpeculativeJIT::compileInstanceOf(Node* node)
// Compiles InstanceOf. Two strategies based on child1's use kind:
// UntypedUse — value may not be a cell, so branch on cell-ness first (non-cells
// yield false immediately); otherwise speculate child1 is a cell and go straight
// to the prototype-chain walk. Both paths end with compileInstanceOfForObject
// leaving the boxed boolean in scratchReg.
// NOTE(review): braces/else structure between the two arms was stripped by this
// extraction — confirm block boundaries against the original file.
3204 if (node->child1().useKind() == UntypedUse) {
3205 // It might not be a cell. Speculate less aggressively.
3206 // Or: it might only be used once (i.e. by us), so we get zero benefit
3207 // from speculating any more aggressively than we absolutely need to.
3209 JSValueOperand value(this, node->child1());
3210 SpeculateCellOperand prototype(this, node->child2());
3211 GPRTemporary scratch(this);
3212 GPRTemporary scratch2(this);
3214 GPRReg prototypeReg = prototype.gpr();
3215 GPRReg scratchReg = scratch.gpr();
3216 GPRReg scratch2Reg = scratch2.gpr();
// Non-cell values are never instances: produce false and skip the chain walk.
3218 MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3219 GPRReg valueReg = value.jsValueRegs().payloadGPR();
3220 moveFalseTo(scratchReg);
3222 MacroAssembler::Jump done = m_jit.jump();
3224 isCell.link(&m_jit);
3226 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3230 blessedBooleanResult(scratchReg, node);
// Cell-speculated arm: both operands are cells, no cell check needed.
3234 SpeculateCellOperand value(this, node->child1());
3235 SpeculateCellOperand prototype(this, node->child2());
3237 GPRTemporary scratch(this);
3238 GPRTemporary scratch2(this);
3240 GPRReg valueReg = value.gpr();
3241 GPRReg prototypeReg = prototype.gpr();
3242 GPRReg scratchReg = scratch.gpr();
3243 GPRReg scratch2Reg = scratch2.gpr();
3245 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3247 blessedBooleanResult(scratchReg, node);
3250 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
// Generic emitter for untyped (JSValue) bitwise binary ops. If either operand is
// statically known not to be a number, skip the inline snippet entirely and call
// the slow-path operation. Otherwise emit the SnippetGenerator fast path with an
// out-of-line slow call for the cases the snippet rejects.
// NOTE(review): as elsewhere in this extraction, adjacent duplicate resultRegs/
// scratch declarations are presumably stripped #if USE(JSVALUE64)/#else arms —
// confirm against the original file.
3251 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3253 Edge& leftChild = node->child1();
3254 Edge& rightChild = node->child2();
// Slow-only path: a non-number operand means the snippet fast path cannot apply.
3256 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3257 JSValueOperand left(this, leftChild);
3258 JSValueOperand right(this, rightChild);
3259 JSValueRegs leftRegs = left.jsValueRegs();
3260 JSValueRegs rightRegs = right.jsValueRegs();
// Result registers: single GPR (64-bit) vs tag+payload pair (32-bit) — stripped #if.
3262 GPRTemporary result(this);
3263 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3265 GPRTemporary resultTag(this);
3266 GPRTemporary resultPayload(this);
3267 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3270 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3271 m_jit.exceptionCheck();
3273 jsValueResult(resultRegs, node);
// Snippet path. Operands are optional because a constant operand is not
// materialized into registers unless the slow path is taken.
3277 std::optional<JSValueOperand> left;
3278 std::optional<JSValueOperand> right;
3280 JSValueRegs leftRegs;
3281 JSValueRegs rightRegs;
// Result/scratch register setup; on 32-bit the tag register doubles as the
// scratch (stripped #if variants).
3284 GPRTemporary result(this);
3285 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3286 GPRTemporary scratch(this);
3287 GPRReg scratchGPR = scratch.gpr();
3289 GPRTemporary resultTag(this);
3290 GPRTemporary resultPayload(this);
3291 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3292 GPRReg scratchGPR = resultTag.gpr();
3295 SnippetOperand leftOperand;
3296 SnippetOperand rightOperand;
3298 // The snippet generator does not support both operands being constant. If the left
3299 // operand is already const, we'll ignore the right operand's constness.
3300 if (leftChild->isInt32Constant())
3301 leftOperand.setConstInt32(leftChild->asInt32());
3302 else if (rightChild->isInt32Constant())
3303 rightOperand.setConstInt32(rightChild->asInt32());
3305 RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
// Only materialize a register operand for non-constant children.
3307 if (!leftOperand.isConst()) {
3308 left.emplace(this, leftChild);
3309 leftRegs = left->jsValueRegs();
3311 if (!rightOperand.isConst()) {
3312 right.emplace(this, rightChild);
3313 rightRegs = right->jsValueRegs();
3316 SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3317 gen.generateFastPath(m_jit);
3319 ASSERT(gen.didEmitFastPath());
3320 gen.endJumpList().append(m_jit.jump());
// Slow path: spill, box any constant operand into registers (reusing resultRegs,
// which is dead at this point), call out, refill, and check for exceptions.
3322 gen.slowPathJumpList().link(&m_jit);
3323 silentSpillAllRegisters(resultRegs);
3325 if (leftOperand.isConst()) {
3326 leftRegs = resultRegs;
3327 m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3328 } else if (rightOperand.isConst()) {
3329 rightRegs = resultRegs;
3330 m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3333 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3335 silentFillAllRegisters(resultRegs);
3336 m_jit.exceptionCheck();
3338 gen.endJumpList().link(&m_jit);
3339 jsValueResult(resultRegs, node);
3342 void SpeculativeJIT::compileBitwiseOp(Node* node)
3344 NodeType op = node->op();
3345 Edge& leftChild = node->child1();
3346 Edge& rightChild = node->child2();