2 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITMulGenerator.h"
43 #include "JITSubGenerator.h"
44 #include "JSArrowFunction.h"
45 #include "JSCInlines.h"
46 #include "JSEnvironmentRecord.h"
47 #include "JSLexicalEnvironment.h"
48 #include "LinkBuffer.h"
49 #include "ScopedArguments.h"
50 #include "ScratchRegisterAllocator.h"
51 #include "WriteBarrierBuffer.h"
52 #include <wtf/MathExtras.h>
54 namespace JSC { namespace DFG {
// SpeculativeJIT walks a DFG graph and emits speculative machine code through
// the JITCompiler, recording variable-event and minified-graph streams so OSR
// exit can reconstruct baseline state when a speculation fails.
// NOTE(review): this extract is missing several initializer-list entries
// between the constructor header and m_lastGeneratedNode, and the constructor
// body — consult the full file before editing.
56 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
60 , m_lastGeneratedNode(LastNodeType)
62 , m_generationInfo(m_jit.graph().frameRegisterCount())
63 , m_state(m_jit.graph())
64 , m_interpreter(m_jit.graph(), m_state)
65 , m_stream(&jit.jitCode()->variableEventStream)
66 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
// Destructor body is not visible in this extract.
70 SpeculativeJIT::~SpeculativeJIT()
// Emits inline code that allocates a JSArray with the given structure and
// numElements preallocated slots. On success, resultGPR holds the array cell
// and storageGPR holds its butterfly; any allocation failure jumps to a slow
// path that calls operationNewArrayWithSize.
74 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
// Only the "fast" indexing shapes are supported here (no ArrayStorage).
76 ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
78 GPRTemporary scratch(this);
79 GPRTemporary scratch2(this);
80 GPRReg scratchGPR = scratch.gpr();
81 GPRReg scratch2GPR = scratch2.gpr();
// Never allocate a vector smaller than BASE_VECTOR_LEN so small arrays can
// grow without an immediate reallocation.
83 unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
85 JITCompiler::JumpList slowCases;
// Allocate the butterfly (indexing header + vector), then rewind storageGPR
// from the end of the allocation back to the butterfly pointer.
// NOTE(review): the line appending this call's result to slowCases is missing
// from this extract.
88 emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
89 m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
90 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
// Initialize the indexing header.
92 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
93 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
// Double arrays must have their unused tail slots filled with PNaN (the hole
// value for double storage). 64-bit stores the encoded NaN directly; the
// other branch (presumably the 32-bit build — the #if lines are missing from
// this extract) stores tag and payload halves separately.
95 if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
97 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
98 for (unsigned i = numElements; i < vectorLength; ++i)
99 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
101 EncodedValueDescriptor value;
102 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
103 for (unsigned i = numElements; i < vectorLength; ++i) {
104 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
105 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
110 // I want a slow path that also loads out the storage pointer, and that's
111 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
112 // of work for a very small piece of functionality. :-/
113 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
114 slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
115 structure, numElements));
// Loads the argument count of the given (possibly inlined) call frame into
// lengthGPR. For a non-varargs inline frame the count is a compile-time
// constant; otherwise it is loaded from the frame's ArgumentCount slot.
118 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
120 if (inlineCallFrame && !inlineCallFrame->isVarargs())
// arguments.size() includes |this|; subtract one when the caller does not
// want it counted.
121 m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
123 VirtualRegister argumentCountRegister;
124 if (!inlineCallFrame)
125 argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
127 argumentCountRegister = inlineCallFrame->argumentCountRegister;
128 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
// NOTE(review): this subtract is presumably guarded by an !includeThis check
// on a line missing from this extract — confirm against the full file.
130 m_jit.sub32(TrustedImm32(1), lengthGPR);
// Convenience overload: resolves the inline call frame from a CodeOrigin.
134 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
136 emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
// Materializes the callee for the frame identified by `origin` into calleeGPR.
// Closure calls inside an inline frame load the recovered callee from the
// stack; non-closure inline calls use the constant callee; a non-inlined
// frame loads the machine frame's Callee slot.
139 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
141 if (origin.inlineCallFrame) {
142 if (origin.inlineCallFrame->isClosureCall) {
// NOTE(review): the loadPtr/move calls wrapping these arguments are on lines
// missing from this extract.
144 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
148 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
152 m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
// Computes the address of the first argument of the frame identified by
// `origin` into startGPR (call-frame register plus a constant offset).
155 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
159 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
160 GPRInfo::callFrameRegister, startGPR);
// OSR-exit fuzzing support: emits code that counts dynamic exit checks and
// returns a jump that fires when the configured fuzz threshold is reached,
// forcing an exit even though the speculation held. Returns an unset Jump
// when fuzzing is disabled.
163 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
165 if (!doOSRExitFuzzing())
166 return MacroAssembler::Jump();
168 MacroAssembler::Jump result;
// regT0 is preserved around the counter update so this check is transparent
// to the surrounding code.
170 m_jit.pushToSave(GPRInfo::regT0);
171 m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
172 m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
173 m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
174 unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
175 unsigned at = Options::fireOSRExitFuzzAt();
176 if (at || atOrAfter) {
178 MacroAssembler::RelationalCondition condition;
// "atOrAfter" fires for every check from the threshold on (Below means the
// counter has not yet reached it); "at" fires exactly once (NotEqual).
// NOTE(review): the if/else selecting between the two modes is on lines
// missing from this extract.
180 threshold = atOrAfter;
181 condition = MacroAssembler::Below;
184 condition = MacroAssembler::NotEqual;
186 MacroAssembler::Jump ok = m_jit.branch32(
187 condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
188 m_jit.popToRestore(GPRInfo::regT0);
189 result = m_jit.jump();
192 m_jit.popToRestore(GPRInfo::regT0);
// Registers an OSR exit taken when jumpToFail fires: records the jump(s) in
// the compiler's exit info and appends a matching OSRExit entry to the JIT
// code. If exit fuzzing is active, the fuzz jump is folded into the same
// exit.
197 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
201 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
202 if (fuzzJump.isSet()) {
203 JITCompiler::JumpList jumpsToFail;
204 jumpsToFail.append(fuzzJump);
205 jumpsToFail.append(jumpToFail);
206 m_jit.appendExitInfo(jumpsToFail);
208 m_jit.appendExitInfo(jumpToFail);
209 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Same as above, but for a whole list of failure jumps.
212 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
216 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
217 if (fuzzJump.isSet()) {
218 JITCompiler::JumpList myJumpsToFail;
219 myJumpsToFail.append(jumpsToFail);
220 myJumpsToFail.append(fuzzJump);
221 m_jit.appendExitInfo(myJumpsToFail);
223 m_jit.appendExitInfo(jumpsToFail);
224 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Registers an OSR exit whose failure jump will be filled in later; returns a
// placeholder identifying the exit by its index in the osrExit vector.
// NOTE(review): the early return on line 230 is presumably guarded by an
// !m_compileOkay check on a line missing from this extract.
227 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
230 return OSRExitJumpPlaceholder();
231 unsigned index = m_jit.jitCode()->osrExit.size();
232 m_jit.appendExitInfo();
233 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
234 return OSRExitJumpPlaceholder(index);
// Edge-based convenience overloads: forward to the Node* variants.
237 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
239 return speculationCheck(kind, jsValueSource, nodeUse.node());
242 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
244 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
247 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
249 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
// Variant that attaches a SpeculationRecovery — extra instructions the exit
// runs to undo partially-completed work before reconstructing baseline state.
252 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
256 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
257 m_jit.appendExitInfo(jumpToFail);
258 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
261 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
263 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
// Emits an invalidation point: an OSR exit with no inline failure jumps whose
// replacement-source label marks where jump-replacement code can later be
// patched in to invalidate this compilation.
266 void SpeculativeJIT::emitInvalidationPoint(Node* node)
270 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
271 m_jit.jitCode()->appendOSRExit(OSRExit(
272 UncountableInvalidation, JSValueSource(),
273 m_jit.graph().methodOfGettingAValueProfileFor(node),
274 this, m_stream->size()));
275 info.m_replacementSource = m_jit.watchpointLabel();
276 ASSERT(info.m_replacementSource.isSet());
// Emits an unconditional OSR exit and marks the compilation as failed so no
// further code is generated along this path.
280 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
284 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
285 m_compileOkay = false;
286 if (verboseCompilationEnabled())
287 dataLog("Bailing compilation.\n");
290 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
292 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
// Narrows the abstract interpreter's proven type for `edge` to
// typesPassedThrough and registers a BadType exit on jumpToFail.
295 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
297 ASSERT(needsTypeCheck(edge, typesPassedThrough));
298 m_interpreter.filter(edge, typesPassedThrough);
299 speculationCheck(BadType, source, edge.node(), jumpToFail);
// Returns the set of GPRs and FPRs currently in use by the register
// allocator, merged with the registers a stub may not use.
// NOTE(review): the lines adding each in-use register to `result` are missing
// from this extract.
302 RegisterSet SpeculativeJIT::usedRegisters()
306 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
307 GPRReg gpr = GPRInfo::toRegister(i);
308 if (m_gprs.isInUse(gpr))
311 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
312 FPRReg fpr = FPRInfo::toRegister(i);
313 if (m_fprs.isInUse(fpr))
317 result.merge(RegisterSet::stubUnavailableRegisters());
// Queues a slow-path generator; all queued generators are emitted after the
// fast paths by runSlowPathGenerators().
322 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
324 m_slowPathGenerators.append(WTF::move(slowPathGenerator));
327 void SpeculativeJIT::runSlowPathGenerators()
329 for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
330 m_slowPathGenerators[i]->generate(this);
333 // On Windows we need to wrap fmod; on other platforms we can call it directly.
334 // On ARMv7 we assert that all function pointers have to low bit set (point to thumb code).
335 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
// Wrapper giving fmod the JIT_OPERATION calling convention / thumb-bit
// guarantee; its body (return fmod(x, y);) is not visible in this extract.
336 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
341 #define fmodAsDFGOperation fmod
// Resets all per-node generation info and clears both register banks —
// used when abandoning the current register-allocation state.
344 void SpeculativeJIT::clearGenerationInfo()
346 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
347 m_generationInfo[i] = GenerationInfo();
348 m_gprs = RegisterBank<GPRInfo>();
349 m_fprs = RegisterBank<FPRInfo>();
// Builds a plan describing how to spill (before a call) and refill (after it)
// the value currently held in GPR `source` for virtual register `spillMe`,
// without changing the recorded register-allocation state ("silently").
// The spill action depends on the value's register format; the fill action
// additionally exploits constants (re-materialize instead of reload) and the
// on-stack spill format. Both 64-bit and 32-bit value representations are
// handled; the #if/#else/#endif lines separating them are missing from this
// extract, so the two paths appear interleaved below.
352 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
354 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
355 Node* node = info.node();
356 DataFormat registerFormat = info.registerFormat();
357 ASSERT(registerFormat != DataFormatNone);
358 ASSERT(registerFormat != DataFormatDouble);
360 SilentSpillAction spillAction;
361 SilentFillAction fillAction;
// --- Choose the spill action ---
363 if (!info.needsSpill())
364 spillAction = DoNothingForSpill;
367 ASSERT(info.gpr() == source);
368 if (registerFormat == DataFormatInt32)
369 spillAction = Store32Payload;
370 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
371 spillAction = StorePtr;
372 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
373 spillAction = Store64;
375 ASSERT(registerFormat & DataFormatJS);
376 spillAction = Store64;
378 #elif USE(JSVALUE32_64)
// 32-bit: a JS value occupies two GPRs; spill whichever half `source` is.
379 if (registerFormat & DataFormatJS) {
380 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
381 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
383 ASSERT(info.gpr() == source);
384 spillAction = Store32Payload;
// --- Choose the fill action ---
389 if (registerFormat == DataFormatInt32) {
390 ASSERT(info.gpr() == source);
391 ASSERT(isJSInt32(info.registerFormat()));
392 if (node->hasConstant()) {
393 ASSERT(node->isInt32Constant());
394 fillAction = SetInt32Constant;
396 fillAction = Load32Payload;
397 } else if (registerFormat == DataFormatBoolean) {
// On 64-bit a bare DataFormatBoolean in a GPR should not occur.
399 RELEASE_ASSERT_NOT_REACHED();
400 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
401 fillAction = DoNothingForFill;
403 #elif USE(JSVALUE32_64)
404 ASSERT(info.gpr() == source);
405 if (node->hasConstant()) {
406 ASSERT(node->isBooleanConstant());
407 fillAction = SetBooleanConstant;
409 fillAction = Load32Payload;
411 } else if (registerFormat == DataFormatCell) {
412 ASSERT(info.gpr() == source);
413 if (node->hasConstant()) {
414 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
415 node->asCell(); // To get the assertion.
416 fillAction = SetCellConstant;
419 fillAction = LoadPtr;
421 fillAction = Load32Payload;
424 } else if (registerFormat == DataFormatStorage) {
425 ASSERT(info.gpr() == source);
426 fillAction = LoadPtr;
427 } else if (registerFormat == DataFormatInt52) {
// Int52 values are stored shifted left by int52ShiftAmount; the fill may
// need to convert from the strict (unshifted) spill format.
428 if (node->hasConstant())
429 fillAction = SetInt52Constant;
430 else if (info.spillFormat() == DataFormatInt52)
432 else if (info.spillFormat() == DataFormatStrictInt52)
433 fillAction = Load64ShiftInt52Left;
434 else if (info.spillFormat() == DataFormatNone)
437 RELEASE_ASSERT_NOT_REACHED();
438 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
439 fillAction = Load64; // Make GCC happy.
442 } else if (registerFormat == DataFormatStrictInt52) {
443 if (node->hasConstant())
444 fillAction = SetStrictInt52Constant;
445 else if (info.spillFormat() == DataFormatInt52)
446 fillAction = Load64ShiftInt52Right;
447 else if (info.spillFormat() == DataFormatStrictInt52)
449 else if (info.spillFormat() == DataFormatNone)
452 RELEASE_ASSERT_NOT_REACHED();
453 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
454 fillAction = Load64; // Make GCC happy.
458 ASSERT(registerFormat & DataFormatJS);
460 ASSERT(info.gpr() == source);
461 if (node->hasConstant()) {
// Cell constants can be trusted (unforgeable pointer bits).
462 if (node->isCellConstant())
463 fillAction = SetTrustedJSConstant;
465 fillAction = SetJSConstant;
466 } else if (info.spillFormat() == DataFormatInt32) {
467 ASSERT(registerFormat == DataFormatJSInt32);
468 fillAction = Load32PayloadBoxInt;
// 32-bit JS value: refill the half that `source` held. Filling the tag half
// can use a constant tag when the spill format pins the type.
472 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
473 if (node->hasConstant())
474 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
475 else if (info.payloadGPR() == source)
476 fillAction = Load32Payload;
477 else { // Fill the Tag
478 switch (info.spillFormat()) {
479 case DataFormatInt32:
480 ASSERT(registerFormat == DataFormatJSInt32);
481 fillAction = SetInt32Tag;
484 ASSERT(registerFormat == DataFormatJSCell);
485 fillAction = SetCellTag;
487 case DataFormatBoolean:
488 ASSERT(registerFormat == DataFormatJSBoolean);
489 fillAction = SetBooleanTag;
492 fillAction = Load32Tag;
499 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// FPR counterpart of silentSavePlanForGPR: plans how to spill and refill the
// double currently in FPR `source` for virtual register `spillMe`. Constants
// are re-materialized; otherwise the value is reloaded from its stack slot.
502 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
504 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
505 Node* node = info.node();
506 ASSERT(info.registerFormat() == DataFormatDouble);
508 SilentSpillAction spillAction;
509 SilentFillAction fillAction;
511 if (!info.needsSpill())
512 spillAction = DoNothingForSpill;
514 ASSERT(!node->hasConstant());
515 ASSERT(info.spillFormat() == DataFormatNone);
516 ASSERT(info.fpr() == source);
517 spillAction = StoreDouble;
// The 64-bit and 32-bit fill paths are equivalent here; the #if lines
// separating them are missing from this extract.
521 if (node->hasConstant()) {
522 node->asNumber(); // To get the assertion.
523 fillAction = SetDoubleConstant;
525 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
526 fillAction = LoadDouble;
528 #elif USE(JSVALUE32_64)
529 ASSERT(info.registerFormat() == DataFormatDouble);
530 if (node->hasConstant()) {
531 node->asNumber(); // To get the assertion.
532 fillAction = SetDoubleConstant;
534 fillAction = LoadDouble;
537 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Executes the spill half of a SilentRegisterSavePlan: stores the planned
// register to the node's stack slot using the store width the plan chose.
// NOTE(review): most case labels of this switch are on lines missing from
// this extract; only the store statements are visible.
540 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
542 switch (plan.spillAction()) {
543 case DoNothingForSpill:
546 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
549 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
552 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
556 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
560 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
563 RELEASE_ASSERT_NOT_REACHED();
// Executes the fill half of a SilentRegisterSavePlan: re-materializes a
// constant or reloads the value from its stack slot into the planned
// register. canTrample is a scratch GPR (used on 64-bit to build a double
// constant); it is unused on 32-bit.
// NOTE(review): several case labels and #if/#endif lines of this switch are
// missing from this extract.
567 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
569 #if USE(JSVALUE32_64)
570 UNUSED_PARAM(canTrample);
572 switch (plan.fillAction()) {
573 case DoNothingForFill:
575 case SetInt32Constant:
576 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
// Int52 constants are kept left-shifted by int52ShiftAmount in registers.
579 case SetInt52Constant:
580 m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
582 case SetStrictInt52Constant:
583 m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
585 #endif // USE(JSVALUE64)
586 case SetBooleanConstant:
587 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
589 case SetCellConstant:
590 m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
593 case SetTrustedJSConstant:
594 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
597 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
// 64-bit: build the double bit pattern in the scratch GPR, then move it
// into the FPR.
599 case SetDoubleConstant:
600 m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
601 m_jit.move64ToDouble(canTrample, plan.fpr());
// Reload an int32 payload and box it by or-ing in the number tag.
603 case Load32PayloadBoxInt:
604 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
605 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
607 case Load32PayloadConvertToInt52:
608 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
609 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
610 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
612 case Load32PayloadSignExtend:
613 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
614 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
// 32-bit: refill one half (tag or payload) of a two-register JS value.
617 case SetJSConstantTag:
618 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
620 case SetJSConstantPayload:
621 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
624 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
627 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
630 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
632 case SetDoubleConstant:
633 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
637 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
640 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
643 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
647 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
// Convert between strict and shifted Int52 spill formats while reloading.
649 case Load64ShiftInt52Right:
650 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
651 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
653 case Load64ShiftInt52Left:
654 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
655 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
659 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
662 RELEASE_ASSERT_NOT_REACHED();
// Given a GPR holding an object's indexing type byte, emits a branch taken
// when the object does NOT match the expected indexing `shape` under the
// array-class constraint of `arrayMode` (array vs. non-array vs. either).
// tempGPR is clobbered by the masking.
666 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
668 switch (arrayMode.arrayClass()) {
669 case Array::OriginalArray: {
// OriginalArray is handled elsewhere; reaching it here is a bug.
671 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
672 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
// Must be an array AND have the expected shape.
678 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
679 return m_jit.branch32(
680 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
// Must NOT have the IsArray bit, and must have the expected shape.
682 case Array::NonArray:
683 case Array::OriginalNonArray:
684 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
685 return m_jit.branch32(
686 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
// Only the shape matters; IsArray is ignored.
688 case Array::PossiblyArray:
689 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
690 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
693 RELEASE_ASSERT_NOT_REACHED();
694 return JITCompiler::Jump();
// Emits the full set of branches taken when the indexing type in tempGPR does
// not satisfy `arrayMode`. Simple shapes delegate to the single-shape helper;
// ArrayStorage / SlowPutArrayStorage use a subtract-and-compare range check
// because SlowPut covers two adjacent shape values.
// NOTE(review): the case labels for the simple shapes and the appends into
// `result` are partly on lines missing from this extract.
697 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
699 JITCompiler::JumpList result;
701 switch (arrayMode.type()) {
703 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
706 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
708 case Array::Contiguous:
709 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
711 case Array::Undecided:
712 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
714 case Array::ArrayStorage:
715 case Array::SlowPutArrayStorage: {
716 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
718 if (arrayMode.isJSArray()) {
719 if (arrayMode.isSlowPut()) {
// Fail if the IsArray bit is clear...
722 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
// ...then fail unless the shape is in [ArrayStorageShape,
// SlowPutArrayStorageShape] (unsigned range check after subtraction).
723 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
724 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
727 MacroAssembler::Above, tempGPR,
728 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
// Non-slow-put JSArray: exact match against IsArray | ArrayStorageShape.
731 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
733 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
// Non-array object: same shape checks without the IsArray constraint.
736 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
737 if (arrayMode.isSlowPut()) {
738 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
741 MacroAssembler::Above, tempGPR,
742 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
746 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Compiles a CheckArray node: verifies at run time that the base object
// matches the node's already-refined ArrayMode, exiting with BadIndexingType
// or BadType on mismatch. No check is emitted if abstract interpretation
// already proves the mode.
757 void SpeculativeJIT::checkArray(Node* node)
759 ASSERT(node->arrayMode().isSpecific());
760 ASSERT(!node->arrayMode().doesConversion());
762 SpeculateCellOperand base(this, node->child1());
763 GPRReg baseReg = base.gpr();
// Statically proven: nothing to check.
765 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
766 noResult(m_currentNode);
770 const ClassInfo* expectedClassInfo = 0;
772 switch (node->arrayMode().type()) {
773 case Array::AnyTypedArray:
// Array::String should have been lowered to a Phantom by this point.
775 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
// Indexing-shape-based modes: load the indexing type byte and branch to an
// exit on any unwanted shape.
779 case Array::Contiguous:
780 case Array::Undecided:
781 case Array::ArrayStorage:
782 case Array::SlowPutArrayStorage: {
783 GPRTemporary temp(this);
784 GPRReg tempGPR = temp.gpr();
785 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
787 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
788 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
790 noResult(m_currentNode);
// Arguments objects and typed arrays are identified by JSType instead.
793 case Array::DirectArguments:
794 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
795 noResult(m_currentNode);
797 case Array::ScopedArguments:
798 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
799 noResult(m_currentNode);
802 speculateCellTypeWithoutTypeFiltering(
803 node->child1(), baseReg,
804 typeForTypedArrayType(node->arrayMode().typedArrayType()));
805 noResult(m_currentNode);
// Fallback: compare the structure's ClassInfo pointer.
809 RELEASE_ASSERT(expectedClassInfo);
811 GPRTemporary temp(this);
812 GPRTemporary temp2(this);
813 m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
815 BadType, JSValueSource::unboxedCell(baseReg), node,
817 MacroAssembler::NotEqual,
818 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
819 MacroAssembler::TrustedImmPtr(expectedClassInfo)));
821 noResult(m_currentNode);
// Compiles Arrayify / ArrayifyToStructure: checks whether the base object
// already has the wanted structure (ArrayifyToStructure) or indexing shape
// (Arrayify), and otherwise falls to a slow path that converts the object's
// storage in place.
824 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
826 ASSERT(node->arrayMode().doesConversion());
828 GPRTemporary temp(this);
// `structure` is only allocated for the plain Arrayify case; it stays unbound
// (and structureGPR stays InvalidGPRReg) for ArrayifyToStructure.
829 GPRTemporary structure;
830 GPRReg tempGPR = temp.gpr();
831 GPRReg structureGPR = InvalidGPRReg;
833 if (node->op() != ArrayifyToStructure) {
834 GPRTemporary realStructure(this);
835 structure.adopt(realStructure);
836 structureGPR = structure.gpr();
839 // We can skip all that comes next if we already have array storage.
840 MacroAssembler::JumpList slowPath;
842 if (node->op() == ArrayifyToStructure) {
843 slowPath.append(m_jit.branchWeakStructure(
844 JITCompiler::NotEqual,
845 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
// NOTE(review): the expected-structure operand and the else-branch's load8 of
// the indexing type are partly on lines missing from this extract.
849 MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
851 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
854 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
855 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
857 noResult(m_currentNode);
// Entry point: binds the base (and optional property index) operands, then
// delegates to the overload above.
860 void SpeculativeJIT::arrayify(Node* node)
862 ASSERT(node->arrayMode().isSpecific());
864 SpeculateCellOperand base(this, node->child1());
866 if (!node->child2()) {
867 arrayify(node, base.gpr(), InvalidGPRReg);
871 SpeculateInt32Operand property(this, node->child2());
873 arrayify(node, base.gpr(), property.gpr());
// Ensures the storage pointer (butterfly) for `edge` is in a GPR and returns
// that register. Reloads from the stack if spilled as storage; values not in
// storage format are filled as cells (the cell pointer doubles as storage for
// typed arrays and the like).
876 GPRReg SpeculativeJIT::fillStorage(Edge edge)
878 VirtualRegister virtualRegister = edge->virtualRegister();
879 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
881 switch (info.registerFormat()) {
882 case DataFormatNone: {
883 if (info.spillFormat() == DataFormatStorage) {
884 GPRReg gpr = allocate();
885 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
886 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
887 info.fillStorage(*m_stream, gpr);
// NOTE(review): the `return gpr;` for this branch is on a line missing from
// this extract.
891 // Must be a cell; fill it as a cell and then return the pointer.
892 return fillSpeculateCell(edge);
// Already in a register in storage format.
895 case DataFormatStorage: {
896 GPRReg gpr = info.gpr();
902 return fillSpeculateCell(edge);
// Marks all of a node's children as used (decrementing their virtual
// reference counts so their registers can be reclaimed). Handles both the
// var-args child list and the fixed child1/child2/child3 slots.
// NOTE(review): the use() calls and early returns between the fixed-child
// checks are on lines missing from this extract.
906 void SpeculativeJIT::useChildren(Node* node)
908 if (node->flags() & NodeHasVarArgs) {
909 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
910 if (!!m_jit.graph().m_varArgChildren[childIdx])
911 use(m_jit.graph().m_varArgChildren[childIdx]);
914 Edge child1 = node->child1();
916 ASSERT(!node->child2() && !node->child3());
921 Edge child2 = node->child2();
923 ASSERT(!node->child3());
928 Edge child3 = node->child3();
// Compiles the `in` operator. When the property is a constant atomic string,
// emits a patchable inline cache (StructureStubInfo + operationInOptimize
// slow path); otherwise falls back to a call to operationGenericIn.
935 void SpeculativeJIT::compileIn(Node* node)
937 SpeculateCellOperand base(this, node->child2());
938 GPRReg baseGPR = base.gpr();
940 if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
941 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
942 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
944 GPRTemporary result(this);
945 GPRReg resultGPR = result.gpr();
// The patchable jump is the cache's fast-path entry; `done` marks where
// patched code should jump back to.
949 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
950 MacroAssembler::Label done = m_jit.label();
952 // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
953 // we can cast it to const AtomicStringImpl* safely.
954 auto slowPath = slowPathCall(
955 jump.m_jump, this, operationInOptimize,
956 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
957 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
// Record the register/call-site metadata the IC repatching code needs.
959 stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
960 stubInfo->codeOrigin = node->origin.semantic;
961 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
962 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
963 #if USE(JSVALUE32_64)
964 stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
965 stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
967 stubInfo->patch.usedRegisters = usedRegisters();
969 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
970 addSlowPathGenerator(WTF::move(slowPath));
974 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: call out with the boxed key.
979 JSValueOperand key(this, node->child1());
980 JSValueRegs regs = key.jsValueRegs();
982 GPRFlushedCallResult result(this);
983 GPRReg resultGPR = result.gpr();
990 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
992 m_jit.exceptionCheck();
993 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles a generic (non-speculative) comparison. If the next node in the
// block is a Branch consuming only this compare, fuses compare+branch as a
// peephole (and advances the instruction cursor past the branch, returning
// true); otherwise emits a materialized boolean result and returns false.
996 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
998 unsigned branchIndexInBlock = detectPeepHoleBranch();
999 if (branchIndexInBlock != UINT_MAX) {
1000 Node* branchNode = m_block->at(branchIndexInBlock);
1002 ASSERT(node->adjustedRefCount() == 1);
1004 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
// Skip ahead so the fused branch is not compiled a second time.
1006 m_indexInBlock = branchIndexInBlock;
1007 m_currentNode = branchNode;
1012 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
// Same peephole structure for generic strict equality (optionally inverted).
1017 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1019 unsigned branchIndexInBlock = detectPeepHoleBranch();
1020 if (branchIndexInBlock != UINT_MAX) {
1021 Node* branchNode = m_block->at(branchIndexInBlock);
1023 ASSERT(node->adjustedRefCount() == 1);
1025 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1027 m_indexInBlock = branchIndexInBlock;
1028 m_currentNode = branchNode;
1033 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Maps a DataFormat enum value to a short printable name for debug dumps.
// NOTE(review): the string table's entries (lines 1042-1058) are missing from
// this extract.
1038 static const char* dataFormatString(DataFormat format)
1040 // These values correspond to the DataFormat enum.
1041 const char* strings[] = {
1059 return strings[format];
// Debug dump of the allocator state: register banks plus, for every virtual
// register, its register/spill formats and assigned machine register.
1062 void SpeculativeJIT::dump(const char* label)
1065 dataLogF("<%s>\n", label);
1067 dataLogF("  gprs:\n");
1069 dataLogF("  fprs:\n");
1071 dataLogF("  VirtualRegisters:\n");
1072 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1073 GenerationInfo& info = m_generationInfo[i];
// Alive registers print their formats; dead ones print placeholders.
1075 dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1077 dataLogF(" % 3d:[__][__]", i);
1078 if (info.registerFormat() == DataFormatDouble)
1079 dataLogF(":fpr%d\n", info.fpr());
1080 else if (info.registerFormat() != DataFormatNone
1081 #if USE(JSVALUE32_64)
// On 32-bit, two-register JS values are excluded from the single-GPR print.
1082 && !(info.registerFormat() & DataFormatJS)
1085 ASSERT(info.gpr() != InvalidGPRReg);
1086 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1091 dataLogF("</%s>\n", label);
// Default-constructed temporary: owns no register until adopt() is called.
1094 GPRTemporary::GPRTemporary()
1096 , m_gpr(InvalidGPRReg)
// Allocates a fresh GPR from the speculative JIT's register allocator;
// released when the temporary goes out of scope.
1100 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1102 , m_gpr(InvalidGPRReg)
1104 m_gpr = m_jit->allocate();
// Allocates a *specific* GPR (e.g. one required by a calling convention or
// an instruction with fixed register operands).
1107 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1109 , m_gpr(InvalidGPRReg)
1111 m_gpr = m_jit->allocate(specific);
// 32-bit only: try to reuse one half (tag or payload word) of a JSValueOperand
// as this temporary, avoiding an extra register allocation. Falls back to a
// fresh allocation when the operand is a double or still has other uses.
1114 #if USE(JSVALUE32_64)
1115 GPRTemporary::GPRTemporary(
1116     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1118 , m_gpr(InvalidGPRReg)
1120 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1121 m_gpr = m_jit->reuse(op1.gpr(which));
1123 m_gpr = m_jit->allocate();
1125 #endif // USE(JSVALUE32_64)
// JSValueRegsTemporary: width-agnostic wrapper around the GPR(s) needed to
// hold a JSValue — one GPR on 64-bit, a tag/payload pair on 32-bit.
// NOTE(review): this listing elides the member-initializer bodies.
1127 JSValueRegsTemporary::JSValueRegsTemporary() { }
1129 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1139 JSValueRegsTemporary::~JSValueRegsTemporary() { }
// Returns the held registers in JSValueRegs form (one GPR vs. tag+payload).
1141 JSValueRegs JSValueRegsTemporary::regs()
1144 return JSValueRegs(m_gpr.gpr());
1146 return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
// Transfers ownership of another temporary's GPR into this (empty) one,
// clearing the donor so the register is not released twice on destruction.
1150 void GPRTemporary::adopt(GPRTemporary& other)
1153 ASSERT(m_gpr == InvalidGPRReg);
1154 ASSERT(other.m_jit);
1155 ASSERT(other.m_gpr != InvalidGPRReg);
1156 m_jit = other.m_jit;
1157 m_gpr = other.m_gpr;
1159 other.m_gpr = InvalidGPRReg;
// Allocates a fresh FPR from the speculative JIT's register allocator.
1162 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1164 , m_fpr(InvalidFPRReg)
1166 m_fpr = m_jit->fprAllocate();
// Reuses op1's FPR as the temporary when op1 has no remaining uses;
// otherwise allocates a fresh FPR.
1169 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1171 , m_fpr(InvalidFPRReg)
1173 if (m_jit->canReuse(op1.node()))
1174 m_fpr = m_jit->reuse(op1.fpr());
1176 m_fpr = m_jit->fprAllocate();
// Two-operand variant: prefer reusing either operand's FPR. The final case
// handles both edges referring to the same node/register, where joint
// reusability must be checked before taking the shared FPR.
1179 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1181 , m_fpr(InvalidFPRReg)
1183 if (m_jit->canReuse(op1.node()))
1184 m_fpr = m_jit->reuse(op1.fpr());
1185 else if (m_jit->canReuse(op2.node()))
1186 m_fpr = m_jit->reuse(op2.fpr());
1187 else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1188 m_fpr = m_jit->reuse(op1.fpr());
1190 m_fpr = m_jit->fprAllocate();
// 32-bit only: reuse a JSValueOperand's FPR when it holds a double and has
// no remaining uses; otherwise allocate a fresh FPR.
1193 #if USE(JSVALUE32_64)
1194 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1196 , m_fpr(InvalidFPRReg)
1198 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1199 m_fpr = m_jit->reuse(op1.fpr());
1201 m_fpr = m_jit->fprAllocate();
// Fused double-compare + branch: evaluates both children as doubles and
// branches straight to the taken block on the given double condition.
// NOTE(review): the jump to notTaken is elided in this listing.
1205 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1207 BasicBlock* taken = branchNode->branchData()->taken.block;
1208 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1210 SpeculateDoubleOperand op1(this, node->child1());
1211 SpeculateDoubleOperand op2(this, node->child2());
1213 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-identity compare + branch. Both children are speculated to be
// objects; equality is then plain pointer comparison. When the masquerades-
// as-undefined watchpoint is valid, only an is-object check is needed per
// child; otherwise we must additionally OSR-exit on MasqueradesAsUndefined
// cells, whose == semantics are not simple pointer identity.
// NOTE(review): this listing elides some lines (taken/notTaken swap body,
// jump(notTaken), etc.).
1217 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1219 BasicBlock* taken = branchNode->branchData()->taken.block;
1220 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1222 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// Invert so the taken block can fall through when it is the next block.
1224 if (taken == nextBlock()) {
1225 condition = MacroAssembler::NotEqual;
1226 BasicBlock* tmp = taken;
1231 SpeculateCellOperand op1(this, node->child1());
1232 SpeculateCellOperand op2(this, node->child2());
1234 GPRReg op1GPR = op1.gpr();
1235 GPRReg op2GPR = op2.gpr();
// Fast path: watchpoint guarantees no MasqueradesAsUndefined objects exist,
// so a bare is-object speculation per child suffices.
1237 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1238 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1240 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1242 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1244 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
// Slow path: also exit if either operand has the MasqueradesAsUndefined flag.
1247 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1249 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1250 m_jit.branchIfNotObject(op1GPR));
1252 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1254 MacroAssembler::NonZero,
1255 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1256 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1258 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1260 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1261 m_jit.branchIfNotObject(op2GPR));
1263 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1265 MacroAssembler::NonZero,
1266 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1267 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Both proven objects: identity is pointer equality.
1270 branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean-compare + branch. Constant operands are folded into an
// immediate 32-bit compare against the boolean's JSValue encoding; otherwise
// both operands are speculated booleans and compared register-to-register.
// NOTE(review): swap body and trailing jump(notTaken) are elided here.
1274 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1276 BasicBlock* taken = branchNode->branchData()->taken.block;
1277 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1279 // The branch instruction will branch to the taken block.
1280 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1281 if (taken == nextBlock()) {
1282 condition = JITCompiler::invert(condition);
1283 BasicBlock* tmp = taken;
1288 if (node->child1()->isBooleanConstant()) {
1289 bool imm = node->child1()->asBoolean();
1290 SpeculateBooleanOperand op2(this, node->child2());
1291 branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1292 } else if (node->child2()->isBooleanConstant()) {
1293 SpeculateBooleanOperand op1(this, node->child1());
1294 bool imm = node->child2()->asBoolean();
1295 branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1297 SpeculateBooleanOperand op1(this, node->child1());
1298 SpeculateBooleanOperand op2(this, node->child2());
1299 branch32(condition, op1.gpr(), op2.gpr(), taken);
// Fused int32-compare + branch; same shape as the boolean variant above but
// with raw int32 immediates for constant operands.
// NOTE(review): swap body and trailing jump(notTaken) are elided here.
1305 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1307 BasicBlock* taken = branchNode->branchData()->taken.block;
1308 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1310 // The branch instruction will branch to the taken block.
1311 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1312 if (taken == nextBlock()) {
1313 condition = JITCompiler::invert(condition);
1314 BasicBlock* tmp = taken;
1319 if (node->child1()->isInt32Constant()) {
1320 int32_t imm = node->child1()->asInt32();
1321 SpeculateInt32Operand op2(this, node->child2());
1322 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1323 } else if (node->child2()->isInt32Constant()) {
1324 SpeculateInt32Operand op1(this, node->child1());
1325 int32_t imm = node->child2()->asInt32();
1326 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1328 SpeculateInt32Operand op1(this, node->child1());
1329 SpeculateInt32Operand op2(this, node->child2());
1330 branch32(condition, op1.gpr(), op2.gpr(), taken);
1336 // Returns true if the compare is fused with a subsequent branch.
// Dispatcher for all fused compare+branch forms: picks the specialized
// peephole compiler based on the compare's use kinds, falling back to the
// generic nonSpeculativePeepholeBranch. On fusion it consumes both children
// explicitly and advances the block cursor past the branch.
// NOTE(review): some lines (e.g. the #if USE(JSVALUE64) opener matching the
// #endif, returns) are elided in this listing.
1337 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1339 // Fused compare & branch.
1340 unsigned branchIndexInBlock = detectPeepHoleBranch();
1341 if (branchIndexInBlock != UINT_MAX) {
1342 Node* branchNode = m_block->at(branchIndexInBlock);
1344 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1345 // so can be no intervening nodes to also reference the compare.
1346 ASSERT(node->adjustedRefCount() == 1);
1348 if (node->isBinaryUseKind(Int32Use))
1349 compilePeepHoleInt32Branch(node, branchNode, condition);
// Int52 path is 64-bit only.
1351 else if (node->isBinaryUseKind(Int52RepUse))
1352 compilePeepHoleInt52Branch(node, branchNode, condition);
1353 #endif // USE(JSVALUE64)
1354 else if (node->isBinaryUseKind(DoubleRepUse))
1355 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
// CompareEq gets extra specialized forms keyed off its children's use kinds.
1356 else if (node->op() == CompareEq) {
1357 if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1358 // Use non-peephole comparison, for now.
1361 if (node->isBinaryUseKind(BooleanUse))
1362 compilePeepHoleBooleanBranch(node, branchNode, condition);
1363 else if (node->isBinaryUseKind(SymbolUse))
1364 compilePeepHoleSymbolEquality(node, branchNode);
1365 else if (node->isBinaryUseKind(ObjectUse))
1366 compilePeepHoleObjectEquality(node, branchNode);
// Mixed object / object-or-other: normalize so the proven-object child is first.
1367 else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1368 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1369 else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1370 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
// If one side is statically null/undefined, compare the other side against it.
1371 else if (!needsTypeCheck(node->child1(), SpecOther))
1372 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1373 else if (!needsTypeCheck(node->child2(), SpecOther))
1374 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1376 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1380 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// Children were consumed by the fused code above, not by the usual epilogue.
1384 use(node->child1());
1385 use(node->child2());
// Skip the branch node: it has been compiled as part of the fusion.
1386 m_indexInBlock = branchIndexInBlock;
1387 m_currentNode = branchNode;
// Records in the variable-event stream that this node's value has become
// live for OSR purposes. Nodes without a virtual register are ignored.
1393 void SpeculativeJIT::noticeOSRBirth(Node* node)
1395 if (!node->hasVirtualRegister())
1398 VirtualRegister virtualRegister = node->virtualRegister();
1399 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1401 info.noticeOSRBirth(*m_stream, node, virtualRegister);
// MovHint generates no machine code: it only tells the OSR machinery that
// the child's value now logically lives in the hinted bytecode local.
1404 void SpeculativeJIT::compileMovHint(Node* node)
1406 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1408 Node* child = node->child1().node();
1409 noticeOSRBirth(child);
1411 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Abandons compilation of the current node/block: plants a runtime abort
// (breakpoint) carrying the reason and the last generated node's opcode,
// then resets the register-allocation bookkeeping.
1414 void SpeculativeJIT::bail(AbortReason reason)
1416 if (verboseCompilationEnabled())
1417 dataLog("Bailing compilation.\n");
// NOTE(review): setting m_compileOkay back to true looks surprising —
// presumably it lets code generation continue past the abort so the rest of
// the function still assembles; confirm against upstream intent.
1418 m_compileOkay = true;
1419 m_jit.abortWithReason(reason, m_lastGeneratedNode);
1420 clearGenerationInfo();
// Generates code for m_block: binds its entry label, seeds the OSR variable
// event stream from variablesAtHead, then compiles each node in order while
// stepping the abstract interpreter alongside. Bails when the abstract state
// becomes contradictory or a node fails to compile.
// NOTE(review): several lines (returns, loop braces, dataLogF opener) are
// elided in this listing.
1423 void SpeculativeJIT::compileCurrentBlock()
1425 ASSERT(m_compileOkay);
1430 ASSERT(m_block->isReachable);
// Record this block's entry point for later branch/OSR-entry linking.
1432 m_jit.blockHeads()[m_block->index] = m_jit.label();
1434 if (!m_block->intersectionOfCFAHasVisited) {
1435 // Don't generate code for basic blocks that are unreachable according to CFA.
1436 // But to be sure that nobody has generated a jump to this block, drop in a
1438 m_jit.abortWithReason(DFGUnreachableBasicBlock);
1442 m_stream->appendAndLog(VariableEvent::reset());
1444 m_jit.jitAssertHasValidCallFrame();
1445 m_jit.jitAssertTagsInPlace();
1446 m_jit.jitAssertArgumentCountSane();
1449 m_state.beginBasicBlock(m_block);
// Seed the OSR stream with the flush format of every live variable at head.
1451 for (size_t i = m_block->variablesAtHead.size(); i--;) {
1452 int operand = m_block->variablesAtHead.operandForIndex(i);
1453 Node* node = m_block->variablesAtHead[i];
1455 continue; // No need to record dead SetLocal's.
1457 VariableAccessData* variable = node->variableAccessData();
1459 if (!node->refCount())
1460 continue; // No need to record dead SetLocal's.
1461 format = dataFormatFor(variable->flushFormat());
1462 m_stream->appendAndLog(
1463 VariableEvent::setLocal(
1464 VirtualRegister(operand),
1465 variable->machineLocal(),
1469 m_origin = NodeOrigin();
// Main per-node codegen loop.
1471 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1472 m_currentNode = m_block->at(m_indexInBlock);
1474 // We may have hit a contradiction that the CFA was aware of but that the JIT
1475 // didn't cause directly.
1476 if (!m_state.isValid()) {
1477 bail(DFGBailedAtTopOfBlock);
1481 m_interpreter.startExecuting();
1482 m_jit.setForNode(m_currentNode);
1483 m_origin = m_currentNode->origin;
// Under validation, only allow exits where the exit analysis says they may occur.
1484 if (validationEnabled())
1485 m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1486 m_lastGeneratedNode = m_currentNode->op();
1488 ASSERT(m_currentNode->shouldGenerate());
1490 if (verboseCompilationEnabled()) {
1492 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1493 (int)m_currentNode->index(),
1494 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1498 m_jit.jitAssertNoException();
1500 compile(m_currentNode);
// Keep the minified graph (used by OSR exit) in sync with generated nodes.
1502 if (belongsInMinifiedGraph(m_currentNode->op()))
1503 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1505 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1506 m_jit.clearRegisterAllocationOffsets();
1509 if (!m_compileOkay) {
1510 bail(DFGBailedAtEndOfNode);
1514 // Make sure that the abstract state is rematerialized for the next node.
1515 m_interpreter.executeEffects(m_indexInBlock);
1518 // Perform the most basic verification that children have been used correctly.
1519 if (!ASSERT_DISABLED) {
1520 for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1521 GenerationInfo& info = m_generationInfo[index];
1522 RELEASE_ASSERT(!info.alive());
1527 // If we are making type predictions about our arguments then
1528 // we need to check that they are correct on function entry.
// Emits entry-time speculation checks for each argument's predicted flush
// format. Arguments flushed as plain JSValue need no check. The first
// switch is the 64-bit encoding (tag bits in the value); the second is the
// 32-bit encoding (separate tag word). NOTE(review): the #if/#else pair
// separating the two switches is elided in this listing.
1529 void SpeculativeJIT::checkArgumentTypes()
1531 ASSERT(!m_currentNode);
1532 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1534 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1535 Node* node = m_jit.graph().m_arguments[i];
1537 // The argument is dead. We don't do any checks for such arguments.
1541 ASSERT(node->op() == SetArgument);
1542 ASSERT(node->shouldGenerate());
1544 VariableAccessData* variableAccessData = node->variableAccessData();
1545 FlushFormat format = variableAccessData->flushFormat();
// A JSValue flush accepts anything — nothing to check.
1547 if (format == FlushedJSValue)
1550 VirtualRegister virtualRegister = variableAccessData->local();
1552 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// --- 64-bit checks: inspect the NaN-boxed encoding in place. ---
1556 case FlushedInt32: {
1557 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1560 case FlushedBoolean: {
1561 GPRTemporary temp(this);
1562 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
// XOR with ValueFalse maps false->0, true->1; any other bit set means not a boolean.
1563 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1564 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
// Cell check: no tag-mask bits may be set.
1568 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1572 RELEASE_ASSERT_NOT_REACHED();
// --- 32-bit checks: compare the separate tag word against the expected tag. ---
1577 case FlushedInt32: {
1578 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1581 case FlushedBoolean: {
1582 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1586 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1590 RELEASE_ASSERT_NOT_REACHED();
1596 m_origin = NodeOrigin();
// Top-level driver: checks argument-type speculations once at entry, then
// compiles every basic block of the graph in index order.
1599 bool SpeculativeJIT::compile()
1601 checkArgumentTypes();
1603 ASSERT(!m_currentNode);
1604 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1605 m_jit.setForBlockIndex(blockIndex);
1606 m_block = m_jit.graph().block(blockIndex);
1607 compileCurrentBlock();
// Collects the entry label of every block marked as an OSR target so they
// can be linked once the final code buffer exists (see linkOSREntries).
1613 void SpeculativeJIT::createOSREntries()
1615 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1616 BasicBlock* block = m_jit.graph().block(blockIndex);
1619 if (!block->isOSRTarget)
1622 // Currently we don't have OSR entry trampolines. We could add them
1624 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// Resolves the labels gathered by createOSREntries() against the finished
// LinkBuffer, registering each OSR-target block's real code address.
// Must consume the heads in the same order they were appended.
1628 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1630 unsigned osrEntryIndex = 0;
1631 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1632 BasicBlock* block = m_jit.graph().block(blockIndex);
1635 if (!block->isOSRTarget)
1637 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
// Every collected head must have been consumed exactly once.
1639 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1641 if (verboseCompilationEnabled()) {
1642 DumpContext dumpContext;
1643 dataLog("OSR Entries:\n");
1644 for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1645 dataLog("    ", inContext(entryData, &dumpContext), "\n");
1646 if (!dumpContext.isEmpty())
1647 dumpContext.dump(WTF::dataFile());
// Stores a double into a double-typed array butterfly. Speculates that the
// value is a full real number (no impure NaN), then either stores directly
// (PutByValAlias), or performs bounds checks — growing the public length for
// the may-grow case and deferring to an out-of-line operation call when the
// array mode permits out-of-bounds stores.
// NOTE(review): some lines (a hole-check, jump targets, slowPathCall opener)
// are elided in this listing.
1651 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1653 Edge child3 = m_jit.graph().varArgChild(node, 2);
1654 Edge child4 = m_jit.graph().varArgChild(node, 3);
1656 ArrayMode arrayMode = node->arrayMode();
1658 GPRReg baseReg = base.gpr();
1659 GPRReg propertyReg = property.gpr();
1661 SpeculateDoubleOperand value(this, child3);
1663 FPRReg valueReg = value.fpr();
// Double arrays cannot hold impure NaN; exit if value != value (NaN check).
1666 JSValueRegs(), child3, SpecFullRealNumber,
1668 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1673 StorageOperand storage(this, child4);
1674 GPRReg storageReg = storage.gpr();
// PutByValAlias: bounds already proven by a prior access — just store.
1676 if (node->op() == PutByValAlias) {
1677 // Store the value to the array.
1678 GPRReg propertyReg = property.gpr();
1679 FPRReg valueReg = value.fpr();
1680 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1682 noResult(m_currentNode);
1686 GPRTemporary temporary;
1687 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1689 MacroAssembler::Jump slowCase;
// In-bounds mode: any index >= publicLength is a speculation failure.
1691 if (arrayMode.isInBounds()) {
1693 OutOfBounds, JSValueRegs(), 0,
1694 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
// May-grow mode: within vectorLength the store can extend publicLength;
// beyond it we take the slow case (or exit if OOB writes are disallowed).
1696 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1698 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1700 if (!arrayMode.isOutOfBounds())
1701 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Grow: publicLength = index + 1.
1703 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1704 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1706 inBounds.link(&m_jit);
1709 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
// Out-of-bounds stores fall back to a C++ operation (strict-mode aware).
1716 if (arrayMode.isOutOfBounds()) {
1717 addSlowPathGenerator(
1720 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1721 NoResult, baseReg, propertyReg, valueReg));
1724 noResult(m_currentNode, UseChildrenCalledExplicitly);
// Implements String.prototype.charCodeAt fast path: bounds-check the index,
// then load the character as an int32 from either the 8-bit or 16-bit
// string representation, selected by the StringImpl's is8Bit flag.
1727 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1729 SpeculateCellOperand string(this, node->child1());
1730 SpeculateStrictInt32Operand index(this, node->child2());
1731 StorageOperand storage(this, node->child3());
1733 GPRReg stringReg = string.gpr();
1734 GPRReg indexReg = index.gpr();
1735 GPRReg storageReg = storage.gpr();
1737 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1739 // unsigned comparison so we can filter out negative indices and indices that are too large
1740 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1742 GPRTemporary scratch(this);
1743 GPRReg scratchReg = scratch.gpr();
// scratch first holds the StringImpl*, then is overwritten with the char code.
1745 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1747 // Load the character into scratchReg
1748 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1750 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1751 JITCompiler::Jump cont8Bit = m_jit.jump();
1753 is16Bit.link(&m_jit);
1755 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1757 cont8Bit.link(&m_jit);
1759 int32Result(scratchReg, m_currentNode);
// Implements string[index]: loads the character (8-bit or 16-bit path) and
// maps it to the VM's preallocated single-character string table. Characters
// >= 0x100 and out-of-bounds indices go to slow paths. If the string
// prototype chain is sane, out-of-bounds can return undefined directly
// (watchpointed); otherwise it falls back to operationGetByValStringInt.
// NOTE(review): several lines (#else/#endif partners, jumps) are elided in
// this listing; resultTag handling is 32-bit only.
1762 void SpeculativeJIT::compileGetByValOnString(Node* node)
1764 SpeculateCellOperand base(this, node->child1());
1765 SpeculateStrictInt32Operand property(this, node->child2());
1766 StorageOperand storage(this, node->child3());
1767 GPRReg baseReg = base.gpr();
1768 GPRReg propertyReg = property.gpr();
1769 GPRReg storageReg = storage.gpr();
1771 GPRTemporary scratch(this);
1772 GPRReg scratchReg = scratch.gpr();
// 32-bit: a tag register is only needed when out-of-bounds results (which
// may be non-cell, e.g. undefined) are possible.
1773 #if USE(JSVALUE32_64)
1774 GPRTemporary resultTag;
1775 GPRReg resultTagReg = InvalidGPRReg;
1776 if (node->arrayMode().isOutOfBounds()) {
1777 GPRTemporary realResultTag(this);
1778 resultTag.adopt(realResultTag);
1779 resultTagReg = resultTag.gpr();
1783 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1785 // unsigned comparison so we can filter out negative indices and indices that are too large
1786 JITCompiler::Jump outOfBounds = m_jit.branch32(
1787 MacroAssembler::AboveOrEqual, propertyReg,
1788 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
// In-bounds mode treats any OOB index as a speculation failure.
1789 if (node->arrayMode().isInBounds())
1790 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1792 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1794 // Load the character into scratchReg
1795 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1797 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1798 JITCompiler::Jump cont8Bit = m_jit.jump();
1800 is16Bit.link(&m_jit);
1802 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Only 16-bit characters can exceed the single-character string table.
1804 JITCompiler::Jump bigCharacter =
1805 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1807 // 8 bit string values don't need the isASCII check.
1808 cont8Bit.link(&m_jit);
// Index the VM's singleCharacterStrings table: scale by pointer size.
1810 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1811 m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1812 m_jit.loadPtr(scratchReg, scratchReg);
1814 addSlowPathGenerator(
1816 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1818 if (node->arrayMode().isOutOfBounds()) {
1819 #if USE(JSVALUE32_64)
1820 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1823 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1824 if (globalObject->stringPrototypeChainIsSane()) {
1825 // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1826 // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1827 // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1828 // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1829 // indexed properties either.
1830 // https://bugs.webkit.org/show_bug.cgi?id=144668
// Watchpoint the prototype structures so sanity is revalidated if they change.
1831 m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1832 m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1835 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1836 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1838 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1839 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1840 baseReg, propertyReg));
// Non-sane chain: OOB access must consult the prototype chain via C++.
1844 addSlowPathGenerator(
1846 outOfBounds, this, operationGetByValStringInt,
1847 scratchReg, baseReg, propertyReg));
1849 addSlowPathGenerator(
1851 outOfBounds, this, operationGetByValStringInt,
1852 resultTagReg, scratchReg, baseReg, propertyReg));
// OOB-capable mode yields a full JSValue result; in-bounds yields a cell.
1857 jsValueResult(scratchReg, m_currentNode);
1859 jsValueResult(resultTagReg, scratchReg, m_currentNode);
1862 cellResult(scratchReg, m_currentNode);
// Implements String.fromCharCode fast path: char codes <= 0xfe are looked up
// in the VM's preallocated single-character string table; larger codes, or a
// null table entry, go to the operationStringFromCharCode slow path.
1865 void SpeculativeJIT::compileFromCharCode(Node* node)
1867 SpeculateStrictInt32Operand property(this, node->child1());
1868 GPRReg propertyReg = property.gpr();
1869 GPRTemporary smallStrings(this);
1870 GPRTemporary scratch(this);
1871 GPRReg scratchReg = scratch.gpr();
1872 GPRReg smallStringsReg = smallStrings.gpr();
1874 JITCompiler::JumpList slowCases;
1875 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1876 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1877 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// Table entries may be lazily initialized; a null pointer means slow path.
1879 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1880 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1881 cellResult(scratchReg, m_currentNode);
// Classifies how a node's value is currently materialized so ValueToInt32
// can pick the cheapest conversion: already an integer, a boxed JSValue, or
// a format (boolean/cell) that terminates speculation outright.
1884 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1886 VirtualRegister virtualRegister = node->virtualRegister();
1887 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1889 switch (info.registerFormat()) {
1890 case DataFormatStorage:
1891 RELEASE_ASSERT_NOT_REACHED();
// Unboxed boolean/cell can never be a number here; give up on this path.
1893 case DataFormatBoolean:
1894 case DataFormatCell:
1895 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1896 return GeneratedOperandTypeUnknown;
1898 case DataFormatNone:
1899 case DataFormatJSCell:
1901 case DataFormatJSBoolean:
1902 case DataFormatJSDouble:
1903 return GeneratedOperandJSValue;
1905 case DataFormatJSInt32:
1906 case DataFormatInt32:
1907 return GeneratedOperandInteger;
1910 RELEASE_ASSERT_NOT_REACHED();
1911 return GeneratedOperandTypeUnknown;
// Compiles ValueToInt32, dispatching on the child's use kind: Int52 truncate
// (64-bit), double truncate with slow-path toInt32 call, or generic JSValue
// conversion. The JSValue case further splits on how the operand is
// currently materialized (see checkGeneratedTypeForToInt32); its first body
// is the 64-bit NaN-boxed path and the second the 32-bit tag/payload path.
// NOTE(review): many lines (case labels, #if/#else partners, breaks) are
// elided in this listing.
1915 void SpeculativeJIT::compileValueToInt32(Node* node)
1917 switch (node->child1().useKind()) {
// Int52 (64-bit only): truncation is a simple zero-extension of the low 32 bits.
1920 SpeculateStrictInt52Operand op1(this, node->child1());
1921 GPRTemporary result(this, Reuse, op1);
1922 GPRReg op1GPR = op1.gpr();
1923 GPRReg resultGPR = result.gpr();
1924 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1925 int32Result(resultGPR, node, DataFormatInt32);
1928 #endif // USE(JSVALUE64)
// Double: fast hardware truncate; non-representable values call toInt32.
1930 case DoubleRepUse: {
1931 GPRTemporary result(this);
1932 SpeculateDoubleOperand op1(this, node->child1());
1933 FPRReg fpr = op1.fpr();
1934 GPRReg gpr = result.gpr();
1935 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
// toInt32 cannot throw, so no exception check is needed on the slow path.
1937 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1939 int32Result(gpr, node);
1945 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
// Already an int32 in a register: just move it.
1946 case GeneratedOperandInteger: {
1947 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1948 GPRTemporary result(this, Reuse, op1);
1949 m_jit.move(op1.gpr(), result.gpr());
1950 int32Result(result.gpr(), node, op1.format());
1953 case GeneratedOperandJSValue: {
1954 GPRTemporary result(this);
// ---- 64-bit NaN-boxed path ----
1956 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1958 GPRReg gpr = op1.gpr();
1959 GPRReg resultGpr = result.gpr();
1960 FPRTemporary tempFpr(this);
1961 FPRReg fpr = tempFpr.fpr();
// Values >= tagTypeNumber are boxed int32s.
1963 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1964 JITCompiler::JumpList converted;
// NumberUse: anything with no number bits set is a speculation failure.
1966 if (node->child1().useKind() == NumberUse) {
1968 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1970 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1972 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
// Not a number and not a cell: booleans/null/undefined.
1975 JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1977 // It's not a cell: so true turns into 1 and all else turns into 0.
1978 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1979 converted.append(m_jit.jump());
1981 isNumber.link(&m_jit);
1984 // First, if we get here we have a double encoded as a JSValue
1985 m_jit.move(gpr, resultGpr);
1986 unboxDouble(resultGpr, fpr);
// Call the C++ toInt32 helper; silent spill/fill preserves register state.
1988 silentSpillAllRegisters(resultGpr);
1989 callOperation(toInt32, resultGpr, fpr);
1990 silentFillAllRegisters(resultGpr);
1992 converted.append(m_jit.jump());
1994 isInteger.link(&m_jit);
1995 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1997 converted.link(&m_jit);
// ---- 32-bit tag/payload path ----
1999 Node* childNode = node->child1().node();
2000 VirtualRegister virtualRegister = childNode->virtualRegister();
2001 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2003 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2005 GPRReg payloadGPR = op1.payloadGPR();
2006 GPRReg resultGpr = result.gpr();
2008 JITCompiler::JumpList converted;
// Already known to be an int32: the payload is the answer.
2010 if (info.registerFormat() == DataFormatJSInt32)
2011 m_jit.move(payloadGPR, resultGpr);
2013 GPRReg tagGPR = op1.tagGPR();
2014 FPRTemporary tempFpr(this);
2015 FPRReg fpr = tempFpr.fpr();
2016 FPRTemporary scratch(this);
2018 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
// NumberUse: tags >= LowestTag are non-numbers — speculation failure.
2020 if (node->child1().useKind() == NumberUse) {
2022 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2024 MacroAssembler::AboveOrEqual, tagGPR,
2025 TrustedImm32(JSValue::LowestTag)));
2027 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2030 op1.jsValueRegs(), node->child1(), ~SpecCell,
2031 m_jit.branchIfCell(op1.jsValueRegs()));
2033 // It's not a cell: so true turns into 1 and all else turns into 0.
2034 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2035 m_jit.move(TrustedImm32(0), resultGpr);
2036 converted.append(m_jit.jump());
2038 isBoolean.link(&m_jit);
// Boolean payload is already 0/1.
2039 m_jit.move(payloadGPR, resultGpr);
2040 converted.append(m_jit.jump());
2042 isNumber.link(&m_jit);
2045 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2047 silentSpillAllRegisters(resultGpr);
2048 callOperation(toInt32, resultGpr, fpr);
2049 silentFillAllRegisters(resultGpr);
2051 converted.append(m_jit.jump());
2053 isInteger.link(&m_jit);
2054 m_jit.move(payloadGPR, resultGpr);
2056 converted.link(&m_jit);
2059 int32Result(resultGpr, node);
// Unknown means checkGeneratedTypeForToInt32 already terminated speculation.
2062 case GeneratedOperandTypeUnknown:
2063 RELEASE_ASSERT(!m_compileOkay);
2066 RELEASE_ASSERT_NOT_REACHED();
2071 ASSERT(!m_compileOkay);
// Converts a value produced as a uint32 into a JS number. If overflow is
// expected (doesOverflow), always produce a double: reinterpret the int32
// and add 2^32 when the sign bit was set. Otherwise keep it as an int32 and
// OSR-exit if the value is negative (i.e. doesn't fit in a signed int32).
2076 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2078 if (doesOverflow(node->arithMode())) {
2079 // We know that this sometimes produces doubles. So produce a double every
2080 // time. This at least allows subsequent code to not have weird conditionals.
2082 SpeculateInt32Operand op1(this, node->child1());
2083 FPRTemporary result(this);
2085 GPRReg inputGPR = op1.gpr();
2086 FPRReg outputFPR = result.fpr();
2088 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
// Negative as signed means the uint32 had its high bit set: add 2^32.
2090 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2091 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2092 positive.link(&m_jit);
2094 doubleResult(outputFPR, node);
2098 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2100 SpeculateInt32Operand op1(this, node->child1());
2101 GPRTemporary result(this);
2103 m_jit.move(op1.gpr(), result.gpr());
// Exit if the uint32 doesn't fit in a signed int32.
2105 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2107 int32Result(result.gpr(), node, op1.format());
// Converts a double that is speculated to be exactly representable as an
// int32. Any failure (fractional part, overflow, or negative zero when the
// arith mode cares) triggers an OSR exit.
2110 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2112 SpeculateDoubleOperand op1(this, node->child1());
2113 FPRTemporary scratch(this);
2114 GPRTemporary result(this);
2116 FPRReg valueFPR = op1.fpr();
2117 FPRReg scratchFPR = scratch.fpr();
2118 GPRReg resultGPR = result.gpr();
2120 JITCompiler::JumpList failureCases;
// This node only exists when overflow checking is required.
2121 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2122 m_jit.branchConvertDoubleToInt32(
2123 valueFPR, resultGPR, failureCases, scratchFPR,
2124 shouldCheckNegativeZero(node->arithMode()));
2125 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2127 int32Result(resultGPR, node);
// Produces a DoubleRep (unboxed double in an FPR) for the child value.
// Three paths, by use kind:
//  - RealNumberUse: unbox the double; if that yields NaN, re-check that the value
//    was actually an int32 and convert it instead.
//  - NumberUse / NotCellUse: full JSValue-to-double conversion, with separate
//    64-bit (NaN-boxed) and 32_64 (tag/payload) code under USE(JSVALUE64);
//    NotCellUse additionally maps null->0, false->0, true->1, undefined->NaN.
//  - Int52RepUse (64-bit only): a simple int64-to-double conversion.
2130 void SpeculativeJIT::compileDoubleRep(Node* node)
2132 switch (node->child1().useKind()) {
2133 case RealNumberUse: {
2134 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2135 FPRTemporary result(this);
2137 JSValueRegs op1Regs = op1.jsValueRegs();
2138 FPRReg resultFPR = result.fpr();
// 64-bit: unbox without asserting it is a number — NaN below signals "not a double".
2141 GPRTemporary temp(this);
2142 GPRReg tempGPR = temp.gpr();
2143 m_jit.move(op1Regs.gpr(), tempGPR);
2144 m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2146 FPRTemporary temp(this);
2147 FPRReg tempFPR = temp.fpr();
2148 unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
// Only an actual double compares equal to itself; NaN here means the unbox failed,
// so fall through to the int32 path below.
2151 JITCompiler::Jump done = m_jit.branchDouble(
2152 JITCompiler::DoubleEqual, resultFPR, resultFPR);
2155 op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2156 m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2160 doubleResult(resultFPR, node);
2166 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
// If abstract interpretation already proved int32, a plain conversion suffices.
2168 SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2169 if (isInt32Speculation(possibleTypes)) {
2170 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2171 FPRTemporary result(this);
2172 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2173 doubleResult(result.fpr(), node);
2177 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2178 FPRTemporary result(this);
// --- 64-bit (NaN-boxed JSValue) conversion path ---
2181 GPRTemporary temp(this);
2183 GPRReg op1GPR = op1.gpr();
2184 GPRReg tempGPR = temp.gpr();
2185 FPRReg resultFPR = result.fpr();
2186 JITCompiler::JumpList done;
2188 JITCompiler::Jump isInteger = m_jit.branch64(
2189 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2191 if (node->child1().useKind() == NotCellUse) {
2192 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2193 JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
// null and false both convert to +0.
2195 static const double zero = 0;
2196 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2198 JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2199 done.append(isNull);
// Anything left must be a boolean; otherwise OSR-exit (value was a cell or worse).
2201 DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2202 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2204 JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2205 static const double one = 1;
2206 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2207 done.append(m_jit.jump());
2208 done.append(isFalse);
// undefined converts to NaN.
2210 isUndefined.link(&m_jit);
2211 static const double NaN = PNaN;
2212 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2213 done.append(m_jit.jump());
2215 isNumber.link(&m_jit);
2216 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2218 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2219 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2222 m_jit.move(op1GPR, tempGPR);
2223 unboxDouble(tempGPR, resultFPR);
2224 done.append(m_jit.jump());
2226 isInteger.link(&m_jit);
2227 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2229 #else // USE(JSVALUE64) -> this is the 32_64 case
// --- 32-bit (tag/payload JSValue) conversion path; mirrors the 64-bit logic ---
2230 FPRTemporary temp(this);
2232 GPRReg op1TagGPR = op1.tagGPR();
2233 GPRReg op1PayloadGPR = op1.payloadGPR();
2234 FPRReg tempFPR = temp.fpr();
2235 FPRReg resultFPR = result.fpr();
2236 JITCompiler::JumpList done;
2238 JITCompiler::Jump isInteger = m_jit.branch32(
2239 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2241 if (node->child1().useKind() == NotCellUse) {
2242 JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2243 JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2245 static const double zero = 0;
2246 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2248 JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2249 done.append(isNull);
2251 DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
// Boolean payload is 0 or 1; testing bit 0 distinguishes false from true.
2253 JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2254 static const double one = 1;
2255 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2256 done.append(m_jit.jump());
2257 done.append(isFalse);
2259 isUndefined.link(&m_jit);
2260 static const double NaN = PNaN;
2261 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2262 done.append(m_jit.jump());
2264 isNumber.link(&m_jit);
2265 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2267 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2268 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2271 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2272 done.append(m_jit.jump());
2274 isInteger.link(&m_jit);
2275 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2277 #endif // USE(JSVALUE64)
2279 doubleResult(resultFPR, node);
// Int52RepUse case (64-bit only): strict int52 -> double is a single instruction.
2285 SpeculateStrictInt52Operand value(this, node->child1());
2286 FPRTemporary result(this);
2288 GPRReg valueGPR = value.gpr();
2289 FPRReg resultFPR = result.fpr();
2291 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2293 doubleResult(resultFPR, node);
2296 #endif // USE(JSVALUE64)
2299 RELEASE_ASSERT_NOT_REACHED();
// Produces a boxed JSValue from an unboxed representation:
//  - DoubleRepUse: purify impure NaN if needed, then box the double.
//  - Int52RepUse (64-bit only): box the strict int52 as a JSValue.
2304 void SpeculativeJIT::compileValueRep(Node* node)
2306 switch (node->child1().useKind()) {
2307 case DoubleRepUse: {
2308 SpeculateDoubleOperand value(this, node->child1());
2309 JSValueRegsTemporary result(this);
2311 FPRReg valueFPR = value.fpr();
2312 JSValueRegs resultRegs = result.regs();
2314 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2315 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2316 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2317 // local was purified.
2318 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2319 m_jit.purifyNaN(valueFPR);
2321 boxDouble(valueFPR, resultRegs);
2323 jsValueResult(resultRegs, node);
// Int52RepUse case (64-bit only).
2329 SpeculateStrictInt52Operand value(this, node->child1());
2330 GPRTemporary result(this);
2332 GPRReg valueGPR = value.gpr();
2333 GPRReg resultGPR = result.gpr();
2335 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2337 jsValueResult(resultGPR, node);
2340 #endif // USE(JSVALUE64)
2343 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the Uint8Clamped range [0, 255] at compile time
// (used for constant stores into clamped-byte typed arrays).
2348 static double clampDoubleToByte(double d)
// Emits code that clamps the int32 in |result| to [0, 255] in place:
// negative values become 0, values above 0xff become 255.
2358 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2360 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2361 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
// Not in bounds and not too big => negative: zero the register.
2362 jit.xorPtr(result, result);
2363 MacroAssembler::Jump clamped = jit.jump();
2365 jit.move(JITCompiler::TrustedImm32(255), result);
2367 inBounds.link(&jit);
// Emits code that clamps the double in |source| to a byte in |result|:
// NaN and values <= 0 become 0, values > 255 become 255, otherwise
// round-half-up (add 0.5, truncate) to an integer in [0, 255].
2370 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2372 // Unordered compare so we pick up NaN
2373 static const double zero = 0;
2374 static const double byteMax = 255;
2375 static const double half = 0.5;
2376 jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2377 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2378 jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2379 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2381 jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2382 // FIXME: This should probably just use a floating point round!
2383 // https://bugs.webkit.org/show_bug.cgi?id=72054
2384 jit.addDouble(source, scratch);
2385 jit.truncateDoubleToInt32(scratch, result);
2386 MacroAssembler::Jump truncatedInt = jit.jump();
2388 tooSmall.link(&jit);
2389 jit.xorPtr(result, result);
2390 MacroAssembler::Jump zeroed = jit.jump();
2393 jit.move(JITCompiler::TrustedImm32(255), result);
2395 truncatedInt.link(&jit);
// Returns a jump taken when |indexGPR| is out of bounds for the typed array in
// |baseGPR|, or an unset Jump when the bounds check is provably unnecessary
// (PutByValAlias, or a foldable view with a constant in-bounds index).
2400 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2402 if (node->op() == PutByValAlias)
2403 return JITCompiler::Jump();
// When the array is a compile-time-foldable view, compare against its constant
// length instead of loading the length from memory.
2404 JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2405 m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2407 uint32_t length = view->length();
2408 Node* indexNode = m_jit.graph().child(node, 1).node();
2409 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2410 return JITCompiler::Jump();
2411 return m_jit.branch32(
2412 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
// General case: unsigned compare against the view's length field.
2414 return m_jit.branch32(
2415 MacroAssembler::AboveOrEqual, indexGPR,
2416 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a speculative typed-array bounds check: an out-of-bounds index OSR-exits.
// No code is emitted when jumpForTypedArrayOutOfBounds proved the check redundant.
2419 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2421 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2424 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// Compiles GetByVal on an integer typed array: bounds-check, load the element
// with the width/signedness implied by |type|, then pick a result representation.
// Uint32 loads may not fit in int32, so they may need an overflow speculation,
// an int52 result, or a double with a +2^32 fixup.
2427 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2429 ASSERT(isInt(type));
2431 SpeculateCellOperand base(this, node->child1());
2432 SpeculateStrictInt32Operand property(this, node->child2());
2433 StorageOperand storage(this, node->child3());
2435 GPRReg baseReg = base.gpr();
2436 GPRReg propertyReg = property.gpr();
2437 GPRReg storageReg = storage.gpr();
2439 GPRTemporary result(this);
2440 GPRReg resultReg = result.gpr();
2442 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2444 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
// Load with element-size-appropriate width; signed variants sign-extend to 32 bits.
2445 switch (elementSize(type)) {
2448 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2450 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2454 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2456 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2459 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Narrow or signed loads always fit in int32.
2464 if (elementSize(type) < 4 || isSigned(type)) {
2465 int32Result(resultReg, node);
2469 ASSERT(elementSize(type) == 4 && !isSigned(type));
// Uint32: speculate the top bit is clear so the value fits in int32.
2470 if (node->shouldSpeculateInt32()) {
2471 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2472 int32Result(resultReg, node);
// Uint32 as int52: zero-extend, no overflow possible.
2477 if (node->shouldSpeculateMachineInt()) {
2478 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2479 strictInt52Result(resultReg, node);
// Fallback: produce a double, adding 2^32 when the int32 view was negative.
2484 FPRTemporary fresult(this);
2485 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2486 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2487 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2488 positive.link(&m_jit);
2489 doubleResult(fresult.fpr(), node);
// Compiles PutByVal into an integer typed array. First materializes the value
// to store into a GPR according to the value's use kind (constant, Int32,
// Int52 on 64-bit, or DoubleRep), applying byte-clamping for Uint8Clamped
// arrays, then performs the (possibly elided) bounds check and the store.
2492 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2494 ASSERT(isInt(type));
2496 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2497 GPRReg storageReg = storage.gpr();
2499 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2502 GPRReg valueGPR = InvalidGPRReg;
// Constant value: fold the conversion (and clamping) at compile time.
2504 if (valueUse->isConstant()) {
2505 JSValue jsValue = valueUse->asJSValue();
2506 if (!jsValue.isNumber()) {
// A non-number constant here means the speculation can never hold.
2507 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2511 double d = jsValue.asNumber();
2512 if (isClamped(type)) {
2513 ASSERT(elementSize(type) == 1);
2514 d = clampDoubleToByte(d);
2516 GPRTemporary scratch(this);
2517 GPRReg scratchReg = scratch.gpr();
2518 m_jit.move(Imm32(toInt32(d)), scratchReg);
2519 value.adopt(scratch);
2520 valueGPR = scratchReg;
2522 switch (valueUse.useKind()) {
// Int32 value: copy to a scratch so clamping can mutate it freely.
2524 SpeculateInt32Operand valueOp(this, valueUse);
2525 GPRTemporary scratch(this);
2526 GPRReg scratchReg = scratch.gpr();
2527 m_jit.move(valueOp.gpr(), scratchReg);
2528 if (isClamped(type)) {
2529 ASSERT(elementSize(type) == 1);
2530 compileClampIntegerToByte(m_jit, scratchReg);
2532 value.adopt(scratch);
2533 valueGPR = scratchReg;
// Int52 value (64-bit only): clamp with 64-bit compares when required.
2539 SpeculateStrictInt52Operand valueOp(this, valueUse);
2540 GPRTemporary scratch(this);
2541 GPRReg scratchReg = scratch.gpr();
2542 m_jit.move(valueOp.gpr(), scratchReg);
2543 if (isClamped(type)) {
2544 ASSERT(elementSize(type) == 1);
2545 MacroAssembler::Jump inBounds = m_jit.branch64(
2546 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2547 MacroAssembler::Jump tooBig = m_jit.branch64(
2548 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2549 m_jit.move(TrustedImm32(0), scratchReg);
2550 MacroAssembler::Jump clamped = m_jit.jump();
2551 tooBig.link(&m_jit);
2552 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2553 clamped.link(&m_jit);
2554 inBounds.link(&m_jit);
2556 value.adopt(scratch);
2557 valueGPR = scratchReg;
2560 #endif // USE(JSVALUE64)
2562 case DoubleRepUse: {
2563 if (isClamped(type)) {
2564 ASSERT(elementSize(type) == 1);
2565 SpeculateDoubleOperand valueOp(this, valueUse);
2566 GPRTemporary result(this);
2567 FPRTemporary floatScratch(this);
2568 FPRReg fpr = valueOp.fpr();
2569 GPRReg gpr = result.gpr();
2570 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2571 value.adopt(result);
// Unclamped double: NaN stores 0; otherwise truncate, falling back to the
// toInt32 slow path when the inline truncation fails.
2574 SpeculateDoubleOperand valueOp(this, valueUse);
2575 GPRTemporary result(this);
2576 FPRReg fpr = valueOp.fpr();
2577 GPRReg gpr = result.gpr();
2578 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2579 m_jit.xorPtr(gpr, gpr);
2580 MacroAssembler::Jump fixed = m_jit.jump();
2581 notNaN.link(&m_jit);
2583 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2584 fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2586 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2589 value.adopt(result);
2596 RELEASE_ASSERT_NOT_REACHED();
// The value register must be distinct from base/property/storage so the store
// below reads consistent operands.
2601 ASSERT_UNUSED(valueGPR, valueGPR != property);
2602 ASSERT(valueGPR != base);
2603 ASSERT(valueGPR != storageReg);
2604 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
// In-bounds array modes turn the bounds check into an OSR exit; otherwise the
// jump skips the store (out-of-bounds typed-array writes are no-ops).
2605 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2606 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2607 outOfBounds = MacroAssembler::Jump();
2610 switch (elementSize(type)) {
2612 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2615 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2618 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2623 if (outOfBounds.isSet())
2624 outOfBounds.link(&m_jit);
// Compiles GetByVal on a float typed array: bounds-check, load a float32 or
// float64 element, widening float32 to double, and produce a double result.
2628 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2630 ASSERT(isFloat(type));
2632 SpeculateCellOperand base(this, node->child1());
2633 SpeculateStrictInt32Operand property(this, node->child2());
2634 StorageOperand storage(this, node->child3());
2636 GPRReg baseReg = base.gpr();
2637 GPRReg propertyReg = property.gpr();
2638 GPRReg storageReg = storage.gpr();
2640 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2642 FPRTemporary result(this);
2643 FPRReg resultReg = result.fpr();
2644 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2645 switch (elementSize(type)) {
// 4-byte elements: Float32 — load and widen to double in place.
2647 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2648 m_jit.convertFloatToDouble(resultReg, resultReg);
2651 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2655 RELEASE_ASSERT_NOT_REACHED();
2658 doubleResult(resultReg, node);
// Compiles PutByVal into a float typed array: narrow to float32 when the element
// size is 4, bounds-check (OSR exit for in-bounds modes, silent skip otherwise),
// then store.
2661 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2663 ASSERT(isFloat(type));
2665 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2666 GPRReg storageReg = storage.gpr();
2668 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2669 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2671 SpeculateDoubleOperand valueOp(this, valueUse);
2672 FPRTemporary scratch(this);
2673 FPRReg valueFPR = valueOp.fpr();
2674 FPRReg scratchFPR = scratch.fpr();
2676 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2678 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
// In-bounds modes treat out-of-bounds as a speculation failure; otherwise the
// jump bypasses the store (out-of-bounds typed-array writes are no-ops).
2679 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2680 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2681 outOfBounds = MacroAssembler::Jump();
2684 switch (elementSize(type)) {
// Float32 store: convert via a scratch FPR so the source double stays intact.
2686 m_jit.moveDouble(valueFPR, scratchFPR);
2687 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2688 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2692 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2695 RELEASE_ASSERT_NOT_REACHED();
2697 if (outOfBounds.isSet())
2698 outOfBounds.link(&m_jit);
// Emits the prototype-chain walk for instanceof, assuming the value is a cell.
// On exit, scratchReg holds the boolean result (boxed on 64-bit, raw 0/1 on 32-bit,
// per the visible move sequences). Speculates that the prototype is an object.
2702 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2704 // Check that prototype is an object.
2705 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2707 // Initialize scratchReg with the value being checked.
2708 m_jit.move(valueReg, scratchReg);
2710 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2711 MacroAssembler::Label loop(&m_jit);
2712 m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2713 m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2714 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
// Continue while the fetched prototype is still a cell (64-bit) / non-null (32-bit).
2716 m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2718 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2721 // No match - result is false.
2723 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2725 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2727 MacroAssembler::Jump putResult = m_jit.jump();
2729 isInstance.link(&m_jit);
2731 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2733 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2736 putResult.link(&m_jit);
// Compiles the InstanceOf node. UntypedUse handles possibly-non-cell values
// (non-cells are never instances, so the result is false without walking the
// chain); the typed path speculates both operands are cells and goes straight
// to the prototype-chain walk.
2739 void SpeculativeJIT::compileInstanceOf(Node* node)
2741 if (node->child1().useKind() == UntypedUse) {
2742 // It might not be a cell. Speculate less aggressively.
2743 // Or: it might only be used once (i.e. by us), so we get zero benefit
2744 // from speculating any more aggressively than we absolutely need to.
2746 JSValueOperand value(this, node->child1());
2747 SpeculateCellOperand prototype(this, node->child2());
2748 GPRTemporary scratch(this);
2749 GPRTemporary scratch2(this);
2751 GPRReg prototypeReg = prototype.gpr();
2752 GPRReg scratchReg = scratch.gpr();
2753 GPRReg scratch2Reg = scratch2.gpr();
2755 MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2756 GPRReg valueReg = value.jsValueRegs().payloadGPR();
// Non-cell operand: the answer is unconditionally false.
2757 moveFalseTo(scratchReg);
2759 MacroAssembler::Jump done = m_jit.jump();
2761 isCell.link(&m_jit);
2763 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2767 blessedBooleanResult(scratchReg, node);
// Cell-speculated path: both operands are known cells.
2771 SpeculateCellOperand value(this, node->child1());
2772 SpeculateCellOperand prototype(this, node->child2());
2774 GPRTemporary scratch(this);
2775 GPRTemporary scratch2(this);
2777 GPRReg valueReg = value.gpr();
2778 GPRReg prototypeReg = prototype.gpr();
2779 GPRReg scratchReg = scratch.gpr();
2780 GPRReg scratch2Reg = scratch2.gpr();
2782 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2784 blessedBooleanResult(scratchReg, node);
// Compiles ValueAdd (the generic JS `+`). Three strategies:
//  1. If either operand is known not to be a number, call the not-number
//     operation directly (string concat / toPrimitive path).
//  2. If both operands are int32 constants, fold the sum at compile time.
//  3. Otherwise emit the JITAddGenerator fast path with a slow-path call to
//     operationValueAdd. Register setup differs for 64-bit vs 32_64 builds.
2787 void SpeculativeJIT::compileValueAdd(Node* node)
2789 if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node())) {
2790 JSValueOperand left(this, node->child1());
2791 JSValueOperand right(this, node->child2());
2792 JSValueRegs leftRegs = left.jsValueRegs();
2793 JSValueRegs rightRegs = right.jsValueRegs();
2795 GPRTemporary result(this);
2796 JSValueRegs resultRegs = JSValueRegs(result.gpr());
2798 GPRTemporary resultTag(this);
2799 GPRTemporary resultPayload(this);
2800 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2803 callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
2804 m_jit.exceptionCheck();
2806 jsValueResult(resultRegs, node);
2810 bool leftIsConstInt32 = node->child1()->isInt32Constant();
2811 bool rightIsConstInt32 = node->child2()->isInt32Constant();
2813 // The DFG does not always fold the sum of 2 constant int operands together.
2814 if (leftIsConstInt32 && rightIsConstInt32) {
2816 GPRTemporary result(this);
2817 JSValueRegs resultRegs = JSValueRegs(result.gpr());
2819 GPRTemporary resultTag(this);
2820 GPRTemporary resultPayload(this);
2821 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
// Sum in 64-bit arithmetic so int32 + int32 overflow is still represented exactly.
2823 int64_t leftConst = node->child1()->asInt32();
2824 int64_t rightConst = node->child2()->asInt32();
2825 int64_t resultConst = leftConst + rightConst;
2826 m_jit.moveValue(JSValue(resultConst), resultRegs);
2827 jsValueResult(resultRegs, node);
// General case: operands are materialized lazily (only non-constants get regs).
2831 Optional<JSValueOperand> left;
2832 Optional<JSValueOperand> right;
2834 JSValueRegs leftRegs;
2835 JSValueRegs rightRegs;
2837 FPRTemporary leftNumber(this);
2838 FPRTemporary rightNumber(this);
2839 FPRReg leftFPR = leftNumber.fpr();
2840 FPRReg rightFPR = rightNumber.fpr();
2843 GPRTemporary result(this);
2844 JSValueRegs resultRegs = JSValueRegs(result.gpr());
2845 GPRTemporary scratch(this);
2846 GPRReg scratchGPR = scratch.gpr();
2847 FPRReg scratchFPR = InvalidFPRReg;
// 32_64: reuse the result tag register as the GPR scratch.
2849 GPRTemporary resultTag(this);
2850 GPRTemporary resultPayload(this);
2851 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2852 GPRReg scratchGPR = resultTag.gpr();
2853 FPRTemporary fprScratch(this);
2854 FPRReg scratchFPR = fprScratch.fpr();
2857 SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
2858 SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
2860 if (leftIsConstInt32)
2861 leftOperand.setConstInt32(node->child1()->asInt32());
2862 if (rightIsConstInt32)
2863 rightOperand.setConstInt32(node->child2()->asInt32());
// At most one side can be a constant here; both-constant was folded above.
2865 ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
2867 if (!leftOperand.isConst()) {
2868 left = JSValueOperand(this, node->child1());
2869 leftRegs = left->jsValueRegs();
2871 if (!rightOperand.isConst()) {
2872 right = JSValueOperand(this, node->child2());
2873 rightRegs = right->jsValueRegs();
2876 JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
2877 leftFPR, rightFPR, scratchGPR, scratchFPR);
2878 gen.generateFastPath(m_jit);
2880 ASSERT(gen.didEmitFastPath());
2881 gen.endJumpList().append(m_jit.jump());
2883 gen.slowPathJumpList().link(&m_jit);
2885 silentSpillAllRegisters(resultRegs);
// The slow-path call needs real registers for a constant operand; materialize
// the constant into the (already spilled) result registers.
2887 if (leftIsConstInt32) {
2888 leftRegs = resultRegs;
2889 int64_t leftConst = node->child1()->asInt32();
2890 m_jit.moveValue(JSValue(leftConst), leftRegs);
2891 } else if (rightIsConstInt32) {
2892 rightRegs = resultRegs;
2893 int64_t rightConst = node->child2()->asInt32();
2894 m_jit.moveValue(JSValue(rightConst), rightRegs);
2897 callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
2899 silentFillAllRegisters(resultRegs);
2900 m_jit.exceptionCheck();
2902 gen.endJumpList().link(&m_jit);
2903 jsValueResult(resultRegs, node);
// Compiles ArithAdd for typed operands, by binary use kind:
//  - Int32: immediate-operand forms when one child is a constant, otherwise a
//    register add; overflow either unchecked or guarded by a speculation check
//    with a recovery entry when the result register aliases an operand.
//  - Int52 (64-bit only): unchecked add when neither input can exceed int32
//    range, otherwise a branchAdd64 overflow speculation.
//  - DoubleRep: a plain double add.
2907 void SpeculativeJIT::compileArithAdd(Node* node)
2909 switch (node->binaryUseKind()) {
2911 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2913 if (node->child1()->isInt32Constant()) {
2914 int32_t imm1 = node->child1()->asInt32();
2915 SpeculateInt32Operand op2(this, node->child2());
2916 GPRTemporary result(this);
2918 if (!shouldCheckOverflow(node->arithMode())) {
2919 m_jit.move(op2.gpr(), result.gpr());
2920 m_jit.add32(Imm32(imm1), result.gpr());
2922 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2924 int32Result(result.gpr(), node);
// Mirror of the case above with the constant on the right.
2928 if (node->child2()->isInt32Constant()) {
2929 SpeculateInt32Operand op1(this, node->child1());
2930 int32_t imm2 = node->child2()->asInt32();
2931 GPRTemporary result(this);
2933 if (!shouldCheckOverflow(node->arithMode())) {
2934 m_jit.move(op1.gpr(), result.gpr());
2935 m_jit.add32(Imm32(imm2), result.gpr());
2937 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2939 int32Result(result.gpr(), node);
2943 SpeculateInt32Operand op1(this, node->child1());
2944 SpeculateInt32Operand op2(this, node->child2());
2945 GPRTemporary result(this, Reuse, op1, op2);
2947 GPRReg gpr1 = op1.gpr();
2948 GPRReg gpr2 = op2.gpr();
2949 GPRReg gprResult = result.gpr();
2951 if (!shouldCheckOverflow(node->arithMode())) {
2952 if (gpr1 == gprResult)
2953 m_jit.add32(gpr2, gprResult);
2955 m_jit.move(gpr2, gprResult);
2956 m_jit.add32(gpr1, gprResult);
2959 MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
// When the result reused an operand register, register a recovery so OSR exit
// can reconstruct the clobbered operand by undoing the add.
2961 if (gpr1 == gprResult)
2962 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2963 else if (gpr2 == gprResult)
2964 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2966 speculationCheck(Overflow, JSValueRegs(), 0, check);
2969 int32Result(gprResult, node);
// Int52RepUse case (64-bit only).
2975 ASSERT(shouldCheckOverflow(node->arithMode()));
2976 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2978 // Will we need an overflow check? If we can prove that neither input can be
2979 // Int52 then the overflow check will not be necessary.
2980 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2981 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2982 SpeculateWhicheverInt52Operand op1(this, node->child1());
2983 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2984 GPRTemporary result(this, Reuse, op1);
2985 m_jit.move(op1.gpr(), result.gpr());
2986 m_jit.add64(op2.gpr(), result.gpr());
2987 int52Result(result.gpr(), node, op1.format());
2991 SpeculateInt52Operand op1(this, node->child1());
2992 SpeculateInt52Operand op2(this, node->child2());
2993 GPRTemporary result(this);
2994 m_jit.move(op1.gpr(), result.gpr());
2996 Int52Overflow, JSValueRegs(), 0,
2997 m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2998 int52Result(result.gpr(), node);
3001 #endif // USE(JSVALUE64)
3003 case DoubleRepUse: {
3004 SpeculateDoubleOperand op1(this, node->child1());
3005 SpeculateDoubleOperand op2(this, node->child2());
3006 FPRTemporary result(this, op1, op2);
3008 FPRReg reg1 = op1.fpr();
3009 FPRReg reg2 = op2.fpr();
3010 m_jit.addDouble(reg1, reg2, result.fpr());
3012 doubleResult(result.fpr(), node);
3017 RELEASE_ASSERT_NOT_REACHED();
// Compiles MakeRope: inline-allocates a JSRopeString from 2 or 3 string fibers,
// combining their flags (8-bit-ness) and lengths, with overflow-checked length
// addition. Allocation failure falls back to operationMakeRope2/3 slow paths.
3022 void SpeculativeJIT::compileMakeRope(Node* node)
3024 ASSERT(node->child1().useKind() == KnownStringUse);
3025 ASSERT(node->child2().useKind() == KnownStringUse);
3026 ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3028 SpeculateCellOperand op1(this, node->child1());
3029 SpeculateCellOperand op2(this, node->child2());
3030 SpeculateCellOperand op3(this, node->child3());
3031 GPRTemporary result(this);
3032 GPRTemporary allocator(this);
3033 GPRTemporary scratch(this);
3037 opGPRs[0] = op1.gpr();
3038 opGPRs[1] = op2.gpr();
3039 if (node->child3()) {
3040 opGPRs[2] = op3.gpr();
3043 opGPRs[2] = InvalidGPRReg;
3046 GPRReg resultGPR = result.gpr();
3047 GPRReg allocatorGPR = allocator.gpr();
3048 GPRReg scratchGPR = scratch.gpr();
3050 JITCompiler::JumpList slowPath;
// Ropes have a destructor, so allocate from the destructor-aware allocator.
3051 MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3052 m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3053 emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
// Null value pointer marks the string as a rope; then install the fibers and
// null out the unused fiber slots.
3055 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3056 for (unsigned i = 0; i < numOpGPRs; ++i)
3057 m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3058 for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3059 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
// Seed combined flags (scratchGPR) and total length (allocatorGPR, now reused)
// from the first fiber.
3060 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3061 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3062 if (!ASSERT_DISABLED) {
3063 JITCompiler::Jump ok = m_jit.branch32(
3064 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3065 m_jit.abortWithReason(DFGNegativeStringLength);
// AND the flags and add the lengths of the remaining fibers; length overflow
// is an unrecoverable speculation failure.
3068 for (unsigned i = 1; i < numOpGPRs; ++i) {
3069 m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3071 Uncountable, JSValueSource(), nullptr,
3073 JITCompiler::Overflow,
3074 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3076 m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3077 m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3078 if (!ASSERT_DISABLED) {
3079 JITCompiler::Jump ok = m_jit.branch32(
3080 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3081 m_jit.abortWithReason(DFGNegativeStringLength);
3084 m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3086 switch (numOpGPRs) {
3088 addSlowPathGenerator(slowPathCall(
3089 slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3092 addSlowPathGenerator(slowPathCall(
3093 slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3096 RELEASE_ASSERT_NOT_REACHED();
3100 cellResult(resultGPR, node);
// Compiles ArithClz32 (Math.clz32): count-leading-zeros on an int32 operand.
3103 void SpeculativeJIT::compileArithClz32(Node* node)
3105 ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
3106 SpeculateInt32Operand value(this, node->child1());
3107 GPRTemporary result(this, Reuse, value);
3108 GPRReg valueReg = value.gpr();
3109 GPRReg resultReg = result.gpr();
3110 m_jit.countLeadingZeros32(valueReg, resultReg);
3111 int32Result(resultReg, node);
// Compiles the ArithSub node. Dispatches on the speculated use kind of the
// two children; the case labels (Int32Use, Int52RepUse, UntypedUse) and some
// braces / #if USE(JSVALUE64) guards fall outside this excerpt — TODO confirm
// against the full file.
3114 void SpeculativeJIT::compileArithSub(Node* node)
3116 switch (node->binaryUseKind()) {
// Int32 path. Int32 subtraction never needs a negative-zero check
// (fixup never requests one here), hence the assertion.
3118 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
// Fast form: right child is a compile-time int32 constant, folded into an
// immediate subtract.
3120 if (node->child2()->isInt32Constant()) {
3121 SpeculateInt32Operand op1(this, node->child1());
3122 int32_t imm2 = node->child2()->asInt32();
3123 GPRTemporary result(this);
// Unchecked arithmetic: plain move + sub32 with no overflow branch.
3125 if (!shouldCheckOverflow(node->arithMode())) {
3126 m_jit.move(op1.gpr(), result.gpr());
3127 m_jit.sub32(Imm32(imm2), result.gpr());
// Checked arithmetic: branchSub32 sets the Overflow condition; on overflow
// we OSR-exit via speculationCheck. The scratch register is required by the
// immediate-form branchSub32 on some targets.
3129 GPRTemporary scratch(this);
3130 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3133 int32Result(result.gpr(), node);
// Symmetric fast form: left child is the constant. Materialize the constant
// into the result register, then subtract the right operand from it.
3137 if (node->child1()->isInt32Constant()) {
3138 int32_t imm1 = node->child1()->asInt32();
3139 SpeculateInt32Operand op2(this, node->child2());
3140 GPRTemporary result(this);
3142 m_jit.move(Imm32(imm1), result.gpr());
3143 if (!shouldCheckOverflow(node->arithMode()))
3144 m_jit.sub32(op2.gpr(), result.gpr());
3146 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3148 int32Result(result.gpr(), node);
// General register-register int32 subtraction.
3152 SpeculateInt32Operand op1(this, node->child1());
3153 SpeculateInt32Operand op2(this, node->child2());
3154 GPRTemporary result(this);
3156 if (!shouldCheckOverflow(node->arithMode())) {
3157 m_jit.move(op1.gpr(), result.gpr());
3158 m_jit.sub32(op2.gpr(), result.gpr());
3160 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3162 int32Result(result.gpr(), node);
// Int52 path (guarded by USE(JSVALUE64) — the matching #endif is below).
// Negative zero is not representable in the int52 format, so only overflow
// can need checking.
3168 ASSERT(shouldCheckOverflow(node->arithMode()));
3169 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3171 // Will we need an overflow check? If we can prove that neither input can be
3172 // Int52 then the overflow check will not be necessary.
3173 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3174 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
// "Whichever" operands: accept either the shifted or unshifted int52
// representation, with op2 forced to match op1's format.
3175 SpeculateWhicheverInt52Operand op1(this, node->child1());
3176 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3177 GPRTemporary result(this, Reuse, op1);
3178 m_jit.move(op1.gpr(), result.gpr());
3179 m_jit.sub64(op2.gpr(), result.gpr());
3180 int52Result(result.gpr(), node, op1.format());
// Otherwise: do a checked 64-bit subtract and OSR-exit on Int52Overflow.
3184 SpeculateInt52Operand op1(this, node->child1());
3185 SpeculateInt52Operand op2(this, node->child2());
3186 GPRTemporary result(this);
3187 m_jit.move(op1.gpr(), result.gpr());
3189 Int52Overflow, JSValueRegs(), 0,
3190 m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3191 int52Result(result.gpr(), node);
3194 #endif // USE(JSVALUE64)
// Double path: plain IEEE subtraction, no checks needed.
3196 case DoubleRepUse: {
3197 SpeculateDoubleOperand op1(this, node->child1());
3198 SpeculateDoubleOperand op2(this, node->child2());
3199 FPRTemporary result(this, op1);
3201 FPRReg reg1 = op1.fpr();
3202 FPRReg reg2 = op2.fpr();
3203 m_jit.subDouble(reg1, reg2, result.fpr());
3205 doubleResult(result.fpr(), node);
// Untyped path: operands are arbitrary JSValues. Emit the JITSubGenerator
// inline fast path; fall back to operationValueSub on the slow path.
3210 JSValueOperand left(this, node->child1());
3211 JSValueOperand right(this, node->child2());
3213 JSValueRegs leftRegs = left.jsValueRegs();
3214 JSValueRegs rightRegs = right.jsValueRegs();
3216 FPRTemporary leftNumber(this);
3217 FPRTemporary rightNumber(this);
3218 FPRReg leftFPR = leftNumber.fpr();
3219 FPRReg rightFPR = rightNumber.fpr();
// The two resultRegs/scratch declarations below are presumably the
// JSVALUE64 and JSVALUE32_64 halves of an elided #if/#else — TODO confirm.
// On 64-bit a JSValue fits one GPR; on 32-bit it needs a tag/payload pair,
// and the tag register doubles as the GPR scratch.
3222 GPRTemporary result(this);
3223 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3224 GPRTemporary scratch(this);
3225 GPRReg scratchGPR = scratch.gpr();
3226 FPRReg scratchFPR = InvalidFPRReg;
3228 GPRTemporary resultTag(this);
3229 GPRTemporary resultPayload(this);
3230 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3231 GPRReg scratchGPR = resultTag.gpr();
3232 FPRTemporary fprScratch(this);
3233 FPRReg scratchFPR = fprScratch.fpr();
// Feed the abstract interpreter's result-type knowledge to the snippet
// generator so it can skip impossible type checks.
3236 SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
3237 SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
3239 JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3240 leftFPR, rightFPR, scratchGPR, scratchFPR);
3241 gen.generateFastPath(m_jit);
3243 ASSERT(gen.didEmitFastPath());
3244 gen.endJumpList().append(m_jit.jump());
// Slow path: spill all live registers, call the C++ helper, refill, and
// check for a thrown exception before rejoining the fast path.
3246 gen.slowPathJumpList().link(&m_jit);
3247 silentSpillAllRegisters(resultRegs);
3248 callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3249 silentFillAllRegisters(resultRegs);
3250 m_jit.exceptionCheck();
3252 gen.endJumpList().link(&m_jit);
3253 jsValueResult(resultRegs, node);
// Any other use kind indicates a fixup bug.
3258 RELEASE_ASSERT_NOT_REACHED();
// Compiles the ArithNegate node. As with ArithSub, the switch case labels
// and #if USE(JSVALUE64) guard for the Int52 path are elided in this excerpt.
3263 void SpeculativeJIT::compileArithNegate(Node* node)
3265 switch (node->child1().useKind()) {
// Int32 path.
3267 SpeculateInt32Operand op1(this, node->child1());
3268 GPRTemporary result(this);
3270 m_jit.move(op1.gpr(), result.gpr());
3272 // Note: there is no notion of being not used as a number, but someone
3273 // caring about negative zero.
3275 if (!shouldCheckOverflow(node->arithMode()))
3276 m_jit.neg32(result.gpr());
3277 else if (!shouldCheckNegativeZero(node->arithMode()))
3278 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
// Overflow AND negative-zero checked: if no bit of 0x7fffffff is set, the
// value is either 0 (negating yields -0) or INT32_MIN (negating overflows);
// both must OSR-exit, so one test covers both before the unchecked neg32.
3280 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3281 m_jit.neg32(result.gpr());
3284 int32Result(result.gpr(), node);
// Int52 path: always overflow-checked per the assertion below.
3290 ASSERT(shouldCheckOverflow(node->arithMode()));
// If abstract interpretation proves the input fits below the full Int52
// range, neg64 cannot overflow and only the negative-zero check remains.
3292 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3293 SpeculateWhicheverInt52Operand op1(this, node->child1());
3294 GPRTemporary result(this);
3295 GPRReg op1GPR = op1.gpr();
3296 GPRReg resultGPR = result.gpr();
3297 m_jit.move(op1GPR, resultGPR);
3298 m_jit.neg64(resultGPR);
3299 if (shouldCheckNegativeZero(node->arithMode())) {
3301 NegativeZero, JSValueRegs(), 0,
3302 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3304 int52Result(resultGPR, node, op1.format());
// Otherwise use a checked 64-bit negate, then the same zero test.
3308 SpeculateInt52Operand op1(this, node->child1());
3309 GPRTemporary result(this);
3310 GPRReg op1GPR = op1.gpr();
3311 GPRReg resultGPR = result.gpr();
3312 m_jit.move(op1GPR, resultGPR);
3314 Int52Overflow, JSValueRegs(), 0,
3315 m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3316 if (shouldCheckNegativeZero(node->arithMode())) {
3318 NegativeZero, JSValueRegs(), 0,
3319 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3321 int52Result(resultGPR, node);
3324 #endif // USE(JSVALUE64)
// Double path: sign-flip, no checks needed.
3326 case DoubleRepUse: {
3327 SpeculateDoubleOperand op1(this, node->child1());
3328 FPRTemporary result(this);
3330 m_jit.negateDouble(op1.fpr(), result.fpr());
3332 doubleResult(result.fpr(), node);
3337 RELEASE_ASSERT_NOT_REACHED();
// Compiles the ArithMul node. Case labels, braces, and the
// #if USE(JSVALUE64) guard around the Int52 path are elided in this excerpt.
3341 void SpeculativeJIT::compileArithMul(Node* node)
3343 switch (node->binaryUseKind()) {
// Int32 path.
3345 SpeculateInt32Operand op1(this, node->child1());
3346 SpeculateInt32Operand op2(this, node->child2());
3347 GPRTemporary result(this);
3349 GPRReg reg1 = op1.gpr();
3350 GPRReg reg2 = op2.gpr();
3352 // We can perform truncated multiplications if we get to this point, because if the
3353 // fixup phase could not prove that it would be safe, it would have turned us into
3354 // a double multiplication.
3355 if (!shouldCheckOverflow(node->arithMode())) {
3356 m_jit.move(reg1, result.gpr());
3357 m_jit.mul32(reg2, result.gpr());
// Checked multiply: OSR-exit on 32-bit overflow.
3360 Overflow, JSValueRegs(), 0,
3361 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3364 // Check for negative zero, if the users of this node care about such things.
// A zero product with either factor negative means the true result is -0,
// which int32 cannot represent, so we OSR-exit.
3365 if (shouldCheckNegativeZero(node->arithMode())) {
3366 MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3367 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3368 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3369 resultNonZero.link(&m_jit);
3372 int32Result(result.gpr(), node);
// Int52 path (inside USE(JSVALUE64)).
3378 ASSERT(shouldCheckOverflow(node->arithMode()));
3380 // This is super clever. We want to do an int52 multiplication and check the
3381 // int52 overflow bit. There is no direct hardware support for this, but we do
3382 // have the ability to do an int64 multiplication and check the int64 overflow
3383 // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3384 // registers, with the high 12 bits being sign-extended. We can do:
3388 // This will give us a left-shifted int52 (value is in high 52 bits, low 16
3389 // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3390 // multiplication overflows is identical to whether the 'a * b' 52-bit
3391 // multiplication overflows.
3393 // In our nomenclature, this is:
3395 // strictInt52(a) * int52(b) => int52
3397 // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
3400 // We don't care which of op1 or op2 serves as the left-shifted operand, so
3401 // we just do whatever is more convenient for op1 and have op2 do the
3402 // opposite. This ensures that we do at most one shift.
3404 SpeculateWhicheverInt52Operand op1(this, node->child1());
3405 SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3406 GPRTemporary result(this);
3408 GPRReg op1GPR = op1.gpr();
3409 GPRReg op2GPR = op2.gpr();
3410 GPRReg resultGPR = result.gpr();
3412 m_jit.move(op1GPR, resultGPR);
3414 Int52Overflow, JSValueRegs(), 0,
3415 m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
// Same negative-zero discipline as the int32 path, on 64-bit registers.
3417 if (shouldCheckNegativeZero(node->arithMode())) {
3418 MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3419 MacroAssembler::NonZero, resultGPR);
3421 NegativeZero, JSValueRegs(), 0,
3422 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3424 NegativeZero, JSValueRegs(), 0,
3425 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3426 resultNonZero.link(&m_jit);
3429 int52Result(resultGPR, node);
3432 #endif // USE(JSVALUE64)
// Double path: plain IEEE multiply.
3434 case DoubleRepUse: {
3435 SpeculateDoubleOperand op1(this, node->child1());
3436 SpeculateDoubleOperand op2(this, node->child2());
3437 FPRTemporary result(this, op1, op2);
3439 FPRReg reg1 = op1.fpr();
3440 FPRReg reg2 = op2.fpr();
3442 m_jit.mulDouble(reg1, reg2, result.fpr());
3444 doubleResult(result.fpr(), node);
// Untyped path.
3449 Edge& leftChild = node->child1();
3450 Edge& rightChild = node->child2();
// If either operand is provably not a number, the snippet fast path could
// never succeed — call operationValueMul directly.
3452 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3453 JSValueOperand left(this, leftChild);
3454 JSValueOperand right(this, rightChild);
3455 JSValueRegs leftRegs = left.jsValueRegs();
3456 JSValueRegs rightRegs = right.jsValueRegs();
// One-GPR vs tag/payload result registers — presumably an elided
// #if USE(JSVALUE64) / #else pair; TODO confirm against the full file.
3458 GPRTemporary result(this);
3459 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3461 GPRTemporary resultTag(this);
3462 GPRTemporary resultPayload(this);
3463 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3466 callOperation(operationValueMul, resultRegs, leftRegs, rightRegs);
3467 m_jit.exceptionCheck();
3469 jsValueResult(resultRegs, node);
// Snippet fast path. Operands are Optional because a positive-int32
// constant side is encoded into the SnippetOperand instead of a register.
3473 Optional<JSValueOperand> left;
3474 Optional<JSValueOperand> right;
3476 JSValueRegs leftRegs;
3477 JSValueRegs rightRegs;
3479 FPRTemporary leftNumber(this);
3480 FPRTemporary rightNumber(this);
3481 FPRReg leftFPR = leftNumber.fpr();
3482 FPRReg rightFPR = rightNumber.fpr();
// Same 64-bit / 32-bit result-register split as above.
3485 GPRTemporary result(this);
3486 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3487 GPRTemporary scratch(this);
3488 GPRReg scratchGPR = scratch.gpr();
3489 FPRReg scratchFPR = InvalidFPRReg;
3491 GPRTemporary resultTag(this);
3492 GPRTemporary resultPayload(this);
3493 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3494 GPRReg scratchGPR = resultTag.gpr();
3495 FPRTemporary fprScratch(this);
3496 FPRReg scratchFPR = fprScratch.fpr();
3499 SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3500 SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3502 if (leftChild->isInt32Constant())
3503 leftOperand.setConstInt32(leftChild->asInt32());
3504 if (rightChild->isInt32Constant())
3505 rightOperand.setConstInt32(rightChild->asInt32());
// Both sides constant should have been folded earlier; the generator
// requires at least one register operand.
3507 RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
// Only materialize a register operand for a side the generator cannot
// consume as an immediate (i.e. not a positive int32 constant).
3509 if (!leftOperand.isPositiveConstInt32()) {
3510 left = JSValueOperand(this, leftChild);
3511 leftRegs = left->jsValueRegs();
3513 if (!rightOperand.isPositiveConstInt32()) {
3514 right = JSValueOperand(this, rightChild);
3515 rightRegs = right->jsValueRegs();
3518 JITMulGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3519 leftFPR, rightFPR, scratchGPR, scratchFPR);
3520 gen.generateFastPath(m_jit);
3522 ASSERT(gen.didEmitFastPath());
3523 gen.endJumpList().append(m_jit.jump());
3525 gen.slowPathJumpList().link(&m_jit);
3526 silentSpillAllRegisters(resultRegs);
// A constant side never got registers; box it into the (free) result
// registers before calling the helper.
3528 if (leftOperand.isPositiveConstInt32()) {
3529 leftRegs = resultRegs;
3530 int64_t leftConst = leftOperand.asConstInt32();
3531 m_jit.moveValue(JSValue(leftConst), leftRegs);
3533 if (rightOperand.isPositiveConstInt32()) {
3534 rightRegs = resultRegs;
3535 int64_t rightConst = rightOperand.asConstInt32();
3536 m_jit.moveValue(JSValue(rightConst), rightRegs);
3539 callOperation(operationValueMul, resultRegs, leftRegs, rightRegs);
3541 silentFillAllRegisters(resultRegs);
3542 m_jit.exceptionCheck();
3544 gen.endJumpList().link(&m_jit);
3545 jsValueResult(resultRegs, node);
3550 RELEASE_ASSERT_NOT_REACHED();
// Compiles the ArithDiv node. The int32 path is per-architecture: x86 uses
// the fixed eax/edx idiv convention, ARM uses the sdiv instruction. Case
// labels and some braces are elided in this excerpt.
3555 void SpeculativeJIT::compileArithDiv(Node* node)
3557 switch (node->binaryUseKind()) {
3559 #if CPU(X86) || CPU(X86_64)
// x86 idiv requires the dividend in eax (sign-extended into edx) and
// leaves quotient in eax, remainder in edx — so pin those registers.
3560 SpeculateInt32Operand op1(this, node->child1());
3561 SpeculateInt32Operand op2(this, node->child2());
3562 GPRTemporary eax(this, X86Registers::eax);
3563 GPRTemporary edx(this, X86Registers::edx);
3564 GPRReg op1GPR = op1.gpr();
3565 GPRReg op2GPR = op2.gpr();
// If the divisor happens to live in eax/edx it would be clobbered; move it
// to a freshly allocated temp later (op2TempGPR).
3569 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3570 op2TempGPR = allocate();
3573 op2TempGPR = InvalidGPRReg;
3574 if (op1GPR == X86Registers::eax)
3575 temp = X86Registers::edx;
3577 temp = X86Registers::eax;
3580 ASSERT(temp != op1GPR);
3581 ASSERT(temp != op2GPR);
// temp = op2 + 1; values {0, -1} map to {1, 0}, so an unsigned "above 1"
// test isolates the two denominators that can fault or overflow idiv.
3583 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3585 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3587 JITCompiler::JumpList done;
// Checked mode: denominator 0 and the INT_MIN / -1 combination must
// OSR-exit (result is Infinity/NaN or overflows int32).
3588 if (shouldCheckOverflow(node->arithMode())) {
3589 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3590 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3592 // This is the case where we convert the result to an int after we're done, and we
3593 // already know that the denominator is either -1 or 0. So, if the denominator is
3594 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3595 // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3596 // are happy to fall through to a normal division, since we're just dividing
3597 // something by negative 1.
3599 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3600 m_jit.move(TrustedImm32(0), eax.gpr());
3601 done.append(m_jit.jump());
3603 notZero.link(&m_jit);
3604 JITCompiler::Jump notNeg2ToThe31 =
3605 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3606 m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3607 done.append(m_jit.jump());
3609 notNeg2ToThe31.link(&m_jit);
3612 safeDenominator.link(&m_jit);
3614 // If the user cares about negative zero, then speculate that we're not about
3615 // to produce negative zero.
// 0 / negative would be -0; only reachable when the numerator is zero.
3616 if (shouldCheckNegativeZero(node->arithMode())) {
3617 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3618 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3619 numeratorNonZero.link(&m_jit);
3622 if (op2TempGPR != InvalidGPRReg) {
3623 m_jit.move(op2GPR, op2TempGPR);
3624 op2GPR = op2TempGPR;
// Sign-extend eax into edx (cdq) then divide; quotient lands in eax,
// remainder in edx.
3627 m_jit.move(op1GPR, eax.gpr());
3628 m_jit.x86ConvertToDoubleWord32();
3629 m_jit.x86Div32(op2GPR);
3631 if (op2TempGPR != InvalidGPRReg)
3634 // Check that there was no remainder. If there had been, then we'd be obligated to
3635 // produce a double result instead.
3636 if (shouldCheckOverflow(node->arithMode()))
3637 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3640 int32Result(eax.gpr(), node);
3641 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
// ARM path: sdiv has no fault cases to special-case, but it truncates, so
// exactness is verified by multiplying back (below).
3642 SpeculateInt32Operand op1(this, node->child1());
3643 SpeculateInt32Operand op2(this, node->child2());
3644 GPRReg op1GPR = op1.gpr();
3645 GPRReg op2GPR = op2.gpr();
3646 GPRTemporary quotient(this);
3647 GPRTemporary multiplyAnswer(this);
3649 // If the user cares about negative zero, then speculate that we're not about
3650 // to produce negative zero.
3651 if (shouldCheckNegativeZero(node->arithMode())) {
3652 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3653 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3654 numeratorNonZero.link(&m_jit);
// ARM sdiv does not trap on zero, so division-by-zero is caught up front.
3657 if (shouldCheckOverflow(node->arithMode()))
3658 speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, op2GPR));
3660 m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3662 // Check that there was no remainder. If there had been, then we'd be obligated to
3663 // produce a double result instead.
// quotient * divisor must reproduce the dividend exactly (and not overflow)
// for the int32 result to be valid.
3664 if (shouldCheckOverflow(node->arithMode())) {
3665 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3666 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3669 int32Result(quotient.gpr(), node);
3671 RELEASE_ASSERT_NOT_REACHED();
// Double path: IEEE division handles all operand values.
3676 case DoubleRepUse: {
3677 SpeculateDoubleOperand op1(this, node->child1());
3678 SpeculateDoubleOperand op2(this, node->child2());
3679 FPRTemporary result(this, op1);
3681 FPRReg reg1 = op1.fpr();
3682 FPRReg reg2 = op2.fpr();
3683 m_jit.divDouble(reg1, reg2, result.fpr());
3685 doubleResult(result.fpr(), node);
3690 RELEASE_ASSERT_NOT_REACHED();
3695 void SpeculativeJIT::compileArithMod(Node* node)
3697 switch (node->binaryUseKind()) {
3699 // In the fast path, the dividend value could be the final result
3700 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3701 SpeculateStrictInt32Operand op1(this, node->child1());
3703 if (node->child2()->isInt32Constant()) {
3704 int32_t divisor = node->child2()->asInt32();
3705 if (divisor > 1 && hasOneBitSet(divisor)) {
3706 unsigned logarithm = WTF::fastLog2(static_cast<uint32_t>(divisor));
3707 GPRReg dividendGPR = op1.gpr();
3708 GPRTemporary result(this);
3709 GPRReg resultGPR = result.gpr();
3711 // This is what LLVM generates. It's pretty crazy. Here's my
3712 // attempt at understanding it.
3714 // First, compute either divisor - 1, or 0, depending on whether
3715 // the dividend is negative:
3717 // If dividend < 0: resultGPR = divisor - 1
3718 // If dividend >= 0: resultGPR = 0
3719 m_jit.move(dividendGPR, resultGPR);
3720 m_jit.rshift32(TrustedImm32(31), resultGPR);
3721 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3723 // Add in the dividend, so that:
3725 // If dividend < 0: resultGPR = dividend + divisor - 1
3726 // If dividend >= 0: resultGPR = dividend
3727 m_jit.add32(dividendGPR, resultGPR);
3729 // Mask so as to only get the *high* bits. This rounds down
3730 // (towards negative infinity) resultGPR to the nearest multiple
3731 // of divisor, so that:
3733 // If dividend < 0: resultGPR = floor((dividend + divisor - 1) / divisor)
3734 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3736 // Note that this can be simplified to:
3738 // If dividend < 0: resultGPR = ceil(dividend / divisor)
3739 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3741 // Note that if the dividend is negative, resultGPR will also be negative.
3742 // Regardless of the sign of dividend, resultGPR will be rounded towards
3743 // zero, because of how things are conditionalized.
3744 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3746 // Subtract resultGPR from dividendGPR, which yields the remainder:
3748 // resultGPR = dividendGPR - resultGPR
3749 m_jit.neg32(resultGPR);
3750 m_jit.add32(dividendGPR, resultGPR);
3752 if (shouldCheckNegativeZero(node->arithMode())) {
3753 // Check that we're not about to create negative zero.
3754 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3755 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3756 numeratorPositive.link(&m_jit);
3759 int32Result(resultGPR, node);
3764 #if CPU(X86) || CPU(X86_64)
3765 if (node->child2()->isInt32Constant()) {
3766 int32_t divisor = node->child2()->asInt32();
3767 if (divisor && divisor != -1) {
3768 GPRReg op1Gpr = op1.gpr();
3770 GPRTemporary eax(this, X86Registers::eax);
3771 GPRTemporary edx(this, X86Registers::edx);
3772 GPRTemporary scratch(this);
3773 GPRReg scratchGPR = scratch.gpr();
3776 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3777 op1SaveGPR = allocate();
3778 ASSERT(op1Gpr != op1SaveGPR);
3779 m_jit.move(op1Gpr, op1SaveGPR);
3781 op1SaveGPR = op1Gpr;
3782 ASSERT(op1SaveGPR != X86Registers::eax);
3783 ASSERT(op1SaveGPR != X86Registers::edx);
3785 m_jit.move(op1Gpr, eax.gpr());
3786 m_jit.move(TrustedImm32(divisor), scratchGPR);
3787 m_jit.x86ConvertToDoubleWord32();
3788 m_jit.x86Div32(scratchGPR);
3789 if (shouldCheckNegativeZero(node->arithMode())) {
3790 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3791 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3792 numeratorPositive.link(&m_jit);
3795 if (op1SaveGPR != op1Gpr)
3798 int32Result(edx.gpr(), node);
3804 SpeculateInt32Operand op2(this, node->child2());
3805 #if CPU(X86) || CPU(X86_64)
3806 GPRTemporary eax(this, X86Registers::eax);
3807 GPRTemporary edx(this, X86Registers::edx);