2 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JSArrowFunction.h"
42 #include "JSCInlines.h"
43 #include "JSEnvironmentRecord.h"
44 #include "JSLexicalEnvironment.h"
45 #include "LinkBuffer.h"
46 #include "ScopedArguments.h"
47 #include "ScratchRegisterAllocator.h"
48 #include "WriteBarrierBuffer.h"
49 #include <wtf/MathExtras.h>
51 namespace JSC { namespace DFG {
// SpeculativeJIT: the DFG's main code generator. This constructor binds the
// generator to its JITCompiler and wires up the structures used for OSR-exit
// bookkeeping (variable event stream, minified graph) and for abstract
// interpretation over the graph (m_state, m_interpreter).
// NOTE(review): several initializer-list lines and the constructor body are
// not visible in this extract.
53 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
57 , m_lastGeneratedNode(LastNodeType)
59 , m_generationInfo(m_jit.graph().frameRegisterCount())
60 , m_state(m_jit.graph())
61 , m_interpreter(m_jit.graph(), m_state)
62 , m_stream(&jit.jitCode()->variableEventStream)
63 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
// Destructor; its body is not visible in this extract.
67 SpeculativeJIT::~SpeculativeJIT()
// Fast-path inline allocation of a JSArray with the given structure and
// element count. On success resultGPR holds the new JSArray cell and
// storageGPR holds its butterfly; slow cases fall back to
// operationNewArrayWithSize via a custom slow-path generator so that the
// storage pointer is also reloaded.
// NOTE(review): brace lines and the #if USE(JSVALUE64)/#else markers around
// the PNaN-fill code are not visible in this extract.
71 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
73 ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
75 GPRTemporary scratch(this);
76 GPRTemporary scratch2(this);
77 GPRReg scratchGPR = scratch.gpr();
78 GPRReg scratch2GPR = scratch2.gpr();
// Never allocate a vector shorter than the base length, even for tiny arrays.
80 unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
82 JITCompiler::JumpList slowCases;
// Allocate the butterfly (indexing header + vector), then rewind the pointer
// from the end of the allocation back to the butterfly base.
85 emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
86 m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
87 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
// Initialize the butterfly's public length and vector length.
89 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
90 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
// Double-shape arrays must have the unused tail of the vector filled with
// PNaN (the hole value for double indexing shapes).
92 if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
94 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
95 for (unsigned i = numElements; i < vectorLength; ++i)
96 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
// 32-bit path: store PNaN as separate tag/payload words.
98 EncodedValueDescriptor value;
99 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
100 for (unsigned i = numElements; i < vectorLength; ++i) {
101 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
102 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
107 // I want a slow path that also loads out the storage pointer, and that's
108 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
109 // of work for a very small piece of functionality. :-/
110 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
111 slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
112 structure, numElements));
// Loads the argument count of the frame identified by inlineCallFrame into
// lengthGPR, excluding the implicit 'this' unless includeThis is true. For a
// non-varargs inlined frame the count is a compile-time constant; otherwise
// it is loaded from the frame's argument-count slot and decremented.
// NOTE(review): brace/else/return lines are not visible in this extract.
115 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
117 if (inlineCallFrame && !inlineCallFrame->isVarargs())
118 m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
120 VirtualRegister argumentCountRegister;
121 if (!inlineCallFrame)
122 argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
124 argumentCountRegister = inlineCallFrame->argumentCountRegister;
125 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
// The stored count includes 'this'; subtract it when the caller does not
// want it included.
127 m_jit.sub32(TrustedImm32(1), lengthGPR);
131 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
133 emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
// Loads the callee for the frame identified by `origin` into calleeGPR.
// Inlined closure calls reload the callee from its recovered virtual
// register; inlined non-closure calls have a constant callee cell; machine
// frames read the callee slot directly.
// NOTE(review): the loadPtr/move call heads and brace lines are not visible
// in this extract.
136 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
138 if (origin.inlineCallFrame) {
139 if (origin.inlineCallFrame->isClosureCall) {
141 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
145 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
149 m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
// Computes the address of the first argument for the frame identified by
// `origin` into startGPR (call-frame register plus the arguments-start
// offset, in bytes).
// NOTE(review): the addPtr call head and brace lines are not visible in this
// extract.
152 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
156 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
157 GPRInfo::callFrameRegister, startGPR);
// When OSR-exit fuzzing is enabled, emits code that counts executed exit
// checks and returns a jump that fires once the configured threshold
// (fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter) is reached. Returns an
// unset Jump when fuzzing is off.
160 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
162 if (!doOSRExitFuzzing())
163 return MacroAssembler::Jump();
165 MacroAssembler::Jump result;
// regT0 is saved/restored around the counter update so the surrounding
// code's register state is unaffected.
167 m_jit.pushToSave(GPRInfo::regT0);
168 m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
169 m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
170 m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
171 unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
172 unsigned at = Options::fireOSRExitFuzzAt();
173 if (at || atOrAfter) {
175 MacroAssembler::RelationalCondition condition;
// "at or after": keep going only while the counter is Below the threshold.
177 threshold = atOrAfter;
178 condition = MacroAssembler::Below;
// "at exactly": fire only when the counter equals the threshold.
181 condition = MacroAssembler::NotEqual;
183 MacroAssembler::Jump ok = m_jit.branch32(
184 condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
185 m_jit.popToRestore(GPRInfo::regT0);
186 result = m_jit.jump();
189 m_jit.popToRestore(GPRInfo::regT0);
// NOTE(review): the ok.link(...) and `return result;` lines are not visible
// in this extract.
// Records an OSR exit for `node`, taken via jumpToFail. If exit fuzzing is
// active, the fuzz-induced jump is merged with the caller's failure jump so
// that either path triggers the same exit record.
// NOTE(review): the `if (!m_compileOkay) return;` guard and else/brace lines
// are not visible in this extract.
194 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
198 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
199 if (fuzzJump.isSet()) {
200 JITCompiler::JumpList jumpsToFail;
201 jumpsToFail.append(fuzzJump);
202 jumpsToFail.append(jumpToFail);
203 m_jit.appendExitInfo(jumpsToFail);
205 m_jit.appendExitInfo(jumpToFail);
206 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// JumpList variant of speculationCheck: records one OSR exit reachable from
// every jump in jumpsToFail, merging in the fuzz jump when fuzzing is on.
// NOTE(review): guard and else/brace lines are not visible in this extract.
209 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
213 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
214 if (fuzzJump.isSet()) {
215 JITCompiler::JumpList myJumpsToFail;
216 myJumpsToFail.append(jumpsToFail);
217 myJumpsToFail.append(fuzzJump);
218 m_jit.appendExitInfo(myJumpsToFail);
220 m_jit.appendExitInfo(jumpsToFail);
221 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Records an OSR exit with no jumps yet and returns a placeholder (keyed by
// the exit's index) that lets the caller link jumps to it later. Returns an
// empty placeholder when compilation has been abandoned.
224 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
// NOTE(review): the `if (!m_compileOkay)` guard that presumably precedes
// this early return is not visible in this extract.
227 return OSRExitJumpPlaceholder();
228 unsigned index = m_jit.jitCode()->osrExit.size();
229 m_jit.appendExitInfo();
230 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
231 return OSRExitJumpPlaceholder(index);
234 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
236 return speculationCheck(kind, jsValueSource, nodeUse.node());
239 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
241 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
246 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
// speculationCheck variant that additionally registers a SpeculationRecovery
// — code that undoes side effects before the OSR exit runs. The recovery's
// index is stored on the exit record.
// NOTE(review): the `if (!m_compileOkay) return;` guard and brace lines are
// not visible in this extract.
249 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
253 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
254 m_jit.appendExitInfo(jumpToFail);
255 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
258 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
260 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
// Emits an invalidation point: an OSR exit (UncountableInvalidation, no
// value source) whose label is recorded as a watchpoint jump-replacement
// source, so the compiled code can later be invalidated by patching a jump
// over this site.
// NOTE(review): guard/brace lines are not visible in this extract.
263 void SpeculativeJIT::emitInvalidationPoint(Node* node)
267 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
268 m_jit.jitCode()->appendOSRExit(OSRExit(
269 UncountableInvalidation, JSValueSource(),
270 m_jit.graph().methodOfGettingAValueProfileFor(node),
271 this, m_stream->size()));
272 info.m_replacementSource = m_jit.watchpointLabel();
273 ASSERT(info.m_replacementSource.isSet());
// Unconditionally OSR-exits at this point and marks the compilation as
// not-okay (m_compileOkay = false) so no further code is generated along
// this path.
// NOTE(review): guard/brace lines are not visible in this extract.
277 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
281 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
282 m_compileOkay = false;
283 if (verboseCompilationEnabled())
284 dataLog("Bailing compilation.\n");
287 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
289 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
292 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
294 ASSERT(needsTypeCheck(edge, typesPassedThrough));
295 m_interpreter.filter(edge, typesPassedThrough);
296 speculationCheck(BadType, source, edge.node(), jumpToFail);
// Returns the set of registers currently holding live values, merged with
// the registers that inline-cache stubs may not use; used when spilling
// around calls and when building stub-info register sets.
// NOTE(review): the `RegisterSet result;` declaration, the per-register
// `result.set(...)` lines and the final `return result;` are not visible in
// this extract.
299 RegisterSet SpeculativeJIT::usedRegisters()
303 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
304 GPRReg gpr = GPRInfo::toRegister(i);
305 if (m_gprs.isInUse(gpr))
308 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
309 FPRReg fpr = FPRInfo::toRegister(i);
310 if (m_fprs.isInUse(fpr))
314 result.merge(RegisterSet::stubUnavailableRegisters());
319 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
321 m_slowPathGenerators.append(WTF::move(slowPathGenerator));
324 void SpeculativeJIT::runSlowPathGenerators()
326 for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
327 m_slowPathGenerators[i]->generate(this);
330 // On Windows we need to wrap fmod; on other platforms we can call it directly.
331 // On ARMv7 we assert that all function pointers have to low bit set (point to thumb code).
332 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
// Wrapper that gives fmod the JIT_OPERATION calling convention on platforms
// where calling libm's fmod directly would not work (stdcall convention, or
// the ARMv7 thumb-bit assertion). Its body and the #else/#endif lines are
// not visible in this extract.
333 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
338 #define fmodAsDFGOperation fmod
341 void SpeculativeJIT::clearGenerationInfo()
343 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
344 m_generationInfo[i] = GenerationInfo();
345 m_gprs = RegisterBank<GPRInfo>();
346 m_fprs = RegisterBank<FPRInfo>();
// Computes a SilentRegisterSavePlan for a GPR: how to spill the value held
// in `source` before a call without perturbing allocation state, and how to
// refill it afterwards. The plan depends on the value's DataFormat, its
// spill format, and whether the node is a constant (constants are refilled
// by rematerialization, not by loading from the stack).
// NOTE(review): many preprocessor markers (#if USE(JSVALUE64)/#else/#endif),
// braces, else-branches and the double-format RELEASE_ASSERT are not visible
// in this extract; claims below are limited to the visible lines.
349 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
351 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
352 Node* node = info.node();
353 DataFormat registerFormat = info.registerFormat();
354 ASSERT(registerFormat != DataFormatNone);
355 ASSERT(registerFormat != DataFormatDouble);
357 SilentSpillAction spillAction;
358 SilentFillAction fillAction;
// Values already backed by the stack (or constants) need no store.
360 if (!info.needsSpill())
361 spillAction = DoNothingForSpill;
364 ASSERT(info.gpr() == source);
365 if (registerFormat == DataFormatInt32)
366 spillAction = Store32Payload;
367 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
368 spillAction = StorePtr;
369 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
370 spillAction = Store64;
372 ASSERT(registerFormat & DataFormatJS);
373 spillAction = Store64;
375 #elif USE(JSVALUE32_64)
// 32-bit: a JS value occupies a tag/payload register pair; spill whichever
// half `source` holds.
376 if (registerFormat & DataFormatJS) {
377 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
378 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
380 ASSERT(info.gpr() == source);
381 spillAction = Store32Payload;
// Fill action selection: constants rematerialize; everything else reloads
// from the node's stack slot in the appropriate format.
386 if (registerFormat == DataFormatInt32) {
387 ASSERT(info.gpr() == source);
388 ASSERT(isJSInt32(info.registerFormat()));
389 if (node->hasConstant()) {
390 ASSERT(node->isInt32Constant());
391 fillAction = SetInt32Constant;
393 fillAction = Load32Payload;
394 } else if (registerFormat == DataFormatBoolean) {
396 RELEASE_ASSERT_NOT_REACHED();
397 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
398 fillAction = DoNothingForFill;
400 #elif USE(JSVALUE32_64)
401 ASSERT(info.gpr() == source);
402 if (node->hasConstant()) {
403 ASSERT(node->isBooleanConstant());
404 fillAction = SetBooleanConstant;
406 fillAction = Load32Payload;
408 } else if (registerFormat == DataFormatCell) {
409 ASSERT(info.gpr() == source);
410 if (node->hasConstant()) {
411 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
412 node->asCell(); // To get the assertion.
413 fillAction = SetCellConstant;
416 fillAction = LoadPtr;
418 fillAction = Load32Payload;
421 } else if (registerFormat == DataFormatStorage) {
422 ASSERT(info.gpr() == source);
423 fillAction = LoadPtr;
424 } else if (registerFormat == DataFormatInt52) {
// Int52 refill must match the on-stack spill format: shifted (Int52) loads
// directly, strict loads then shifts left.
425 if (node->hasConstant())
426 fillAction = SetInt52Constant;
427 else if (info.spillFormat() == DataFormatInt52)
429 else if (info.spillFormat() == DataFormatStrictInt52)
430 fillAction = Load64ShiftInt52Left;
431 else if (info.spillFormat() == DataFormatNone)
434 RELEASE_ASSERT_NOT_REACHED();
435 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
436 fillAction = Load64; // Make GCC happy.
439 } else if (registerFormat == DataFormatStrictInt52) {
440 if (node->hasConstant())
441 fillAction = SetStrictInt52Constant;
442 else if (info.spillFormat() == DataFormatInt52)
443 fillAction = Load64ShiftInt52Right;
444 else if (info.spillFormat() == DataFormatStrictInt52)
446 else if (info.spillFormat() == DataFormatNone)
449 RELEASE_ASSERT_NOT_REACHED();
450 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
451 fillAction = Load64; // Make GCC happy.
455 ASSERT(registerFormat & DataFormatJS);
457 ASSERT(info.gpr() == source);
458 if (node->hasConstant()) {
// Cell constants are trusted (the cell is kept alive by the graph).
459 if (node->isCellConstant())
460 fillAction = SetTrustedJSConstant;
462 fillAction = SetJSConstant;
463 } else if (info.spillFormat() == DataFormatInt32) {
464 ASSERT(registerFormat == DataFormatJSInt32);
465 fillAction = Load32PayloadBoxInt;
469 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
470 if (node->hasConstant())
471 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
472 else if (info.payloadGPR() == source)
473 fillAction = Load32Payload;
474 else { // Fill the Tag
475 switch (info.spillFormat()) {
476 case DataFormatInt32:
477 ASSERT(registerFormat == DataFormatJSInt32);
478 fillAction = SetInt32Tag;
481 ASSERT(registerFormat == DataFormatJSCell);
482 fillAction = SetCellTag;
484 case DataFormatBoolean:
485 ASSERT(registerFormat == DataFormatJSBoolean);
486 fillAction = SetBooleanTag;
489 fillAction = Load32Tag;
496 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Computes a SilentRegisterSavePlan for an FPR holding a DataFormatDouble
// value: spill with StoreDouble unless already backed by the stack, refill
// by rematerializing a constant or loading the double back.
// NOTE(review): #if USE(JSVALUE64)/#else/#endif markers, braces and
// else-branches are not visible in this extract.
499 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
501 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
502 Node* node = info.node();
503 ASSERT(info.registerFormat() == DataFormatDouble);
505 SilentSpillAction spillAction;
506 SilentFillAction fillAction;
508 if (!info.needsSpill())
509 spillAction = DoNothingForSpill;
511 ASSERT(!node->hasConstant());
512 ASSERT(info.spillFormat() == DataFormatNone);
513 ASSERT(info.fpr() == source);
514 spillAction = StoreDouble;
518 if (node->hasConstant()) {
519 node->asNumber(); // To get the assertion.
520 fillAction = SetDoubleConstant;
522 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
523 fillAction = LoadDouble;
525 #elif USE(JSVALUE32_64)
526 ASSERT(info.registerFormat() == DataFormatDouble);
527 if (node->hasConstant()) {
528 node->asNumber(); // To get the assertion.
529 fillAction = SetDoubleConstant;
531 fillAction = LoadDouble;
534 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Executes a plan's spill action: stores the planned register into the
// node's stack slot (tag word, payload word, pointer, 64-bit word or
// double), or does nothing.
// NOTE(review): the `case` labels and `break`s between the stores are not
// visible in this extract.
537 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
539 switch (plan.spillAction()) {
540 case DoNothingForSpill:
543 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
546 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
549 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
553 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
557 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
560 RELEASE_ASSERT_NOT_REACHED();
// Executes a plan's fill action: rematerializes a constant into the planned
// register, or reloads the value from the node's stack slot in the planned
// format. `canTrample` is a scratch GPR the caller guarantees is dead; it is
// used here to materialize double constants via a GPR move.
// NOTE(review): many `case` labels, `break`s and #if USE(JSVALUE64)/#else
// markers are not visible in this extract.
564 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
566 #if USE(JSVALUE32_64)
567 UNUSED_PARAM(canTrample);
569 switch (plan.fillAction()) {
570 case DoNothingForFill:
572 case SetInt32Constant:
573 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
// Int52 constants are kept left-shifted by int52ShiftAmount in registers.
576 case SetInt52Constant:
577 m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
579 case SetStrictInt52Constant:
580 m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
582 #endif // USE(JSVALUE64)
583 case SetBooleanConstant:
584 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
586 case SetCellConstant:
587 m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
590 case SetTrustedJSConstant:
591 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
594 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
// 64-bit double constant: materialize the bit pattern in a GPR, then move
// it into the FPR.
596 case SetDoubleConstant:
597 m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
598 m_jit.move64ToDouble(canTrample, plan.fpr());
// Re-box a spilled int32 payload as a JSValue by or-ing in the number tag.
600 case Load32PayloadBoxInt:
601 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
602 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
604 case Load32PayloadConvertToInt52:
605 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
606 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
607 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
609 case Load32PayloadSignExtend:
610 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
611 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
614 case SetJSConstantTag:
615 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
617 case SetJSConstantPayload:
618 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
621 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
624 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
627 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
// 32-bit double constant: load from the constant pool.
629 case SetDoubleConstant:
630 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
634 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
637 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
640 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
644 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
646 case Load64ShiftInt52Right:
647 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
648 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
650 case Load64ShiftInt52Left:
651 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
652 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
656 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
659 RELEASE_ASSERT_NOT_REACHED();
// Emits the branch taken when the indexing-type byte (already loaded into
// tempGPR, which is clobbered here) does not match the wanted `shape`,
// honoring the array class: must-be-array checks IsArray|shape,
// must-not-be-array checks shape with IsArray masked in (so an array fails),
// possibly-array ignores the IsArray bit.
// NOTE(review): the Array::Array case label and some brace lines are not
// visible in this extract.
663 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
665 switch (arrayMode.arrayClass()) {
666 case Array::OriginalArray: {
668 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
669 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
675 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
676 return m_jit.branch32(
677 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
679 case Array::NonArray:
680 case Array::OriginalNonArray:
681 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
682 return m_jit.branch32(
683 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
685 case Array::PossiblyArray:
686 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
687 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
690 RELEASE_ASSERT_NOT_REACHED();
691 return JITCompiler::Jump();
// Emits the set of branches taken when tempGPR (holding the indexing-type
// byte; clobbered) does not match the given ArrayMode. Simple shapes defer
// to the single-shape overload; ArrayStorage/SlowPutArrayStorage use a
// subtract-and-compare so the SlowPut case accepts the
// [ArrayStorageShape, SlowPutArrayStorageShape] range.
// NOTE(review): case labels (Int32/Double/Array), result.append(...) call
// heads, default case and the final return are not visible in this extract.
694 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
696 JITCompiler::JumpList result;
698 switch (arrayMode.type()) {
700 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
703 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
705 case Array::Contiguous:
706 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
708 case Array::Undecided:
709 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
711 case Array::ArrayStorage:
712 case Array::SlowPutArrayStorage: {
713 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
715 if (arrayMode.isJSArray()) {
716 if (arrayMode.isSlowPut()) {
// Must be an array: fail if the IsArray bit is clear, then range-check the
// shape against [ArrayStorageShape, SlowPutArrayStorageShape].
719 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
720 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
721 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
724 MacroAssembler::Above, tempGPR,
725 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
728 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
730 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
// Non-array (or possibly-array) path: ignore the IsArray bit.
733 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
734 if (arrayMode.isSlowPut()) {
735 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
738 MacroAssembler::Above, tempGPR,
739 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
743 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Emits the check demanded by the node's ArrayMode (CheckArray). No
// conversion happens here — Arrayify handles that. Indexed shapes check the
// indexing-type byte; DirectArguments/ScopedArguments and typed arrays check
// the cell's type byte; the fall-through path compares the structure's
// ClassInfo. If the abstract state already proves the mode, this is a no-op.
// NOTE(review): several case labels, `return`s, braces and the
// speculationCheck call head are not visible in this extract.
754 void SpeculativeJIT::checkArray(Node* node)
756 ASSERT(node->arrayMode().isSpecific());
757 ASSERT(!node->arrayMode().doesConversion());
759 SpeculateCellOperand base(this, node->child1());
760 GPRReg baseReg = base.gpr();
// Already proven by abstract interpretation: nothing to emit.
762 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
763 noResult(m_currentNode);
767 const ClassInfo* expectedClassInfo = 0;
769 switch (node->arrayMode().type()) {
771 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
775 case Array::Contiguous:
776 case Array::Undecided:
777 case Array::ArrayStorage:
778 case Array::SlowPutArrayStorage: {
779 GPRTemporary temp(this);
780 GPRReg tempGPR = temp.gpr();
781 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
783 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
784 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
786 noResult(m_currentNode);
789 case Array::DirectArguments:
790 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
791 noResult(m_currentNode);
793 case Array::ScopedArguments:
794 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
795 noResult(m_currentNode);
798 speculateCellTypeWithoutTypeFiltering(
799 node->child1(), baseReg,
800 typeForTypedArrayType(node->arrayMode().typedArrayType()));
801 noResult(m_currentNode);
805 RELEASE_ASSERT(expectedClassInfo);
807 GPRTemporary temp(this);
808 GPRTemporary temp2(this);
809 m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
811 BadType, JSValueSource::unboxedCell(baseReg), node,
813 MacroAssembler::NotEqual,
814 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
815 MacroAssembler::TrustedImmPtr(expectedClassInfo)));
817 noResult(m_currentNode);
// Emits the fast-path check for Arrayify/ArrayifyToStructure and defers the
// actual storage conversion to ArrayifySlowPathGenerator. For
// ArrayifyToStructure the fast path is a weak structure compare; otherwise
// it re-checks the indexing type, and a scratch structure register is
// allocated for the slow path.
// NOTE(review): brace/else lines, the load8 call head and the ASSERT after
// the structure branch are not visible in this extract.
820 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
822 ASSERT(node->arrayMode().doesConversion());
824 GPRTemporary temp(this);
825 GPRTemporary structure;
826 GPRReg tempGPR = temp.gpr();
827 GPRReg structureGPR = InvalidGPRReg;
// Only non-ArrayifyToStructure needs a register to hold the structure on
// the slow path; adopt a real temporary into the default-constructed one.
829 if (node->op() != ArrayifyToStructure) {
830 GPRTemporary realStructure(this);
831 structure.adopt(realStructure);
832 structureGPR = structure.gpr();
835 // We can skip all that comes next if we already have array storage.
836 MacroAssembler::JumpList slowPath;
838 if (node->op() == ArrayifyToStructure) {
839 slowPath.append(m_jit.branchWeakStructure(
840 JITCompiler::NotEqual,
841 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
845 MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
847 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
850 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
851 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
853 noResult(m_currentNode);
856 void SpeculativeJIT::arrayify(Node* node)
858 ASSERT(node->arrayMode().isSpecific());
860 SpeculateCellOperand base(this, node->child1());
862 if (!node->child2()) {
863 arrayify(node, base.gpr(), InvalidGPRReg);
867 SpeculateInt32Operand property(this, node->child2());
869 arrayify(node, base.gpr(), property.gpr());
// Fills `edge`'s value as raw storage (e.g. a butterfly pointer) into a GPR
// and returns it. Spilled storage is reloaded from the stack; a value
// already in DataFormatStorage is returned as-is; anything else must be a
// cell and goes through fillSpeculateCell.
// NOTE(review): `return gpr;` lines, the default case and closing braces are
// not visible in this extract.
872 GPRReg SpeculativeJIT::fillStorage(Edge edge)
874 VirtualRegister virtualRegister = edge->virtualRegister();
875 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
877 switch (info.registerFormat()) {
878 case DataFormatNone: {
879 if (info.spillFormat() == DataFormatStorage) {
880 GPRReg gpr = allocate();
881 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
882 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
883 info.fillStorage(*m_stream, gpr);
887 // Must be a cell; fill it as a cell and then return the pointer.
888 return fillSpeculateCell(edge);
891 case DataFormatStorage: {
892 GPRReg gpr = info.gpr();
898 return fillSpeculateCell(edge);
// Releases the register/value references for all of the node's children:
// either every entry of its var-args child list, or child1..child3.
// NOTE(review): the else branch structure, the `use(child...)` calls and
// early returns are not visible in this extract.
902 void SpeculativeJIT::useChildren(Node* node)
904 if (node->flags() & NodeHasVarArgs) {
905 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
906 if (!!m_jit.graph().m_varArgChildren[childIdx])
907 use(m_jit.graph().m_varArgChildren[childIdx]);
910 Edge child1 = node->child1();
912 ASSERT(!node->child2() && !node->child3());
917 Edge child2 = node->child2();
919 ASSERT(!node->child3());
924 Edge child3 = node->child3();
// Compiles the 'in' operator. When the property child is a constant JSString
// whose value is an atomic string, emits a patchable inline cache (an
// InRecord with operationInOptimize as its slow path); otherwise boxes the
// key and calls the generic operationGenericIn.
// NOTE(review): brace lines, the base.use()/key.use() calls, flushRegisters
// and the callOperation head are not visible in this extract.
933 SpeculateCellOperand base(this, node->child2());
934 GPRReg baseGPR = base.gpr();
936 if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
937 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
938 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
940 GPRTemporary result(this);
941 GPRReg resultGPR = result.gpr();
// The patchable jump plus the label after it delimit the inline cache's
// fast path for later repatching.
945 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
946 MacroAssembler::Label done = m_jit.label();
948 // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
949 // we can cast it to const AtomicStringImpl* safely.
950 auto slowPath = slowPathCall(
951 jump.m_jump, this, operationInOptimize,
952 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
953 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
// Populate the stub info so the IC can be patched and so OSR knows which
// registers are live across it.
955 stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
956 stubInfo->codeOrigin = node->origin.semantic;
957 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
958 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
959 #if USE(JSVALUE32_64)
960 stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
962 stubInfo->patch.usedRegisters = usedRegisters();
963 stubInfo->patch.spillMode = NeedToSpill;
965 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
966 addSlowPathGenerator(WTF::move(slowPath));
970 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: box the key and call the runtime.
975 JSValueOperand key(this, node->child1());
976 JSValueRegs regs = key.jsValueRegs();
978 GPRFlushedCallResult result(this);
979 GPRReg resultGPR = result.gpr();
986 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
988 m_jit.exceptionCheck();
989 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
992 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
994 unsigned branchIndexInBlock = detectPeepHoleBranch();
995 if (branchIndexInBlock != UINT_MAX) {
996 Node* branchNode = m_block->at(branchIndexInBlock);
998 ASSERT(node->adjustedRefCount() == 1);
1000 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1002 m_indexInBlock = branchIndexInBlock;
1003 m_currentNode = branchNode;
1008 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1013 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1015 unsigned branchIndexInBlock = detectPeepHoleBranch();
1016 if (branchIndexInBlock != UINT_MAX) {
1017 Node* branchNode = m_block->at(branchIndexInBlock);
1019 ASSERT(node->adjustedRefCount() == 1);
1021 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1023 m_indexInBlock = branchIndexInBlock;
1024 m_currentNode = branchNode;
1029 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Maps a DataFormat value to a printable name, for dump().
// NOTE(review): the entries of `strings` are not visible in this extract.
1034 static const char* dataFormatString(DataFormat format)
1036 // These values correspond to the DataFormat enum.
1037 const char* strings[] = {
1055 return strings[format];
// Debug dump of the allocator state: both register banks and, for every
// virtual register, its register/spill formats and the machine register (if
// any) currently holding it.
// NOTE(review): the m_gprs.dump()/m_fprs.dump() call lines, the alive-check
// branch and closing braces are not visible in this extract.
1058 void SpeculativeJIT::dump(const char* label)
1061 dataLogF("<%s>\n", label);
1063 dataLogF("  gprs:\n");
1065 dataLogF("  fprs:\n");
1067 dataLogF("  VirtualRegisters:\n");
1068 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1069 GenerationInfo& info = m_generationInfo[i];
1071 dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1073 dataLogF("    % 3d:[__][__]", i);
1074 if (info.registerFormat() == DataFormatDouble)
1075 dataLogF(":fpr%d\n", info.fpr());
1076 else if (info.registerFormat() != DataFormatNone
// On 32-bit, JS-format values live in a tag/payload pair, so the single-gpr
// printout below does not apply to them.
1077 #if USE(JSVALUE32_64)
1078 && !(info.registerFormat() & DataFormatJS)
1081 ASSERT(info.gpr() != InvalidGPRReg);
1082 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1087 dataLogF("</%s>\n", label);
// GPRTemporary constructors: RAII-style holders for a general-purpose
// register scratch allocated from the SpeculativeJIT's register file.
// Default-constructed: owns no register until adopt() is called.
1090 GPRTemporary::GPRTemporary()
1092 , m_gpr(InvalidGPRReg)
// Allocate any free GPR.
1096 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1098 , m_gpr(InvalidGPRReg)
1100 m_gpr = m_jit->allocate();
// Allocate one specific GPR (e.g. a register an instruction requires).
1103 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1105 , m_gpr(InvalidGPRReg)
1107 m_gpr = m_jit->allocate(specific);
1110 #if USE(JSVALUE32_64)
// 32-bit only: try to reuse one half (tag or payload word) of a JSValue
// operand's register pair when the operand is not boxed as a double and its
// node has no other users; otherwise fall back to a fresh allocation.
1111 GPRTemporary::GPRTemporary(
1112     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1114 , m_gpr(InvalidGPRReg)
1116 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1117 m_gpr = m_jit->reuse(op1.gpr(which));
1119 m_gpr = m_jit->allocate();
1121 #endif // USE(JSVALUE32_64)
// JSValueRegsTemporary: scratch register(s) wide enough to hold a JSValue —
// one GPR on 64-bit, a tag/payload GPR pair on 32-bit. The constructor body
// and the #if split inside regs() are among the lines elided from this
// listing.
1123 JSValueRegsTemporary::JSValueRegsTemporary() { }
1125 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1135 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1137 JSValueRegs JSValueRegsTemporary::regs()
// 64-bit representation: the whole JSValue fits in one GPR.
1140 return JSValueRegs(m_gpr.gpr());
// 32-bit representation: separate tag and payload registers.
1142 return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
// Transfers ownership of another temporary's GPR into this (previously
// empty) temporary, leaving the donor empty. Used to extend a scratch
// register's lifetime beyond the scope it was allocated in.
1146 void GPRTemporary::adopt(GPRTemporary& other)
// This temporary must be empty, and the donor must actually own a register.
1149 ASSERT(m_gpr == InvalidGPRReg);
1150 ASSERT(other.m_jit);
1151 ASSERT(other.m_gpr != InvalidGPRReg);
1152 m_jit = other.m_jit;
1153 m_gpr = other.m_gpr;
// Clear the donor so its destructor will not release the adopted register.
1155 other.m_gpr = InvalidGPRReg;
// FPRTemporary constructors: RAII holders for a floating-point scratch
// register, preferring to reuse an operand's FPR when its node has no other
// users (avoids a spill/allocate).
1158 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1160 , m_fpr(InvalidFPRReg)
1162 m_fpr = m_jit->fprAllocate();
// Reuse op1's FPR if possible, else allocate fresh.
1165 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1167 , m_fpr(InvalidFPRReg)
1169 if (m_jit->canReuse(op1.node()))
1170 m_fpr = m_jit->reuse(op1.fpr());
1172 m_fpr = m_jit->fprAllocate();
// Two-operand variant: reuse either operand's FPR; the third case handles
// both operands being the same node/register (reusable only jointly).
1175 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1177 , m_fpr(InvalidFPRReg)
1179 if (m_jit->canReuse(op1.node()))
1180 m_fpr = m_jit->reuse(op1.fpr());
1181 else if (m_jit->canReuse(op2.node()))
1182 m_fpr = m_jit->reuse(op2.fpr());
1183 else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1184 m_fpr = m_jit->reuse(op1.fpr());
1186 m_fpr = m_jit->fprAllocate();
1189 #if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand may hold an unboxed double in an FPR; reuse
// it when the node has no other users.
1190 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1192 , m_fpr(InvalidFPRReg)
1194 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1195 m_fpr = m_jit->reuse(op1.fpr());
1197 m_fpr = m_jit->fprAllocate();
// Fused double-compare-and-branch: compares the two speculated-double
// children and branches directly to the taken block. The fall-through
// jump to notTaken is among the lines elided from this listing.
1201 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1203 BasicBlock* taken = branchNode->branchData()->taken.block;
1204 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1206 SpeculateDoubleOperand op1(this, node->child1());
1207 SpeculateDoubleOperand op2(this, node->child2());
1209 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-equality branch: speculates that both children are objects,
// then branches on pointer equality. When the masquerades-as-undefined
// watchpoint is valid, a plain is-object check per child suffices; otherwise
// each child additionally gets a MasqueradesAsUndefined type-info-flag
// check. Some interior lines (braces, the swap of taken/notTaken, #if/#else
// structure) are elided from this listing.
1213 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1215 BasicBlock* taken = branchNode->branchData()->taken.block;
1216 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1218 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// If the taken block is next in emission order, invert the condition so the
// taken case can fall through.
1220 if (taken == nextBlock()) {
1221 condition = MacroAssembler::NotEqual;
1222 BasicBlock* tmp = taken;
1227 SpeculateCellOperand op1(this, node->child1());
1228 SpeculateCellOperand op2(this, node->child2());
1230 GPRReg op1GPR = op1.gpr();
1231 GPRReg op2GPR = op2.gpr();
// Fast path: watchpoint guarantees no object masquerades as undefined, so
// only is-object checks are needed (and only where CFA hasn't proven them).
1233 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1234 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1236 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1238 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1240 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
// Slow path: also speculate neither object has the MasqueradesAsUndefined
// type-info flag set.
1243 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1245 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1246 m_jit.branchIfNotObject(op1GPR));
1248 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1250 MacroAssembler::NonZero,
1251 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1252 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1254 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1256 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1257 m_jit.branchIfNotObject(op2GPR));
1259 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1261 MacroAssembler::NonZero,
1262 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1263 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Both proven objects: strict equality is just pointer equality.
1266 branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean-compare-and-branch. Constant-folds one side into an
// immediate when a child is a boolean constant; otherwise compares the two
// boolean operand GPRs. Fall-through jump and closing braces are among the
// elided lines.
1270 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1272 BasicBlock* taken = branchNode->branchData()->taken.block;
1273 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1275 // The branch instruction will branch to the taken block.
1276 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1277 if (taken == nextBlock()) {
1278 condition = JITCompiler::invert(condition);
1279 BasicBlock* tmp = taken;
// Left child is a constant: compare immediate vs. right operand.
1284 if (node->child1()->isBooleanConstant()) {
1285 bool imm = node->child1()->asBoolean();
1286 SpeculateBooleanOperand op2(this, node->child2());
1287 branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
// Right child is a constant: compare left operand vs. immediate.
1288 } else if (node->child2()->isBooleanConstant()) {
1289 SpeculateBooleanOperand op1(this, node->child1());
1290 bool imm = node->child2()->asBoolean();
1291 branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
// General case: register-register compare.
1293 SpeculateBooleanOperand op1(this, node->child1());
1294 SpeculateBooleanOperand op2(this, node->child2());
1295 branch32(condition, op1.gpr(), op2.gpr(), taken);
// Fused int32-compare-and-branch, structurally identical to the boolean
// variant: constant-fold a constant child into an immediate, otherwise
// compare two operand GPRs. Fall-through jump and closing braces are among
// the elided lines.
1301 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1303 BasicBlock* taken = branchNode->branchData()->taken.block;
1304 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1306 // The branch instruction will branch to the taken block.
1307 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1308 if (taken == nextBlock()) {
1309 condition = JITCompiler::invert(condition);
1310 BasicBlock* tmp = taken;
// Left child constant: immediate-vs-register compare.
1315 if (node->child1()->isInt32Constant()) {
1316 int32_t imm = node->child1()->asInt32();
1317 SpeculateInt32Operand op2(this, node->child2());
1318 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
// Right child constant: register-vs-immediate compare.
1319 } else if (node->child2()->isInt32Constant()) {
1320 SpeculateInt32Operand op1(this, node->child1());
1321 int32_t imm = node->child2()->asInt32();
1322 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
// General case: register-register compare.
1324 SpeculateInt32Operand op1(this, node->child1());
1325 SpeculateInt32Operand op2(this, node->child2());
1326 branch32(condition, op1.gpr(), op2.gpr(), taken);
1332 // Returns true if the compare is fused with a subsequent branch.
// Dispatcher for compare+branch fusion: picks the specialized peephole
// compiler based on the compare's use kinds (Int32, Int52, DoubleRep,
// Boolean, Object, Object-vs-ObjectOrOther, null/undefined), falling back
// to the generic nonSpeculativePeepholeBranch. On fusion it explicitly
// consumes the children and advances the block cursor past the Branch.
// NOTE(review): the #if USE(JSVALUE64) guard around the Int52 case and
// several braces/returns are among the lines elided from this listing.
1333 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1335 // Fused compare & branch.
1336 unsigned branchIndexInBlock = detectPeepHoleBranch();
1337 if (branchIndexInBlock != UINT_MAX) {
1338 Node* branchNode = m_block->at(branchIndexInBlock);
1340 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1341 // so can be no intervening nodes to also reference the compare.
1342 ASSERT(node->adjustedRefCount() == 1);
1344 if (node->isBinaryUseKind(Int32Use))
1345 compilePeepHoleInt32Branch(node, branchNode, condition);
// Int52 path (64-bit value representation only).
1347 else if (node->isBinaryUseKind(Int52RepUse))
1348 compilePeepHoleInt52Branch(node, branchNode, condition);
1349 #endif // USE(JSVALUE64)
1350 else if (node->isBinaryUseKind(DoubleRepUse))
1351 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1352 else if (node->op() == CompareEq) {
1353 if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1354 // Use non-peephole comparison, for now.
1357 if (node->isBinaryUseKind(BooleanUse))
1358 compilePeepHoleBooleanBranch(node, branchNode, condition);
1359 else if (node->isBinaryUseKind(ObjectUse))
1360 compilePeepHoleObjectEquality(node, branchNode);
// Mixed object/object-or-other: order the edges so the pure-object child
// is always the first argument.
1361 else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1362 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1363 else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1364 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
// If one side is proven null/undefined, test only the other side.
1365 else if (!needsTypeCheck(node->child1(), SpecOther))
1366 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1367 else if (!needsTypeCheck(node->child2(), SpecOther))
1368 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1370 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1374 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// Fusion consumed the compare: mark the children used and resume code
// generation after the Branch node.
1378 use(node->child1());
1379 use(node->child2());
1380 m_indexInBlock = branchIndexInBlock;
1381 m_currentNode = branchNode;
// Records in the variable event stream that a node's value has come alive
// (for OSR exit reconstruction). Nodes without a virtual register have no
// OSR-visible value and are skipped.
1387 void SpeculativeJIT::noticeOSRBirth(Node* node)
1389 if (!node->hasVirtualRegister())
1392 VirtualRegister virtualRegister = node->virtualRegister();
1393 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1395 info.noticeOSRBirth(*m_stream, node, virtualRegister);
// A MovHint tells OSR exit which bytecode local now holds the child's
// value; no machine code is emitted, only variable-event bookkeeping.
1398 void SpeculativeJIT::compileMovHint(Node* node)
1400 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1402 Node* child = node->child1().node();
// Ensure the child's value is known to the event stream before mapping it.
1403 noticeOSRBirth(child);
1405 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Abandons compilation of the current path: emits a crash-with-reason stub
// at the current point and resets generation state. m_compileOkay is set
// back to true so that code generation of subsequent blocks can proceed
// (the emitted abort makes this path unreachable at runtime).
1408 void SpeculativeJIT::bail(AbortReason reason)
1410 if (verboseCompilationEnabled())
1411 dataLog("Bailing compilation.\n");
1412 m_compileOkay = true;
1413 m_jit.abortWithReason(reason, m_lastGeneratedNode);
1414 clearGenerationInfo();
// Generates machine code for m_block: records the block head label, aborts
// CFA-unreachable blocks, replays variablesAtHead into the variable event
// stream, then compiles each node in order, interleaving abstract
// interpretation (m_interpreter) with code generation. Ends by asserting
// that no GenerationInfo is left alive (children were used correctly).
// Several braces/early-returns are elided from this listing.
1417 void SpeculativeJIT::compileCurrentBlock()
1419 ASSERT(m_compileOkay);
1424 ASSERT(m_block->isReachable);
1426 m_jit.blockHeads()[m_block->index] = m_jit.label();
1428 if (!m_block->intersectionOfCFAHasVisited) {
1429 // Don't generate code for basic blocks that are unreachable according to CFA.
1430 // But to be sure that nobody has generated a jump to this block, drop in a
1432 m_jit.abortWithReason(DFGUnreachableBasicBlock);
1436 m_stream->appendAndLog(VariableEvent::reset());
// Sanity checks at block entry (call frame, tag registers, argument count).
1438 m_jit.jitAssertHasValidCallFrame();
1439 m_jit.jitAssertTagsInPlace();
1440 m_jit.jitAssertArgumentCountSane();
1443 m_state.beginBasicBlock(m_block);
// Tell the OSR exit machinery where each live variable lives at block head.
1445 for (size_t i = m_block->variablesAtHead.size(); i--;) {
1446 int operand = m_block->variablesAtHead.operandForIndex(i);
1447 Node* node = m_block->variablesAtHead[i];
1449 continue; // No need to record dead SetLocal's.
1451 VariableAccessData* variable = node->variableAccessData();
1453 if (!node->refCount())
1454 continue; // No need to record dead SetLocal's.
1455 format = dataFormatFor(variable->flushFormat());
1456 m_stream->appendAndLog(
1457 VariableEvent::setLocal(
1458 VirtualRegister(operand),
1459 variable->machineLocal(),
1463 m_origin = NodeOrigin();
// Main per-node code generation loop.
1465 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1466 m_currentNode = m_block->at(m_indexInBlock);
1468 // We may have hit a contradiction that the CFA was aware of but that the JIT
1469 // didn't cause directly.
1470 if (!m_state.isValid()) {
1471 bail(DFGBailedAtTopOfBlock);
1475 m_interpreter.startExecuting();
1476 m_jit.setForNode(m_currentNode);
1477 m_origin = m_currentNode->origin;
// Under validation, only allow exits where the mayExit analysis agrees.
1478 if (validationEnabled())
1479 m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1480 m_lastGeneratedNode = m_currentNode->op();
1482 ASSERT(m_currentNode->shouldGenerate());
1484 if (verboseCompilationEnabled()) {
1486 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1487 (int)m_currentNode->index(),
1488 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1492 m_jit.jitAssertNoException();
1494 compile(m_currentNode);
// Keep the minified graph (used by OSR exit) in sync with generated nodes.
1496 if (belongsInMinifiedGraph(m_currentNode->op()))
1497 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1499 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1500 m_jit.clearRegisterAllocationOffsets();
1503 if (!m_compileOkay) {
1504 bail(DFGBailedAtEndOfNode);
1508 // Make sure that the abstract state is rematerialized for the next node.
1509 m_interpreter.executeEffects(m_indexInBlock);
1512 // Perform the most basic verification that children have been used correctly.
1513 if (!ASSERT_DISABLED) {
1514 for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1515 GenerationInfo& info = m_generationInfo[index];
1516 RELEASE_ASSERT(!info.alive());
1521 // If we are making type predictions about our arguments then
1522 // we need to check that they are correct on function entry.
// Emits an entry-time type check for each argument according to its flush
// format. The two case groups below are, respectively, the 64-bit value
// representation (tag-register compares via branch64/branchTest64) and the
// 32-bit representation (tag-word compares via branch32); the #if/#else
// preprocessor guards separating them are among the lines elided from this
// listing.
1523 void SpeculativeJIT::checkArgumentTypes()
1525 ASSERT(!m_currentNode);
1526 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1528 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1529 Node* node = m_jit.graph().m_arguments[i];
1531 // The argument is dead. We don't do any checks for such arguments.
1535 ASSERT(node->op() == SetArgument);
1536 ASSERT(node->shouldGenerate());
1538 VariableAccessData* variableAccessData = node->variableAccessData();
1539 FlushFormat format = variableAccessData->flushFormat();
// FlushedJSValue imposes no constraint — any value is acceptable.
1541 if (format == FlushedJSValue)
1544 VirtualRegister virtualRegister = variableAccessData->local();
1546 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// --- 64-bit checks: compare the boxed value against the tag registers. ---
1550 case FlushedInt32: {
1551 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1554 case FlushedBoolean: {
1555 GPRTemporary temp(this);
1556 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
// XOR with ValueFalse maps false->0, true->1; anything else leaves high bits.
1557 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1558 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
// Cell check: a cell has no tag bits set.
1562 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1566 RELEASE_ASSERT_NOT_REACHED();
// --- 32-bit checks: compare the tag word against the expected tag. ---
1571 case FlushedInt32: {
1572 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1575 case FlushedBoolean: {
1576 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1580 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1584 RELEASE_ASSERT_NOT_REACHED();
1590 m_origin = NodeOrigin();
// Top-level driver: checks argument types once at entry, then compiles every
// block in index order. The return statement is among the lines elided from
// this listing.
1593 bool SpeculativeJIT::compile()
1595 checkArgumentTypes();
1597 ASSERT(!m_currentNode);
1598 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1599 m_jit.setForBlockIndex(blockIndex);
1600 m_block = m_jit.graph().block(blockIndex);
1601 compileCurrentBlock();
// Collects the code labels of all OSR-target blocks so linkOSREntries can
// register them after the link buffer is finalized.
1607 void SpeculativeJIT::createOSREntries()
1609 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1610 BasicBlock* block = m_jit.graph().block(blockIndex);
1613 if (!block->isOSRTarget)
1616 // Currently we don't have OSR entry trampolines. We could add them
// The block head label doubles as the OSR entry point.
1618 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// After linking, registers each collected OSR entry head with the JIT code
// (in the same block order createOSREntries used), then optionally dumps
// the entries for debugging.
1622 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1624 unsigned osrEntryIndex = 0;
1625 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1626 BasicBlock* block = m_jit.graph().block(blockIndex);
1629 if (!block->isOSRTarget)
1631 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
// Every collected head must have been consumed — orders must match.
1633 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1635 if (verboseCompilationEnabled()) {
1636 DumpContext dumpContext;
1637 dataLog("OSR Entries:\n");
1638 for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1639 dataLog("    ", inContext(entryData, &dumpContext), "\n");
1640 if (!dumpContext.isEmpty())
1641 dumpContext.dump(WTF::dataFile());
// Stores a double into a double-typed array butterfly. Speculates the value
// is a "full real number" (not NaN — NaN would need boxing semantics), then:
// PutByValAlias stores directly; in-bounds mode checks against public
// length; out-of-bounds-capable mode grows the public length up to vector
// length and falls back to a C++ helper beyond that. Some braces and the
// out-of-bounds #else structure are elided from this listing.
1645 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1647 Edge child3 = m_jit.graph().varArgChild(node, 2);
1648 Edge child4 = m_jit.graph().varArgChild(node, 3);
1650 ArrayMode arrayMode = node->arrayMode();
1652 GPRReg baseReg = base.gpr();
1653 GPRReg propertyReg = property.gpr();
1655 SpeculateDoubleOperand value(this, child3);
1657 FPRReg valueReg = value.fpr();
// NaN check: value must equal itself (DoubleNotEqualOrUnordered catches NaN).
1660 JSValueRegs(), child3, SpecFullRealNumber,
1662 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1667 StorageOperand storage(this, child4);
1668 GPRReg storageReg = storage.gpr();
// Alias mode: bounds already proven by a prior access; just store.
1670 if (node->op() == PutByValAlias) {
1671 // Store the value to the array.
1672 GPRReg propertyReg = property.gpr();
1673 FPRReg valueReg = value.fpr();
1674 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1676 noResult(m_currentNode);
1680 GPRTemporary temporary;
1681 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1683 MacroAssembler::Jump slowCase;
// In-bounds mode: index >= publicLength is a speculation failure.
1685 if (arrayMode.isInBounds()) {
1687 OutOfBounds, JSValueRegs(), 0,
1688 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
// Otherwise: in-bounds fast path, and grow publicLength while the index is
// still within vectorLength; beyond vectorLength is the slow case.
1690 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1692 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1694 if (!arrayMode.isOutOfBounds())
1695 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Append: new publicLength = index + 1.
1697 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1698 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1700 inBounds.link(&m_jit);
1703 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
// Out-of-bounds-capable mode: the slow case calls a strict/sloppy helper.
1710 if (arrayMode.isOutOfBounds()) {
1711 addSlowPathGenerator(
1714 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1715 NoResult, baseReg, propertyReg, valueReg));
1718 noResult(m_currentNode, UseChildrenCalledExplicitly);
// String.charCodeAt fast path: bounds-check the index against the string's
// length, then load an 8-bit or 16-bit character from the backing
// StringImpl depending on its is8Bit flag, producing an int32.
1721 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1723 SpeculateCellOperand string(this, node->child1());
1724 SpeculateStrictInt32Operand index(this, node->child2());
1725 StorageOperand storage(this, node->child3());
1727 GPRReg stringReg = string.gpr();
1728 GPRReg indexReg = index.gpr();
1729 GPRReg storageReg = storage.gpr();
1731 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1733 // unsigned comparison so we can filter out negative indices and indices that are too large
1734 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1736 GPRTemporary scratch(this);
1737 GPRReg scratchReg = scratch.gpr();
// scratchReg first holds the StringImpl*, then is overwritten with the char.
1739 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1741 // Load the character into scratchReg
1742 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1744 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1745 JITCompiler::Jump cont8Bit = m_jit.jump();
1747 is16Bit.link(&m_jit);
1749 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1751 cont8Bit.link(&m_jit);
1753 int32Result(scratchReg, m_currentNode);
// string[index] fast path: loads the character, maps codes < 0x100 through
// the VM's singleCharacterStrings table (no allocation), and routes big
// characters and out-of-bounds indices to slow paths. When out-of-bounds
// access is possible and the string prototype chain is sane, OOB loads can
// return undefined directly (SaneStringGetByVal), guarded by lazily-added
// structure-transition watchpoints. The #if/#else structure separating the
// 64-bit and 32-bit result paths is partially elided from this listing.
1756 void SpeculativeJIT::compileGetByValOnString(Node* node)
1758 SpeculateCellOperand base(this, node->child1());
1759 SpeculateStrictInt32Operand property(this, node->child2());
1760 StorageOperand storage(this, node->child3());
1761 GPRReg baseReg = base.gpr();
1762 GPRReg propertyReg = property.gpr();
1763 GPRReg storageReg = storage.gpr();
1765 GPRTemporary scratch(this);
1766 GPRReg scratchReg = scratch.gpr();
1767 #if USE(JSVALUE32_64)
// 32-bit: an out-of-bounds-capable access produces a full JSValue, so a tag
// register is needed alongside the payload (scratchReg).
1768 GPRTemporary resultTag;
1769 GPRReg resultTagReg = InvalidGPRReg;
1770 if (node->arrayMode().isOutOfBounds()) {
1771 GPRTemporary realResultTag(this);
1772 resultTag.adopt(realResultTag);
1773 resultTagReg = resultTag.gpr();
1777 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1779 // unsigned comparison so we can filter out negative indices and indices that are too large
1780 JITCompiler::Jump outOfBounds = m_jit.branch32(
1781 MacroAssembler::AboveOrEqual, propertyReg,
1782 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
// In-bounds mode: OOB is a speculation failure rather than a slow path.
1783 if (node->arrayMode().isInBounds())
1784 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1786 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1788 // Load the character into scratchReg
1789 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1791 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1792 JITCompiler::Jump cont8Bit = m_jit.jump();
1794 is16Bit.link(&m_jit);
1796 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Characters >= 0x100 have no precomputed single-character string.
1798 JITCompiler::Jump bigCharacter =
1799 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1801 // 8 bit string values don't need the isASCII check.
1802 cont8Bit.link(&m_jit);
// Index into singleCharacterStrings: scale by pointer size (4 or 8 bytes).
1804 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1805 m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1806 m_jit.loadPtr(scratchReg, scratchReg);
1808 addSlowPathGenerator(
1810 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1812 if (node->arrayMode().isOutOfBounds()) {
1813 #if USE(JSVALUE32_64)
1814 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1817 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1818 if (globalObject->stringPrototypeChainIsSane()) {
1819 // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1820 // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1821 // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1822 // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1823 // indexed properties either.
1824 // https://bugs.webkit.org/show_bug.cgi?id=144668
// Watch the prototypes so a later mutation invalidates this fast path.
1825 m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1826 m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1829 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1830 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1832 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1833 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1834 baseReg, propertyReg));
// Prototype chain not sane: full generic getByVal helper on OOB.
1838 addSlowPathGenerator(
1840 outOfBounds, this, operationGetByValStringInt,
1841 scratchReg, baseReg, propertyReg));
1843 addSlowPathGenerator(
1845 outOfBounds, this, operationGetByValStringInt,
1846 resultTagReg, scratchReg, baseReg, propertyReg));
// OOB-capable access yields a JSValue; in-bounds access always yields a cell.
1851 jsValueResult(scratchReg, m_currentNode);
1853 jsValueResult(resultTagReg, scratchReg, m_currentNode);
1856 cellResult(scratchReg, m_currentNode);
// String.fromCharCode fast path for a single int32 argument: char codes
// < 0xff are looked up in the VM's precomputed singleCharacterStrings table;
// larger codes, and table misses (null entry), go to the C++ helper.
1859 void SpeculativeJIT::compileFromCharCode(Node* node)
1861 SpeculateStrictInt32Operand property(this, node->child1());
1862 GPRReg propertyReg = property.gpr();
1863 GPRTemporary smallStrings(this);
1864 GPRTemporary scratch(this);
1865 GPRReg scratchReg = scratch.gpr();
1866 GPRReg smallStringsReg = smallStrings.gpr();
1868 JITCompiler::JumpList slowCases;
1869 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1870 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1871 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null table entry means the string hasn't been materialized yet.
1873 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1874 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1875 cellResult(scratchReg, m_currentNode);
// Classifies how a node's value is currently represented, for ValueToInt32
// codegen: already an integer, a JSValue needing dynamic dispatch, or a
// format (boolean/cell) that makes the conversion a guaranteed speculation
// failure.
1878 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1880 VirtualRegister virtualRegister = node->virtualRegister();
1881 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1883 switch (info.registerFormat()) {
1884 case DataFormatStorage:
1885 RELEASE_ASSERT_NOT_REACHED();
// Unboxed boolean/cell can never be a number here: terminate this path.
1887 case DataFormatBoolean:
1888 case DataFormatCell:
1889 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1890 return GeneratedOperandTypeUnknown;
1892 case DataFormatNone:
1893 case DataFormatJSCell:
1895 case DataFormatJSBoolean:
1896 case DataFormatJSDouble:
1897 return GeneratedOperandJSValue;
1899 case DataFormatJSInt32:
1900 case DataFormatInt32:
1901 return GeneratedOperandInteger;
1904 RELEASE_ASSERT_NOT_REACHED();
1905 return GeneratedOperandTypeUnknown;
// ValueToInt32: converts the child to an int32 using the cheapest path for
// its use kind — Int52 truncation, double truncation with a slow-path call
// to toInt32, or a generic JSValue path that dispatches on the current
// representation. The JSValue path appears twice: once for the 64-bit value
// representation (tag-register tests) and once for 32-bit (tag-word tests);
// the preprocessor guards and several case labels/braces are among the
// lines elided from this listing.
1909 void SpeculativeJIT::compileValueToInt32(Node* node)
1911 switch (node->child1().useKind()) {
// Int52 (64-bit only): truncation is just a 32-bit zero-extend.
1914 SpeculateStrictInt52Operand op1(this, node->child1());
1915 GPRTemporary result(this, Reuse, op1);
1916 GPRReg op1GPR = op1.gpr();
1917 GPRReg resultGPR = result.gpr();
1918 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1919 int32Result(resultGPR, node, DataFormatInt32);
1922 #endif // USE(JSVALUE64)
// Double: try hardware truncation; fall back to the toInt32 C helper.
1924 case DoubleRepUse: {
1925 GPRTemporary result(this);
1926 SpeculateDoubleOperand op1(this, node->child1());
1927 FPRReg fpr = op1.fpr();
1928 GPRReg gpr = result.gpr();
1929 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1931 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1933 int32Result(gpr, node);
// Generic JSValue path: dispatch on the value's current representation.
1939 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1940 case GeneratedOperandInteger: {
1941 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1942 GPRTemporary result(this, Reuse, op1);
1943 m_jit.move(op1.gpr(), result.gpr());
1944 int32Result(result.gpr(), node, op1.format());
1947 case GeneratedOperandJSValue: {
1948 GPRTemporary result(this);
// --- 64-bit JSValue representation ---
1950 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1952 GPRReg gpr = op1.gpr();
1953 GPRReg resultGpr = result.gpr();
1954 FPRTemporary tempFpr(this);
1955 FPRReg fpr = tempFpr.fpr();
// Values >= tagTypeNumberRegister are boxed int32s.
1957 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1958 JITCompiler::JumpList converted;
// NumberUse: anything non-numeric is a speculation failure.
1960 if (node->child1().useKind() == NumberUse) {
1962 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1964 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
// NotCellUse (presumably — the guard is elided): booleans convert to 0/1,
// other non-cells to 0.
1966 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1969 JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1971 // It's not a cell: so true turns into 1 and all else turns into 0.
1972 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1973 converted.append(m_jit.jump());
1975 isNumber.link(&m_jit);
1978 // First, if we get here we have a double encoded as a JSValue
1979 m_jit.move(gpr, resultGpr);
1980 unboxDouble(resultGpr, fpr);
// Call the toInt32 helper with registers spilled around the call.
1982 silentSpillAllRegisters(resultGpr);
1983 callOperation(toInt32, resultGpr, fpr);
1984 silentFillAllRegisters(resultGpr);
1986 converted.append(m_jit.jump());
1988 isInteger.link(&m_jit);
1989 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1991 converted.link(&m_jit);
// --- 32-bit JSValue representation ---
1993 Node* childNode = node->child1().node();
1994 VirtualRegister virtualRegister = childNode->virtualRegister();
1995 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1997 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1999 GPRReg payloadGPR = op1.payloadGPR();
2000 GPRReg resultGpr = result.gpr();
2002 JITCompiler::JumpList converted;
// Known-int32 payloads need no tag inspection.
2004 if (info.registerFormat() == DataFormatJSInt32)
2005 m_jit.move(payloadGPR, resultGpr);
2007 GPRReg tagGPR = op1.tagGPR();
2008 FPRTemporary tempFpr(this);
2009 FPRReg fpr = tempFpr.fpr();
2010 FPRTemporary scratch(this);
2012 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2014 if (node->child1().useKind() == NumberUse) {
2016 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2018 MacroAssembler::AboveOrEqual, tagGPR,
2019 TrustedImm32(JSValue::LowestTag)));
// Tags below LowestTag encode doubles on 32-bit.
2021 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2024 op1.jsValueRegs(), node->child1(), ~SpecCell,
2025 m_jit.branchIfCell(op1.jsValueRegs()));
2027 // It's not a cell: so true turns into 1 and all else turns into 0.
2028 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2029 m_jit.move(TrustedImm32(0), resultGpr);
2030 converted.append(m_jit.jump());
2032 isBoolean.link(&m_jit);
// Boolean payload is already 0/1 — use it directly.
2033 m_jit.move(payloadGPR, resultGpr);
2034 converted.append(m_jit.jump());
2036 isNumber.link(&m_jit);
2039 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2041 silentSpillAllRegisters(resultGpr);
2042 callOperation(toInt32, resultGpr, fpr);
2043 silentFillAllRegisters(resultGpr);
2045 converted.append(m_jit.jump());
2047 isInteger.link(&m_jit);
2048 m_jit.move(payloadGPR, resultGpr);
2050 converted.link(&m_jit);
2053 int32Result(resultGpr, node);
// Unknown type: checkGeneratedTypeForToInt32 already terminated compilation.
2056 case GeneratedOperandTypeUnknown:
2057 RELEASE_ASSERT(!m_compileOkay);
2060 RELEASE_ASSERT_NOT_REACHED();
2065 ASSERT(!m_compileOkay);
// Reinterprets an int32 as an unsigned 32-bit number. If overflow is
// expected (doesOverflow), always produce a double, adding 2^32 to repair
// negative inputs; otherwise speculate the value is non-negative and keep
// it as an int32.
2070 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2072 if (doesOverflow(node->arithMode())) {
2073 // We know that this sometimes produces doubles. So produce a double every
2074 // time. This at least allows subsequent code to not have weird conditionals.
2076 SpeculateInt32Operand op1(this, node->child1());
2077 FPRTemporary result(this);
2079 GPRReg inputGPR = op1.gpr();
2080 FPRReg outputFPR = result.fpr();
2082 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
// Negative int32 means the unsigned value wrapped: add 2^32 to correct it.
2084 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2085 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2086 positive.link(&m_jit);
2088 doubleResult(outputFPR, node);
2092 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2094 SpeculateInt32Operand op1(this, node->child1());
2095 GPRTemporary result(this);
2097 m_jit.move(op1.gpr(), result.gpr());
// Negative means the unsigned value doesn't fit int32: speculation failure.
2099 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2101 int32Result(result.gpr(), node, op1.format());
// Converts a double to an int32 that must round-trip exactly: any failure
// (fractional part, out of range, and — when the arith mode requires —
// negative zero) is a speculation failure.
2104 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2106 SpeculateDoubleOperand op1(this, node->child1());
2107 FPRTemporary scratch(this);
2108 GPRTemporary result(this);
2110 FPRReg valueFPR = op1.fpr();
2111 FPRReg scratchFPR = scratch.fpr();
2112 GPRReg resultGPR = result.gpr();
2114 JITCompiler::JumpList failureCases;
// This node only exists in overflow-checked arithmetic contexts.
2115 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2116 m_jit.branchConvertDoubleToInt32(
2117 valueFPR, resultGPR, failureCases, scratchFPR,
2118 shouldCheckNegativeZero(node->arithMode()));
2119 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2121 int32Result(resultGPR, node);
// Produces a DoubleRep (unboxed double in an FPR) for the child, dispatching
// on the child's use kind:
//  - RealNumberUse: unbox the double and verify it is not NaN; if the NaN
//    check fails, fall back to speculating that the value is an int32.
//  - NotCellUse / NumberUse: full JSValue-to-double conversion, with special
//    handling of undefined (-> NaN), null/false (-> 0) and true (-> 1) in
//    the NotCellUse case; both the JSVALUE64 and the 32_64 encodings are
//    handled in separate preprocessor arms.
//  - Int52RepUse (JSVALUE64 only): a plain int64 -> double conversion.
2124 void SpeculativeJIT::compileDoubleRep(Node* node)
2126 switch (node->child1().useKind()) {
2127 case RealNumberUse: {
2128 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2129 FPRTemporary result(this);
2131 JSValueRegs op1Regs = op1.jsValueRegs();
2132 FPRReg resultFPR = result.fpr();
2135 GPRTemporary temp(this);
2136 GPRReg tempGPR = temp.gpr();
2137 m_jit.move(op1Regs.gpr(), tempGPR);
2138 m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2140 FPRTemporary temp(this);
2141 FPRReg tempFPR = temp.fpr();
2142 unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
// A double compares equal to itself iff it is not NaN; NaN here means the
// unboxing produced garbage (the value was not a boxed double).
2145 JITCompiler::Jump done = m_jit.branchDouble(
2146 JITCompiler::DoubleEqual, resultFPR, resultFPR);
// Not a double: type-check that it is an int32 and convert that instead.
2149 op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2150 m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2154 doubleResult(resultFPR, node);
2160 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
// If abstract interpretation already proved the input is an int32, take the
// cheap int32 -> double path with no runtime checks.
2162 SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2163 if (isInt32Speculation(possibleTypes)) {
2164 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2165 FPRTemporary result(this);
2166 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2167 doubleResult(result.fpr(), node);
2171 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2172 FPRTemporary result(this);
2175 GPRTemporary temp(this);
2177 GPRReg op1GPR = op1.gpr();
2178 GPRReg tempGPR = temp.gpr();
2179 FPRReg resultFPR = result.fpr();
2180 JITCompiler::JumpList done;
// JSVALUE64 NaN-boxing: values >= tagTypeNumberRegister are int32s.
2182 JITCompiler::Jump isInteger = m_jit.branch64(
2183 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2185 if (node->child1().useKind() == NotCellUse) {
2186 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2187 JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
// null and false both convert to +0.
2189 static const double zero = 0;
2190 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2192 JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2193 done.append(isNull);
// Anything else that is not a boolean must be a cell; that violates the
// NotCellUse speculation, so emit a type check on the boolean-tag bits.
2195 DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2196 m_jit.branchTest64(JITCompiler::NonZero, op1GPR, TrustedImm32(static_cast<int32_t>(~1))));
2198 JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2199 static const double one = 1;
2200 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2201 done.append(isFalse);
// undefined converts to NaN.
2203 isUndefined.link(&m_jit);
2204 static const double NaN = PNaN;
2205 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2206 done.append(m_jit.jump());
2208 isNumber.link(&m_jit);
2209 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2211 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2212 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2215 m_jit.move(op1GPR, tempGPR);
2216 unboxDouble(tempGPR, resultFPR);
2217 done.append(m_jit.jump());
2219 isInteger.link(&m_jit);
2220 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2222 #else // USE(JSVALUE64) -> this is the 32_64 case
2223 FPRTemporary temp(this);
2225 GPRReg op1TagGPR = op1.tagGPR();
2226 GPRReg op1PayloadGPR = op1.payloadGPR();
2227 FPRReg tempFPR = temp.fpr();
2228 FPRReg resultFPR = result.fpr();
2229 JITCompiler::JumpList done;
2231 JITCompiler::Jump isInteger = m_jit.branch32(
2232 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2234 if (node->child1().useKind() == NotCellUse) {
// 32_64 encoding: tags at or below LowestTag denote doubles.
2235 JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2236 JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2238 static const double zero = 0;
2239 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2241 JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2242 done.append(isNull);
2244 DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2246 JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2247 static const double one = 1;
2248 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2249 done.append(isFalse);
2251 isUndefined.link(&m_jit);
2252 static const double NaN = PNaN;
2253 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2254 done.append(m_jit.jump());
2256 isNumber.link(&m_jit);
2257 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2259 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2260 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2263 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2264 done.append(m_jit.jump());
2266 isInteger.link(&m_jit);
2267 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2269 #endif // USE(JSVALUE64)
2271 doubleResult(resultFPR, node);
// Int52RepUse (JSVALUE64 only): a strict int52 converts directly.
2277 SpeculateStrictInt52Operand value(this, node->child1());
2278 FPRTemporary result(this);
2280 GPRReg valueGPR = value.gpr();
2281 FPRReg resultFPR = result.fpr();
2283 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2285 doubleResult(resultFPR, node);
2288 #endif // USE(JSVALUE64)
2291 RELEASE_ASSERT_NOT_REACHED();
// Produces a boxed JSValue from an unboxed representation:
//  - DoubleRepUse: purify impure NaNs (so the boxed value is a valid
//    NaN-encoded JSValue) and box the double.
//  - Int52RepUse (JSVALUE64 only): box the strict int52.
2296 void SpeculativeJIT::compileValueRep(Node* node)
2298 switch (node->child1().useKind()) {
2299 case DoubleRepUse: {
2300 SpeculateDoubleOperand value(this, node->child1());
2301 JSValueRegsTemporary result(this);
2303 FPRReg valueFPR = value.fpr();
2304 JSValueRegs resultRegs = result.regs();
2306 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2307 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2308 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2309 // local was purified.
2310 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2311 m_jit.purifyNaN(valueFPR);
2313 boxDouble(valueFPR, resultRegs);
2315 jsValueResult(resultRegs, node);
2321 SpeculateStrictInt52Operand value(this, node->child1());
2322 GPRTemporary result(this);
2324 GPRReg valueGPR = value.gpr();
2325 GPRReg resultGPR = result.gpr();
2327 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2329 jsValueResult(resultGPR, node);
2332 #endif // USE(JSVALUE64)
2335 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the byte range [0, 255] (the Uint8ClampedArray
// conversion). Body not visible in this chunk.
2340 static double clampDoubleToByte(double d)
// Emits code that clamps the int32 in `result` to [0, 255] in place:
// values already in range are left alone, negative values become 0
// (branch32 with unsigned BelowOrEqual fails for negatives), and values
// above 0xff become 255.
2350 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2352 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2353 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2354 jit.xorPtr(result, result);
2355 MacroAssembler::Jump clamped = jit.jump();
2357 jit.move(JITCompiler::TrustedImm32(255), result);
2359 inBounds.link(&jit);
// Emits code that clamps the double in `source` to a byte in `result`:
// NaN and values <= 0 produce 0, values > 255 produce 255, and in-range
// values are rounded by adding 0.5 and truncating toward zero.
2362 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2364 // Unordered compare so we pick up NaN
2365 static const double zero = 0;
2366 static const double byteMax = 255;
2367 static const double half = 0.5;
2368 jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2369 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2370 jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2371 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2373 jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2374 // FIXME: This should probably just use a floating point round!
2375 // https://bugs.webkit.org/show_bug.cgi?id=72054
2376 jit.addDouble(source, scratch);
2377 jit.truncateDoubleToInt32(scratch, result);
2378 MacroAssembler::Jump truncatedInt = jit.jump();
2380 tooSmall.link(&jit);
2381 jit.xorPtr(result, result);
2382 MacroAssembler::Jump zeroed = jit.jump();
2385 jit.move(JITCompiler::TrustedImm32(255), result);
2387 truncatedInt.link(&jit);
// Returns a jump taken when `indexGPR` is out of bounds for the typed array
// in `baseGPR`, or an unset Jump when no check is needed:
//  - PutByValAlias implies the index was already proven in-bounds.
//  - If the view and its length can be constant-folded and the index is a
//    constant below that length, the check is elided entirely.
2392 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2394 if (node->op() == PutByValAlias)
2395 return JITCompiler::Jump();
2396 JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2397 m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2399 uint32_t length = view->length();
2400 Node* indexNode = m_jit.graph().child(node, 1).node();
2401 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2402 return JITCompiler::Jump();
// Folded view but non-constant index: compare against the known length.
2403 return m_jit.branch32(
2404 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
// General case: compare against the length loaded from the view itself.
2406 return m_jit.branch32(
2407 MacroAssembler::AboveOrEqual, indexGPR,
2408 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a typed-array bounds check that OSR-exits (OutOfBounds) when the
// index is out of range; a no-op if jumpForTypedArrayOutOfBounds elided it.
2411 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2413 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2416 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// Compiles GetByVal on an integer typed array: bounds-check, then load the
// element with the width/signedness implied by `type`. The result format
// depends on the element: anything below 32 bits (or signed 32-bit) fits in
// an int32; unsigned 32-bit values may exceed int32 range, so we either
// speculate non-negative, widen to int52 (JSVALUE64), or fall back to a
// double with a +2^32 fixup for negative-as-signed values.
2419 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2421 ASSERT(isInt(type));
2423 SpeculateCellOperand base(this, node->child1());
2424 SpeculateStrictInt32Operand property(this, node->child2());
2425 StorageOperand storage(this, node->child3());
2427 GPRReg baseReg = base.gpr();
2428 GPRReg propertyReg = property.gpr();
2429 GPRReg storageReg = storage.gpr();
2431 GPRTemporary result(this);
2432 GPRReg resultReg = result.gpr();
2434 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2436 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2437 switch (elementSize(type)) {
2440 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2442 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2446 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2448 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2451 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2456 if (elementSize(type) < 4 || isSigned(type)) {
2457 int32Result(resultReg, node);
2461 ASSERT(elementSize(type) == 4 && !isSigned(type));
2462 if (node->shouldSpeculateInt32()) {
// Uint32 load speculated to fit int32: OSR-exit if the top bit is set.
2463 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2464 int32Result(resultReg, node);
2469 if (node->shouldSpeculateMachineInt()) {
2470 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2471 strictInt52Result(resultReg, node);
// Last resort: produce a double, correcting for the unsigned value having
// been interpreted as a negative signed int32.
2476 FPRTemporary fresult(this);
2477 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2478 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2479 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2480 positive.link(&m_jit);
2481 doubleResult(fresult.fpr(), node);
// Compiles PutByVal into an integer typed array. First materializes the
// value to store into `valueGPR` according to the value edge's use kind
// (constant, Int32, Int52 on JSVALUE64, or DoubleRep), clamping to [0, 255]
// when the array is clamped (Uint8ClampedArray). Then performs the bounds
// check and the store of the appropriate width; out-of-bounds stores are
// silently skipped unless the array mode says in-bounds (then they OSR-exit).
2484 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2486 ASSERT(isInt(type));
2488 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2489 GPRReg storageReg = storage.gpr();
2491 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2494 GPRReg valueGPR = InvalidGPRReg;
2496 if (valueUse->isConstant()) {
2497 JSValue jsValue = valueUse->asJSValue();
2498 if (!jsValue.isNumber()) {
// A non-number constant here means the speculation already failed.
2499 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2503 double d = jsValue.asNumber();
2504 if (isClamped(type)) {
2505 ASSERT(elementSize(type) == 1);
2506 d = clampDoubleToByte(d);
2508 GPRTemporary scratch(this);
2509 GPRReg scratchReg = scratch.gpr();
2510 m_jit.move(Imm32(toInt32(d)), scratchReg);
2511 value.adopt(scratch);
2512 valueGPR = scratchReg;
2514 switch (valueUse.useKind()) {
2516 SpeculateInt32Operand valueOp(this, valueUse);
2517 GPRTemporary scratch(this);
2518 GPRReg scratchReg = scratch.gpr();
2519 m_jit.move(valueOp.gpr(), scratchReg);
2520 if (isClamped(type)) {
2521 ASSERT(elementSize(type) == 1);
2522 compileClampIntegerToByte(m_jit, scratchReg);
2524 value.adopt(scratch);
2525 valueGPR = scratchReg;
// Int52RepUse path (JSVALUE64 only): clamp with 64-bit compares since the
// value can exceed int32 range.
2531 SpeculateStrictInt52Operand valueOp(this, valueUse);
2532 GPRTemporary scratch(this);
2533 GPRReg scratchReg = scratch.gpr();
2534 m_jit.move(valueOp.gpr(), scratchReg);
2535 if (isClamped(type)) {
2536 ASSERT(elementSize(type) == 1);
2537 MacroAssembler::Jump inBounds = m_jit.branch64(
2538 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2539 MacroAssembler::Jump tooBig = m_jit.branch64(
2540 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2541 m_jit.move(TrustedImm32(0), scratchReg);
2542 MacroAssembler::Jump clamped = m_jit.jump();
2543 tooBig.link(&m_jit);
2544 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2545 clamped.link(&m_jit);
2546 inBounds.link(&m_jit);
2548 value.adopt(scratch);
2549 valueGPR = scratchReg;
2552 #endif // USE(JSVALUE64)
2554 case DoubleRepUse: {
2555 if (isClamped(type)) {
2556 ASSERT(elementSize(type) == 1);
2557 SpeculateDoubleOperand valueOp(this, valueUse);
2558 GPRTemporary result(this);
2559 FPRTemporary floatScratch(this);
2560 FPRReg fpr = valueOp.fpr();
2561 GPRReg gpr = result.gpr();
2562 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2563 value.adopt(result);
2566 SpeculateDoubleOperand valueOp(this, valueUse);
2567 GPRTemporary result(this);
2568 FPRReg fpr = valueOp.fpr();
2569 GPRReg gpr = result.gpr();
// NaN stores 0; otherwise truncate, with a slow-path call to toInt32 when
// the inline truncation fails (value out of int32 range).
2570 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2571 m_jit.xorPtr(gpr, gpr);
2572 MacroAssembler::Jump fixed = m_jit.jump();
2573 notNaN.link(&m_jit);
2575 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2576 fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2578 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2581 value.adopt(result);
2588 RELEASE_ASSERT_NOT_REACHED();
2593 ASSERT_UNUSED(valueGPR, valueGPR != property);
2594 ASSERT(valueGPR != base);
2595 ASSERT(valueGPR != storageReg);
2596 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2597 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
// In-bounds mode: out-of-bounds is a speculation failure, not a skip.
2598 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2599 outOfBounds = MacroAssembler::Jump();
2602 switch (elementSize(type)) {
2604 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2607 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2610 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2615 if (outOfBounds.isSet())
2616 outOfBounds.link(&m_jit);
// Compiles GetByVal on a float typed array: bounds-check, then load either
// a float (widened to double) or a double, producing a DoubleRep result.
2620 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2622 ASSERT(isFloat(type));
2624 SpeculateCellOperand base(this, node->child1());
2625 SpeculateStrictInt32Operand property(this, node->child2());
2626 StorageOperand storage(this, node->child3());
2628 GPRReg baseReg = base.gpr();
2629 GPRReg propertyReg = property.gpr();
2630 GPRReg storageReg = storage.gpr();
2632 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2634 FPRTemporary result(this);
2635 FPRReg resultReg = result.fpr();
2636 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2637 switch (elementSize(type)) {
2639 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2640 m_jit.convertFloatToDouble(resultReg, resultReg);
2643 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2647 RELEASE_ASSERT_NOT_REACHED();
2650 doubleResult(resultReg, node);
// Compiles PutByVal into a float typed array: narrow the double to a float
// for 4-byte elements (via a scratch FPR so the operand is not clobbered),
// store directly for 8-byte elements. Out-of-bounds stores are skipped,
// unless the array mode is in-bounds, in which case they OSR-exit.
2653 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2655 ASSERT(isFloat(type));
2657 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2658 GPRReg storageReg = storage.gpr();
2660 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2661 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2663 SpeculateDoubleOperand valueOp(this, valueUse);
2664 FPRTemporary scratch(this);
2665 FPRReg valueFPR = valueOp.fpr();
2666 FPRReg scratchFPR = scratch.fpr();
2668 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2670 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2671 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2672 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2673 outOfBounds = MacroAssembler::Jump();
2676 switch (elementSize(type)) {
2678 m_jit.moveDouble(valueFPR, scratchFPR);
2679 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2680 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2684 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2687 RELEASE_ASSERT_NOT_REACHED();
2689 if (outOfBounds.isSet())
2690 outOfBounds.link(&m_jit);
// Emits the prototype-chain walk for instanceof: starting from valueReg,
// repeatedly load the structure's prototype and compare against
// prototypeReg. Leaves a boolean in scratchReg (boxed true/false on
// JSVALUE64, 1/0 on 32_64 — both arms are visible below).
2694 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2696 // Check that prototype is an object.
2697 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2699 // Initialize scratchReg with the value being checked.
2700 m_jit.move(valueReg, scratchReg);
2702 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2703 MacroAssembler::Label loop(&m_jit);
2704 m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2705 m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2706 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
// Continue as long as the prototype is a cell (JSVALUE64) / non-null (32_64).
2708 m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2710 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2713 // No match - result is false.
2715 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2717 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2719 MacroAssembler::Jump putResult = m_jit.jump();
2721 isInstance.link(&m_jit);
2723 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2725 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2728 putResult.link(&m_jit);
// Compiles InstanceOf. For UntypedUse the value may not be a cell, so
// non-cells short-circuit to false; otherwise both operands are speculated
// cells and we go straight to the prototype-chain walk.
2731 void SpeculativeJIT::compileInstanceOf(Node* node)
2733 if (node->child1().useKind() == UntypedUse) {
2734 // It might not be a cell. Speculate less aggressively.
2735 // Or: it might only be used once (i.e. by us), so we get zero benefit
2736 // from speculating any more aggressively than we absolutely need to.
2738 JSValueOperand value(this, node->child1());
2739 SpeculateCellOperand prototype(this, node->child2());
2740 GPRTemporary scratch(this);
2741 GPRTemporary scratch2(this);
2743 GPRReg prototypeReg = prototype.gpr();
2744 GPRReg scratchReg = scratch.gpr();
2745 GPRReg scratch2Reg = scratch2.gpr();
2747 MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2748 GPRReg valueReg = value.jsValueRegs().payloadGPR();
// Non-cell operand: instanceof is false without walking anything.
2749 moveFalseTo(scratchReg);
2751 MacroAssembler::Jump done = m_jit.jump();
2753 isCell.link(&m_jit);
2755 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2759 blessedBooleanResult(scratchReg, node);
2763 SpeculateCellOperand value(this, node->child1());
2764 SpeculateCellOperand prototype(this, node->child2());
2766 GPRTemporary scratch(this);
2767 GPRTemporary scratch2(this);
2769 GPRReg valueReg = value.gpr();
2770 GPRReg prototypeReg = prototype.gpr();
2771 GPRReg scratchReg = scratch.gpr();
2772 GPRReg scratch2Reg = scratch2.gpr();
2774 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2776 blessedBooleanResult(scratchReg, node);
// Compiles ValueAdd/ArithAdd, dispatching on the binary use kind:
//  - Int32: constant-immediate fast paths for either child, then the
//    register-register case; overflow either unchecked, or checked with a
//    SpeculationRecovery so OSR exit can undo an in-place add.
//  - Int52 (JSVALUE64): unchecked add when neither input can be a full
//    Int52 (sum fits), otherwise checked branchAdd64.
//  - DoubleRep: plain double addition.
2779 void SpeculativeJIT::compileAdd(Node* node)
2781 switch (node->binaryUseKind()) {
2783 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2785 if (node->child1()->isInt32Constant()) {
2786 int32_t imm1 = node->child1()->asInt32();
2787 SpeculateInt32Operand op2(this, node->child2());
2788 GPRTemporary result(this);
2790 if (!shouldCheckOverflow(node->arithMode())) {
2791 m_jit.move(op2.gpr(), result.gpr());
2792 m_jit.add32(Imm32(imm1), result.gpr());
2794 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2796 int32Result(result.gpr(), node);
2800 if (node->child2()->isInt32Constant()) {
2801 SpeculateInt32Operand op1(this, node->child1());
2802 int32_t imm2 = node->child2()->asInt32();
2803 GPRTemporary result(this);
2805 if (!shouldCheckOverflow(node->arithMode())) {
2806 m_jit.move(op1.gpr(), result.gpr());
2807 m_jit.add32(Imm32(imm2), result.gpr());
2809 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2811 int32Result(result.gpr(), node);
2815 SpeculateInt32Operand op1(this, node->child1());
2816 SpeculateInt32Operand op2(this, node->child2());
2817 GPRTemporary result(this, Reuse, op1, op2);
2819 GPRReg gpr1 = op1.gpr();
2820 GPRReg gpr2 = op2.gpr();
2821 GPRReg gprResult = result.gpr();
2823 if (!shouldCheckOverflow(node->arithMode())) {
2824 if (gpr1 == gprResult)
2825 m_jit.add32(gpr2, gprResult);
2827 m_jit.move(gpr2, gprResult);
2828 m_jit.add32(gpr1, gprResult);
2831 MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
// If the result aliases an input, record a recovery so the OSR exit
// machinery can reconstruct the clobbered operand (result - other).
2833 if (gpr1 == gprResult)
2834 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2835 else if (gpr2 == gprResult)
2836 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2838 speculationCheck(Overflow, JSValueRegs(), 0, check);
2841 int32Result(gprResult, node);
2847 ASSERT(shouldCheckOverflow(node->arithMode()));
2848 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2850 // Will we need an overflow check? If we can prove that neither input can be
2851 // Int52 then the overflow check will not be necessary.
2852 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2853 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2854 SpeculateWhicheverInt52Operand op1(this, node->child1());
2855 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2856 GPRTemporary result(this, Reuse, op1);
2857 m_jit.move(op1.gpr(), result.gpr());
2858 m_jit.add64(op2.gpr(), result.gpr());
2859 int52Result(result.gpr(), node, op1.format());
2863 SpeculateInt52Operand op1(this, node->child1());
2864 SpeculateInt52Operand op2(this, node->child2());
2865 GPRTemporary result(this);
2866 m_jit.move(op1.gpr(), result.gpr());
2868 Int52Overflow, JSValueRegs(), 0,
2869 m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2870 int52Result(result.gpr(), node);
2873 #endif // USE(JSVALUE64)
2875 case DoubleRepUse: {
2876 SpeculateDoubleOperand op1(this, node->child1());
2877 SpeculateDoubleOperand op2(this, node->child2());
2878 FPRTemporary result(this, op1, op2);
2880 FPRReg reg1 = op1.fpr();
2881 FPRReg reg2 = op2.fpr();
2882 m_jit.addDouble(reg1, reg2, result.fpr());
2884 doubleResult(result.fpr(), node);
2889 RELEASE_ASSERT_NOT_REACHED();
// Compiles MakeRope: inline-allocates a JSRopeString from 2 or 3 string
// fibers, merging the children's flags (so the 8-bit bit is kept only if
// all fibers are 8-bit) and summing their lengths with an overflow check.
// Allocation failure and length overflow fall back to operationMakeRope2/3.
2894 void SpeculativeJIT::compileMakeRope(Node* node)
2896 ASSERT(node->child1().useKind() == KnownStringUse);
2897 ASSERT(node->child2().useKind() == KnownStringUse);
2898 ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2900 SpeculateCellOperand op1(this, node->child1());
2901 SpeculateCellOperand op2(this, node->child2());
2902 SpeculateCellOperand op3(this, node->child3());
2903 GPRTemporary result(this);
2904 GPRTemporary allocator(this);
2905 GPRTemporary scratch(this);
2909 opGPRs[0] = op1.gpr();
2910 opGPRs[1] = op2.gpr();
2911 if (node->child3()) {
2912 opGPRs[2] = op3.gpr();
2915 opGPRs[2] = InvalidGPRReg;
2918 GPRReg resultGPR = result.gpr();
2919 GPRReg allocatorGPR = allocator.gpr();
2920 GPRReg scratchGPR = scratch.gpr();
2922 JITCompiler::JumpList slowPath;
2923 MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2924 m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2925 emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
// A null value pointer marks the string as a rope; unused fiber slots are
// also nulled out.
2927 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2928 for (unsigned i = 0; i < numOpGPRs; ++i)
2929 m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2930 for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2931 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
// scratchGPR accumulates the AND of the fibers' flags; allocatorGPR is
// reused to accumulate the total length.
2932 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2933 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2934 if (!ASSERT_DISABLED) {
2935 JITCompiler::Jump ok = m_jit.branch32(
2936 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2937 m_jit.abortWithReason(DFGNegativeStringLength);
2940 for (unsigned i = 1; i < numOpGPRs; ++i) {
2941 m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2943 Uncountable, JSValueSource(), nullptr,
2945 JITCompiler::Overflow,
2946 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2948 m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2949 m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2950 if (!ASSERT_DISABLED) {
2951 JITCompiler::Jump ok = m_jit.branch32(
2952 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2953 m_jit.abortWithReason(DFGNegativeStringLength);
2956 m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2958 switch (numOpGPRs) {
2960 addSlowPathGenerator(slowPathCall(
2961 slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2964 addSlowPathGenerator(slowPathCall(
2965 slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2968 RELEASE_ASSERT_NOT_REACHED();
2972 cellResult(resultGPR, node);
// Compiles ArithClz32: count-leading-zeros on an int32 operand, producing
// an int32 result. No speculation checks are needed beyond the operand.
2975 void SpeculativeJIT::compileArithClz32(Node* node)
2977 ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
2978 SpeculateInt32Operand value(this, node->child1());
2979 GPRTemporary result(this, Reuse, value);
2980 GPRReg valueReg = value.gpr();
2981 GPRReg resultReg = result.gpr();
2982 m_jit.countLeadingZeros32(valueReg, resultReg);
2983 int32Result(resultReg, node);
// Compiles ArithSub, mirroring compileAdd's structure:
//  - Int32: immediate fast paths (right constant, then left constant),
//    then register-register, each with unchecked or overflow-checked forms.
//  - Int52 (JSVALUE64): unchecked sub64 when neither input can be a full
//    Int52, otherwise checked branchSub64.
//  - DoubleRep: plain double subtraction.
2986 void SpeculativeJIT::compileArithSub(Node* node)
2988 switch (node->binaryUseKind()) {
2990 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2992 if (node->child2()->isInt32Constant()) {
2993 SpeculateInt32Operand op1(this, node->child1());
2994 int32_t imm2 = node->child2()->asInt32();
2995 GPRTemporary result(this);
2997 if (!shouldCheckOverflow(node->arithMode())) {
2998 m_jit.move(op1.gpr(), result.gpr());
2999 m_jit.sub32(Imm32(imm2), result.gpr());
// The checked immediate form needs an extra scratch register.
3001 GPRTemporary scratch(this);
3002 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3005 int32Result(result.gpr(), node);
3009 if (node->child1()->isInt32Constant()) {
3010 int32_t imm1 = node->child1()->asInt32();
3011 SpeculateInt32Operand op2(this, node->child2());
3012 GPRTemporary result(this);
3014 m_jit.move(Imm32(imm1), result.gpr());
3015 if (!shouldCheckOverflow(node->arithMode()))
3016 m_jit.sub32(op2.gpr(), result.gpr());
3018 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3020 int32Result(result.gpr(), node);
3024 SpeculateInt32Operand op1(this, node->child1());
3025 SpeculateInt32Operand op2(this, node->child2());
3026 GPRTemporary result(this);
3028 if (!shouldCheckOverflow(node->arithMode())) {
3029 m_jit.move(op1.gpr(), result.gpr());
3030 m_jit.sub32(op2.gpr(), result.gpr());
3032 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3034 int32Result(result.gpr(), node);
3040 ASSERT(shouldCheckOverflow(node->arithMode()));
3041 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3043 // Will we need an overflow check? If we can prove that neither input can be
3044 // Int52 then the overflow check will not be necessary.
3045 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3046 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3047 SpeculateWhicheverInt52Operand op1(this, node->child1());
3048 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3049 GPRTemporary result(this, Reuse, op1);
3050 m_jit.move(op1.gpr(), result.gpr());
3051 m_jit.sub64(op2.gpr(), result.gpr());
3052 int52Result(result.gpr(), node, op1.format());
3056 SpeculateInt52Operand op1(this, node->child1());
3057 SpeculateInt52Operand op2(this, node->child2());
3058 GPRTemporary result(this);
3059 m_jit.move(op1.gpr(), result.gpr());
3061 Int52Overflow, JSValueRegs(), 0,
3062 m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3063 int52Result(result.gpr(), node);
3066 #endif // USE(JSVALUE64)
3068 case DoubleRepUse: {
3069 SpeculateDoubleOperand op1(this, node->child1());
3070 SpeculateDoubleOperand op2(this, node->child2());
3071 FPRTemporary result(this, op1);
3073 FPRReg reg1 = op1.fpr();
3074 FPRReg reg2 = op2.fpr();
3075 m_jit.subDouble(reg1, reg2, result.fpr());
3077 doubleResult(result.fpr(), node);
3082 RELEASE_ASSERT_NOT_REACHED();
// Compiles ArithNegate:
//  - Int32: plain neg32 when unchecked; branchNeg32 when only overflow
//    matters; when negative zero matters, exit if the operand's low 31 bits
//    are all zero (i.e. it is 0 or INT_MIN) before negating.
//  - Int52 (JSVALUE64): neg64 with overflow and/or negative-zero exits.
//  - DoubleRep: plain double negation.
3087 void SpeculativeJIT::compileArithNegate(Node* node)
3089 switch (node->child1().useKind()) {
3091 SpeculateInt32Operand op1(this, node->child1());
3092 GPRTemporary result(this);
3094 m_jit.move(op1.gpr(), result.gpr());
3096 // Note: there is no notion of being not used as a number, but someone
3097 // caring about negative zero.
3099 if (!shouldCheckOverflow(node->arithMode()))
3100 m_jit.neg32(result.gpr());
3101 else if (!shouldCheckNegativeZero(node->arithMode()))
3102 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
// Testing against 0x7fffffff catches both 0 (negative-zero case) and
// INT_MIN (overflow case) with a single branch.
3104 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3105 m_jit.neg32(result.gpr());
3108 int32Result(result.gpr(), node);
3114 ASSERT(shouldCheckOverflow(node->arithMode()));
3116 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3117 SpeculateWhicheverInt52Operand op1(this, node->child1());
3118 GPRTemporary result(this);
3119 GPRReg op1GPR = op1.gpr();
3120 GPRReg resultGPR = result.gpr();
3121 m_jit.move(op1GPR, resultGPR);
3122 m_jit.neg64(resultGPR);
3123 if (shouldCheckNegativeZero(node->arithMode())) {
3125 NegativeZero, JSValueRegs(), 0,
3126 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3128 int52Result(resultGPR, node, op1.format());
3132 SpeculateInt52Operand op1(this, node->child1());
3133 GPRTemporary result(this);
3134 GPRReg op1GPR = op1.gpr();
3135 GPRReg resultGPR = result.gpr();
3136 m_jit.move(op1GPR, resultGPR);
3138 Int52Overflow, JSValueRegs(), 0,
3139 m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3140 if (shouldCheckNegativeZero(node->arithMode())) {
3142 NegativeZero, JSValueRegs(), 0,
3143 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3145 int52Result(resultGPR, node);
3148 #endif // USE(JSVALUE64)
3150 case DoubleRepUse: {
3151 SpeculateDoubleOperand op1(this, node->child1());
3152 FPRTemporary result(this);
3154 m_jit.negateDouble(op1.fpr(), result.fpr());
3156 doubleResult(result.fpr(), node);
3161 RELEASE_ASSERT_NOT_REACHED();
// Emits code for an ArithMul node. Dispatches on the binary use kind:
// Int32 (truncated or overflow-checked), Int52 (64-bit builds only, using
// the shifted-operand trick documented inline), or double.
3165 void SpeculativeJIT::compileArithMul(Node* node)
3167 switch (node->binaryUseKind()) {
3169 SpeculateInt32Operand op1(this, node->child1());
3170 SpeculateInt32Operand op2(this, node->child2());
3171 GPRTemporary result(this);
3173 GPRReg reg1 = op1.gpr();
3174 GPRReg reg2 = op2.gpr();
3176 // We can perform truncated multiplications if we get to this point, because if the
3177 // fixup phase could not prove that it would be safe, it would have turned us into
3178 // a double multiplication.
3179 if (!shouldCheckOverflow(node->arithMode())) {
3180 m_jit.move(reg1, result.gpr());
3181 m_jit.mul32(reg2, result.gpr());
// Checked mode: bail out via OSR exit if the 32-bit multiply overflows.
3184 Overflow, JSValueRegs(), 0,
3185 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3188 // Check for negative zero, if the users of this node care about such things.
// A zero result came from -0 semantics only if either operand was negative
// (e.g. -1 * 0 in JS is -0), so exit when the result is 0 and an input < 0.
3189 if (shouldCheckNegativeZero(node->arithMode())) {
3190 MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3191 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3192 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3193 resultNonZero.link(&m_jit);
3196 int32Result(result.gpr(), node);
3202 ASSERT(shouldCheckOverflow(node->arithMode()));
3204 // This is super clever. We want to do an int52 multiplication and check the
3205 // int52 overflow bit. There is no direct hardware support for this, but we do
3206 // have the ability to do an int64 multiplication and check the int64 overflow
3207 // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3208 // registers, with the high 12 bits being sign-extended. We can do:
3212 // This will give us a left-shifted int52 (value is in high 52 bits, low 16
3213 // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3214 // multiplication overflows is identical to whether the 'a * b' 52-bit
3215 // multiplication overflows.
3217 // In our nomenclature, this is:
3219 // strictInt52(a) * int52(b) => int52
3221 // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
3224 // We don't care which of op1 or op2 serves as the left-shifted operand, so
3225 // we just do whatever is more convenient for op1 and have op2 do the
3226 // opposite. This ensures that we do at most one shift.
3228 SpeculateWhicheverInt52Operand op1(this, node->child1());
3229 SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3230 GPRTemporary result(this);
3232 GPRReg op1GPR = op1.gpr();
3233 GPRReg op2GPR = op2.gpr();
3234 GPRReg resultGPR = result.gpr();
3236 m_jit.move(op1GPR, resultGPR);
3238 Int52Overflow, JSValueRegs(), 0,
3239 m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
// Same negative-zero logic as the Int32 path, on 64-bit registers.
3241 if (shouldCheckNegativeZero(node->arithMode())) {
3242 MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3243 MacroAssembler::NonZero, resultGPR);
3245 NegativeZero, JSValueRegs(), 0,
3246 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3248 NegativeZero, JSValueRegs(), 0,
3249 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3250 resultNonZero.link(&m_jit);
3253 int52Result(resultGPR, node);
3256 #endif // USE(JSVALUE64)
// Double path: IEEE multiply needs no speculation checks.
3258 case DoubleRepUse: {
3259 SpeculateDoubleOperand op1(this, node->child1());
3260 SpeculateDoubleOperand op2(this, node->child2());
3261 FPRTemporary result(this, op1, op2);
3263 FPRReg reg1 = op1.fpr();
3264 FPRReg reg2 = op2.fpr();
3266 m_jit.mulDouble(reg1, reg2, result.fpr());
3268 doubleResult(result.fpr(), node);
3273 RELEASE_ASSERT_NOT_REACHED();
// Emits code for an ArithDiv node. Int32 division is only compiled on
// architectures with hardware integer division (x86 idiv, ARM/ARM64 sdiv);
// otherwise fixup is expected to have produced a double division.
3278 void SpeculativeJIT::compileArithDiv(Node* node)
3280 switch (node->binaryUseKind()) {
3282 #if CPU(X86) || CPU(X86_64)
3283 SpeculateInt32Operand op1(this, node->child1());
3284 SpeculateInt32Operand op2(this, node->child2());
// x86 idiv implicitly uses eax (quotient) and edx (remainder), so both
// registers are pinned here and the operands must be kept out of them.
3285 GPRTemporary eax(this, X86Registers::eax);
3286 GPRTemporary edx(this, X86Registers::edx);
3287 GPRReg op1GPR = op1.gpr();
3288 GPRReg op2GPR = op2.gpr();
// If op2 landed in eax/edx, it will be copied into a freshly allocated
// temp before the idiv; otherwise pick a scratch that avoids op1's home.
3292 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3293 op2TempGPR = allocate();
3296 op2TempGPR = InvalidGPRReg;
3297 if (op1GPR == X86Registers::eax)
3298 temp = X86Registers::edx;
3300 temp = X86Registers::eax;
3303 ASSERT(temp != op1GPR);
3304 ASSERT(temp != op2GPR);
// denominator+1 > 1 (unsigned) iff denominator is neither 0 nor -1 —
// the only two values that make idiv trap or overflow.
3306 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3308 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3310 JITCompiler::JumpList done;
3311 if (shouldCheckOverflow(node->arithMode())) {
// Checked mode: division by zero, and INT_MIN / -1, both OSR-exit.
3312 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3313 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3315 // This is the case where we convert the result to an int after we're done, and we
3316 // already know that the denominator is either -1 or 0. So, if the denominator is
3317 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3318 // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3319 // are happy to fall through to a normal division, since we're just dividing
3320 // something by negative 1.
3322 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3323 m_jit.move(TrustedImm32(0), eax.gpr());
3324 done.append(m_jit.jump());
3326 notZero.link(&m_jit);
3327 JITCompiler::Jump notNeg2ToThe31 =
3328 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3329 m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3330 done.append(m_jit.jump());
3332 notNeg2ToThe31.link(&m_jit);
3335 safeDenominator.link(&m_jit);
3337 // If the user cares about negative zero, then speculate that we're not about
3338 // to produce negative zero.
// 0 / negative yields -0 in JS; exit if numerator is 0 and denominator < 0.
3339 if (shouldCheckNegativeZero(node->arithMode())) {
3340 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3341 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3342 numeratorNonZero.link(&m_jit);
3345 if (op2TempGPR != InvalidGPRReg) {
3346 m_jit.move(op2GPR, op2TempGPR);
3347 op2GPR = op2TempGPR;
// cdq sign-extends eax into edx:eax as idiv requires.
3350 m_jit.move(op1GPR, eax.gpr());
3351 m_jit.assembler().cdq();
3352 m_jit.assembler().idivl_r(op2GPR);
3354 if (op2TempGPR != InvalidGPRReg)
3357 // Check that there was no remainder. If there had been, then we'd be obligated to
3358 // produce a double result instead.
3359 if (shouldCheckOverflow(node->arithMode()))
3360 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3363 int32Result(eax.gpr(), node);
3364 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3365 SpeculateInt32Operand op1(this, node->child1());
3366 SpeculateInt32Operand op2(this, node->child2());
3367 GPRReg op1GPR = op1.gpr();
3368 GPRReg op2GPR = op2.gpr();
3369 GPRTemporary quotient(this);
3370 GPRTemporary multiplyAnswer(this);
3372 // If the user cares about negative zero, then speculate that we're not about
3373 // to produce negative zero.
3374 if (shouldCheckNegativeZero(node->arithMode())) {
3375 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3376 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3377 numeratorNonZero.link(&m_jit);
3380 m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3382 // Check that there was no remainder. If there had been, then we'd be obligated to
3383 // produce a double result instead.
// ARM sdiv gives no remainder: verify exactness by re-multiplying the
// quotient and comparing against the original numerator.
3384 if (shouldCheckOverflow(node->arithMode())) {
3385 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3386 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3389 int32Result(quotient.gpr(), node);
3391 RELEASE_ASSERT_NOT_REACHED();
// Double path: IEEE divide needs no speculation checks.
3396 case DoubleRepUse: {
3397 SpeculateDoubleOperand op1(this, node->child1());
3398 SpeculateDoubleOperand op2(this, node->child2());
3399 FPRTemporary result(this, op1);
3401 FPRReg reg1 = op1.fpr();
3402 FPRReg reg2 = op2.fpr();
3403 m_jit.divDouble(reg1, reg2, result.fpr());
3405 doubleResult(result.fpr(), node);
3410 RELEASE_ASSERT_NOT_REACHED();
// Emits code for an ArithMod node. Int32 remainder has three strategies:
// a branch-free fast path for constant power-of-two divisors, an x86 path
// for other non-zero/-1 constants, and the general path (x86 idiv or ARM
// sdiv+msub-style sequence). Non-integer cases call out to fmod.
3415 void SpeculativeJIT::compileArithMod(Node* node)
3417 switch (node->binaryUseKind()) {
3419 // In the fast path, the dividend value could be the final result
3420 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3421 SpeculateStrictInt32Operand op1(this, node->child1());
3423 if (node->child2()->isInt32Constant()) {
3424 int32_t divisor = node->child2()->asInt32();
// Power-of-two divisor (> 1): branch-free remainder via shift/mask.
3425 if (divisor > 1 && hasOneBitSet(divisor)) {
3426 unsigned logarithm = WTF::fastLog2(divisor);
3427 GPRReg dividendGPR = op1.gpr();
3428 GPRTemporary result(this);
3429 GPRReg resultGPR = result.gpr();
3431 // This is what LLVM generates. It's pretty crazy. Here's my
3432 // attempt at understanding it.
3434 // First, compute either divisor - 1, or 0, depending on whether
3435 // the dividend is negative:
3437 // If dividend < 0: resultGPR = divisor - 1
3438 // If dividend >= 0: resultGPR = 0
3439 m_jit.move(dividendGPR, resultGPR);
3440 m_jit.rshift32(TrustedImm32(31), resultGPR);
3441 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3443 // Add in the dividend, so that:
3445 // If dividend < 0: resultGPR = dividend + divisor - 1
3446 // If dividend >= 0: resultGPR = dividend
3447 m_jit.add32(dividendGPR, resultGPR);
3449 // Mask so as to only get the *high* bits. This rounds down
3450 // (towards negative infinity) resultGPR to the nearest multiple
3451 // of divisor, so that:
3453 // If dividend < 0: resultGPR = floor((dividend + divisor - 1) / divisor)
3454 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3456 // Note that this can be simplified to:
3458 // If dividend < 0: resultGPR = ceil(dividend / divisor)
3459 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3461 // Note that if the dividend is negative, resultGPR will also be negative.
3462 // Regardless of the sign of dividend, resultGPR will be rounded towards
3463 // zero, because of how things are conditionalized.
3464 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3466 // Subtract resultGPR from dividendGPR, which yields the remainder:
3468 // resultGPR = dividendGPR - resultGPR
3469 m_jit.neg32(resultGPR);
3470 m_jit.add32(dividendGPR, resultGPR);
3472 if (shouldCheckNegativeZero(node->arithMode())) {
3473 // Check that we're not about to create negative zero.
// Negative dividend with zero remainder means the JS result is -0.
3474 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3475 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3476 numeratorPositive.link(&m_jit);
3479 int32Result(resultGPR, node);
3484 #if CPU(X86) || CPU(X86_64)
// x86-only: other constant divisors, excluding 0 and -1 (the values that
// would make idiv fault or overflow), go straight to idiv with the
// constant loaded into a scratch register.
3485 if (node->child2()->isInt32Constant()) {
3486 int32_t divisor = node->child2()->asInt32();
3487 if (divisor && divisor != -1) {
3488 GPRReg op1Gpr = op1.gpr();
3490 GPRTemporary eax(this, X86Registers::eax);
3491 GPRTemporary edx(this, X86Registers::edx);
3492 GPRTemporary scratch(this);
3493 GPRReg scratchGPR = scratch.gpr();
// idiv clobbers eax/edx, so preserve the dividend if it lives there —
// the negative-zero check below still needs its original value.
3496 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3497 op1SaveGPR = allocate();
3498 ASSERT(op1Gpr != op1SaveGPR);
3499 m_jit.move(op1Gpr, op1SaveGPR);
3501 op1SaveGPR = op1Gpr;
3502 ASSERT(op1SaveGPR != X86Registers::eax);
3503 ASSERT(op1SaveGPR != X86Registers::edx);
3505 m_jit.move(op1Gpr, eax.gpr());
3506 m_jit.move(TrustedImm32(divisor), scratchGPR);
3507 m_jit.assembler().cdq();
3508 m_jit.assembler().idivl_r(scratchGPR);
3509 if (shouldCheckNegativeZero(node->arithMode())) {
3510 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3511 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3512 numeratorPositive.link(&m_jit);
3515 if (op1SaveGPR != op1Gpr)
// Remainder comes back in edx.
3518 int32Result(edx.gpr(), node);
// General path: divisor is not a usable constant.
3524 SpeculateInt32Operand op2(this, node->child2());
3525 #if CPU(X86) || CPU(X86_64)
3526 GPRTemporary eax(this, X86Registers::eax);
3527 GPRTemporary edx(this, X86Registers::edx);
3528 GPRReg op1GPR = op1.gpr();
3529 GPRReg op2GPR = op2.gpr();
// Same register shuffling as compileArithDiv: keep both operands clear
// of idiv's implicit eax/edx, and preserve the dividend for later checks.
3535 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3536 op2TempGPR = allocate();
3539 op2TempGPR = InvalidGPRReg;
3540 if (op1GPR == X86Registers::eax)
3541 temp = X86Registers::edx;
3543 temp = X86Registers::eax;
3546 if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3547 op1SaveGPR = allocate();
3548 ASSERT(op1GPR != op1SaveGPR);
3549 m_jit.move(op1GPR, op1SaveGPR);
3551 op1SaveGPR = op1GPR;
3553 ASSERT(temp != op1GPR);
3554 ASSERT(temp != op2GPR);
3555 ASSERT(op1SaveGPR != X86Registers::eax);
3556 ASSERT(op1SaveGPR != X86Registers::edx);
// denominator+1 > 1 (unsigned) iff denominator is neither 0 nor -1.
3558 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3560 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3562 JITCompiler::JumpList done;
3564 // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3565 // separate case for that. But it probably doesn't matter so much.
3566 if (shouldCheckOverflow(node->arithMode())) {
3567 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3568 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3570 // This is the case where we convert the result to an int after we're done, and we
3571 // already know that the denominator is either -1 or 0. So, if the denominator is
3572 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3573 // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3574 // happy to fall through to a normal division, since we're just dividing something
3577 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3578 m_jit.move(TrustedImm32(0), edx.gpr());
3579 done.append(m_jit.jump());
3581 notZero.link(&m_jit);
3582 JITCompiler::Jump notNeg2ToThe31 =
3583 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3584 m_jit.move(TrustedImm32(0), edx.gpr());
3585 done.append(m_jit.jump());
3587 notNeg2ToThe31.link(&m_jit);
3590 safeDenominator.link(&m_jit);
3592 if (op2TempGPR != InvalidGPRReg) {
3593 m_jit.move(op2GPR, op2TempGPR);
3594 op2GPR = op2TempGPR;
// cdq sign-extends eax into edx:eax; idiv leaves the remainder in edx.
3597 m_jit.move(op1GPR, eax.gpr());
3598 m_jit.assembler().cdq();
3599 m_jit.assembler().idivl_r(op2GPR);
3601 if (op2TempGPR != InvalidGPRReg)
3604 // Check that we're not about to create negative zero.
3605 if (shouldCheckNegativeZero(node->arithMode())) {
3606 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3607 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3608 numeratorPositive.link(&m_jit);
3611 if (op1SaveGPR != op1GPR)
3615 int32Result(edx.gpr(), node);
3617 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
// ARM path: sdiv has no remainder output, so compute
// remainder = dividend - (quotient * divisor) manually.
3618 GPRTemporary temp(this);
3619 GPRTemporary quotientThenRemainder(this);
3620 GPRTemporary multiplyAnswer(this);
3621 GPRReg dividendGPR = op1.gpr();
3622 GPRReg divisorGPR = op2.gpr();
3623 GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3624 GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3626 JITCompiler::JumpList done;
3628 if (shouldCheckOverflow(node->arithMode()))
3629 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3631 JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3632 m_jit.move(divisorGPR, quotientThenRemainderGPR);
3633 done.append(m_jit.jump());
3634 denominatorNotZero.link(&m_jit);
3637 m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3638 // FIXME: It seems like there are cases where we don't need this? What if we have
3639 // arithMode() == Arith::Unchecked?
3640 // https://bugs.webkit.org/show_bug.cgi?id=126444
3641 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3642 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3643 m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3645 m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3648 // If the user cares about negative zero, then speculate that we're not about
3649 // to produce negative zero.
3650 if (shouldCheckNegativeZero(node->arithMode())) {
3651 // Check that we're not about to create negative zero.
3652 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3653 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3654 numeratorPositive.link(&m_jit);
3659 int32Result(quotientThenRemainderGPR, node);
3660 #else // not architecture that can do integer division
3661 RELEASE_ASSERT_NOT_REACHED();
// Double path: delegate to the runtime's fmod operation.
3666 case DoubleRepUse: {
3667 SpeculateDoubleOperand op1(this, node->child1());
3668 SpeculateDoubleOperand op2(this, node->child2());
3670 FPRReg op1FPR = op1.fpr();
3671 FPRReg op2FPR = op2.fpr();
3675 FPRResult result(this);
3677 callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3679 doubleResult(result.fpr(), node);
3684 RELEASE_ASSERT_NOT_REACHED();
// Emits code for an ArithRound node (child is always DoubleRepUse).
// Fast path: when an integer result is wanted and -0 detection is not,
// round by adding 0.5 and truncating, bailing out if truncation fails.
// Slow path: call jsRound — presumably JS Math.round semantics; confirm —
// then optionally convert the rounded double back to int32.
3689 void SpeculativeJIT::compileArithRound(Node* node)
3691 ASSERT(node->child1().useKind() == DoubleRepUse);
3693 SpeculateDoubleOperand value(this, node->child1());
3694 FPRReg valueFPR = value.fpr();
// Fast path is only valid when the caller doesn't care about negative
// zero, since value + 0.5 followed by truncation cannot represent -0.
3696 if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
3697 FPRTemporary oneHalf(this);
3698 GPRTemporary roundedResultAsInt32(this);
3699 FPRReg oneHalfFPR = oneHalf.fpr();
3700 GPRReg resultGPR = roundedResultAsInt32.gpr();
3702 static const double halfConstant = 0.5;
3703 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
3704 m_jit.addDouble(valueFPR, oneHalfFPR);
// If the sum doesn't fit in int32, OSR-exit rather than produce garbage.
3706 JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
3707 speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
3708 int32Result(resultGPR, node);
3713 FPRResult roundedResultAsDouble(this);
3714 FPRReg resultFPR = roundedResultAsDouble.fpr();
3715 callOperation(jsRound, resultFPR, valueFPR);
3716 m_jit.exceptionCheck();
3717 if (producesInteger(node->arithRoundingMode())) {
3718 GPRTemporary roundedResultAsInt32(this);
3719 FPRTemporary scratch(this);
3720 FPRReg scratchFPR = scratch.fpr();
3721 GPRReg resultGPR = roundedResultAsInt32.gpr();
3722 JITCompiler::JumpList failureCases;
// Conversion failures (value not exactly representable as int32) exit.
3723 m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
3724 speculationCheck(Overflow, JSValueRegs(), node, failureCases);
3726 int32Result(resultGPR, node);
3728 doubleResult(resultFPR, node);
// Emits code for an ArithSqrt node. Uses the inline hardware sqrt
// instruction when available (and architecture-specific optimizations are
// enabled); otherwise falls back to calling the C sqrt operation.
3731 void SpeculativeJIT::compileArithSqrt(Node* node)
3733 SpeculateDoubleOperand op1(this, node->child1());
3734 FPRReg op1FPR = op1.fpr();
3736 if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3738 FPRResult result(this);
3739 callOperation(sqrt, result.fpr(), op1FPR);
3740 doubleResult(result.fpr(), node);
3742 FPRTemporary result(this, op1);
3743 m_jit.sqrtDouble(op1.fpr(), result.fpr());
3744 doubleResult(result.fpr(), node);
// For small positive integers , it is worth doing a tiny inline loop to exponentiate the base.
// Every register is clobbered by this helper.
// Emits a square-and-multiply loop computing xOperand ** yOperand into
// `result` for exponents in [0, 1000]; exponents outside that range fall
// through to the caller's slow path. Returns the jump the caller links
// past its slow path when the fast path succeeds.
3750 static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
3752 MacroAssembler::JumpList skipFastPath;
// Guard the exponent range: negative or > 1000 exponents use the slow path.
3753 skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
3754 skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
// Accumulator starts at 1.0 (x ** 0).
3756 static const double oneConstant = 1.0;
3757 assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
// Square-and-multiply: multiply the accumulator by x when the exponent's
// low bit is set, then square x and halve the exponent, until it is zero.
3759 MacroAssembler::Label startLoop(assembler.label());
3760 MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
3761 assembler.mulDouble(xOperand, result);
3762 exponentIsEven.link(&assembler);
3763 assembler.mulDouble(xOperand, xOperand);
3764 assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
3765 assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
3767 MacroAssembler::Jump skipSlowPath = assembler.jump();
3768 skipFastPath.link(&assembler);
3770 return skipSlowPath;
3773 void SpeculativeJIT::compileArithPow(Node* node)
3775 if (node->child2().useKind() == Int32Use) {
3776 SpeculateDoubleOperand xOperand(this, node->child1());
3777 SpeculateInt32Operand yOperand(this, node->child2());
3778 FPRReg xOperandfpr = xOperand.fpr();
3779 GPRReg yOperandGpr = yOperand.gpr();
3780 FPRTemporary yOperandfpr(this);
3784 FPRResult result(this);
3785 FPRReg resultFpr = result.fpr();
3787 FPRTemporary xOperandCopy(this);
3788 FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3789 m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3791 GPRTemporary counter(this);
3792 GPRReg counterGpr = counter.gpr();
3793 m_jit.move(yOperandGpr, counterGpr);
3795 MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, coun