2 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMathIC.h"
48 #include "JITMulGenerator.h"
49 #include "JITRightShiftGenerator.h"
50 #include "JITSubGenerator.h"
51 #include "JSCInlines.h"
52 #include "JSEnvironmentRecord.h"
53 #include "JSGeneratorFunction.h"
54 #include "JSLexicalEnvironment.h"
55 #include "LinkBuffer.h"
56 #include "RegExpConstructor.h"
57 #include "ScopedArguments.h"
58 #include "ScratchRegisterAllocator.h"
59 #include "WriteBarrierBuffer.h"
61 #include <wtf/MathExtras.h>
63 namespace JSC { namespace DFG {
// Constructor: binds the speculative JIT to the surrounding JITCompiler and
// initializes per-compilation state derived from the graph and jitCode:
// generation info sized to the frame register count, the abstract state and
// abstract interpreter, the variable event stream, and the minified graph.
// NOTE(review): several initializer-list entries (and the member-init for
// m_jit itself) are missing from this excerpt — confirm against upstream.
65 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
69     , m_lastGeneratedNode(LastNodeType)
71     , m_generationInfo(m_jit.graph().frameRegisterCount())
72     , m_state(m_jit.graph())
73     , m_interpreter(m_jit.graph(), m_state)
74     , m_stream(&jit.jitCode()->variableEventStream)
75     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
// Destructor. The body is not visible in this excerpt (line-number gap);
// presumably empty or trivial — confirm against upstream.
79 SpeculativeJIT::~SpeculativeJIT()
// Emits inline code that allocates a JSFinalObject-shaped object with the
// given structure, plus (when the structure has indexed properties) a
// butterfly with room for vectorLength indexed slots. resultGPR receives the
// object, storageGPR the butterfly (or null when there is no indexing header).
// Allocation failures branch to a slow path that calls operationNewRawObject.
// NOTE(review): this excerpt is missing several lines (the opening brace, the
// declaration of `size`, the fastPath/else structure) — confirm upstream.
83 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
85 IndexingType indexingType = structure->indexingType();
86 bool hasIndexingHeader = hasIndexedProperties(indexingType);
88 unsigned inlineCapacity = structure->inlineCapacity();
89 unsigned outOfLineCapacity = structure->outOfLineCapacity();
91 GPRTemporary scratch(this);
92 GPRTemporary scratch2(this);
93 GPRReg scratchGPR = scratch.gpr();
94 GPRReg scratch2GPR = scratch2.gpr();
// Callers may request fewer elements than the vector can hold, never more.
96 ASSERT(vectorLength >= numElements);
97 vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
99 JITCompiler::JumpList slowCases;
// Butterfly size: indexed storage + indexing header (if any), plus
// out-of-line named-property storage.
102 if (hasIndexingHeader)
103 size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
104 size += outOfLineCapacity * sizeof(JSValue);
108 emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
// emitAllocateBasicStorage returns the end of the allocation; rewind to the
// butterfly base (past the indexed vector, before the indexing header).
109 if (hasIndexingHeader)
110 m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
112 m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
114 m_jit.move(TrustedImmPtr(0), storageGPR);
116 size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
117 MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
118 m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
119 emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
121 if (hasIndexingHeader)
122 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
124 // I want a slow path that also loads out the storage pointer, and that's
125 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
126 // of work for a very small piece of functionality. :-/
127 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
128 slowCases, this, operationNewRawObject, resultGPR, storageGPR,
129 structure, vectorLength));
// Double-shaped butterflies must have their unused slots filled with PNaN
// (the hole value for double arrays). 64-bit stores the encoded NaN directly;
// 32-bit stores tag and payload halves separately.
131 if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
133 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
134 for (unsigned i = numElements; i < vectorLength; ++i)
135 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
137 EncodedValueDescriptor value;
138 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
139 for (unsigned i = numElements; i < vectorLength; ++i) {
140 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
141 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
146 if (hasIndexingHeader)
147 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
// Materializes the argument count for the given (possibly inlined) frame into
// lengthGPR. For a non-varargs inline frame the count is a compile-time
// constant; otherwise it is loaded from the frame's argument-count slot.
// NOTE(review): the else-branch structure and the `if (!includeThis)` guard
// around the final sub32 are missing from this excerpt — confirm upstream.
150 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
152 if (inlineCallFrame && !inlineCallFrame->isVarargs())
// Constant-fold: arguments.size() includes `this`, so subtract it out when
// the caller does not want it counted.
153 m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
155 VirtualRegister argumentCountRegister;
156 if (!inlineCallFrame)
157 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
159 argumentCountRegister = inlineCallFrame->argumentCountRegister;
160 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
// The stored count includes `this`; drop it when not requested.
162 m_jit.sub32(TrustedImm32(1), lengthGPR);
166 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
168 emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
// Loads the callee for the given code origin into calleeGPR. For an inlined
// closure call the callee is recovered from its stack slot; for a non-closure
// inline call it is a compile-time constant cell; otherwise it is loaded from
// the machine frame's callee slot.
// NOTE(review): the loadPtr/move wrappers around the two inline-frame operand
// lines, and the else/brace structure, are missing from this excerpt.
171 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
173 if (origin.inlineCallFrame) {
174 if (origin.inlineCallFrame->isClosureCall) {
176 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
180 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
184 m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
// Computes the address of the first argument of the frame identified by
// `origin` into startGPR, as callFrameRegister plus the arguments-start
// offset scaled to bytes.
// NOTE(review): the addPtr(TrustedImm32(...)) wrapper opening this expression
// is missing from the excerpt — confirm upstream.
187 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
191 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
192 GPRInfo::callFrameRegister, startGPR);
// OSR-exit fuzzing support: emits code that counts dynamic exit checks and
// returns a jump that fires when the configured fuzz threshold is reached,
// forcing an exit. Returns an unset Jump when fuzzing is disabled.
// regT0 is pushed/popped around the counter update so no live value is lost.
195 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
197 if (!doOSRExitFuzzing())
198 return MacroAssembler::Jump();
200 MacroAssembler::Jump result;
202 m_jit.pushToSave(GPRInfo::regT0);
203 m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
204 m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
205 m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
206 unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
207 unsigned at = Options::fireOSRExitFuzzAt();
208 if (at || atOrAfter) {
210 MacroAssembler::RelationalCondition condition;
// "at or after" fires for every check from the threshold on (Below falls
// through); "at" fires only on the exact check number (NotEqual falls
// through). NOTE(review): the threshold declaration and the if/else choosing
// between `at` and `atOrAfter` are missing from this excerpt.
212 threshold = atOrAfter;
213 condition = MacroAssembler::Below;
216 condition = MacroAssembler::NotEqual;
218 MacroAssembler::Jump ok = m_jit.branch32(
219 condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
220 m_jit.popToRestore(GPRInfo::regT0);
221 result = m_jit.jump();
224 m_jit.popToRestore(GPRInfo::regT0);
// Records an OSR exit taken when jumpToFail fires. If exit fuzzing is active,
// the fuzz-trigger jump is folded into the same exit. The OSRExit remembers
// how to reconstruct the bytecode state at m_stream->size().
// NOTE(review): the `if (!m_compileOkay) return;` guard and the else keyword
// before line 240 are missing from this excerpt (line-number gaps).
229 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
233 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
234 if (fuzzJump.isSet()) {
235 JITCompiler::JumpList jumpsToFail;
236 jumpsToFail.append(fuzzJump);
237 jumpsToFail.append(jumpToFail);
238 m_jit.appendExitInfo(jumpsToFail);
240 m_jit.appendExitInfo(jumpToFail);
241 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// JumpList variant of speculationCheck(): any of jumpsToFail triggers the
// same OSR exit; a live fuzz jump is appended to the list first.
// NOTE(review): the `if (!m_compileOkay) return;` guard and the else before
// line 255 are missing from this excerpt.
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
248 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
249 if (fuzzJump.isSet()) {
250 JITCompiler::JumpList myJumpsToFail;
251 myJumpsToFail.append(jumpsToFail);
252 myJumpsToFail.append(fuzzJump);
253 m_jit.appendExitInfo(myJumpsToFail);
255 m_jit.appendExitInfo(jumpsToFail);
256 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Registers an OSR exit with no jump yet and returns a placeholder indexed by
// the exit's position in the osrExit list; the actual branch is linked later.
// Returns an empty placeholder when compilation has already failed.
// NOTE(review): the `if (!m_compileOkay)` condition guarding line 262 is
// missing from this excerpt.
259 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
262 return OSRExitJumpPlaceholder();
263 unsigned index = m_jit.jitCode()->osrExit.size();
264 m_jit.appendExitInfo();
265 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
266 return OSRExitJumpPlaceholder(index);
269 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
271 return speculationCheck(kind, jsValueSource, nodeUse.node());
274 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
276 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
279 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
281 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
284 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
288 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
289 m_jit.appendExitInfo(jumpToFail);
290 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
293 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
295 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
// Emits an invalidation point: an OSR exit with no branch of its own, whose
// replacement source is a watchpoint label that can later be overwritten with
// a jump when the code is invalidated.
// NOTE(review): guard lines (likely `if (!m_compileOkay) return;` and an
// ASSERT) in the 299-301 gap are missing from this excerpt.
298 void SpeculativeJIT::emitInvalidationPoint(Node* node)
302 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
303 m_jit.jitCode()->appendOSRExit(OSRExit(
304 UncountableInvalidation, JSValueSource(),
305 m_jit.graph().methodOfGettingAValueProfileFor(node),
306 this, m_stream->size()));
307 info.m_replacementSource = m_jit.watchpointLabel();
308 ASSERT(info.m_replacementSource.isSet());
312 void SpeculativeJIT::unreachable(Node* node)
314 m_compileOkay = false;
315 m_jit.abortWithReason(DFGUnreachableNode, node->op());
318 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
322 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
323 m_compileOkay = false;
324 if (verboseCompilationEnabled())
325 dataLog("Bailing compilation.\n");
328 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
330 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
333 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
335 ASSERT(needsTypeCheck(edge, typesPassedThrough));
336 m_interpreter.filter(edge, typesPassedThrough);
337 speculationCheck(exitKind, source, edge.node(), jumpToFail);
// Returns the set of GPRs/FPRs currently allocated by the register banks,
// merged with the registers the inline-cache stubs may not use.
// NOTE(review): the declaration of `result`, the result.set(...) calls inside
// both loops, and the final `return result;` are missing from this excerpt.
340 RegisterSet SpeculativeJIT::usedRegisters()
344 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
345 GPRReg gpr = GPRInfo::toRegister(i);
346 if (m_gprs.isInUse(gpr))
349 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
350 FPRReg fpr = FPRInfo::toRegister(i);
351 if (m_fprs.isInUse(fpr))
355 result.merge(RegisterSet::stubUnavailableRegisters());
360 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
362 m_slowPathGenerators.append(WTFMove(slowPathGenerator));
365 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
367 m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
// Emits all deferred slow paths after the main (fast) code: first the
// SlowPathGenerator objects, then the queued lambdas. For each lambda the
// current node and out-of-line stream index are restored so OSR exit state is
// attributed correctly, then reset (Nullopt) afterwards. Labels are recorded
// in the PC-to-CodeOrigin map for profiling/debugging.
370 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
372 for (auto& slowPathGenerator : m_slowPathGenerators) {
373 pcToCodeOriginMapBuilder.appendItem(m_jit.label(), slowPathGenerator->origin().semantic);
374 slowPathGenerator->generate(this);
376 for (auto& slowPathLambda : m_slowPathLambdas) {
377 Node* currentNode = slowPathLambda.currentNode;
378 m_currentNode = currentNode;
379 m_outOfLineStreamIndex = slowPathLambda.streamIndex;
380 pcToCodeOriginMapBuilder.appendItem(m_jit.label(), currentNode->origin.semantic);
381 slowPathLambda.generator();
// Back to "not in a slow path": exit state reverts to the inline stream.
382 m_outOfLineStreamIndex = Nullopt;
386 void SpeculativeJIT::clearGenerationInfo()
388 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
389 m_generationInfo[i] = GenerationInfo();
390 m_gprs = RegisterBank<GPRInfo>();
391 m_fprs = RegisterBank<FPRInfo>();
// Builds a plan for silently spilling (before a call) and refilling (after
// it) the value living in `source`, without perturbing the register
// allocator's bookkeeping. The spill action depends on the value's register
// format; the fill action additionally exploits constants (rematerialize
// instead of reload) and the on-stack spill format.
// NOTE(review): this excerpt is missing many structural lines (#if USE(...)
// blocks, else branches, braces, several fill assignments) — the remaining
// lines preserve original statement order; confirm structure upstream.
394 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
396 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
397 Node* node = info.node();
398 DataFormat registerFormat = info.registerFormat();
399 ASSERT(registerFormat != DataFormatNone);
400 ASSERT(registerFormat != DataFormatDouble);
402 SilentSpillAction spillAction;
403 SilentFillAction fillAction;
// Spill action selection: values already spilled need no store.
405 if (!info.needsSpill())
406 spillAction = DoNothingForSpill;
409 ASSERT(info.gpr() == source);
410 if (registerFormat == DataFormatInt32)
411 spillAction = Store32Payload;
412 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
413 spillAction = StorePtr;
414 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
415 spillAction = Store64;
417 ASSERT(registerFormat & DataFormatJS);
418 spillAction = Store64;
420 #elif USE(JSVALUE32_64)
// 32-bit: JS values live in a tag/payload register pair; spill whichever
// half `source` holds.
421 if (registerFormat & DataFormatJS) {
422 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
423 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
425 ASSERT(info.gpr() == source);
426 spillAction = Store32Payload;
// Fill action selection, by register format:
431 if (registerFormat == DataFormatInt32) {
432 ASSERT(info.gpr() == source);
433 ASSERT(isJSInt32(info.registerFormat()));
434 if (node->hasConstant()) {
435 ASSERT(node->isInt32Constant());
436 fillAction = SetInt32Constant;
438 fillAction = Load32Payload;
439 } else if (registerFormat == DataFormatBoolean) {
// Booleans are never held unboxed in a GPR on 64-bit.
441 RELEASE_ASSERT_NOT_REACHED();
442 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
443 fillAction = DoNothingForFill;
445 #elif USE(JSVALUE32_64)
446 ASSERT(info.gpr() == source);
447 if (node->hasConstant()) {
448 ASSERT(node->isBooleanConstant());
449 fillAction = SetBooleanConstant;
451 fillAction = Load32Payload;
453 } else if (registerFormat == DataFormatCell) {
454 ASSERT(info.gpr() == source);
455 if (node->hasConstant()) {
456 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
457 node->asCell(); // To get the assertion.
458 fillAction = SetCellConstant;
461 fillAction = LoadPtr;
463 fillAction = Load32Payload;
466 } else if (registerFormat == DataFormatStorage) {
467 ASSERT(info.gpr() == source);
468 fillAction = LoadPtr;
469 } else if (registerFormat == DataFormatInt52) {
// Int52 values may be spilled in shifted (Int52) or unshifted
// (StrictInt52) form; reload must convert accordingly.
470 if (node->hasConstant())
471 fillAction = SetInt52Constant;
472 else if (info.spillFormat() == DataFormatInt52)
474 else if (info.spillFormat() == DataFormatStrictInt52)
475 fillAction = Load64ShiftInt52Left;
476 else if (info.spillFormat() == DataFormatNone)
479 RELEASE_ASSERT_NOT_REACHED();
480 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
481 fillAction = Load64; // Make GCC happy.
484 } else if (registerFormat == DataFormatStrictInt52) {
485 if (node->hasConstant())
486 fillAction = SetStrictInt52Constant;
487 else if (info.spillFormat() == DataFormatInt52)
488 fillAction = Load64ShiftInt52Right;
489 else if (info.spillFormat() == DataFormatStrictInt52)
491 else if (info.spillFormat() == DataFormatNone)
494 RELEASE_ASSERT_NOT_REACHED();
495 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
496 fillAction = Load64; // Make GCC happy.
500 ASSERT(registerFormat & DataFormatJS);
502 ASSERT(info.gpr() == source);
503 if (node->hasConstant()) {
// Cell constants can use a trusted immediate (the cell is GC-pinned).
504 if (node->isCellConstant())
505 fillAction = SetTrustedJSConstant;
507 fillAction = SetJSConstant;
508 } else if (info.spillFormat() == DataFormatInt32) {
509 ASSERT(registerFormat == DataFormatJSInt32);
510 fillAction = Load32PayloadBoxInt;
// 32-bit: refill the half of the tag/payload pair that `source` held.
514 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
515 if (node->hasConstant())
516 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
517 else if (info.payloadGPR() == source)
518 fillAction = Load32Payload;
519 else { // Fill the Tag
520 switch (info.spillFormat()) {
521 case DataFormatInt32:
522 ASSERT(registerFormat == DataFormatJSInt32);
523 fillAction = SetInt32Tag;
526 ASSERT(registerFormat == DataFormatJSCell);
527 fillAction = SetCellTag;
529 case DataFormatBoolean:
530 ASSERT(registerFormat == DataFormatJSBoolean);
531 fillAction = SetBooleanTag;
534 fillAction = Load32Tag;
541 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// FPR analogue of silentSavePlanForGPR: plans the spill/fill of the double
// value in `source`. Doubles are either stored/reloaded from the stack slot
// or rematerialized from a numeric constant.
// NOTE(review): #if USE(JSVALUE64)/#endif structure, else branches and braces
// are missing from this excerpt; statement order is preserved.
544 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
546 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
547 Node* node = info.node();
548 ASSERT(info.registerFormat() == DataFormatDouble);
550 SilentSpillAction spillAction;
551 SilentFillAction fillAction;
553 if (!info.needsSpill())
554 spillAction = DoNothingForSpill;
556 ASSERT(!node->hasConstant());
557 ASSERT(info.spillFormat() == DataFormatNone);
558 ASSERT(info.fpr() == source);
559 spillAction = StoreDouble;
563 if (node->hasConstant()) {
564 node->asNumber(); // To get the assertion.
565 fillAction = SetDoubleConstant;
567 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
568 fillAction = LoadDouble;
570 #elif USE(JSVALUE32_64)
571 ASSERT(info.registerFormat() == DataFormatDouble);
572 if (node->hasConstant()) {
573 node->asNumber(); // To get the assertion.
574 fillAction = SetDoubleConstant;
576 fillAction = LoadDouble;
579 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Executes the spill half of a SilentRegisterSavePlan: stores the planned
// register to the node's stack slot using the store width the plan selected.
// NOTE(review): the case labels for Store32Tag/Store32Payload/StorePtr/
// Store64/StoreDouble and break statements are missing from this excerpt;
// each store below corresponds to one of those cases.
582 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
584 switch (plan.spillAction()) {
585 case DoNothingForSpill:
588 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
591 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
594 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
598 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
602 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
605 RELEASE_ASSERT_NOT_REACHED();
// Executes the fill half of a SilentRegisterSavePlan after a call: either
// rematerializes a constant into the planned register or reloads the value
// from its stack slot, converting between Int52 representations as needed.
// canTrample is a scratch GPR used (64-bit only) to stage double-constant
// bits before moving them to an FPR.
// NOTE(review): many case labels, break statements and #if USE(JSVALUE64)/
// JSVALUE32_64 guards are missing from this excerpt; statement order is
// preserved.
609 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
611 #if USE(JSVALUE32_64)
612 UNUSED_PARAM(canTrample);
614 switch (plan.fillAction()) {
615 case DoNothingForFill:
617 case SetInt32Constant:
618 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
// Int52 is stored shifted left by int52ShiftAmount; StrictInt52 is not.
621 case SetInt52Constant:
622 m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
624 case SetStrictInt52Constant:
625 m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
627 #endif // USE(JSVALUE64)
628 case SetBooleanConstant:
629 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
631 case SetCellConstant:
632 m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
635 case SetTrustedJSConstant:
636 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
639 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
641 case SetDoubleConstant:
// 64-bit path: stage the raw bits in a GPR, then move into the FPR.
642 m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
643 m_jit.move64ToDouble(canTrample, plan.fpr());
645 case Load32PayloadBoxInt:
646 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
647 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
649 case Load32PayloadConvertToInt52:
650 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
651 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
652 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
654 case Load32PayloadSignExtend:
655 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
656 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
// 32-bit constant fills: tag and payload halves set independently.
659 case SetJSConstantTag:
660 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
662 case SetJSConstantPayload:
663 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
666 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
669 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
672 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
674 case SetDoubleConstant:
675 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
679 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
682 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
685 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
689 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
691 case Load64ShiftInt52Right:
692 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
693 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
695 case Load64ShiftInt52Left:
696 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
697 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
701 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
704 RELEASE_ASSERT_NOT_REACHED();
// Emits the indexing-type check for a single expected shape. tempGPR holds
// the cell's indexing type byte (it is clobbered by the masking). Returns a
// jump taken when the object does NOT match: array-class modes also require
// the IsArray bit; PossiblyArray ignores it.
// NOTE(review): braces/returns inside the OriginalArray quirk block and some
// case labels are missing from this excerpt.
708 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
710 switch (arrayMode.arrayClass()) {
711 case Array::OriginalArray: {
// OriginalArray should have been handled before reaching here.
713 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
714 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
720 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
721 return m_jit.branch32(
722 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
724 case Array::NonArray:
725 case Array::OriginalNonArray:
726 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
727 return m_jit.branch32(
728 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
730 case Array::PossiblyArray:
731 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
732 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
735 RELEASE_ASSERT_NOT_REACHED();
736 return JITCompiler::Jump();
// Emits the full indexing-type check for an ArrayMode, returning the set of
// jumps taken when the object does not match. Simple shapes delegate to the
// single-shape helper; ArrayStorage/SlowPutArrayStorage use a subtract-and-
// compare range check (SlowPut accepts both ArrayStorageShape and
// SlowPutArrayStorageShape).
// NOTE(review): several case labels, result.append(...) wrappers, else
// branches and the final `return result;` are missing from this excerpt.
739 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
741 JITCompiler::JumpList result;
743 switch (arrayMode.type()) {
745 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
748 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
750 case Array::Contiguous:
751 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
753 case Array::Undecided:
754 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
756 case Array::ArrayStorage:
757 case Array::SlowPutArrayStorage: {
758 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
760 if (arrayMode.isJSArray()) {
761 if (arrayMode.isSlowPut()) {
// JSArray + SlowPut: require the IsArray bit, then range-check the shape.
764 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
765 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
766 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
769 MacroAssembler::Above, tempGPR,
770 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
773 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
775 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
778 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
779 if (arrayMode.isSlowPut()) {
780 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
783 MacroAssembler::Above, tempGPR,
784 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
788 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Compiles a CheckArray node: verifies at runtime that the base cell has the
// indexing type / cell type the node's ArrayMode expects, OSR-exiting
// (BadIndexingType / BadType) on mismatch. Skips the check entirely when the
// abstract state already proves it. Does no conversion (that's Arrayify).
// NOTE(review): case labels (String, Int32, Double, typed arrays), break
// statements and the speculationCheck(...) call wrappers around lines
// 829-830 and 857-861 are missing from this excerpt.
799 void SpeculativeJIT::checkArray(Node* node)
801 ASSERT(node->arrayMode().isSpecific());
802 ASSERT(!node->arrayMode().doesConversion());
804 SpeculateCellOperand base(this, node->child1());
805 GPRReg baseReg = base.gpr();
// Already proven: nothing to emit.
807 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
808 noResult(m_currentNode);
812 const ClassInfo* expectedClassInfo = 0;
814 switch (node->arrayMode().type()) {
815 case Array::AnyTypedArray:
817 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
// Indexed-shape modes: load the indexing-type byte and check it.
821 case Array::Contiguous:
822 case Array::Undecided:
823 case Array::ArrayStorage:
824 case Array::SlowPutArrayStorage: {
825 GPRTemporary temp(this);
826 GPRReg tempGPR = temp.gpr();
827 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
829 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
830 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
832 noResult(m_currentNode);
// Argument objects and typed arrays are identified by JSType, not shape.
835 case Array::DirectArguments:
836 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
837 noResult(m_currentNode);
839 case Array::ScopedArguments:
840 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
841 noResult(m_currentNode);
844 speculateCellTypeWithoutTypeFiltering(
845 node->child1(), baseReg,
846 typeForTypedArrayType(node->arrayMode().typedArrayType()));
847 noResult(m_currentNode);
// Fallback: compare the structure's ClassInfo pointer.
851 RELEASE_ASSERT(expectedClassInfo);
853 GPRTemporary temp(this);
854 GPRTemporary temp2(this);
855 m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
857 BadType, JSValueSource::unboxedCell(baseReg), node,
859 MacroAssembler::NotEqual,
860 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
861 MacroAssembler::TrustedImmPtr(expectedClassInfo)));
863 noResult(m_currentNode);
// Compiles Arrayify/ArrayifyToStructure: checks the base object's current
// shape (structure for ArrayifyToStructure, indexing type otherwise) and, on
// mismatch, runs the ArrayifySlowPathGenerator to convert the object's
// storage. The extra structure temp is only claimed when not doing the
// structure-compare flavor.
// NOTE(review): the fast-path structure-check argument (the expected
// structure) after line 887 and several braces are missing from this excerpt.
866 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
868 ASSERT(node->arrayMode().doesConversion());
870 GPRTemporary temp(this);
871 GPRTemporary structure;
872 GPRReg tempGPR = temp.gpr();
873 GPRReg structureGPR = InvalidGPRReg;
875 if (node->op() != ArrayifyToStructure) {
876 GPRTemporary realStructure(this);
877 structure.adopt(realStructure);
878 structureGPR = structure.gpr();
881 // We can skip all that comes next if we already have array storage.
882 MacroAssembler::JumpList slowPath;
884 if (node->op() == ArrayifyToStructure) {
885 slowPath.append(m_jit.branchWeakStructure(
886 JITCompiler::NotEqual,
887 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
891 MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
893 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
896 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
897 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
899 noResult(m_currentNode);
// Entry point for Arrayify: allocates the base (and, when present, the int32
// property used for length-dependent conversion) operands, then delegates to
// the three-argument arrayify above.
// NOTE(review): braces and a `return;` after line 909 are missing from this
// excerpt.
902 void SpeculativeJIT::arrayify(Node* node)
904 ASSERT(node->arrayMode().isSpecific());
906 SpeculateCellOperand base(this, node->child1());
908 if (!node->child2()) {
909 arrayify(node, base.gpr(), InvalidGPRReg);
913 SpeculateInt32Operand property(this, node->child2());
915 arrayify(node, base.gpr(), property.gpr());
// Materializes an edge whose value is a storage (butterfly) pointer into a
// GPR. Spilled storage is reloaded; a value not in storage format is filled
// as a cell (the cell pointer doubles as the storage pointer for direct
// arguments et al.).
// NOTE(review): `return gpr;` statements after lines 929/938 and case/brace
// structure are missing from this excerpt.
918 GPRReg SpeculativeJIT::fillStorage(Edge edge)
920 VirtualRegister virtualRegister = edge->virtualRegister();
921 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
923 switch (info.registerFormat()) {
924 case DataFormatNone: {
925 if (info.spillFormat() == DataFormatStorage) {
926 GPRReg gpr = allocate();
927 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
928 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
929 info.fillStorage(*m_stream, gpr);
933 // Must be a cell; fill it as a cell and then return the pointer.
934 return fillSpeculateCell(edge);
937 case DataFormatStorage: {
938 GPRReg gpr = info.gpr();
944 return fillSpeculateCell(edge);
// Marks all of a node's children as used (decrementing their ref counts /
// releasing registers). Handles both the var-args child list and the fixed
// child1/child2/child3 slots, stopping at the first empty edge.
// NOTE(review): the use(childN) calls and early returns for the fixed-edge
// path, plus else/brace structure, are missing from this excerpt.
948 void SpeculativeJIT::useChildren(Node* node)
950 if (node->flags() & NodeHasVarArgs) {
951 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
952 if (!!m_jit.graph().m_varArgChildren[childIdx])
953 use(m_jit.graph().m_varArgChildren[childIdx]);
956 Edge child1 = node->child1();
958 ASSERT(!node->child2() && !node->child3());
963 Edge child2 = node->child2();
965 ASSERT(!node->child3());
970 Edge child3 = node->child3();
// Compiles TryGetById (the @tryGetById intrinsic): a pure get-by-id IC that
// must not cause arbitrary effects. CellUse speculates the base is a cell;
// UntypedUse branches out for non-cells and lets the IC's slow path handle
// them. Both paths use a cached (patchable) GetPure access.
// NOTE(review): case labels (CellUse / UntypedUse), base.use()/flushing
// around the cachedGetById calls, and break statements are missing from this
// excerpt.
977 void SpeculativeJIT::compileTryGetById(Node* node)
979 switch (node->child1().useKind()) {
981 SpeculateCellOperand base(this, node->child1());
982 JSValueRegsTemporary result(this, Reuse, base);
984 JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
985 JSValueRegs resultRegs = result.regs();
989 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
991 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
996 JSValueOperand base(this, node->child1());
997 JSValueRegsTemporary result(this, Reuse, base);
999 JSValueRegs baseRegs = base.jsValueRegs();
1000 JSValueRegs resultRegs = result.regs();
1004 JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1006 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
1008 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1013 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
// Compiles the `in` operator. Fast case: when the property is a constant
// atomic string, emit a patchable jump + structure-stub IC (operationInOptimize
// on the slow path) so repeated checks against the same structure are cheap.
// Otherwise fall back to a call to operationGenericIn with the key value.
// NOTE(review): flushRegisters() calls, the jump.m_jump patch plumbing
// around line 1057, braces and the generic-path callOperation argument list
// are partially missing from this excerpt.
1018 void SpeculativeJIT::compileIn(Node* node)
1020 SpeculateCellOperand base(this, node->child2());
1021 GPRReg baseGPR = base.gpr();
1023 if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1024 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1025 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1027 GPRTemporary result(this);
1028 GPRReg resultGPR = result.gpr();
1030 use(node->child1());
1032 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1033 MacroAssembler::Label done = m_jit.label();
1035 // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1036 // we can cast it to const AtomicStringImpl* safely.
1037 auto slowPath = slowPathCall(
1038 jump.m_jump, this, operationInOptimize,
1039 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1040 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
// Fill in the stub's metadata so the IC patching machinery knows which
// registers hold the base and the result.
1042 stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1043 stubInfo->codeOrigin = node->origin.semantic;
1044 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1045 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1046 #if USE(JSVALUE32_64)
1047 stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1048 stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1050 stubInfo->patch.usedRegisters = usedRegisters();
1052 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1053 addSlowPathGenerator(WTFMove(slowPath));
1057 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: evaluate the key and call out.
1062 JSValueOperand key(this, node->child1());
1063 JSValueRegs regs = key.jsValueRegs();
1065 GPRFlushedCallResult result(this);
1066 GPRReg resultGPR = result.gpr();
1073 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1075 m_jit.exceptionCheck();
1076 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles DeleteById: always an out-of-line C call to operationDeleteById
// with the base value and the property identifier's UID. Checks for a thrown
// exception afterwards; the result is an unblessed (raw 0/1) boolean.
1079 void SpeculativeJIT::compileDeleteById(Node* node)
1081 JSValueOperand value(this, node->child1());
1082 GPRFlushedCallResult result(this);
1084 JSValueRegs valueRegs = value.jsValueRegs();
1085 GPRReg resultGPR = result.gpr();
1090 callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1091 m_jit.exceptionCheck();
1093 unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles DeleteByVal: mirrors compileDeleteById but passes the dynamic key
// as a second JSValue to operationDeleteByVal. Exception-checked C call;
// unblessed (raw 0/1) boolean result.
1096 void SpeculativeJIT::compileDeleteByVal(Node* node)
1098 JSValueOperand base(this, node->child1());
1099 JSValueOperand key(this, node->child2());
1100 GPRFlushedCallResult result(this);
1102 JSValueRegs baseRegs = base.jsValueRegs();
1103 JSValueRegs keyRegs = key.jsValueRegs();
1104 GPRReg resultGPR = result.gpr();
1110 callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1111 m_jit.exceptionCheck();
1113 unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Emits a non-speculative comparison. If the compare's only use is the
// immediately following Branch node (detected by detectPeepHoleBranch), the
// compare and branch are fused into one peephole branch and the iteration
// cursor is advanced past the branch; the return value reports whether that
// fusion happened.
1116 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1118 unsigned branchIndexInBlock = detectPeepHoleBranch();
1119 if (branchIndexInBlock != UINT_MAX) {
1120 Node* branchNode = m_block->at(branchIndexInBlock);
// Peephole detection only fires when the branch is the next node, so
// nothing else can reference this compare.
1122 ASSERT(node->adjustedRefCount() == 1);
1124 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
// Skip ahead so the fused branch is not compiled a second time.
1126 m_indexInBlock = branchIndexInBlock;
1127 m_currentNode = branchNode;
1132 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
// Emits a non-speculative strict-equality (===) comparison, optionally
// inverted. Same peephole-fusion structure as nonSpeculativeCompare: fuse
// with an immediately following Branch when possible and advance the cursor,
// returning whether fusion happened.
1137 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1139 unsigned branchIndexInBlock = detectPeepHoleBranch();
1140 if (branchIndexInBlock != UINT_MAX) {
1141 Node* branchNode = m_block->at(branchIndexInBlock);
1143 ASSERT(node->adjustedRefCount() == 1);
1145 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1147 m_indexInBlock = branchIndexInBlock;
1148 m_currentNode = branchNode;
1153 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Debug helper: maps a DataFormat enum value to a short printable name.
// The table (elided in this listing) must stay in sync with the DataFormat
// enum ordering, as the comment below notes.
1158 static const char* dataFormatString(DataFormat format)
1160 // These values correspond to the DataFormat enum.
1161 const char* strings[] = {
1179 return strings[format];
// Debug dump of the speculative JIT's register-allocation state: GPR and FPR
// banks, then per-virtual-register GenerationInfo (register format, spill
// format, and which GPR/FPR currently holds the value).
1182 void SpeculativeJIT::dump(const char* label)
1185 dataLogF("<%s>\n", label);
1187 dataLogF(" gprs:\n");
1189 dataLogF(" fprs:\n");
1191 dataLogF(" VirtualRegisters:\n");
1192 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1193 GenerationInfo& info = m_generationInfo[i];
1195 dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
// Placeholder row for entries with no live format info.
1197 dataLogF(" % 3d:[__][__]", i);
1198 if (info.registerFormat() == DataFormatDouble)
1199 dataLogF(":fpr%d\n", info.fpr());
1200 else if (info.registerFormat() != DataFormatNone
1201 #if USE(JSVALUE32_64)
// On 32-bit, JS formats occupy a register pair, so a single-GPR print
// does not apply to them.
1202 && !(info.registerFormat() & DataFormatJS)
1205 ASSERT(info.gpr() != InvalidGPRReg);
1206 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1211 dataLogF("</%s>\n", label);
// GPRTemporary constructors (several out-of-line definitions; initializer
// lists are partially elided in this listing). A GPRTemporary is an
// RAII-style handle on a general-purpose register allocated from the JIT's
// register bank.
// Default: holds no register (InvalidGPRReg) until adopt()'d or assigned.
1214 GPRTemporary::GPRTemporary()
1216 , m_gpr(InvalidGPRReg)
// Allocate any free GPR from the JIT.
1220 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1222 , m_gpr(InvalidGPRReg)
1224 m_gpr = m_jit->allocate();
// Allocate one specific GPR (e.g. for calling-convention constraints).
1227 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1229 , m_gpr(InvalidGPRReg)
1231 m_gpr = m_jit->allocate(specific);
1234 #if USE(JSVALUE32_64)
// 32-bit only: try to reuse one word (tag or payload, selected by 'which')
// of a JSValueOperand when the operand's register can be reused; otherwise
// fall back to a fresh allocation.
1235 GPRTemporary::GPRTemporary(
1236 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1238 , m_gpr(InvalidGPRReg)
1240 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1241 m_gpr = m_jit->reuse(op1.gpr(which));
1243 m_gpr = m_jit->allocate();
1245 #endif // USE(JSVALUE32_64)
// JSValueRegsTemporary members. A JSValueRegsTemporary owns the register(s)
// needed to hold a JSValue: one GPR on 64-bit, a tag/payload GPR pair on
// 32-bit. The pairs of near-identical definitions below are the JSVALUE64
// vs JSVALUE32_64 variants; the #if/#else/#endif lines between them are
// elided in this listing.
1247 JSValueRegsTemporary::JSValueRegsTemporary() { }
1249 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
// 64-bit reuse ctor: the resultWord is irrelevant (single register).
1260 template<typename T>
1261 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1262 : m_gpr(jit, Reuse, operand)
// 32-bit reuse ctor: reuse the operand's register for whichever word the
// caller asked for, and allocate a fresh GPR for the other word.
1266 template<typename T>
1267 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1269 if (resultWord == PayloadWord) {
1270 m_payloadGPR = GPRTemporary(jit, Reuse, operand)
1271 m_tagGPR = GPRTemporary(jit);
1273 m_payloadGPR = GPRTemporary(jit);
1274 m_tagGPR = GPRTemporary(jit, Reuse, operand);
// Reuse from a JSValueOperand: 64-bit takes the single register...
1280 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1282 m_gpr = GPRTemporary(jit, Reuse, operand);
// ...and 32-bit reuses both words only when the whole operand is reusable,
// otherwise allocates a fresh pair.
1285 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1287 if (jit->canReuse(operand.node())) {
1288 m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1289 m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1291 m_payloadGPR = GPRTemporary(jit);
1292 m_tagGPR = GPRTemporary(jit);
1297 JSValueRegsTemporary::~JSValueRegsTemporary() { }
// Accessor returning the underlying register(s) as a JSValueRegs: single
// GPR on 64-bit, (tag, payload) pair on 32-bit.
1299 JSValueRegs JSValueRegsTemporary::regs()
1302 return JSValueRegs(m_gpr.gpr());
1304 return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
// Transfers register ownership from 'other' into this (previously empty)
// temporary, leaving 'other' holding InvalidGPRReg so its destructor will
// not release the register twice.
1308 void GPRTemporary::adopt(GPRTemporary& other)
1311 ASSERT(m_gpr == InvalidGPRReg);
1312 ASSERT(other.m_jit);
1313 ASSERT(other.m_gpr != InvalidGPRReg);
1314 m_jit = other.m_jit;
1315 m_gpr = other.m_gpr;
1317 other.m_gpr = InvalidGPRReg;
// FPRTemporary constructors: RAII handle on a floating-point register.
// The reuse variants try to take over an operand's FPR when that operand's
// value is no longer needed, avoiding an extra allocation.
1320 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1322 , m_fpr(InvalidFPRReg)
1324 m_fpr = m_jit->fprAllocate();
// Reuse op1's register when possible, else allocate.
1327 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1329 , m_fpr(InvalidFPRReg)
1331 if (m_jit->canReuse(op1.node()))
1332 m_fpr = m_jit->reuse(op1.fpr());
1334 m_fpr = m_jit->fprAllocate();
// Two-operand variant: prefer reusing op1, then op2; the last case handles
// both operands being the same node in the same register.
1337 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1339 , m_fpr(InvalidFPRReg)
1341 if (m_jit->canReuse(op1.node()))
1342 m_fpr = m_jit->reuse(op1.fpr());
1343 else if (m_jit->canReuse(op2.node()))
1344 m_fpr = m_jit->reuse(op2.fpr());
1345 else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1346 m_fpr = m_jit->reuse(op1.fpr());
1348 m_fpr = m_jit->fprAllocate();
1351 #if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand may hold an unboxed double in an FPR, which
// can then be reused directly.
1352 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1354 , m_fpr(InvalidFPRReg)
1356 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1357 m_fpr = m_jit->reuse(op1.fpr());
1359 m_fpr = m_jit->fprAllocate();
// Fused double-compare-and-branch. If the taken successor is the next block
// in emission order, invert the condition and swap successors so the taken
// edge becomes a fall-through.
1363 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1365 BasicBlock* taken = branchNode->branchData()->taken.block;
1366 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1368 if (taken == nextBlock()) {
1369 condition = MacroAssembler::invert(condition);
1370 std::swap(taken, notTaken);
1373 SpeculateDoubleOperand op1(this, node->child1());
1374 SpeculateDoubleOperand op2(this, node->child2());
1376 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-equality compare-and-branch for two speculated-cell children.
// Both children are checked to be objects; how depends on whether the
// masquerades-as-undefined watchpoint is still valid:
// - valid: a simple branchIfNotObject speculation check per child suffices;
// - invalid: additionally speculate that neither object has the
//   MasqueradesAsUndefined type-info flag, since such objects compare
//   specially against undefined.
// Finally branch on pointer equality of the two cells.
1380 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1382 BasicBlock* taken = branchNode->branchData()->taken.block;
1383 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1385 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// Invert and swap successors so the taken edge can fall through.
1387 if (taken == nextBlock()) {
1388 condition = MacroAssembler::NotEqual;
1389 BasicBlock* tmp = taken;
1394 SpeculateCellOperand op1(this, node->child1());
1395 SpeculateCellOperand op2(this, node->child2());
1397 GPRReg op1GPR = op1.gpr();
1398 GPRReg op2GPR = op2.gpr();
1400 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
// Only emit the object check when the abstract state cannot already
// prove the child is an object.
1401 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1403 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1405 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1407 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
// Watchpoint invalidated: also rule out MasqueradesAsUndefined objects.
1410 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1412 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1413 m_jit.branchIfNotObject(op1GPR));
1415 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1417 MacroAssembler::NonZero,
1418 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1419 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1421 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1423 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1424 m_jit.branchIfNotObject(op2GPR));
1426 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1428 MacroAssembler::NonZero,
1429 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1430 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Object identity: equality of the cell pointers.
1433 branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean compare-and-branch. Uses immediate forms when either child
// is an int32 constant; otherwise compares two boolean operands directly.
1437 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1439 BasicBlock* taken = branchNode->branchData()->taken.block;
1440 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1442 // The branch instruction will branch to the taken block.
1443 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1444 if (taken == nextBlock()) {
1445 condition = JITCompiler::invert(condition);
1446 BasicBlock* tmp = taken;
1451 if (node->child1()->isInt32Constant()) {
1452 int32_t imm = node->child1()->asInt32();
1453 SpeculateBooleanOperand op2(this, node->child2());
1454 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1455 } else if (node->child2()->isInt32Constant()) {
1456 SpeculateBooleanOperand op1(this, node->child1());
1457 int32_t imm = node->child2()->asInt32();
1458 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1460 SpeculateBooleanOperand op1(this, node->child1());
1461 SpeculateBooleanOperand op2(this, node->child2());
1462 branch32(condition, op1.gpr(), op2.gpr(), taken);
// Fused int32 compare-and-branch; structurally identical to the boolean
// variant but with SpeculateInt32Operand children.
1468 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1470 BasicBlock* taken = branchNode->branchData()->taken.block;
1471 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1473 // The branch instruction will branch to the taken block.
1474 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1475 if (taken == nextBlock()) {
1476 condition = JITCompiler::invert(condition);
1477 BasicBlock* tmp = taken;
1482 if (node->child1()->isInt32Constant()) {
1483 int32_t imm = node->child1()->asInt32();
1484 SpeculateInt32Operand op2(this, node->child2());
1485 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1486 } else if (node->child2()->isInt32Constant()) {
1487 SpeculateInt32Operand op1(this, node->child1());
1488 int32_t imm = node->child2()->asInt32();
1489 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1491 SpeculateInt32Operand op1(this, node->child1());
1492 SpeculateInt32Operand op2(this, node->child2());
1493 branch32(condition, op1.gpr(), op2.gpr(), taken);
1499 // Returns true if the compare is fused with a subsequent branch.
// Central dispatcher for compare+branch fusion: picks the specialized
// peephole compiler based on the compare node's use kinds (int32, int52,
// double, boolean, symbol, object, object-vs-other, null/undefined) and
// falls back to the generic non-speculative peephole branch otherwise.
// When fusion happens, the children are used explicitly and the iteration
// cursor is advanced past the branch node.
1500 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1502 // Fused compare & branch.
1503 unsigned branchIndexInBlock = detectPeepHoleBranch();
1504 if (branchIndexInBlock != UINT_MAX) {
1505 Node* branchNode = m_block->at(branchIndexInBlock);
1507 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1508 // so can be no intervening nodes to also reference the compare.
1509 ASSERT(node->adjustedRefCount() == 1);
1511 if (node->isBinaryUseKind(Int32Use))
1512 compilePeepHoleInt32Branch(node, branchNode, condition);
// Int52 path is 64-bit only (guarded by the elided #if USE(JSVALUE64)).
1514 else if (node->isBinaryUseKind(Int52RepUse))
1515 compilePeepHoleInt52Branch(node, branchNode, condition);
1516 #endif // USE(JSVALUE64)
1517 else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1518 // Use non-peephole comparison, for now.
1520 } else if (node->isBinaryUseKind(DoubleRepUse))
1521 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
// CompareEq has extra specializations not valid for ordered compares.
1522 else if (node->op() == CompareEq) {
1523 if (node->isBinaryUseKind(BooleanUse))
1524 compilePeepHoleBooleanBranch(node, branchNode, condition);
1525 else if (node->isBinaryUseKind(SymbolUse))
1526 compilePeepHoleSymbolEquality(node, branchNode);
1527 else if (node->isBinaryUseKind(ObjectUse))
1528 compilePeepHoleObjectEquality(node, branchNode);
1529 else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1530 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1531 else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1532 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
// If one side is provably null/undefined, branch on the other side
// being null-or-undefined.
1533 else if (!needsTypeCheck(node->child1(), SpecOther))
1534 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1535 else if (!needsTypeCheck(node->child2(), SpecOther))
1536 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1538 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1542 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1546 use(node->child1());
1547 use(node->child2());
// Advance past the fused branch node.
1548 m_indexInBlock = branchIndexInBlock;
1549 m_currentNode = branchNode;
// Records in the variable event stream that 'node' has become live for OSR
// purposes; no-op for nodes without a virtual register.
1555 void SpeculativeJIT::noticeOSRBirth(Node* node)
1557 if (!node->hasVirtualRegister())
1560 VirtualRegister virtualRegister = node->virtualRegister();
1561 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1563 info.noticeOSRBirth(*m_stream, node, virtualRegister);
// Compiles a MovHint: no machine code is emitted; instead, record (for OSR
// exit reconstruction) that the child node's value now logically lives in
// the hinted bytecode local.
1566 void SpeculativeJIT::compileMovHint(Node* node)
1568 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1570 Node* child = node->child1().node();
1571 noticeOSRBirth(child);
1573 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Aborts compilation of the current block: emits a crash with the given
// reason and clears the register-allocation state. Setting m_compileOkay
// back to true here lets compilation of subsequent blocks proceed without
// re-triggering the bail.
1576 void SpeculativeJIT::bail(AbortReason reason)
1578 if (verboseCompilationEnabled())
1579 dataLog("Bailing compilation.\n");
1580 m_compileOkay = true;
1581 m_jit.abortWithReason(reason, m_lastGeneratedNode);
1582 clearGenerationInfo();
// Generates machine code for m_block: binds the block's entry label, guards
// against CFA-unreachable blocks, replays the variables live at block head
// into the variable event stream (for OSR exit), then walks the nodes,
// running the in-JIT abstract interpreter in lockstep with code generation.
// NOTE(review): this listing elides lines; comments cover visible code only.
1585 void SpeculativeJIT::compileCurrentBlock()
1587 ASSERT(m_compileOkay);
1592 ASSERT(m_block->isReachable);
1594 m_jit.blockHeads()[m_block->index] = m_jit.label();
1596 if (!m_block->intersectionOfCFAHasVisited) {
1597 // Don't generate code for basic blocks that are unreachable according to CFA.
1598 // But to be sure that nobody has generated a jump to this block, drop in a
1600 m_jit.abortWithReason(DFGUnreachableBasicBlock);
1604 m_stream->appendAndLog(VariableEvent::reset());
// Sanity checks on the incoming call frame / value tags in debug builds.
1606 m_jit.jitAssertHasValidCallFrame();
1607 m_jit.jitAssertTagsInPlace();
1608 m_jit.jitAssertArgumentCountSane();
1611 m_state.beginBasicBlock(m_block);
// Tell the OSR exit machinery where each live variable lives at block head.
1613 for (size_t i = m_block->variablesAtHead.size(); i--;) {
1614 int operand = m_block->variablesAtHead.operandForIndex(i);
1615 Node* node = m_block->variablesAtHead[i];
1617 continue; // No need to record dead SetLocal's.
1619 VariableAccessData* variable = node->variableAccessData();
1621 if (!node->refCount())
1622 continue; // No need to record dead SetLocal's.
1623 format = dataFormatFor(variable->flushFormat());
1624 m_stream->appendAndLog(
1625 VariableEvent::setLocal(
1626 VirtualRegister(operand),
1627 variable->machineLocal(),
1631 m_origin = NodeOrigin();
// Main per-node loop: abstract-interpret and emit code for each node.
1633 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1634 m_currentNode = m_block->at(m_indexInBlock);
1636 // We may have hit a contradiction that the CFA was aware of but that the JIT
1637 // didn't cause directly.
1638 if (!m_state.isValid()) {
1639 bail(DFGBailedAtTopOfBlock);
1643 m_interpreter.startExecuting();
1644 m_interpreter.executeKnownEdgeTypes(m_currentNode);
1645 m_jit.setForNode(m_currentNode);
1646 m_origin = m_currentNode->origin;
1647 if (validationEnabled())
1648 m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1649 m_lastGeneratedNode = m_currentNode->op();
1651 ASSERT(m_currentNode->shouldGenerate());
1653 if (verboseCompilationEnabled()) {
1655 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1656 (int)m_currentNode->index(),
1657 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1661 if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1662 m_jit.jitReleaseAssertNoException();
1664 m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
// Emit the actual machine code for this node.
1666 compile(m_currentNode);
1668 if (belongsInMinifiedGraph(m_currentNode->op()))
1669 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1671 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1672 m_jit.clearRegisterAllocationOffsets();
1675 if (!m_compileOkay) {
1676 bail(DFGBailedAtEndOfNode);
1680 // Make sure that the abstract state is rematerialized for the next node.
1681 m_interpreter.executeEffects(m_indexInBlock);
1684 // Perform the most basic verification that children have been used correctly.
1685 if (!ASSERT_DISABLED) {
1686 for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1687 GenerationInfo& info = m_generationInfo[index];
1688 RELEASE_ASSERT(!info.alive());
1693 // If we are making type predictions about our arguments then
1694 // we need to check that they are correct on function entry.
// Emits per-argument speculation checks at function entry based on each
// argument's flush format. The two switch bodies below are the JSVALUE64
// and JSVALUE32_64 variants respectively (the #if/#else lines between them
// are elided in this listing): 64-bit checks the encoded value's tag bits,
// 32-bit checks the separate tag word.
1695 void SpeculativeJIT::checkArgumentTypes()
1697 ASSERT(!m_currentNode);
1698 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1700 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1701 Node* node = m_jit.graph().m_arguments[i];
1703 // The argument is dead. We don't do any checks for such arguments.
1707 ASSERT(node->op() == SetArgument);
1708 ASSERT(node->shouldGenerate());
1710 VariableAccessData* variableAccessData = node->variableAccessData();
1711 FlushFormat format = variableAccessData->flushFormat();
// A fully generic JSValue needs no entry check.
1713 if (format == FlushedJSValue)
1716 VirtualRegister virtualRegister = variableAccessData->local();
1718 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// 64-bit checks: int32 values sit at/above the tagTypeNumber range;
// booleans xor'd with ValueFalse must leave only the low bit; cells have
// no tag-mask bits set.
1722 case FlushedInt32: {
1723 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1726 case FlushedBoolean: {
1727 GPRTemporary temp(this);
1728 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1729 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1730 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1734 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1738 RELEASE_ASSERT_NOT_REACHED();
// 32-bit checks: compare the tag word against the expected tag.
1743 case FlushedInt32: {
1744 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1747 case FlushedBoolean: {
1748 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1752 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1756 RELEASE_ASSERT_NOT_REACHED();
1762 m_origin = NodeOrigin();
// Top-level driver: emit argument type checks, then compile every basic
// block of the graph in index order.
1765 bool SpeculativeJIT::compile()
1767 checkArgumentTypes();
1769 ASSERT(!m_currentNode);
1770 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1771 m_jit.setForBlockIndex(blockIndex);
1772 m_block = m_jit.graph().block(blockIndex);
1773 compileCurrentBlock();
// Collects the entry labels of all OSR-target blocks so linkOSREntries can
// register them with the JIT code after the link buffer is finalized.
1779 void SpeculativeJIT::createOSREntries()
1781 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1782 BasicBlock* block = m_jit.graph().block(blockIndex);
1785 if (!block->isOSRTarget)
1788 // Currently we don't have OSR entry trampolines. We could add them
1790 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// Pairs each OSR-target block with the entry label captured by
// createOSREntries (same iteration order, consumed via osrEntryIndex) and
// registers it in the link buffer. Optionally dumps the resulting entries.
1794 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1796 unsigned osrEntryIndex = 0;
1797 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1798 BasicBlock* block = m_jit.graph().block(blockIndex);
1801 if (!block->isOSRTarget)
1803 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
// Every collected head must have been consumed.
1805 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1807 if (verboseCompilationEnabled()) {
1808 DumpContext dumpContext;
1809 dataLog("OSR Entries:\n");
1810 for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1811 dataLog(" ", inContext(entryData, &dumpContext), "\n");
1812 if (!dumpContext.isEmpty())
1813 dumpContext.dump(WTF::dataFile());
// Compiles PutByVal into a double (contiguous-double) array. Speculates the
// stored value is a full real number (NaN is excluded so the hole-value NaN
// encoding stays unambiguous). PutByValAlias skips all bounds handling;
// otherwise in-bounds stores are guarded by the public length, and
// out-of-bounds modes may grow the public length or take a slow-path C call.
1817 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1819 Edge child3 = m_jit.graph().varArgChild(node, 2);
1820 Edge child4 = m_jit.graph().varArgChild(node, 3);
1822 ArrayMode arrayMode = node->arrayMode();
1824 GPRReg baseReg = base.gpr();
1825 GPRReg propertyReg = property.gpr();
1827 SpeculateDoubleOperand value(this, child3);
1829 FPRReg valueReg = value.fpr();
// Reject NaN (DoubleNotEqualOrUnordered against itself) via speculation.
1832 JSValueRegs(), child3, SpecFullRealNumber,
1834 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1839 StorageOperand storage(this, child4);
1840 GPRReg storageReg = storage.gpr();
1842 if (node->op() == PutByValAlias) {
1843 // Store the value to the array.
1844 GPRReg propertyReg = property.gpr();
1845 FPRReg valueReg = value.fpr();
1846 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1848 noResult(m_currentNode);
1852 GPRTemporary temporary;
1853 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1855 MacroAssembler::Jump slowCase;
// In-bounds mode: OSR-exit if the index is beyond the public length.
1857 if (arrayMode.isInBounds()) {
1859 OutOfBounds, JSValueRegs(), 0,
1860 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
// Otherwise: in-bounds fast path, vector-length check for the append
// path, and (for growing modes) a public-length bump.
1862 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1864 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1866 if (!arrayMode.isOutOfBounds())
1867 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Appending at the end: publicLength = index + 1.
1869 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1870 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1872 inBounds.link(&m_jit);
1875 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
// True out-of-bounds stores go to a C call (strict-mode aware).
1882 if (arrayMode.isOutOfBounds()) {
1883 addSlowPathGenerator(
1886 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1887 NoResult, baseReg, propertyReg, valueReg));
1890 noResult(m_currentNode, UseChildrenCalledExplicitly);
// Compiles GetCharCodeAt on a speculated JSString: bounds-check the index
// against the string length, then load the character as an 8-bit or 16-bit
// unit depending on the StringImpl's is8Bit flag. Result is the int32 code.
1893 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1895 SpeculateCellOperand string(this, node->child1());
1896 SpeculateStrictInt32Operand index(this, node->child2());
1897 StorageOperand storage(this, node->child3());
1899 GPRReg stringReg = string.gpr();
1900 GPRReg indexReg = index.gpr();
1901 GPRReg storageReg = storage.gpr();
1903 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1905 // unsigned comparison so we can filter out negative indices and indices that are too large
1906 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1908 GPRTemporary scratch(this);
1909 GPRReg scratchReg = scratch.gpr();
// scratch first holds the StringImpl*, then is overwritten with the char.
1911 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1913 // Load the character into scratchReg
1914 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1916 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1917 JITCompiler::Jump cont8Bit = m_jit.jump();
1919 is16Bit.link(&m_jit);
1921 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1923 cont8Bit.link(&m_jit);
1925 int32Result(scratchReg, m_currentNode);
// Compiles GetByVal on a speculated JSString. Fast path: load the character
// (8- or 16-bit), and for chars < 0x100 map it to a pre-allocated
// single-character JSString from the VM's smallStrings table; bigger chars
// take a slow path (operationSingleCharacterString). Out-of-bounds access is
// either an OSR exit (in-bounds mode) or handled by a slow path whose choice
// depends on whether the String prototype chain is "sane" (no indexed
// accessors), in which case OOB can simply return undefined.
// NOTE(review): this listing elides lines; comments cover visible code only.
1928 void SpeculativeJIT::compileGetByValOnString(Node* node)
1930 SpeculateCellOperand base(this, node->child1());
1931 SpeculateStrictInt32Operand property(this, node->child2());
1932 StorageOperand storage(this, node->child3());
1933 GPRReg baseReg = base.gpr();
1934 GPRReg propertyReg = property.gpr();
1935 GPRReg storageReg = storage.gpr();
1937 GPRTemporary scratch(this);
1938 GPRReg scratchReg = scratch.gpr();
1939 #if USE(JSVALUE32_64)
// 32-bit OOB results need a tag register as well as the payload.
1940 GPRTemporary resultTag;
1941 GPRReg resultTagReg = InvalidGPRReg;
1942 if (node->arrayMode().isOutOfBounds()) {
1943 GPRTemporary realResultTag(this);
1944 resultTag.adopt(realResultTag);
1945 resultTagReg = resultTag.gpr();
1949 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1951 // unsigned comparison so we can filter out negative indices and indices that are too large
1952 JITCompiler::Jump outOfBounds = m_jit.branch32(
1953 MacroAssembler::AboveOrEqual, propertyReg,
1954 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1955 if (node->arrayMode().isInBounds())
1956 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1958 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1960 // Load the character into scratchReg
1961 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1963 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1964 JITCompiler::Jump cont8Bit = m_jit.jump();
1966 is16Bit.link(&m_jit);
1968 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Chars >= 0x100 have no smallStrings entry; go to the slow path.
1970 JITCompiler::Jump bigCharacter =
1971 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1973 // 8 bit string values don't need the isASCII check.
1974 cont8Bit.link(&m_jit);
// Index into the singleCharacterStrings table (pointer-sized entries).
1976 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1977 m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1978 m_jit.loadPtr(scratchReg, scratchReg);
1980 addSlowPathGenerator(
1982 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1984 if (node->arrayMode().isOutOfBounds()) {
1985 #if USE(JSVALUE32_64)
1986 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1989 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1990 bool prototypeChainIsSane = false;
1991 if (globalObject->stringPrototypeChainIsSane()) {
1992 // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1993 // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1994 // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1995 // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1996 // indexed properties either.
1997 // https://bugs.webkit.org/show_bug.cgi?id=144668
// Watch the prototype structures so the sane-chain assumption is
// invalidated if they change; re-read the predicate afterwards to
// avoid a race with concurrent invalidation.
1998 m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1999 m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2000 prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2002 if (prototypeChainIsSane) {
// NOTE(review): watchpoints appear to be registered again here after
// the block above — likely an artifact of elided lines in this
// listing; verify against the full source.
2003 m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2004 m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
// Sane chain: OOB loads can return undefined via the dedicated
// slow-path generator (64-bit, then 32-bit register shapes).
2007 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2008 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2010 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2011 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2012 baseReg, propertyReg));
// Non-sane chain: full generic OOB lookup through the C operation.
2016 addSlowPathGenerator(
2018 outOfBounds, this, operationGetByValStringInt,
2019 scratchReg, baseReg, propertyReg));
2021 addSlowPathGenerator(
2023 outOfBounds, this, operationGetByValStringInt,
2024 resultTagReg, scratchReg, baseReg, propertyReg));
// OOB modes produce a generic JSValue; in-bounds produces a cell.
2029 jsValueResult(scratchReg, m_currentNode);
2031 jsValueResult(resultTagReg, scratchReg, m_currentNode);
2034 cellResult(scratchReg, m_currentNode);
// Compiles String.fromCharCode. Untyped child: plain C call
// (operationStringFromCharCodeUntyped) with exception check. Int32 child:
// fast path looks up the VM's single-character string table for codes
// <= 0xff; missing table entries or larger codes go to the slow path.
2037 void SpeculativeJIT::compileFromCharCode(Node* node)
2039 Edge& child = node->child1();
2040 if (child.useKind() == UntypedUse) {
2041 JSValueOperand opr(this, child);
2042 JSValueRegs oprRegs = opr.jsValueRegs();
// Result register shapes for 64-bit (single GPR) vs 32-bit (pair); the
// #if lines between them are elided in this listing.
2044 GPRTemporary result(this);
2045 JSValueRegs resultRegs = JSValueRegs(result.gpr());
2047 GPRTemporary resultTag(this);
2048 GPRTemporary resultPayload(this);
2049 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2052 callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2053 m_jit.exceptionCheck();
2055 jsValueResult(resultRegs, node);
2059 SpeculateStrictInt32Operand property(this, child);
2060 GPRReg propertyReg = property.gpr();
2061 GPRTemporary smallStrings(this);
2062 GPRTemporary scratch(this);
2063 GPRReg scratchReg = scratch.gpr();
2064 GPRReg smallStringsReg = smallStrings.gpr();
2066 JITCompiler::JumpList slowCases;
2067 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2068 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2069 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null table entry means the string hasn't been materialized yet.
2071 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2072 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2073 cellResult(scratchReg, m_currentNode);
// Classifies how a node's value is currently materialized so ToInt32 can
// pick a conversion strategy: already an integer, a generic JSValue needing
// dispatch, or a definitely-bad type (boolean/cell) that terminates
// speculative execution.
2076 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2078 VirtualRegister virtualRegister = node->virtualRegister();
2079 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2081 switch (info.registerFormat()) {
2082 case DataFormatStorage:
2083 RELEASE_ASSERT_NOT_REACHED();
// Unboxed boolean/cell can never convert here: give up on this path.
2085 case DataFormatBoolean:
2086 case DataFormatCell:
2087 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2088 return GeneratedOperandTypeUnknown;
2090 case DataFormatNone:
2091 case DataFormatJSCell:
2093 case DataFormatJSBoolean:
2094 case DataFormatJSDouble:
2095 return GeneratedOperandJSValue;
2097 case DataFormatJSInt32:
2098 case DataFormatInt32:
2099 return GeneratedOperandInteger;
2102 RELEASE_ASSERT_NOT_REACHED();
2103 return GeneratedOperandTypeUnknown;
// Emits code for ValueToInt32: converts child1 to an int32 result, choosing a
// strategy per the edge's use kind (Int52, DoubleRep, or boxed JSValue with
// Number/NotCell speculation). Both the JSVALUE64 and 32_64 encodings are
// handled below.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps — case labels, braces, and #if directives are missing), so
// comments describe only the visible code.
2107 void SpeculativeJIT::compileValueToInt32(Node* node)
2109 switch (node->child1().useKind()) {
// Int52 path (64-bit only): truncation is just a zero-extension of the low
// 32 bits.
2112 SpeculateStrictInt52Operand op1(this, node->child1());
2113 GPRTemporary result(this, Reuse, op1);
2114 GPRReg op1GPR = op1.gpr();
2115 GPRReg resultGPR = result.gpr();
2116 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2117 int32Result(resultGPR, node, DataFormatInt32);
2120 #endif // USE(JSVALUE64)
// Double path: try an inline truncation; fall back to operationToInt32 on the
// slow path (no exception check needed — ToInt32 on a double cannot throw).
2122 case DoubleRepUse: {
2123 GPRTemporary result(this);
2124 SpeculateDoubleOperand op1(this, node->child1());
2125 FPRReg fpr = op1.fpr();
2126 GPRReg gpr = result.gpr();
2127 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2129 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2131 int32Result(gpr, node);
// Boxed JSValue path: dispatch on what format the operand already has.
2137 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
// Already an int32 — just move it.
2138 case GeneratedOperandInteger: {
2139 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2140 GPRTemporary result(this, Reuse, op1);
2141 m_jit.move(op1.gpr(), result.gpr());
2142 int32Result(result.gpr(), node, op1.format());
// Generic boxed value. The following section is the JSVALUE64 encoding
// (single GPR, tagTypeNumberRegister tagging).
2145 case GeneratedOperandJSValue: {
2146 GPRTemporary result(this);
2148 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2150 GPRReg gpr = op1.gpr();
2151 GPRReg resultGpr = result.gpr();
2152 FPRTemporary tempFpr(this);
2153 FPRReg fpr = tempFpr.fpr();
2155 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2156 JITCompiler::JumpList converted;
// NumberUse: type-check that the value is a number; otherwise (NotCell-ish
// speculation) allow booleans/undefined/null and map them below.
2158 if (node->child1().useKind() == NumberUse) {
2160 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2162 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2164 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2167 JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2169 // It's not a cell: so true turns into 1 and all else turns into 0.
2170 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2171 converted.append(m_jit.jump());
2173 isNumber.link(&m_jit);
2176 // First, if we get here we have a double encoded as a JSValue
2177 unboxDouble(gpr, resultGpr, fpr);
// Call out to the runtime for the full ToInt32 on doubles.
2179 silentSpillAllRegisters(resultGpr);
2180 callOperation(operationToInt32, resultGpr, fpr);
2181 silentFillAllRegisters(resultGpr);
2183 converted.append(m_jit.jump());
2185 isInteger.link(&m_jit);
2186 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2188 converted.link(&m_jit);
// 32_64 encoding of the same GeneratedOperandJSValue case: separate tag and
// payload registers, tag compares against JSValue::*Tag constants.
2190 Node* childNode = node->child1().node();
2191 VirtualRegister virtualRegister = childNode->virtualRegister();
2192 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2194 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2196 GPRReg payloadGPR = op1.payloadGPR();
2197 GPRReg resultGpr = result.gpr();
2199 JITCompiler::JumpList converted;
// If the value is already known to be a boxed int32, the payload is the
// answer.
2201 if (info.registerFormat() == DataFormatJSInt32)
2202 m_jit.move(payloadGPR, resultGpr);
2204 GPRReg tagGPR = op1.tagGPR();
2205 FPRTemporary tempFpr(this);
2206 FPRReg fpr = tempFpr.fpr();
2207 FPRTemporary scratch(this);
2209 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2211 if (node->child1().useKind() == NumberUse) {
2213 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2215 MacroAssembler::AboveOrEqual, tagGPR,
2216 TrustedImm32(JSValue::LowestTag)));
2218 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2221 op1.jsValueRegs(), node->child1(), ~SpecCell,
2222 m_jit.branchIfCell(op1.jsValueRegs()));
2224 // It's not a cell: so true turns into 1 and all else turns into 0.
2225 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2226 m_jit.move(TrustedImm32(0), resultGpr);
2227 converted.append(m_jit.jump());
2229 isBoolean.link(&m_jit);
// Boolean payload is already 0/1, so it is the int32 result.
2230 m_jit.move(payloadGPR, resultGpr);
2231 converted.append(m_jit.jump());
2233 isNumber.link(&m_jit);
2236 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2238 silentSpillAllRegisters(resultGpr);
2239 callOperation(operationToInt32, resultGpr, fpr);
2240 silentFillAllRegisters(resultGpr);
2242 converted.append(m_jit.jump());
2244 isInteger.link(&m_jit);
2245 m_jit.move(payloadGPR, resultGpr);
2247 converted.link(&m_jit);
2250 int32Result(resultGpr, node);
// Unknown means speculation already terminated above; nothing to emit.
2253 case GeneratedOperandTypeUnknown:
2254 RELEASE_ASSERT(!m_compileOkay);
2257 RELEASE_ASSERT_NOT_REACHED();
2262 ASSERT(!m_compileOkay);
// Emits code for UInt32ToNumber: reinterprets an int32 as an unsigned value.
// If the arith mode says overflow is allowed, produce an Int52 (when
// available) or a double (adding 2^32 for negative inputs); otherwise
// speculate that the value is non-negative and keep it as int32.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps), so comments describe only the visible code.
2267 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2269 if (doesOverflow(node->arithMode())) {
// Int52 path: zero-extension makes the unsigned value exact.
2270 if (enableInt52()) {
2271 SpeculateInt32Operand op1(this, node->child1());
2272 GPRTemporary result(this, Reuse, op1);
2273 m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2274 strictInt52Result(result.gpr(), node);
// Double path: convert as signed, then correct negative inputs by adding
// 2^32 to recover the unsigned value.
2277 SpeculateInt32Operand op1(this, node->child1());
2278 FPRTemporary result(this);
2280 GPRReg inputGPR = op1.gpr();
2281 FPRReg outputFPR = result.fpr();
2283 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2285 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2286 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2287 positive.link(&m_jit);
2289 doubleResult(outputFPR, node);
// Checked path: a negative bit pattern would not fit in int32 as unsigned,
// so treat it as an overflow speculation failure.
2293 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2295 SpeculateInt32Operand op1(this, node->child1());
2296 GPRTemporary result(this);
2298 m_jit.move(op1.gpr(), result.gpr());
2300 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2302 int32Result(result.gpr(), node, op1.format());
// Emits code for DoubleAsInt32: converts a double to an int32, OSR-exiting
// (Overflow) if the value is not exactly representable — including, when the
// arith mode requires it, the negative-zero case.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps), so comments describe only the visible code.
2305 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2307 SpeculateDoubleOperand op1(this, node->child1());
2308 FPRTemporary scratch(this);
2309 GPRTemporary result(this);
2311 FPRReg valueFPR = op1.fpr();
2312 FPRReg scratchFPR = scratch.fpr();
2313 GPRReg resultGPR = result.gpr();
2315 JITCompiler::JumpList failureCases;
// This node only exists in checked arithmetic modes.
2316 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2317 m_jit.branchConvertDoubleToInt32(
2318 valueFPR, resultGPR, failureCases, scratchFPR,
2319 shouldCheckNegativeZero(node->arithMode()));
// Any inexact conversion (or -0 when checked) bails out of speculation.
2320 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2322 int32Result(resultGPR, node);
// Emits code for DoubleRep: produces an unboxed double from child1. Handles
// RealNumberUse (number with no impure NaN), generic Number/NotCell boxed
// values (with undefined→NaN, null/false→0, true→1 for NotCellUse), and the
// Int52 path. Both JSValue encodings are covered.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps — case labels, braces, and #if directives are missing), so
// comments describe only the visible code.
2325 void SpeculativeJIT::compileDoubleRep(Node* node)
2327 switch (node->child1().useKind()) {
// RealNumberUse: unbox optimistically as a double; if that yields NaN the
// value was not a boxed double, so re-check that it is an int32 and convert.
2328 case RealNumberUse: {
2329 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2330 FPRTemporary result(this);
2332 JSValueRegs op1Regs = op1.jsValueRegs();
2333 FPRReg resultFPR = result.fpr();
2336 GPRTemporary temp(this);
2337 GPRReg tempGPR = temp.gpr();
2338 m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2340 FPRTemporary temp(this);
2341 FPRReg tempFPR = temp.fpr();
2342 unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
// NaN != NaN: a successful (non-NaN) unbox skips the int32 fallback.
2345 JITCompiler::Jump done = m_jit.branchDouble(
2346 JITCompiler::DoubleEqual, resultFPR, resultFPR);
2349 op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2350 m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2354 doubleResult(resultFPR, node);
// Generic boxed-value path. A constant here would already have been folded.
2360 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
// If abstract interpretation proves int32, a plain conversion suffices.
2362 SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2363 if (isInt32Speculation(possibleTypes)) {
2364 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2365 FPRTemporary result(this);
2366 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2367 doubleResult(result.fpr(), node);
// JSVALUE64 encoding below.
2371 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2372 FPRTemporary result(this);
2375 GPRTemporary temp(this);
2377 GPRReg op1GPR = op1.gpr();
2378 GPRReg tempGPR = temp.gpr();
2379 FPRReg resultFPR = result.fpr();
2380 JITCompiler::JumpList done;
2382 JITCompiler::Jump isInteger = m_jit.branch64(
2383 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
// NotCellUse: map the non-number primitives to their ToNumber doubles —
// undefined→NaN, null→0, false→0, true→1.
2385 if (node->child1().useKind() == NotCellUse) {
2386 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2387 JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2389 static const double zero = 0;
2390 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2392 JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2393 done.append(isNull);
// Anything left that isn't a boolean is a cell: speculation failure.
2395 DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2396 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2398 JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2399 static const double one = 1;
2400 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2401 done.append(m_jit.jump());
2402 done.append(isFalse);
2404 isUndefined.link(&m_jit);
2405 static const double NaN = PNaN;
2406 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2407 done.append(m_jit.jump());
2409 isNumber.link(&m_jit);
2410 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2412 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2413 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2416 unboxDouble(op1GPR, tempGPR, resultFPR);
2417 done.append(m_jit.jump());
2419 isInteger.link(&m_jit);
2420 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2422 #else // USE(JSVALUE64) -> this is the 32_64 case
2423 FPRTemporary temp(this);
2425 GPRReg op1TagGPR = op1.tagGPR();
2426 GPRReg op1PayloadGPR = op1.payloadGPR();
2427 FPRReg tempFPR = temp.fpr();
2428 FPRReg resultFPR = result.fpr();
2429 JITCompiler::JumpList done;
2431 JITCompiler::Jump isInteger = m_jit.branch32(
2432 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
// Same NotCellUse primitive mapping as above, via tag comparisons.
2434 if (node->child1().useKind() == NotCellUse) {
2435 JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2436 JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2438 static const double zero = 0;
2439 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2441 JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2442 done.append(isNull);
2444 DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2446 JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2447 static const double one = 1;
2448 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2449 done.append(m_jit.jump());
2450 done.append(isFalse);
2452 isUndefined.link(&m_jit);
2453 static const double NaN = PNaN;
2454 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2455 done.append(m_jit.jump());
2457 isNumber.link(&m_jit);
2458 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2460 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2461 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2464 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2465 done.append(m_jit.jump());
2467 isInteger.link(&m_jit);
2468 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2470 #endif // USE(JSVALUE64)
2472 doubleResult(resultFPR, node);
// Int52 path (64-bit only): a strict Int52 converts exactly via int64→double.
2478 SpeculateStrictInt52Operand value(this, node->child1());
2479 FPRTemporary result(this);
2481 GPRReg valueGPR = value.gpr();
2482 FPRReg resultFPR = result.fpr();
2484 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2486 doubleResult(resultFPR, node);
2489 #endif // USE(JSVALUE64)
2492 RELEASE_ASSERT_NOT_REACHED();
// Emits code for ValueRep: boxes an unboxed representation (DoubleRep or
// Int52) back into a JSValue.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps), so comments describe only the visible code.
2497 void SpeculativeJIT::compileValueRep(Node* node)
2499 switch (node->child1().useKind()) {
2500 case DoubleRepUse: {
2501 SpeculateDoubleOperand value(this, node->child1());
2502 JSValueRegsTemporary result(this);
2504 FPRReg valueFPR = value.fpr();
2505 JSValueRegs resultRegs = result.regs();
2507 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2508 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2509 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2510 // local was purified.
2511 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2512 m_jit.purifyNaN(valueFPR);
2514 boxDouble(valueFPR, resultRegs);
2516 jsValueResult(resultRegs, node);
// Int52 path (presumably 64-bit only; the #if lines are elided here).
2522 SpeculateStrictInt52Operand value(this, node->child1());
2523 GPRTemporary result(this);
2525 GPRReg valueGPR = value.gpr();
2526 GPRReg resultGPR = result.gpr();
2528 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2530 jsValueResult(resultGPR, node);
2533 #endif // USE(JSVALUE64)
2536 RELEASE_ASSERT_NOT_REACHED();
// Compile-time helper: clamps a double to the Uint8Clamped byte range
// [0, 255]. Body elided from this excerpt (original lines missing); see the
// JIT equivalent in compileClampDoubleToByte below.
2541 static double clampDoubleToByte(double d)
// Emits code that clamps the int32 in |result| to [0, 255] in place:
// values <= 0xff and >= 0 pass through, negatives become 0, values > 0xff
// become 255.
// NOTE(review): this excerpt elides some original lines (e.g. the
// tooBig/clamped link sites around original lines 2557/2559), so comments
// describe only the visible code.
2551 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
// Unsigned BelowOrEqual 0xff means "in [0, 255]" for an int32 bit pattern.
2553 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2554 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
// Negative: clamp to 0.
2555 jit.xorPtr(result, result);
2556 MacroAssembler::Jump clamped = jit.jump();
// Too big: clamp to 255.
2558 jit.move(JITCompiler::TrustedImm32(255), result);
2560 inBounds.link(&jit);
// Emits code that clamps the double in |source| to a byte in |result|:
// NaN and values <= 0 produce 0, values > 255 produce 255, and in-range
// values are rounded by adding 0.5 and truncating.
// NOTE(review): this excerpt elides some original lines (e.g. the tooBig link
// site and zeroed/clamped links near original lines 2584-2590), so comments
// describe only the visible code.
2563 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2565 // Unordered compare so we pick up NaN
2566 static const double zero = 0;
2567 static const double byteMax = 255;
2568 static const double half = 0.5;
2569 jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2570 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2571 jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2572 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2574 jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2575 // FIXME: This should probably just use a floating point round!
2576 // https://bugs.webkit.org/show_bug.cgi?id=72054
2577 jit.addDouble(source, scratch);
2578 jit.truncateDoubleToInt32(scratch, result);
2579 MacroAssembler::Jump truncatedInt = jit.jump();
// NaN or <= 0: result is 0.
2581 tooSmall.link(&jit);
2582 jit.xorPtr(result, result);
2583 MacroAssembler::Jump zeroed = jit.jump();
// > 255: result is 255.
2586 jit.move(JITCompiler::TrustedImm32(255), result);
2588 truncatedInt.link(&jit);
// Returns the jump taken when |indexGPR| is out of bounds for the typed array
// in |baseGPR|, or an unset Jump when no check is needed (PutByValAlias, or a
// foldable view with a provably in-range constant index). When the view's
// length is foldable, compares against the constant; otherwise compares
// against the array's length field.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps), so comments describe only the visible code.
2593 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
// PutByValAlias is emitted only after a prior bounds check, so none is needed.
2595 if (node->op() == PutByValAlias)
2596 return JITCompiler::Jump();
2597 JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2598 m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2600 uint32_t length = view->length();
2601 Node* indexNode = m_jit.graph().child(node, 1).node();
// Constant index already proven in range: no runtime check needed.
2602 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2603 return JITCompiler::Jump();
2604 return m_jit.branch32(
2605 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
// Non-foldable view: compare against the live length in the object.
2607 return m_jit.branch32(
2608 MacroAssembler::AboveOrEqual, indexGPR,
2609 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a typed-array bounds check: if jumpForTypedArrayOutOfBounds produced
// a jump, turns it into an OutOfBounds speculation failure (OSR exit).
2612 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2614 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2617 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// On the out-of-bounds path of a typed-array access, checks whether the array
// has been neutered (detached): a wasteful-mode view with a null vector means
// neutered, which is an Uncountable speculation failure. Returns the jump
// that skips this slow check on the in-bounds path (unset if there was no
// out-of-bounds jump). In-bounds array modes treat out-of-bounds itself as an
// OSR exit instead.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps), so comments describe only the visible code.
2620 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2622 JITCompiler::Jump done;
2623 if (outOfBounds.isSet()) {
2624 done = m_jit.jump();
2625 if (node->arrayMode().isInBounds())
2626 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2628 outOfBounds.link(&m_jit);
// Only wasteful-mode views can be neutered with a dangling vector pointer.
2630 JITCompiler::Jump notWasteful = m_jit.branch32(
2631 MacroAssembler::NotEqual,
2632 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2633 TrustedImm32(WastefulTypedArray));
2635 JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2636 MacroAssembler::Zero,
2637 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2638 speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2639 notWasteful.link(&m_jit);
// Emits GetByVal on an integer-typed array: bounds-checks, loads the element
// with the width/signedness implied by |type|, and chooses the result kind —
// int32 when the element always fits, otherwise (Uint32) a speculated int32,
// an Int52, or a double with the +2^32 correction for "negative" bit
// patterns.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps — case labels and braces are missing), so comments describe
// only the visible code.
2645 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2647 ASSERT(isInt(type));
2649 SpeculateCellOperand base(this, node->child1());
2650 SpeculateStrictInt32Operand property(this, node->child2());
2651 StorageOperand storage(this, node->child3());
2653 GPRReg baseReg = base.gpr();
2654 GPRReg propertyReg = property.gpr();
2655 GPRReg storageReg = storage.gpr();
2657 GPRTemporary result(this);
2658 GPRReg resultReg = result.gpr();
2660 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2662 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
// Element load, sized and sign-extended per the typed-array element type.
2663 switch (elementSize(type)) {
2666 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2668 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2672 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2674 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2677 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Elements narrower than 32 bits, or signed 32-bit, always fit in int32.
2682 if (elementSize(type) < 4 || isSigned(type)) {
2683 int32Result(resultReg, node);
// Remaining case: Uint32 elements, which may exceed int32 range.
2687 ASSERT(elementSize(type) == 4 && !isSigned(type));
2688 if (node->shouldSpeculateInt32()) {
// Speculate the top bit is clear; OSR-exit otherwise.
2689 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2690 int32Result(resultReg, node);
2695 if (node->shouldSpeculateAnyInt()) {
2696 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2697 strictInt52Result(resultReg, node);
// Fallback: produce a double, correcting negative bit patterns by +2^32.
2702 FPRTemporary fresult(this);
2703 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2704 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2705 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2706 positive.link(&m_jit);
2707 doubleResult(fresult.fpr(), node);
// Emits PutByVal into an integer-typed array. First materializes the value to
// store into a scratch GPR — from a constant, an Int32, an Int52, or a
// DoubleRep source — applying Uint8Clamped clamping where required; then
// bounds-checks and stores with the element width, handling the neutered
// check on the out-of-bounds path.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps — case labels, braces, and #if directives are missing), so
// comments describe only the visible code.
2710 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2712 ASSERT(isInt(type));
2714 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2715 GPRReg storageReg = storage.gpr();
2717 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2720 GPRReg valueGPR = InvalidGPRReg;
// Constant value: fold the clamp/ToInt32 at compile time.
2722 if (valueUse->isConstant()) {
2723 JSValue jsValue = valueUse->asJSValue();
// A non-number constant can never be stored here; give up on speculation.
2724 if (!jsValue.isNumber()) {
2725 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2729 double d = jsValue.asNumber();
2730 if (isClamped(type)) {
2731 ASSERT(elementSize(type) == 1);
2732 d = clampDoubleToByte(d);
2734 GPRTemporary scratch(this);
2735 GPRReg scratchReg = scratch.gpr();
2736 m_jit.move(Imm32(toInt32(d)), scratchReg);
2737 value.adopt(scratch);
2738 valueGPR = scratchReg;
2740 switch (valueUse.useKind()) {
// Int32 source: copy, then clamp in place for Uint8Clamped arrays.
2742 SpeculateInt32Operand valueOp(this, valueUse);
2743 GPRTemporary scratch(this);
2744 GPRReg scratchReg = scratch.gpr();
2745 m_jit.move(valueOp.gpr(), scratchReg);
2746 if (isClamped(type)) {
2747 ASSERT(elementSize(type) == 1);
2748 compileClampIntegerToByte(m_jit, scratchReg);
2750 value.adopt(scratch);
2751 valueGPR = scratchReg;
// Int52 source (64-bit only): clamp with 64-bit compares for Uint8Clamped.
2757 SpeculateStrictInt52Operand valueOp(this, valueUse);
2758 GPRTemporary scratch(this);
2759 GPRReg scratchReg = scratch.gpr();
2760 m_jit.move(valueOp.gpr(), scratchReg);
2761 if (isClamped(type)) {
2762 ASSERT(elementSize(type) == 1);
2763 MacroAssembler::Jump inBounds = m_jit.branch64(
2764 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2765 MacroAssembler::Jump tooBig = m_jit.branch64(
2766 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2767 m_jit.move(TrustedImm32(0), scratchReg);
2768 MacroAssembler::Jump clamped = m_jit.jump();
2769 tooBig.link(&m_jit);
2770 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2771 clamped.link(&m_jit);
2772 inBounds.link(&m_jit);
2774 value.adopt(scratch);
2775 valueGPR = scratchReg;
2778 #endif // USE(JSVALUE64)
// Double source: clamp-to-byte for Uint8Clamped, else JS ToInt32 semantics
// (NaN -> 0 fast-pathed; inline truncation with operationToInt32 fallback).
2780 case DoubleRepUse: {
2781 if (isClamped(type)) {
2782 ASSERT(elementSize(type) == 1);
2783 SpeculateDoubleOperand valueOp(this, valueUse);
2784 GPRTemporary result(this);
2785 FPRTemporary floatScratch(this);
2786 FPRReg fpr = valueOp.fpr();
2787 GPRReg gpr = result.gpr();
2788 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2789 value.adopt(result);
2792 SpeculateDoubleOperand valueOp(this, valueUse);
2793 GPRTemporary result(this);
2794 FPRReg fpr = valueOp.fpr();
2795 GPRReg gpr = result.gpr();
2796 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2797 m_jit.xorPtr(gpr, gpr);
2798 MacroAssembler::Jump fixed = m_jit.jump();
2799 notNaN.link(&m_jit);
2801 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2802 fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2804 addSlowPathGenerator(slowPathCall(failed, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2807 value.adopt(result);
2814 RELEASE_ASSERT_NOT_REACHED();
// The materialized value must not alias the base/index/storage registers.
2819 ASSERT_UNUSED(valueGPR, valueGPR != property);
2820 ASSERT(valueGPR != base);
2821 ASSERT(valueGPR != storageReg);
2822 JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
// Store with the element width.
2824 switch (elementSize(type)) {
2826 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2829 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2832 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2838 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
// Emits GetByVal on a float-typed array: bounds-checks, loads a float32
// (widened to double) or a float64, and produces a double result.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps — case labels and braces are missing), so comments describe
// only the visible code.
2844 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2846 ASSERT(isFloat(type));
2848 SpeculateCellOperand base(this, node->child1());
2849 SpeculateStrictInt32Operand property(this, node->child2());
2850 StorageOperand storage(this, node->child3());
2852 GPRReg baseReg = base.gpr();
2853 GPRReg propertyReg = property.gpr();
2854 GPRReg storageReg = storage.gpr();
2856 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2858 FPRTemporary result(this);
2859 FPRReg resultReg = result.fpr();
2860 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2861 switch (elementSize(type)) {
// Float32 element: load then widen to double.
2863 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2864 m_jit.convertFloatToDouble(resultReg, resultReg);
// Float64 element: load directly.
2867 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2871 RELEASE_ASSERT_NOT_REACHED();
2874 doubleResult(resultReg, node);
// Emits PutByVal into a float-typed array: bounds-checks, stores the double
// value (narrowed through a scratch FPR for float32 elements), and handles
// the neutered check on the out-of-bounds path.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps — case labels and braces are missing), so comments describe
// only the visible code.
2877 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2879 ASSERT(isFloat(type));
2881 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2882 GPRReg storageReg = storage.gpr();
2884 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2885 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2887 SpeculateDoubleOperand valueOp(this, valueUse);
2888 FPRTemporary scratch(this);
2889 FPRReg valueFPR = valueOp.fpr();
2890 FPRReg scratchFPR = scratch.fpr();
2892 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2894 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2896 switch (elementSize(type)) {
// Float32 element: narrow the double into the scratch FPR before storing.
2898 m_jit.moveDouble(valueFPR, scratchFPR);
2899 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2900 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
// Float64 element: store directly.
2904 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2907 RELEASE_ASSERT_NOT_REACHED();
2910 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
// Emits the prototype-chain walk for `instanceof` when the value is known to
// be a cell: walks value's prototype chain comparing each link against
// |prototypeReg|, leaving a boolean in |scratchReg|. Proxy objects divert to
// operationDefaultHasInstance. Both JSValue encodings of the boolean result
// are visible below (the #if lines are elided in this excerpt).
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps), so comments describe only the visible code.
2916 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2918 // Check that prototype is an object.
2919 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2921 // Initialize scratchReg with the value being checked.
2922 m_jit.move(valueReg, scratchReg);
2924 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2925 MacroAssembler::Label loop(&m_jit);
// Proxies implement hasInstance themselves; defer to the runtime.
2926 MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2927 MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2928 m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2929 m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2930 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
// Keep walking while the next prototype is still a cell / non-null.
2932 m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2934 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2937 // No match - result is false.
2939 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2941 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2943 MacroAssembler::JumpList doneJumps;
2944 doneJumps.append(m_jit.jump());
// Runtime fallback (e.g. for proxies); may throw, hence the exception check.
2946 performDefaultHasInstance.link(&m_jit);
2947 silentSpillAllRegisters(scratchReg);
2948 callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg);
2949 silentFillAllRegisters(scratchReg);
2950 m_jit.exceptionCheck();
// Tag the raw boolean returned by the operation (64-bit encoding path).
2952 m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2954 doneJumps.append(m_jit.jump());
2956 isInstance.link(&m_jit);
2958 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2960 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2963 doneJumps.link(&m_jit);
// Emits CheckTypeInfoFlags: OSR-exits (BadTypeInfoFlags) unless the cell's
// typeInfo flags contain all the bits given by node->typeInfoOperand().
2966 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2968 SpeculateCellOperand base(this, node->child1());
2970 GPRReg baseGPR = base.gpr();
2972 speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
// Emits InstanceOf. For UntypedUse values, non-cells short-circuit to false
// before the prototype-chain walk; otherwise both operands are speculated
// cells and the walk is emitted directly. The boolean lands in a scratch GPR.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps), so comments describe only the visible code.
2977 void SpeculativeJIT::compileInstanceOf(Node* node)
2979 if (node->child1().useKind() == UntypedUse) {
2980 // It might not be a cell. Speculate less aggressively.
2981 // Or: it might only be used once (i.e. by us), so we get zero benefit
2982 // from speculating any more aggressively than we absolutely need to.
2984 JSValueOperand value(this, node->child1());
2985 SpeculateCellOperand prototype(this, node->child2());
2986 GPRTemporary scratch(this);
2987 GPRTemporary scratch2(this);
2989 GPRReg prototypeReg = prototype.gpr();
2990 GPRReg scratchReg = scratch.gpr();
2991 GPRReg scratch2Reg = scratch2.gpr();
// Non-cell operand: `x instanceof y` is always false.
2993 MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2994 GPRReg valueReg = value.jsValueRegs().payloadGPR();
2995 moveFalseTo(scratchReg);
2997 MacroAssembler::Jump done = m_jit.jump();
2999 isCell.link(&m_jit);
3001 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3005 blessedBooleanResult(scratchReg, node);
// Cell-speculated path: both operands are known cells.
3009 SpeculateCellOperand value(this, node->child1());
3010 SpeculateCellOperand prototype(this, node->child2());
3012 GPRTemporary scratch(this);
3013 GPRTemporary scratch2(this);
3015 GPRReg valueReg = value.gpr();
3016 GPRReg prototypeReg = prototype.gpr();
3017 GPRReg scratchReg = scratch.gpr();
3018 GPRReg scratch2Reg = scratch2.gpr();
3020 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3022 blessedBooleanResult(scratchReg, node);
// Emits an untyped (JSValue) bitwise binary op. If either operand is known to
// not be a number, go straight to the slow-path operation; otherwise emit the
// snippet generator's inline fast path with a slow-path call for the rest.
// At most one operand may be folded as an int32 constant (the generator does
// not support two constants).
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps — braces and #if directives are missing), so comments
// describe only the visible code.
3025 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3026 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3028 Edge& leftChild = node->child1();
3029 Edge& rightChild = node->child2();
// Known non-number on either side: the fast path can never win; just call.
3031 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3032 JSValueOperand left(this, leftChild);
3033 JSValueOperand right(this, rightChild);
3034 JSValueRegs leftRegs = left.jsValueRegs();
3035 JSValueRegs rightRegs = right.jsValueRegs();
3037 GPRTemporary result(this);
3038 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3040 GPRTemporary resultTag(this);
3041 GPRTemporary resultPayload(this);
3042 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3045 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3046 m_jit.exceptionCheck();
3048 jsValueResult(resultRegs, node);
// Fast-path case: optionally constant-fold one side into the snippet.
3052 Optional<JSValueOperand> left;
3053 Optional<JSValueOperand> right;
3055 JSValueRegs leftRegs;
3056 JSValueRegs rightRegs;
3059 GPRTemporary result(this);
3060 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3061 GPRTemporary scratch(this);
3062 GPRReg scratchGPR = scratch.gpr();
3064 GPRTemporary resultTag(this);
3065 GPRTemporary resultPayload(this);
3066 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3067 GPRReg scratchGPR = resultTag.gpr();
3070 SnippetOperand leftOperand;
3071 SnippetOperand rightOperand;
3073 // The snippet generator does not support both operands being constant. If the left
3074 // operand is already const, we'll ignore the right operand's constness.
3075 if (leftChild->isInt32Constant())
3076 leftOperand.setConstInt32(leftChild->asInt32());
3077 else if (rightChild->isInt32Constant())
3078 rightOperand.setConstInt32(rightChild->asInt32());
3080 RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
// Only allocate registers for the non-constant operand(s).
3082 if (!leftOperand.isConst()) {
3083 left = JSValueOperand(this, leftChild);
3084 leftRegs = left->jsValueRegs();
3086 if (!rightOperand.isConst()) {
3087 right = JSValueOperand(this, rightChild);
3088 rightRegs = right->jsValueRegs();
3091 SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3092 gen.generateFastPath(m_jit);
3094 ASSERT(gen.didEmitFastPath());
3095 gen.endJumpList().append(m_jit.jump());
// Slow path: rematerialize any constant operand into registers, then call.
3097 gen.slowPathJumpList().link(&m_jit);
3098 silentSpillAllRegisters(resultRegs);
3100 if (leftOperand.isConst()) {
3101 leftRegs = resultRegs;
3102 m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3103 } else if (rightOperand.isConst()) {
3104 rightRegs = resultRegs;
3105 m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3108 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3110 silentFillAllRegisters(resultRegs);
3111 m_jit.exceptionCheck();
3113 gen.endJumpList().link(&m_jit);
3114 jsValueResult(resultRegs, node);
// Emits BitAnd/BitOr/BitXor. Untyped operands go through the generic snippet
// path (emitUntypedBitOp); int32-speculated operands use the immediate form
// when one side is a constant, otherwise a plain register-register op.
// NOTE(review): this excerpt elides some original lines (the embedded
// numbering jumps — case labels and braces are missing), so comments describe
// only the visible code.
3117 void SpeculativeJIT::compileBitwiseOp(Node* node)
3119 NodeType op = node->op();
3120 Edge& leftChild = node->child1();
3121 Edge& rightChild = node->child2();
// Untyped path: pick the matching snippet generator + slow-path operation.
3123 if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3126 emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3129 emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3132 emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3135 RELEASE_ASSERT_NOT_REACHED();
// Int32 path with a constant on the left.
3139 if (leftChild->isInt32Constant()) {
3140 SpeculateInt32Operand op2(this, rightChild);
3141 GPRTemporary result(this, Reuse, op2);
3143 bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3145 int32Result(result.gpr(), node);
// Int32 path with a constant on the right.
3147 } else if (rightChild->isInt32Constant()) {
3148 SpeculateInt32Operand op1(this, leftChild);
3149 GPRTemporary result(this, Reuse, op1);
3151 bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3153 int32Result(result.gpr(), node);
// General int32 path: both operands in registers.
3156 SpeculateInt32Operand op1(this, leftChild);
3157 SpeculateInt32Operand op2(this, rightChild);
3158 GPRTemporary result(this, Reuse, op1, op2);
3160 GPRReg reg1 = op1.gpr();
3161 GPRReg reg2 = op2.gpr();
3162 bitOp(op, reg1, reg2, result.gpr());
3164 int32Result(result.gpr(), node);
// Emits code for an untyped right shift (BitRShift = signed, BitURShift =
// unsigned). If either operand is statically known not to be a number, we
// skip the fast path entirely and just call the slow-path operation.
// Otherwise we run the JITRightShiftGenerator fast path with a spill/call/
// fill slow path behind it.
// NOTE(review): paired register setups below (single-GPR resultRegs vs.
// tag+payload resultRegs) are presumably separated by elided
// #if USE(JSVALUE64)/#else lines — confirm against the full file.
3168 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
// Pick the slow-path operation and the generator's shift type from the op.
3170 J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3171 ? operationValueBitRShift : operationValueBitURShift;
3172 JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3173 ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3175 Edge& leftChild = node->child1();
3176 Edge& rightChild = node->child2();
// Known non-number operand: no point emitting a numeric fast path; call the
// operation directly and check for an exception.
3178 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3179 JSValueOperand left(this, leftChild);
3180 JSValueOperand right(this, rightChild);
3181 JSValueRegs leftRegs = left.jsValueRegs();
3182 JSValueRegs rightRegs = right.jsValueRegs();
3184 GPRTemporary result(this);
3185 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3187 GPRTemporary resultTag(this);
3188 GPRTemporary resultPayload(this);
3189 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3192 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3193 m_jit.exceptionCheck();
3195 jsValueResult(resultRegs, node);
// Operands are only materialized when non-constant, hence the Optionals.
3199 Optional<JSValueOperand> left;
3200 Optional<JSValueOperand> right;
3202 JSValueRegs leftRegs;
3203 JSValueRegs rightRegs;
3205 FPRTemporary leftNumber(this);
3206 FPRReg leftFPR = leftNumber.fpr();
3209 GPRTemporary result(this);
3210 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3211 GPRTemporary scratch(this);
3212 GPRReg scratchGPR = scratch.gpr();
3213 FPRReg scratchFPR = InvalidFPRReg;
3215 GPRTemporary resultTag(this);
3216 GPRTemporary resultPayload(this);
3217 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3218 GPRReg scratchGPR = resultTag.gpr();
3219 FPRTemporary fprScratch(this);
3220 FPRReg scratchFPR = fprScratch.fpr();
3223 SnippetOperand leftOperand;
3224 SnippetOperand rightOperand;
3226 // The snippet generator does not support both operands being constant. If the left
3227 // operand is already const, we'll ignore the right operand's constness.
3228 if (leftChild->isInt32Constant())
3229 leftOperand.setConstInt32(leftChild->asInt32());
3230 else if (rightChild->isInt32Constant())
3231 rightOperand.setConstInt32(rightChild->asInt32());
3233 RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3235 if (!leftOperand.isConst()) {
3236 left = JSValueOperand(this, leftChild);
3237 leftRegs = left->jsValueRegs();
3239 if (!rightOperand.isConst()) {
3240 right = JSValueOperand(this, rightChild);
3241 rightRegs = right->jsValueRegs();
// Fast path: the generator handles the common numeric cases inline.
3244 JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3245 leftFPR, scratchGPR, scratchFPR, shiftType);
3246 gen.generateFastPath(m_jit);
3248 ASSERT(gen.didEmitFastPath());
3249 gen.endJumpList().append(m_jit.jump());
// Slow path: spill, materialize any constant operand into a register (the
// result registers are free to reuse here), call out, refill, and rejoin.
3251 gen.slowPathJumpList().link(&m_jit);
3252 silentSpillAllRegisters(resultRegs);
3254 if (leftOperand.isConst()) {
3255 leftRegs = resultRegs;
3256 m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3257 } else if (rightOperand.isConst()) {
3258 rightRegs = resultRegs;
3259 m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3262 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3264 silentFillAllRegisters(resultRegs);
3265 m_jit.exceptionCheck();
3267 gen.endJumpList().link(&m_jit);
3268 jsValueResult(resultRegs, node);
// Emits code for the shift DFG nodes (selected by node->op()). Untyped
// operands go to the left-shift snippet or the untyped right-shift helper;
// Int32 operands use shiftOp with a constant shift amount folded when
// possible.
// NOTE(review): braces/case labels elided in this excerpt; comments describe
// only the visible statements.
3272 void SpeculativeJIT::compileShiftOp(Node* node)
3274 NodeType op = node->op();
3275 Edge& leftChild = node->child1();
3276 Edge& rightChild = node->child2();
3278 if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3281 emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3285 emitUntypedRightShiftBitOp(node);
3288 RELEASE_ASSERT_NOT_REACHED();
// Constant shift amount: mask to the low 5 bits, matching JS shift
// semantics (only the low 5 bits of the count are used).
3292 if (rightChild->isInt32Constant()) {
3293 SpeculateInt32Operand op1(this, leftChild);
3294 GPRTemporary result(this, Reuse, op1);
3296 shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3298 int32Result(result.gpr(), node);
3300 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
3301 SpeculateInt32Operand op1(this, leftChild);
3302 SpeculateInt32Operand op2(this, rightChild);
// Note: result may only reuse op1 (the value), never op2 (the shift amount).
3303 GPRTemporary result(this, Reuse, op1);
3305 GPRReg reg1 = op1.gpr();
3306 GPRReg reg2 = op2.gpr();
3307 shiftOp(op, reg1, reg2, result.gpr());
3309 int32Result(result.gpr(), node);
// Emits code for ValueAdd (the generic JS '+'). Known-non-number operands
// bypass the IC and call operationValueAddNotNumber directly. Otherwise an
// ArithProfile-driven JITAddIC is built; if it generates an inline fast
// path, a patchable slow path is registered behind it, else we fall back to
// a plain call to operationValueAdd.
// NOTE(review): paired register setups (single-GPR vs. tag+payload
// resultRegs) are presumably the elided #if USE(JSVALUE64)/#else halves;
// several closing braces are also elided in this excerpt.
3313 void SpeculativeJIT::compileValueAdd(Node* node)
3315 Edge& leftChild = node->child1();
3316 Edge& rightChild = node->child2();
// Fast bail-out: if either side is provably not a number, '+' must go
// through the generic (string/object) path, so just call out.
3318 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3319 JSValueOperand left(this, leftChild);
3320 JSValueOperand right(this, rightChild);
3321 JSValueRegs leftRegs = left.jsValueRegs();
3322 JSValueRegs rightRegs = right.jsValueRegs();
3324 GPRTemporary result(this);
3325 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3327 GPRTemporary resultTag(this);
3328 GPRTemporary resultPayload(this);
3329 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3332 callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3333 m_jit.exceptionCheck();
3335 jsValueResult(resultRegs, node);
// Operands are materialized lazily: constants don't need registers on the
// fast path, hence the Optionals.
3339 Optional<JSValueOperand> left;
3340 Optional<JSValueOperand> right;
3342 JSValueRegs leftRegs;
3343 JSValueRegs rightRegs;
3345 FPRTemporary leftNumber(this);
3346 FPRTemporary rightNumber(this);
3347 FPRReg leftFPR = leftNumber.fpr();
3348 FPRReg rightFPR = rightNumber.fpr();
3351 GPRTemporary result(this);
3352 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3353 GPRTemporary scratch(this);
3354 GPRReg scratchGPR = scratch.gpr();
3355 FPRReg scratchFPR = InvalidFPRReg;
3357 GPRTemporary resultTag(this);
3358 GPRTemporary resultPayload(this);
3359 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3360 GPRReg scratchGPR = resultTag.gpr();
3361 FPRTemporary fprScratch(this);
3362 FPRReg scratchFPR = fprScratch.fpr();
// Seed the snippet operands with abstract-interpreter result types so the
// generator can specialize its fast path.
3365 SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3366 SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3368 // The snippet generator does not support both operands being constant. If the left
3369 // operand is already const, we'll ignore the right operand's constness.
3370 if (leftChild->isInt32Constant())
3371 leftOperand.setConstInt32(leftChild->asInt32());
3372 else if (rightChild->isInt32Constant())
3373 rightOperand.setConstInt32(rightChild->asInt32());
3375 ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3377 if (!leftOperand.isConst()) {
3378 left = JSValueOperand(this, leftChild);
3379 leftRegs = left->jsValueRegs();
3381 if (!rightOperand.isConst()) {
3382 right = JSValueOperand(this, rightChild);
3383 rightRegs = right->jsValueRegs();
// Build the add IC. The generation state is boxed because the slow-path
// lambda and the link task both need to share it after this frame is gone.
3386 JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC();
3387 Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3388 ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3389 addIC->m_generator = JITAddGenerator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR, arithProfile);
3391 bool generatedInline = addIC->generateInline(m_jit, *addICGenerationState);
3393 if (generatedInline) {
3394 ASSERT(!addICGenerationState->slowPathJumps.empty());
// Compute the spill plan now (without emitting spills) so the deferred
// slow-path lambda can replay it via silentSpill/silentFill.
3396 Vector<SilentRegisterSavePlan> savePlans;
3397 silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3399 auto done = m_jit.label();
3401 addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3402 addICGenerationState->slowPathJumps.link(&m_jit);
3403 addICGenerationState->slowPathStart = m_jit.label();
3405 silentSpill(savePlans);
// Constants were never given registers; materialize them into the (free)
// result registers before calling out. Copies keep the captured values
// intact for any later use.
3407 auto innerLeftRegs = leftRegs;
3408 auto innerRightRegs = rightRegs;
3409 if (leftOperand.isConst()) {
3410 innerLeftRegs = resultRegs;
3411 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3412 } else if (rightOperand.isConst()) {
3413 innerRightRegs = resultRegs;
3414 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
// Repatchable slow path feeds profiling back into the IC; otherwise use
// the plain operation.
3417 if (addICGenerationState->shouldSlowPathRepatch)
3418 addICGenerationState->slowPathCall = callOperation(operationValueAddOptimize, resultRegs, innerLeftRegs, innerRightRegs, addIC);
3420 addICGenerationState->slowPathCall = callOperation(operationValueAdd, resultRegs, innerLeftRegs, innerRightRegs);
3422 silentFill(savePlans);
3423 m_jit.exceptionCheck();
3424 m_jit.jump().linkTo(done, &m_jit);
3426 m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3427 addIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
// No inline fast path was generated: give any constant operand a register
// now and fall through to an unconditional call.
3431 if (leftOperand.isConst()) {
3432 left = JSValueOperand(this, leftChild);
3433 leftRegs = left->jsValueRegs();
3434 } else if (rightOperand.isConst()) {
3435 right = JSValueOperand(this, rightChild);
3436 rightRegs = right->jsValueRegs();
3440 callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3441 m_jit.exceptionCheck();
3444 jsValueResult(resultRegs, node);
// Emits code for InstanceOfCustom (a constructor with a custom
// Symbol.hasInstance). There is no fast path at all: an unconditional jump
// routes every execution through the slow-path call.
3448 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3450 // We could do something smarter here but this case is currently super rare and unless
3451 // Symbol.hasInstance becomes popular will likely remain that way.
3453 JSValueOperand value(this, node->child1());
3454 SpeculateCellOperand constructor(this, node->child2());
3455 JSValueOperand hasInstanceValue(this, node->child3());
3456 GPRTemporary result(this);
3458 JSValueRegs valueRegs = value.jsValueRegs();
3459 GPRReg constructorGPR = constructor.gpr();
3460 JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3461 GPRReg resultGPR = result.gpr();
// Unconditional jump: everything goes through the slow path.
3463 MacroAssembler::Jump slowCase = m_jit.jump();
3465 addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3467 unblessedBooleanResult(resultGPR, node);
// Emits code for IsJSArray: true iff the value is a cell whose JSCell type
// is exactly ArrayType. Non-cells short-circuit to false.
3470 void SpeculativeJIT::compileIsJSArray(Node* node)
3472 JSValueOperand value(this, node->child1());
3473 GPRFlushedCallResult result(this);
3475 JSValueRegs valueRegs = value.jsValueRegs();
3476 GPRReg resultGPR = result.gpr();
3478 JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
// Cell path: compare the cell's type-info byte against ArrayType.
3480 m_jit.compare8(JITCompiler::Equal,
3481 JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3482 TrustedImm32(ArrayType),
3484 blessBoolean(resultGPR);
3485 JITCompiler::Jump done = m_jit.jump();
// Non-cell path: the answer is always false.
3487 isNotCell.link(&m_jit);
3488 moveFalseTo(resultGPR);
3491 blessedBooleanResult(resultGPR, node);
// Emits code for IsRegExpObject: true iff the value is a cell whose JSCell
// type is exactly RegExpObjectType. Mirrors compileIsJSArray.
3494 void SpeculativeJIT::compileIsRegExpObject(Node* node)
3496 JSValueOperand value(this, node->child1());
3497 GPRFlushedCallResult result(this);
3499 JSValueRegs valueRegs = value.jsValueRegs();
3500 GPRReg resultGPR = result.gpr();
3502 JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
// Cell path: compare the cell's type-info byte against RegExpObjectType.
3504 m_jit.compare8(JITCompiler::Equal,
3505 JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3506 TrustedImm32(RegExpObjectType),
3508 blessBoolean(resultGPR);
3509 JITCompiler::Jump done = m_jit.jump();
// Non-cell path: the answer is always false.
3511 isNotCell.link(&m_jit);
3512 moveFalseTo(resultGPR);
3515 blessedBooleanResult(resultGPR, node);
// Emits code for IsTypedArrayView: true iff the value is a cell whose type
// falls in the contiguous typed-array range [Int8ArrayType,
// Float64ArrayType], tested with the classic subtract-then-unsigned-compare
// trick. Assumes the typed-array JSTypes are laid out contiguously with
// Int8ArrayType first and Float64ArrayType last.
// NOTE(review): the two GPRTemporary declarations are presumably the elided
// #if USE(JSVALUE64)/#else halves.
3518 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3520 JSValueOperand value(this, node->child1());
3522 GPRTemporary result(this, Reuse, value);
3524 GPRTemporary result(this, Reuse, value, PayloadWord);
3527 JSValueRegs valueRegs = value.jsValueRegs();
3528 GPRReg resultGPR = result.gpr();
3530 JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
// result = type - Int8ArrayType; unsigned compare <= (Float64 - Int8) then
// covers the whole typed-array range in one test.
3532 m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3533 m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3534 m_jit.compare32(JITCompiler::BelowOrEqual,
3536 TrustedImm32(Float64ArrayType - Int8ArrayType),
3538 blessBoolean(resultGPR);
3539 JITCompiler::Jump done = m_jit.jump();
// Non-cell path: the answer is always false.
3541 isNotCell.link(&m_jit);
3542 moveFalseTo(resultGPR);
3545 blessedBooleanResult(resultGPR, node);
// Emits code for CallObjectConstructor on an untyped value: if the value is
// already an object it is returned as-is; anything else (non-cell or
// non-object cell) goes to operationObjectConstructor on the slow path.
// NOTE(review): the two GPRTemporary declarations are presumably the elided
// #if USE(JSVALUE64)/#else halves.
3548 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3550 RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3551 JSValueOperand value(this, node->child1());
3553 GPRTemporary result(this, Reuse, value);
3555 GPRTemporary result(this, Reuse, value, PayloadWord);
3558 JSValueRegs valueRegs = value.jsValueRegs();
3559 GPRReg resultGPR = result.gpr();
// Fast path: an object passes both checks and is its own result.
3561 MacroAssembler::JumpList slowCases;
3562 slowCases.append(m_jit.branchIfNotCell(valueRegs));
3563 slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3564 m_jit.move(valueRegs.payloadGPR(), resultGPR);
3566 addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3567 cellResult(resultGPR, node);
// Emits code for ArithAdd, switching on the binary use kind. Visible cases:
// Int32 (with optional overflow speculation and OSR-exit recovery), Int52
// (64-bit only, per the #endif on line 3653), and DoubleRepUse.
// NOTE(review): case labels, breaks, and some braces are elided in this
// excerpt; comments describe only the visible statements.
3570 void SpeculativeJIT::compileArithAdd(Node* node)
3572 switch (node->binaryUseKind()) {
// Int32 case. Negative zero can't arise from int32 addition, hence the
// assert.
3574 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3576 if (node->child2()->isInt32Constant()) {
3577 SpeculateInt32Operand op1(this, node->child1());
3578 GPRTemporary result(this, Reuse, op1);
3580 GPRReg gpr1 = op1.gpr();
3581 int32_t imm2 = node->child2()->asInt32();
3582 GPRReg gprResult = result.gpr();
// Unchecked mode: a plain add suffices.
3584 if (!shouldCheckOverflow(node->arithMode())) {
3585 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3586 int32Result(gprResult, node);
// Checked mode: branch on overflow. When the add clobbered the operand
// (gpr1 == gprResult), attach a SpeculationRecovery so OSR exit can undo
// the immediate add and recover the original value.
3590 MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3591 if (gpr1 == gprResult) {
3592 speculationCheck(Overflow, JSValueRegs(), 0, check,
3593 SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3595 speculationCheck(Overflow, JSValueRegs(), 0, check);
3597 int32Result(gprResult, node);
// Int32 register-register case.
3601 SpeculateInt32Operand op1(this, node->child1());
3602 SpeculateInt32Operand op2(this, node->child2());
3603 GPRTemporary result(this, Reuse, op1, op2);
3605 GPRReg gpr1 = op1.gpr();
3606 GPRReg gpr2 = op2.gpr();
3607 GPRReg gprResult = result.gpr();
3609 if (!shouldCheckOverflow(node->arithMode()))
3610 m_jit.add32(gpr1, gpr2, gprResult);
3612 MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
// If the result overwrote one operand, record a recovery that subtracts
// the surviving operand back out on OSR exit.
3614 if (gpr1 == gprResult)
3615 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3616 else if (gpr2 == gprResult)
3617 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3619 speculationCheck(Overflow, JSValueRegs(), 0, check);
3622 int32Result(gprResult, node);
// Int52 case (inside USE(JSVALUE64), per line 3653).
3628 ASSERT(shouldCheckOverflow(node->arithMode()));
3629 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3631 // Will we need an overflow check? If we can prove that neither input can be
3632 // Int52 then the overflow check will not be necessary.
3633 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3634 && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only))3
3635 SpeculateWhicheverInt52Operand op1(this, node->child1());
3636 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3637 GPRTemporary result(this, Reuse, op1);
3638 m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3639 int52Result(result.gpr(), node, op1.format());
// Otherwise add with a 64-bit overflow check and bail to OSR on overflow.
3643 SpeculateInt52Operand op1(this, node->child1());
3644 SpeculateInt52Operand op2(this, node->child2());
3645 GPRTemporary result(this);
3646 m_jit.move(op1.gpr(), result.gpr());
3648 Int52Overflow, JSValueRegs(), 0,
3649 m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3650 int52Result(result.gpr(), node);
3653 #endif // USE(JSVALUE64)
// Double case: IEEE addition cannot fail, so no checks are needed.
3655 case DoubleRepUse: {
3656 SpeculateDoubleOperand op1(this, node->child1());
3657 SpeculateDoubleOperand op2(this, node->child2());
3658 FPRTemporary result(this, op1, op2);
3660 FPRReg reg1 = op1.fpr();
3661 FPRReg reg2 = op2.fpr();
3662 m_jit.addDouble(reg1, reg2, result.fpr());
3664 doubleResult(result.fpr(), node);
3669 RELEASE_ASSERT_NOT_REACHED();
// Emits code for MakeRope: inline-allocates a JSRopeString from 2 or 3
// child strings, stores the fibers, and derives the rope's flags (8-bit iff
// all fibers are 8-bit, via AND of flags) and length (sum of fiber lengths,
// with OSR exit on 32-bit overflow). Allocation or overflow failures fall
// back to operationMakeRope2/3.
// NOTE(review): the declaration of opGPRs/numOpGPRs and several braces are
// elided from this excerpt; comments describe only the visible statements.
3674 void SpeculativeJIT::compileMakeRope(Node* node)
3676 ASSERT(node->child1().useKind() == KnownStringUse);
3677 ASSERT(node->child2().useKind() == KnownStringUse);
3678 ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3680 SpeculateCellOperand op1(this, node->child1());
3681 SpeculateCellOperand op2(this, node->child2());
3682 SpeculateCellOperand op3(this, node->child3());
3683 GPRTemporary result(this);
3684 GPRTemporary allocator(this);
3685 GPRTemporary scratch(this);
3689 opGPRs[0] = op1.gpr();
3690 opGPRs[1] = op2.gpr();
3691 if (node->child3()) {
3692 opGPRs[2] = op3.gpr();
3695 opGPRs[2] = InvalidGPRReg;
3698 GPRReg resultGPR = result.gpr();
3699 GPRReg allocatorGPR = allocator.gpr();
3700 GPRReg scratchGPR = scratch.gpr();
// Inline cell allocation from the destructor-object allocator; failures
// jump to slowPath.
3702 JITCompiler::JumpList slowPath;
3703 MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3704 m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3705 emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
// Null m_value marks the string as a rope; unused fiber slots are nulled.
3707 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3708 for (unsigned i = 0; i < numOpGPRs; ++i)
3709 m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3710 for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3711 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
// Seed flags (scratchGPR) and running length (allocatorGPR, now free to
// repurpose) from the first fiber.
3712 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3713 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
// Debug-only sanity check that the length is non-negative.
3714 if (!ASSERT_DISABLED) {
3715 JITCompiler::Jump ok = m_jit.branch32(
3716 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3717 m_jit.abortWithReason(DFGNegativeStringLength);
// Fold in the remaining fibers: AND the flags, add the lengths with an
// overflow speculation check (a rope length must fit in 32 bits).
3720 for (unsigned i = 1; i < numOpGPRs; ++i) {
3721 m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3723 Uncountable, JSValueSource(), nullptr,
3725 JITCompiler::Overflow,
3726 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
// Only the Is8Bit bit of the ANDed flags is kept for the rope's flags.
3728 m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3729 m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3730 if (!ASSERT_DISABLED) {
3731 JITCompiler::Jump ok = m_jit.branch32(
3732 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3733 m_jit.abortWithReason(DFGNegativeStringLength);
3736 m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
// Slow path: call the matching 2- or 3-fiber rope constructor.
3738 switch (numOpGPRs) {
3740 addSlowPathGenerator(slowPathCall(
3741 slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3744 addSlowPathGenerator(slowPathCall(
3745 slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3748 RELEASE_ASSERT_NOT_REACHED();
3752 cellResult(resultGPR, node);
// Emits code for ArithClz32: counts leading zeros of an Int32 operand via
// the MacroAssembler's countLeadingZeros32 and produces an Int32 result.
// Fixup guarantees the operand is already Int32 (see the assert).
3755 void SpeculativeJIT::compileArithClz32(Node* node)
3757 ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
3758 SpeculateInt32Operand value(this, node->child1());
3759 GPRTemporary result(this, Reuse, value);
3760 GPRReg valueReg = value.gpr();
3761 GPRReg resultReg = result.gpr();
3762 m_jit.countLeadingZeros32(valueReg, resultReg);
3763 int32Result(resultReg, node);
3766 void SpeculativeJIT::compileArithSub(Node* node)
3768 switch (node->binaryUseKind()) {
3770 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3772 if (node->child2()->isInt32Constant()) {
3773 SpeculateInt32Operand op1(this, node->child1());
3774 int32_t imm2 = node->child2()->asInt32();
3775 GPRTemporary result(this);
3777 if (!shouldCheckOverflow(node->arithMode())) {
3778 m_jit.move(op1.gpr(), result.gpr());
3779 m_jit.sub32(Imm32(imm2), result.gpr());
3781 GPRTemporary scratch(this);
3782 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3785 int32Result(result.gpr(), node);
3789 if (node->child1()->isInt32Constant()) {
3790 int32_t imm1 = node->child1()->asInt32();
3791 SpeculateInt32Operand op2(this, node->child2());
3792 GPRTemporary result(this);
3794 m_jit.move(Imm32(imm1), result.gpr());
3795 if (!shouldCheckOverflow(node->arithMode()))
3796 m_jit.sub32(op2.gpr(), result.gpr());
3798 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3800 int32Result(result.gpr(), node);
3804 SpeculateInt32Operand op1(this, node->child1());
3805 SpeculateInt32Operand op2(this, node->child2());
3806 GPRTemporary result(this);
3808 if (!shouldCheckOverflow(node->arithMode())) {
3809 m_jit.move(op1.gpr(), result.gpr());
3810 m_jit.sub32(op2.gpr(), result.gpr());
3812 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3814 int32Result(result.gpr(), node);
3820 ASSERT(shouldCheckOverflow(node->arithMode()));
3821 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3823 // Will we need an overflow check? If we can prove that neither input can be
3824 // Int52 then the overflow check will not be necessary.
3825 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3826 && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3827 SpeculateWhicheverInt52Operand op1(this, node->child1());
3828 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3829 GPRTemporary result(this, Reuse, op1);
3830 m_jit.move(op1.gpr(), result.gpr());
3831 m_jit.sub64(op2.gpr(), result.gpr());
3832 int52Result(result.gpr(), node, op1.format());
3836 SpeculateInt52Operand op1(this, node->child1());
3837 SpeculateInt52Operand op2(this, node->child2());
3838 GPRTemporary result(this);
3839 m_jit.move(op1.gpr(), result.gpr());
3841 Int52Overflow, JSValueRegs(), 0,
3842 m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3843 int52Result(result.gpr(), node);
3846 #endif // USE(JSVALUE64)
3848 case DoubleRepUse: {
3849 SpeculateDoubleOperand op1(this, node->child1());
3850 SpeculateDoubleOperand op2(this, node->child2());
3851 FPRTemporary result(this, op1);
3853 FPRReg reg1 = op1.fpr();
3854 FPRReg reg2 = op2.fpr();
3855 m_jit.subDouble(reg1, reg2, result.fpr());
3857 doubleResult(result.fpr(), node);
3862 Edge& leftChild = node->child1();
3863 Edge& rightChild = node->child2();
3865 JSValueOperand left(this, leftChild);
3866 JSValueOperand right(this, rightChild);
3868 JSValueRegs leftRegs = left.jsValueRegs();
3869 JSValueRegs rightRegs = right.jsValueRegs();
3871 FPRTemporary leftNumber(this);
3872 FPRTemporary rightNumber(this);
3873 FPRReg leftFPR = leftNumber.fpr();
3874 FPRReg rightFPR = rightNumber.fpr();
3877 GPRTemporary result(this);
3878 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3879 GPRTemporary scratch(this);
3880 GPRReg scratchGPR = scratch.gpr();
3881 FPRReg scratchFPR = InvalidFPRReg;
3883 GPRTemporary resultTag(this);
3884 GPRTemporary resultPayload(this);
3885 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3886 GPRReg scratchGPR = resultTag.gpr();
3887 FPRTemporary fprScratch(this);
3888 FPRReg scratchFPR = fprScratch.fpr();
3891 SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3892 SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3894 JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3895 leftFPR, rightFPR, scratchGPR, scratchFPR);
3896 gen.generateFastPath(m_jit);
3898 ASSERT(gen.didEmitFastPath());
3899 gen.endJumpList().append(m_jit.jump());