2 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMulGenerator.h"
48 #include "JITRightShiftGenerator.h"
49 #include "JITSubGenerator.h"
50 #include "JSCInlines.h"
51 #include "JSEnvironmentRecord.h"
52 #include "JSGeneratorFunction.h"
53 #include "JSLexicalEnvironment.h"
54 #include "LinkBuffer.h"
55 #include "RegExpConstructor.h"
56 #include "ScopedArguments.h"
57 #include "ScratchRegisterAllocator.h"
58 #include "WriteBarrierBuffer.h"
59 #include <wtf/MathExtras.h>
61 namespace JSC { namespace DFG {
// Constructor: binds this speculative JIT to the JITCompiler, wiring up the
// abstract-interpretation state over the compiler's graph, the variable event
// stream, and the minified DFG stored in the JIT code object.
// NOTE(review): several initializer-list entries and the constructor body are
// elided in this copy of the source.
63 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
67 , m_lastGeneratedNode(LastNodeType)
// Generation info is sized to the frame register count of the graph.
69 , m_generationInfo(m_jit.graph().frameRegisterCount())
70 , m_state(m_jit.graph())
71 , m_interpreter(m_jit.graph(), m_state)
72 , m_stream(&jit.jitCode()->variableEventStream)
73 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
// Destructor (body elided in this copy).
77 SpeculativeJIT::~SpeculativeJIT()
// Emits inline code that allocates a raw JSFinalObject with the given
// structure, plus butterfly storage large enough for numElements/vectorLength
// indexed properties and the structure's out-of-line capacity. On the fast
// path resultGPR holds the object and storageGPR the butterfly; allocation
// failure falls through to a slow-path call (operationNewRawObject).
81 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
83 IndexingType indexingType = structure->indexingType();
84 bool hasIndexingHeader = hasIndexedProperties(indexingType);
86 unsigned inlineCapacity = structure->inlineCapacity();
87 unsigned outOfLineCapacity = structure->outOfLineCapacity();
89 GPRTemporary scratch(this);
90 GPRTemporary scratch2(this);
91 GPRReg scratchGPR = scratch.gpr();
92 GPRReg scratch2GPR = scratch2.gpr();
// Never allocate a vector smaller than the base vector length.
94 ASSERT(vectorLength >= numElements);
95 vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
97 JITCompiler::JumpList slowCases;
// Compute the total butterfly size: indexed storage (when present) plus
// out-of-line property storage.
100 if (hasIndexingHeader)
101 size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
102 size += outOfLineCapacity * sizeof(JSValue);
106 emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
// Rebase storageGPR so it points at the butterfly (just past the indexing
// header), not at the raw allocation.
107 if (hasIndexingHeader)
108 m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
110 m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
// No storage needed: use a null butterfly pointer.
112 m_jit.move(TrustedImmPtr(0), storageGPR);
114 size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
115 MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
116 m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
117 emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
119 if (hasIndexingHeader)
120 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
122 // I want a slow path that also loads out the storage pointer, and that's
123 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
124 // of work for a very small piece of functionality. :-/
125 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
126 slowCases, this, operationNewRawObject, resultGPR, storageGPR,
127 structure, vectorLength));
// For double arrays, fill the unused tail of the vector with PNaN (the hole
// value for double storage). The two loops below are the 64-bit and 32-bit
// value-representation variants; the #if guards are elided in this copy.
129 if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
131 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
132 for (unsigned i = numElements; i < vectorLength; ++i)
133 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
135 EncodedValueDescriptor value;
136 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
137 for (unsigned i = numElements; i < vectorLength; ++i) {
138 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
139 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
144 if (hasIndexingHeader)
145 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
// Loads the argument count of the (possibly inlined) call frame into
// lengthGPR. For a non-varargs inline frame the count is a compile-time
// constant; otherwise it is loaded from the frame's ArgumentCount slot.
// When includeThis is false, the implicit |this| argument is subtracted.
148 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
150 if (inlineCallFrame && !inlineCallFrame->isVarargs())
151 m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
153 VirtualRegister argumentCountRegister;
154 if (!inlineCallFrame)
155 argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
157 argumentCountRegister = inlineCallFrame->argumentCountRegister;
158 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
// Drop the |this| argument from the dynamic count.
160 m_jit.sub32(TrustedImm32(1), lengthGPR);
// Convenience overload: resolves the inline call frame from a CodeOrigin.
164 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
166 emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
// Materializes the callee of the frame identified by |origin| into calleeGPR.
// Inlined closure calls load the callee from its recovery slot; inlined
// non-closure calls use the known constant; the machine frame loads the
// Callee stack slot.
169 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
171 if (origin.inlineCallFrame) {
172 if (origin.inlineCallFrame->isClosureCall) {
174 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
178 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
182 m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
// Computes the address of the first argument of the frame for |origin| into
// startGPR: callFrameRegister plus the arguments-start offset in bytes.
185 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
189 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
190 GPRInfo::callFrameRegister, startGPR);
// OSR-exit fuzzing support: increments the global fuzz-check counter and, when
// the configured "fire at"/"fire at-or-after" threshold is reached, returns a
// jump that forces an OSR exit. Returns an unset Jump when fuzzing is off.
// regT0 is saved/restored around the counter update.
193 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
195 if (!doOSRExitFuzzing())
196 return MacroAssembler::Jump();
198 MacroAssembler::Jump result;
200 m_jit.pushToSave(GPRInfo::regT0);
201 m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
202 m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
203 m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
204 unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
205 unsigned at = Options::fireOSRExitFuzzAt();
206 if (at || atOrAfter) {
208 MacroAssembler::RelationalCondition condition;
// "at-or-after" fires once the counter reaches the threshold (Below = ok);
// "at" fires only on the exact counter value (NotEqual = ok).
210 threshold = atOrAfter;
211 condition = MacroAssembler::Below;
214 condition = MacroAssembler::NotEqual;
216 MacroAssembler::Jump ok = m_jit.branch32(
217 condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
218 m_jit.popToRestore(GPRInfo::regT0);
// Taken when the fuzz threshold check did not pass the "ok" branch.
219 result = m_jit.jump();
222 m_jit.popToRestore(GPRInfo::regT0);
// speculationCheck overload family: registers an OSR exit that fires when the
// given jump(s) are taken. Each overload appends exit info to the compiler
// and an OSRExit record (keyed by the current variable-event-stream size) to
// the JIT code. When OSR-exit fuzzing is active, the fuzz jump is folded into
// the failure jump list so fuzzing can force the exit.
227 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
231 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
232 if (fuzzJump.isSet()) {
233 JITCompiler::JumpList jumpsToFail;
234 jumpsToFail.append(fuzzJump);
235 jumpsToFail.append(jumpToFail);
236 m_jit.appendExitInfo(jumpsToFail);
238 m_jit.appendExitInfo(jumpToFail);
239 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Same as above, but for a whole JumpList of failure jumps.
242 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
246 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
247 if (fuzzJump.isSet()) {
248 JITCompiler::JumpList myJumpsToFail;
249 myJumpsToFail.append(jumpsToFail);
250 myJumpsToFail.append(fuzzJump);
251 m_jit.appendExitInfo(myJumpsToFail);
253 m_jit.appendExitInfo(jumpsToFail);
254 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Placeholder form: registers the exit now, returns a placeholder whose jumps
// are linked later. (The early-return guard's condition is elided in this
// copy; it presumably bails when compilation is already known to fail —
// TODO confirm against upstream.)
257 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
260 return OSRExitJumpPlaceholder();
261 unsigned index = m_jit.jitCode()->osrExit.size();
262 m_jit.appendExitInfo();
263 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
264 return OSRExitJumpPlaceholder(index);
// Edge-based convenience overloads: delegate to the Node* overloads above.
267 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
269 return speculationCheck(kind, jsValueSource, nodeUse.node());
272 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
274 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
277 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
279 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
// Recovery form: additionally records a SpeculationRecovery that the OSR exit
// machinery replays before exiting (e.g. to undo a speculative arithmetic op).
282 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
286 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
287 m_jit.appendExitInfo(jumpToFail);
288 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
291 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
293 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
// Emits an invalidation point: an empty-jump OSR exit (UncountableInvalidation)
// whose replacement source is a watchpoint label, so the code can be patched to
// exit here if a watchpoint fires after compilation.
296 void SpeculativeJIT::emitInvalidationPoint(Node* node)
300 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
301 m_jit.jitCode()->appendOSRExit(OSRExit(
302 UncountableInvalidation, JSValueSource(),
303 m_jit.graph().methodOfGettingAValueProfileFor(node),
304 this, m_stream->size()));
305 info.m_replacementSource = m_jit.watchpointLabel();
306 ASSERT(info.m_replacementSource.isSet());
// Emits an unconditional OSR exit (speculation has failed terminally at this
// point in the block) and marks the remainder of this compilation as dead via
// m_compileOkay = false.
310 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
314 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
315 m_compileOkay = false;
316 if (verboseCompilationEnabled())
317 dataLog("Bailing compilation.\n");
// Edge-based convenience overload.
320 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
322 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
// Registers a type check: narrows the abstract-interpreter's proof for |edge|
// to |typesPassedThrough| and attaches an OSR exit on |jumpToFail| for values
// outside that set.
325 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
327 ASSERT(needsTypeCheck(edge, typesPassedThrough));
328 m_interpreter.filter(edge, typesPassedThrough);
329 speculationCheck(exitKind, source, edge.node(), jumpToFail);
// Returns the set of registers currently in use by the register banks, merged
// with the registers that inline-cache stubs may not use. (The set-insertion
// lines inside both loops are elided in this copy.)
332 RegisterSet SpeculativeJIT::usedRegisters()
336 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
337 GPRReg gpr = GPRInfo::toRegister(i);
338 if (m_gprs.isInUse(gpr))
341 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
342 FPRReg fpr = FPRInfo::toRegister(i);
343 if (m_fprs.isInUse(fpr))
347 result.merge(RegisterSet::stubUnavailableRegisters());
// Queues a slow-path generator object to be emitted after the main path.
352 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
354 m_slowPathGenerators.append(WTFMove(slowPathGenerator));
// Lambda form: records the lambda together with the current semantic origin so
// the PC-to-CodeOrigin map can attribute the emitted code correctly.
357 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
359 m_slowPathLambdas.append(std::make_pair(lambda, m_origin.semantic));
// Emits all queued slow paths (generator objects first, then lambdas),
// recording each one's code origin in the PC-to-CodeOrigin map.
362 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
364 for (auto& slowPathGenerator : m_slowPathGenerators) {
365 pcToCodeOriginMapBuilder.appendItem(m_jit.label(), slowPathGenerator->origin().semantic);
366 slowPathGenerator->generate(this);
368 for (auto& generatorPair : m_slowPathLambdas) {
369 pcToCodeOriginMapBuilder.appendItem(m_jit.label(), generatorPair.second);
370 generatorPair.first();
// Resets all per-virtual-register generation info and empties both register
// banks — used when register state must be forgotten (e.g. at block boundaries).
374 void SpeculativeJIT::clearGenerationInfo()
376 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
377 m_generationInfo[i] = GenerationInfo();
378 m_gprs = RegisterBank<GPRInfo>();
379 m_fprs = RegisterBank<FPRInfo>();
// Builds a plan for silently spilling (before a call) and refilling (after it)
// the GPR |source| holding |spillMe|, without changing the recorded register
// state. The spill action depends on the value's current DataFormat; the fill
// action additionally exploits constants (re-materialize instead of reload)
// and spill formats (e.g. shifting between Int52 representations).
// NOTE(review): several #if USE(JSVALUE64)/#elif guards, else branches and
// break-equivalents are elided in this copy; the visible branches are the
// 64-bit and 32-bit variants interleaved.
382 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
384 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
385 Node* node = info.node();
386 DataFormat registerFormat = info.registerFormat();
387 ASSERT(registerFormat != DataFormatNone);
388 ASSERT(registerFormat != DataFormatDouble);
390 SilentSpillAction spillAction;
391 SilentFillAction fillAction;
// --- Choose the spill action ---
393 if (!info.needsSpill())
394 spillAction = DoNothingForSpill;
397 ASSERT(info.gpr() == source);
398 if (registerFormat == DataFormatInt32)
399 spillAction = Store32Payload;
400 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
401 spillAction = StorePtr;
402 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
403 spillAction = Store64;
405 ASSERT(registerFormat & DataFormatJS);
406 spillAction = Store64;
408 #elif USE(JSVALUE32_64)
// 32-bit: a JS value may occupy two GPRs; spill whichever half |source| is.
409 if (registerFormat & DataFormatJS) {
410 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
411 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
413 ASSERT(info.gpr() == source);
414 spillAction = Store32Payload;
// --- Choose the fill action ---
419 if (registerFormat == DataFormatInt32) {
420 ASSERT(info.gpr() == source);
421 ASSERT(isJSInt32(info.registerFormat()));
422 if (node->hasConstant()) {
423 ASSERT(node->isInt32Constant());
424 fillAction = SetInt32Constant;
426 fillAction = Load32Payload;
427 } else if (registerFormat == DataFormatBoolean) {
// Unboxed booleans are not expected here on this configuration.
429 RELEASE_ASSERT_NOT_REACHED();
430 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
431 fillAction = DoNothingForFill;
433 #elif USE(JSVALUE32_64)
434 ASSERT(info.gpr() == source);
435 if (node->hasConstant()) {
436 ASSERT(node->isBooleanConstant());
437 fillAction = SetBooleanConstant;
439 fillAction = Load32Payload;
441 } else if (registerFormat == DataFormatCell) {
442 ASSERT(info.gpr() == source);
443 if (node->hasConstant()) {
444 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
445 node->asCell(); // To get the assertion.
446 fillAction = SetCellConstant;
449 fillAction = LoadPtr;
451 fillAction = Load32Payload;
454 } else if (registerFormat == DataFormatStorage) {
455 ASSERT(info.gpr() == source);
456 fillAction = LoadPtr;
457 } else if (registerFormat == DataFormatInt52) {
// Int52 is stored shifted left by int52ShiftAmount; pick the reload that
// matches the on-stack spill format.
458 if (node->hasConstant())
459 fillAction = SetInt52Constant;
460 else if (info.spillFormat() == DataFormatInt52)
462 else if (info.spillFormat() == DataFormatStrictInt52)
463 fillAction = Load64ShiftInt52Left;
464 else if (info.spillFormat() == DataFormatNone)
467 RELEASE_ASSERT_NOT_REACHED();
468 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
469 fillAction = Load64; // Make GCC happy.
472 } else if (registerFormat == DataFormatStrictInt52) {
473 if (node->hasConstant())
474 fillAction = SetStrictInt52Constant;
475 else if (info.spillFormat() == DataFormatInt52)
476 fillAction = Load64ShiftInt52Right;
477 else if (info.spillFormat() == DataFormatStrictInt52)
479 else if (info.spillFormat() == DataFormatNone)
482 RELEASE_ASSERT_NOT_REACHED();
483 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
484 fillAction = Load64; // Make GCC happy.
488 ASSERT(registerFormat & DataFormatJS);
490 ASSERT(info.gpr() == source);
491 if (node->hasConstant()) {
// Cell constants are trusted (cannot be forged by user code).
492 if (node->isCellConstant())
493 fillAction = SetTrustedJSConstant;
495 fillAction = SetJSConstant;
496 } else if (info.spillFormat() == DataFormatInt32) {
497 ASSERT(registerFormat == DataFormatJSInt32);
498 fillAction = Load32PayloadBoxInt;
// 32-bit: decide per-half which fill is needed (constant halves, payload
// reload, or a tag determined by the spill format).
502 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
503 if (node->hasConstant())
504 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
505 else if (info.payloadGPR() == source)
506 fillAction = Load32Payload;
507 else { // Fill the Tag
508 switch (info.spillFormat()) {
509 case DataFormatInt32:
510 ASSERT(registerFormat == DataFormatJSInt32);
511 fillAction = SetInt32Tag;
514 ASSERT(registerFormat == DataFormatJSCell);
515 fillAction = SetCellTag;
517 case DataFormatBoolean:
518 ASSERT(registerFormat == DataFormatJSBoolean);
519 fillAction = SetBooleanTag;
522 fillAction = Load32Tag;
529 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// FPR counterpart of silentSavePlanForGPR: doubles either need no spill, or a
// StoreDouble; refill is either a constant re-materialization or a LoadDouble.
// (The 64-bit/32-bit #if guards are elided in this copy.)
532 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
534 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
535 Node* node = info.node();
536 ASSERT(info.registerFormat() == DataFormatDouble);
538 SilentSpillAction spillAction;
539 SilentFillAction fillAction;
541 if (!info.needsSpill())
542 spillAction = DoNothingForSpill;
// Constants never need spilling, so a spill here implies a non-constant
// value that has not been spilled yet.
544 ASSERT(!node->hasConstant());
545 ASSERT(info.spillFormat() == DataFormatNone);
546 ASSERT(info.fpr() == source);
547 spillAction = StoreDouble;
551 if (node->hasConstant()) {
552 node->asNumber(); // To get the assertion.
553 fillAction = SetDoubleConstant;
555 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
556 fillAction = LoadDouble;
558 #elif USE(JSVALUE32_64)
559 ASSERT(info.registerFormat() == DataFormatDouble);
560 if (node->hasConstant()) {
561 node->asNumber(); // To get the assertion.
562 fillAction = SetDoubleConstant;
564 fillAction = LoadDouble;
567 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Executes the spill half of a SilentRegisterSavePlan: stores the register to
// the node's stack slot according to the chosen action. (Case labels and
// break statements between the visible stores are elided in this copy.)
570 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
572 switch (plan.spillAction()) {
573 case DoNothingForSpill:
576 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
579 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
582 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
586 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
590 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
593 RELEASE_ASSERT_NOT_REACHED();
// Executes the fill half of a SilentRegisterSavePlan: re-materializes a
// constant or reloads the value from its stack slot into the planned register.
// |canTrample| is a scratch GPR used when building a double from an immediate
// (64-bit only; unused on 32-bit). Case labels/breaks and some #if guards are
// elided in this copy.
597 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
599 #if USE(JSVALUE32_64)
600 UNUSED_PARAM(canTrample);
602 switch (plan.fillAction()) {
603 case DoNothingForFill:
605 case SetInt32Constant:
606 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
// Int52 constants live pre-shifted; strict Int52 constants are unshifted.
609 case SetInt52Constant:
610 m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
612 case SetStrictInt52Constant:
613 m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
615 #endif // USE(JSVALUE64)
616 case SetBooleanConstant:
617 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
619 case SetCellConstant:
620 m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
623 case SetTrustedJSConstant:
624 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
627 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
// 64-bit path: build the double via an integer immediate and a GPR→FPR move.
629 case SetDoubleConstant:
630 m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
631 m_jit.move64ToDouble(canTrample, plan.fpr());
// Reload an int32 payload and re-box it as a JS number.
633 case Load32PayloadBoxInt:
634 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
635 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
637 case Load32PayloadConvertToInt52:
638 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
639 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
640 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
642 case Load32PayloadSignExtend:
643 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
644 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
// 32-bit halves: set the tag or payload of a constant JS value.
647 case SetJSConstantTag:
648 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
650 case SetJSConstantPayload:
651 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
654 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
657 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
660 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
// 32-bit path: load the double constant from the constant pool.
662 case SetDoubleConstant:
663 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
667 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
670 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
673 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
677 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
// Conversions between Int52 (shifted) and StrictInt52 (unshifted) on reload.
679 case Load64ShiftInt52Right:
680 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
681 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
683 case Load64ShiftInt52Left:
684 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
685 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
689 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
692 RELEASE_ASSERT_NOT_REACHED();
// Given tempGPR holding the cell's indexing type byte, returns a jump taken
// when the indexing type does NOT match the wanted |shape| under the array
// class constraints of |arrayMode| (array-ness checked or ignored as needed).
// Note: this clobbers tempGPR via and32.
696 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
698 switch (arrayMode.arrayClass()) {
699 case Array::OriginalArray: {
// OriginalArray is not expected to reach this helper.
701 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
702 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
// Array class: must be an array AND have the wanted shape.
708 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
709 return m_jit.branch32(
710 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
// Non-array classes: must NOT have the IsArray bit, and match the shape.
712 case Array::NonArray:
713 case Array::OriginalNonArray:
714 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
715 return m_jit.branch32(
716 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
// PossiblyArray: only the shape matters; array-ness is ignored.
718 case Array::PossiblyArray:
719 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
720 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
723 RELEASE_ASSERT_NOT_REACHED();
724 return JITCompiler::Jump();
// JumpList variant: returns all jumps taken when tempGPR (the indexing type
// byte) does not match |arrayMode|. Simple shapes delegate to the single-shape
// helper; ArrayStorage/SlowPutArrayStorage use a subtract-and-range check so
// that SlowPut mode accepts both storage shapes. Clobbers tempGPR.
727 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
729 JITCompiler::JumpList result;
731 switch (arrayMode.type()) {
733 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
736 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
738 case Array::Contiguous:
739 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
741 case Array::Undecided:
742 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
744 case Array::ArrayStorage:
745 case Array::SlowPutArrayStorage: {
746 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
748 if (arrayMode.isJSArray()) {
749 if (arrayMode.isSlowPut()) {
// Must be an array, and its shape must fall in
// [ArrayStorageShape, SlowPutArrayStorageShape].
752 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
753 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
754 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
757 MacroAssembler::Above, tempGPR,
758 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
// Strict ArrayStorage on a JS array: exact match required.
761 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
763 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
// Non-array (or possibly-array) case: check shape only.
766 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
767 if (arrayMode.isSlowPut()) {
768 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
771 MacroAssembler::Above, tempGPR,
772 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
776 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Compiles a CheckArray node: verifies at runtime that the base cell matches
// the node's (already specific, non-converting) ArrayMode. Indexed shapes are
// checked via the indexing-type byte; argument/typed-array modes via cell
// type; otherwise falls through to a ClassInfo comparison on the structure.
// No check is emitted when the abstract state already proves the mode.
787 void SpeculativeJIT::checkArray(Node* node)
789 ASSERT(node->arrayMode().isSpecific());
790 ASSERT(!node->arrayMode().doesConversion());
792 SpeculateCellOperand base(this, node->child1());
793 GPRReg baseReg = base.gpr();
// Already proven by abstract interpretation — nothing to emit.
795 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
796 noResult(m_currentNode);
800 const ClassInfo* expectedClassInfo = 0;
802 switch (node->arrayMode().type()) {
803 case Array::AnyTypedArray:
// String array mode should have been lowered away before this point.
805 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
809 case Array::Contiguous:
810 case Array::Undecided:
811 case Array::ArrayStorage:
812 case Array::SlowPutArrayStorage: {
813 GPRTemporary temp(this);
814 GPRReg tempGPR = temp.gpr();
815 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
817 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
818 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
820 noResult(m_currentNode);
823 case Array::DirectArguments:
824 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
825 noResult(m_currentNode);
827 case Array::ScopedArguments:
828 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
829 noResult(m_currentNode);
// Typed arrays: check the JSType derived from the typed-array type.
832 speculateCellTypeWithoutTypeFiltering(
833 node->child1(), baseReg,
834 typeForTypedArrayType(node->arrayMode().typedArrayType()));
835 noResult(m_currentNode);
// Fallback: compare the structure's ClassInfo against the expected one.
839 RELEASE_ASSERT(expectedClassInfo);
841 GPRTemporary temp(this);
842 GPRTemporary temp2(this);
843 m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
845 BadType, JSValueSource::unboxedCell(baseReg), node,
847 MacroAssembler::NotEqual,
848 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
849 MacroAssembler::TrustedImmPtr(expectedClassInfo)));
851 noResult(m_currentNode);
// Compiles an Arrayify/ArrayifyToStructure: checks whether the base already
// has the wanted structure (ArrayifyToStructure) or indexing shape (Arrayify),
// and otherwise calls the ArrayifySlowPathGenerator to convert the object.
// propertyReg may be InvalidGPRReg when no index is involved.
854 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
856 ASSERT(node->arrayMode().doesConversion());
858 GPRTemporary temp(this);
// The structure temporary is only allocated for the non-ToStructure form.
859 GPRTemporary structure;
860 GPRReg tempGPR = temp.gpr();
861 GPRReg structureGPR = InvalidGPRReg;
863 if (node->op() != ArrayifyToStructure) {
864 GPRTemporary realStructure(this);
865 structure.adopt(realStructure);
866 structureGPR = structure.gpr();
869 // We can skip all that comes next if we already have array storage.
870 MacroAssembler::JumpList slowPath;
872 if (node->op() == ArrayifyToStructure) {
// Weak structure compare: exact structure match on the fast path.
873 slowPath.append(m_jit.branchWeakStructure(
874 JITCompiler::NotEqual,
875 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
879 MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
881 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
884 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
885 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
887 noResult(m_currentNode);
// Node-level entry point: fills the base (and optional index from child2)
// operands and delegates to the three-argument arrayify above.
890 void SpeculativeJIT::arrayify(Node* node)
892 ASSERT(node->arrayMode().isSpecific());
894 SpeculateCellOperand base(this, node->child1());
896 if (!node->child2()) {
897 arrayify(node, base.gpr(), InvalidGPRReg);
901 SpeculateInt32Operand property(this, node->child2());
903 arrayify(node, base.gpr(), property.gpr());
// Materializes a storage (butterfly) pointer for |edge| into a GPR. Reloads a
// spilled storage value, reuses a register already holding it, or — when the
// value is not in storage format — fills it as a cell and returns that pointer.
906 GPRReg SpeculativeJIT::fillStorage(Edge edge)
908 VirtualRegister virtualRegister = edge->virtualRegister();
909 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
911 switch (info.registerFormat()) {
912 case DataFormatNone: {
913 if (info.spillFormat() == DataFormatStorage) {
914 GPRReg gpr = allocate();
915 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
916 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
917 info.fillStorage(*m_stream, gpr);
921 // Must be a cell; fill it as a cell and then return the pointer.
922 return fillSpeculateCell(edge);
925 case DataFormatStorage: {
926 GPRReg gpr = info.gpr();
// Default: any other register format is treated as a cell.
932 return fillSpeculateCell(edge);
// Marks all of a node's children as used (releasing their virtual-register
// references). Var-args nodes iterate the graph's var-arg child list;
// fixed-arity nodes use child1..child3, asserting later children are absent
// when an earlier one is. (The use(childN) calls are elided in this copy.)
936 void SpeculativeJIT::useChildren(Node* node)
938 if (node->flags() & NodeHasVarArgs) {
939 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
940 if (!!m_jit.graph().m_varArgChildren[childIdx])
941 use(m_jit.graph().m_varArgChildren[childIdx]);
944 Edge child1 = node->child1();
946 ASSERT(!node->child2() && !node->child3());
951 Edge child2 = node->child2();
953 ASSERT(!node->child3());
958 Edge child3 = node->child3();
// Compiles TryGetById via a patchable "pure" get-by-id inline cache. The
// CellUse path skips the not-cell check; the untyped path branches around the
// cache when the base is not a cell.
965 void SpeculativeJIT::compileTryGetById(Node* node)
967 switch (node->child1().useKind()) {
969 SpeculateCellOperand base(this, node->child1());
970 JSValueRegsTemporary result(this, Reuse, base);
972 JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
973 JSValueRegs resultRegs = result.regs();
977 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
979 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
// Untyped base: must guard against non-cell values before probing the cache.
984 JSValueOperand base(this, node->child1());
985 JSValueRegsTemporary result(this, Reuse, base);
987 JSValueRegs baseRegs = base.jsValueRegs();
988 JSValueRegs resultRegs = result.regs();
992 JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
994 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
996 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1001 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
// Compiles the In node. When the property is a constant atomic string, emits a
// patchable inline cache (StructureStubInfo + operationInOptimize slow path);
// otherwise falls back to a generic operationGenericIn call.
1006 void SpeculativeJIT::compileIn(Node* node)
1008 SpeculateCellOperand base(this, node->child2());
1009 GPRReg baseGPR = base.gpr();
1011 if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1012 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1013 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1015 GPRTemporary result(this);
1016 GPRReg resultGPR = result.gpr();
1018 use(node->child1());
// The patchable jump initially targets the slow path; patching rewires it.
1020 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1021 MacroAssembler::Label done = m_jit.label();
1023 // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1024 // we can cast it to const AtomicStringImpl* safely.
1025 auto slowPath = slowPathCall(
1026 jump.m_jump, this, operationInOptimize,
1027 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1028 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
// Fill in the stub metadata the repatching machinery needs.
1030 stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1031 stubInfo->codeOrigin = node->origin.semantic;
1032 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1033 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1034 #if USE(JSVALUE32_64)
1035 stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1036 stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1038 stubInfo->patch.usedRegisters = usedRegisters();
1040 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1041 addSlowPathGenerator(WTFMove(slowPath));
1045 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: call out with the (possibly non-string) key.
1050 JSValueOperand key(this, node->child1());
1051 JSValueRegs regs = key.jsValueRegs();
1053 GPRFlushedCallResult result(this);
1054 GPRReg resultGPR = result.gpr();
1061 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1063 m_jit.exceptionCheck();
1064 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles DeleteById (the JS `delete obj.prop` operator) as a plain runtime
// call: operationDeleteById(base value, property UID). The result is an
// unboxed boolean (true if the property was deleted or absent).
1067 void SpeculativeJIT::compileDeleteById(Node* node)
1069     JSValueOperand value(this, node->child1());
1070     GPRFlushedCallResult result(this);
1072     JSValueRegs valueRegs = value.jsValueRegs();
1073     GPRReg resultGPR = result.gpr();
1078     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
// delete can throw (e.g. in strict mode on a non-configurable property).
1079     m_jit.exceptionCheck();
1081     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles DeleteByVal (the JS `delete obj[key]` operator) as a plain runtime
// call: operationDeleteByVal(base value, key value). Mirrors compileDeleteById
// but with a boxed key instead of an identifier UID.
1084 void SpeculativeJIT::compileDeleteByVal(Node* node)
1086     JSValueOperand base(this, node->child1());
1087     JSValueOperand key(this, node->child2());
1088     GPRFlushedCallResult result(this);
1090     JSValueRegs baseRegs = base.jsValueRegs();
1091     JSValueRegs keyRegs = key.jsValueRegs();
1092     GPRReg resultGPR = result.gpr();
1098     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1099     m_jit.exceptionCheck();
1101     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Emits an untyped (non-speculative) comparison. If the very next node in the
// block is a Branch consuming this compare (detectPeepHoleBranch), fuses the
// compare with the branch and advances m_indexInBlock past it; otherwise emits
// a standalone boolean-producing compare. Returns true iff the branch was fused,
// so the caller can skip the branch node.
1104 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1106     unsigned branchIndexInBlock = detectPeepHoleBranch();
1107     if (branchIndexInBlock != UINT_MAX) {
1108         Node* branchNode = m_block->at(branchIndexInBlock);
// The compare must only be consumed by the fused branch.
1110         ASSERT(node->adjustedRefCount() == 1);
1112         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1114         m_indexInBlock = branchIndexInBlock;
1115         m_currentNode = branchNode;
1120     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
// Untyped strict-equality (===), structured exactly like nonSpeculativeCompare:
// fuse with an immediately-following Branch when possible (returning true), else
// emit a standalone strict-equality producing a boolean. 'invert' selects !==.
1125 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1127     unsigned branchIndexInBlock = detectPeepHoleBranch();
1128     if (branchIndexInBlock != UINT_MAX) {
1129         Node* branchNode = m_block->at(branchIndexInBlock);
1131         ASSERT(node->adjustedRefCount() == 1);
1133         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
// Skip over the fused branch node in the main compile loop.
1135         m_indexInBlock = branchIndexInBlock;
1136         m_currentNode = branchNode;
1141     nonSpeculativeNonPeepholeStrictEq(node, invert);
// Debug helper: maps a DataFormat enum value to a short printable name used by
// SpeculativeJIT::dump(). The table entries (elided in this listing) must stay
// in sync with the DataFormat enum ordering, as the comment below notes.
1146 static const char* dataFormatString(DataFormat format)
1148     // These values correspond to the DataFormat enum.
1149     const char* strings[] = {
1167     return strings[format];
// Debug dump of the register-allocation state: GPR bank, FPR bank, and the
// per-virtual-register GenerationInfo (register format, spill format, and which
// gpr/fpr currently holds the value). Output is bracketed by <label>...</label>.
1170 void SpeculativeJIT::dump(const char* label)
1173     dataLogF("<%s>\n", label);
1175     dataLogF("  gprs:\n");
1177     dataLogF("  fprs:\n");
1179     dataLogF("  VirtualRegisters:\n");
1180     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1181         GenerationInfo& info = m_generationInfo[i];
1183             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1185             dataLogF("    % 3d:[__][__]", i);
1186         if (info.registerFormat() == DataFormatDouble)
1187             dataLogF(":fpr%d\n", info.fpr());
1188         else if (info.registerFormat() != DataFormatNone
1189 #if USE(JSVALUE32_64)
// On 32-bit, JS-format values live in a GPR pair, so a single-gpr name
// would be misleading; exclude them from the gpr printout.
1190             && !(info.registerFormat() & DataFormatJS)
1193             ASSERT(info.gpr() != InvalidGPRReg);
1194             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1199     dataLogF("</%s>\n", label);
// Default constructor: an empty temporary holding no register; a register can
// be taken over later via adopt().
1202 GPRTemporary::GPRTemporary()
1204     , m_gpr(InvalidGPRReg)
// Allocates a fresh GPR from the speculative JIT's register allocator; the
// register is released when the temporary is destroyed.
1208 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1210     , m_gpr(InvalidGPRReg)
1212     m_gpr = m_jit->allocate();
// Allocates a specific GPR (e.g. a register required by a calling convention
// or an instruction with fixed operands).
1215 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1217     , m_gpr(InvalidGPRReg)
1219     m_gpr = m_jit->allocate(specific);
1222 #if USE(JSVALUE32_64)
// 32-bit only: try to reuse one half (tag or payload word) of a JSValueOperand
// as the temporary's register. Reuse is legal only when the operand is not in
// double format and its node has no other users (canReuse); otherwise fall back
// to allocating a fresh GPR.
1223 GPRTemporary::GPRTemporary(
1224     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1226     , m_gpr(InvalidGPRReg)
1228     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1229         m_gpr = m_jit->reuse(op1.gpr(which));
1231         m_gpr = m_jit->allocate();
1233 #endif // USE(JSVALUE32_64)
// Default constructor: members (GPRTemporary fields) default-construct empty.
1235 JSValueRegsTemporary::JSValueRegsTemporary() { }
// Allocating constructor: acquires the register(s) needed to hold a JSValue —
// one GPR on 64-bit, a tag/payload pair on 32-bit. (Body elided in this listing.)
1237 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
// 64-bit reuse constructor: a JSValue fits in one GPR, so the WhichValueWord
// argument is irrelevant (unnamed) and the single GPR is reused from 'operand'.
1248 template<typename T>
1249 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1250     : m_gpr(jit, Reuse, operand)
// 32-bit reuse constructor: the caller says which half of the eventual result
// should reuse the operand's register (resultWord); the other half gets a
// freshly allocated GPR.
1254 template<typename T>
1255 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1257     if (resultWord == PayloadWord) {
1258         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1259         m_tagGPR = GPRTemporary(jit);
1261         m_payloadGPR = GPRTemporary(jit);
1262         m_tagGPR = GPRTemporary(jit, Reuse, operand);
// 64-bit: reuse the JSValueOperand's single GPR for the temporary.
1268 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1270     m_gpr = GPRTemporary(jit, Reuse, operand);
// 32-bit: if the operand's node has no other users, reuse both its payload and
// tag GPRs; otherwise allocate a fresh pair. Reuse is all-or-nothing here.
1273 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1275     if (jit->canReuse(operand.node())) {
1276         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1277         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1279         m_payloadGPR = GPRTemporary(jit);
1280         m_tagGPR = GPRTemporary(jit);
// Destructor: the member GPRTemporary destructors release any held registers.
1285 JSValueRegsTemporary::~JSValueRegsTemporary() { }
// Returns the held register(s) as a JSValueRegs: single GPR on 64-bit,
// (tag, payload) pair on 32-bit. (The #if selecting between the two returns is
// elided in this listing.)
1287 JSValueRegs JSValueRegsTemporary::regs()
1290     return JSValueRegs(m_gpr.gpr());
1292     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
// Transfers register ownership from 'other' into this (previously empty)
// temporary; 'other' is left holding no register so it won't double-free.
// NOTE(review): listing is elided — the asserts/reset on this side's m_jit and
// other.m_jit are not all visible here.
1296 void GPRTemporary::adopt(GPRTemporary& other)
1299     ASSERT(m_gpr == InvalidGPRReg);
1300     ASSERT(other.m_jit);
1301     ASSERT(other.m_gpr != InvalidGPRReg);
1302     m_jit = other.m_jit;
1303     m_gpr = other.m_gpr;
1305     other.m_gpr = InvalidGPRReg;
// Allocates a fresh FPR from the register allocator.
1308 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1310     , m_fpr(InvalidFPRReg)
1312     m_fpr = m_jit->fprAllocate();
// Reuses op1's FPR when its node has no other users; otherwise allocates fresh.
1315 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1317     , m_fpr(InvalidFPRReg)
1319     if (m_jit->canReuse(op1.node()))
1320         m_fpr = m_jit->reuse(op1.fpr());
1322         m_fpr = m_jit->fprAllocate();
// Two-operand reuse: prefer op1's FPR, then op2's; as a last reuse option,
// accept the case where op1 and op2 are the same node in the same register
// (canReuse(op1, op2) with matching fprs). Falls back to a fresh allocation.
1325 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1327     , m_fpr(InvalidFPRReg)
1329     if (m_jit->canReuse(op1.node()))
1330         m_fpr = m_jit->reuse(op1.fpr());
1331     else if (m_jit->canReuse(op2.node()))
1332         m_fpr = m_jit->reuse(op2.fpr());
1333     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1334         m_fpr = m_jit->reuse(op1.fpr());
1336         m_fpr = m_jit->fprAllocate();
1339 #if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand may hold an unboxed double in an FPR; reuse it
// when the operand is in double format and its node has no other users.
1340 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1342     , m_fpr(InvalidFPRReg)
1344     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1345         m_fpr = m_jit->reuse(op1.fpr());
1347         m_fpr = m_jit->fprAllocate();
// Fused double-compare-and-branch. If the taken block is the fall-through
// successor, inverts the condition and swaps targets so the common case falls
// through without a jump, then emits branchDouble to 'taken'.
1351 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1353     BasicBlock* taken = branchNode->branchData()->taken.block;
1354     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1356     if (taken == nextBlock()) {
1357         condition = MacroAssembler::invert(condition);
1358         std::swap(taken, notTaken);
1361     SpeculateDoubleOperand op1(this, node->child1());
1362     SpeculateDoubleOperand op2(this, node->child2());
1364     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-equality-and-branch: speculates both children are objects, then
// branches on pointer equality of the two cells.
// If the masquerades-as-undefined watchpoint is valid, only a cheap
// branchIfNotObject type check is needed per child (and only when the abstract
// state can't prove SpecObject). Otherwise each child additionally needs a
// MasqueradesAsUndefined type-info-flags check, since a masquerading object
// would make == semantics observable.
// NOTE(review): listing is elided — the else separating the two regimes and
// the jump(notTaken) tail are not visible here.
1368 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1370     BasicBlock* taken = branchNode->branchData()->taken.block;
1371     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1373     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// Invert and swap targets so the taken block can be the fall-through.
1375     if (taken == nextBlock()) {
1376         condition = MacroAssembler::NotEqual;
1377         BasicBlock* tmp = taken;
1382     SpeculateCellOperand op1(this, node->child1());
1383     SpeculateCellOperand op2(this, node->child2());
1385     GPRReg op1GPR = op1.gpr();
1386     GPRReg op2GPR = op2.gpr();
1388     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1389         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1391                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1393         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1395                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
// Watchpoint invalid: also rule out MasqueradesAsUndefined objects.
1398         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1400                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1401                 m_jit.branchIfNotObject(op1GPR));
1403         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1405                 MacroAssembler::NonZero,
1406                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1407                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1409         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1411                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1412                 m_jit.branchIfNotObject(op2GPR));
1414         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1416                 MacroAssembler::NonZero,
1417                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1418                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Objects compare equal iff they are the same cell pointer.
1421     branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean-compare-and-branch. Uses an immediate operand when either
// child is an Int32 constant, otherwise compares two boolean operands.
1425 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1427     BasicBlock* taken = branchNode->branchData()->taken.block;
1428     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1430     // The branch instruction will branch to the taken block.
1431     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1432     if (taken == nextBlock()) {
1433         condition = JITCompiler::invert(condition);
1434         BasicBlock* tmp = taken;
1439     if (node->child1()->isInt32Constant()) {
1440         int32_t imm = node->child1()->asInt32();
1441         SpeculateBooleanOperand op2(this, node->child2());
1442         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1443     } else if (node->child2()->isInt32Constant()) {
1444         SpeculateBooleanOperand op1(this, node->child1());
1445         int32_t imm = node->child2()->asInt32();
1446         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1448         SpeculateBooleanOperand op1(this, node->child1());
1449         SpeculateBooleanOperand op2(this, node->child2());
1450         branch32(condition, op1.gpr(), op2.gpr(), taken);
// Fused int32-compare-and-branch; identical in structure to the boolean
// variant but speculating Int32 on the non-constant children.
1456 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1458     BasicBlock* taken = branchNode->branchData()->taken.block;
1459     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1461     // The branch instruction will branch to the taken block.
1462     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1463     if (taken == nextBlock()) {
1464         condition = JITCompiler::invert(condition);
1465         BasicBlock* tmp = taken;
1470     if (node->child1()->isInt32Constant()) {
1471         int32_t imm = node->child1()->asInt32();
1472         SpeculateInt32Operand op2(this, node->child2());
1473         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1474     } else if (node->child2()->isInt32Constant()) {
1475         SpeculateInt32Operand op1(this, node->child1());
1476         int32_t imm = node->child2()->asInt32();
1477         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1479         SpeculateInt32Operand op1(this, node->child1());
1480         SpeculateInt32Operand op2(this, node->child2());
1481         branch32(condition, op1.gpr(), op2.gpr(), taken);
1487 // Returns true if the compare is fused with a subsequent branch.
// Central dispatcher for compare-branch fusion: if the next node is a Branch
// consuming this compare, selects the typed peephole codegen by the compare's
// use kinds (Int32, Int52, Double, Boolean/Symbol/Object/Other for CompareEq),
// falling back to the untyped peephole path, then marks children used and
// advances the compile loop past the fused branch.
// NOTE(review): listing is elided — the '#if USE(JSVALUE64)' guarding the Int52
// case and the non-fused 'return false' tail are not visible here.
1488 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1490     // Fused compare & branch.
1491     unsigned branchIndexInBlock = detectPeepHoleBranch();
1492     if (branchIndexInBlock != UINT_MAX) {
1493         Node* branchNode = m_block->at(branchIndexInBlock);
1495         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1496         // so can be no intervening nodes to also reference the compare.
1497         ASSERT(node->adjustedRefCount() == 1);
1499         if (node->isBinaryUseKind(Int32Use))
1500             compilePeepHoleInt32Branch(node, branchNode, condition);
1502         else if (node->isBinaryUseKind(Int52RepUse))
1503             compilePeepHoleInt52Branch(node, branchNode, condition);
1504 #endif // USE(JSVALUE64)
1505         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1506             // Use non-peephole comparison, for now.
1508         } else if (node->isBinaryUseKind(DoubleRepUse))
1509             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1510         else if (node->op() == CompareEq) {
1511             if (node->isBinaryUseKind(BooleanUse))
1512                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1513             else if (node->isBinaryUseKind(SymbolUse))
1514                 compilePeepHoleSymbolEquality(node, branchNode);
1515             else if (node->isBinaryUseKind(ObjectUse))
1516                 compilePeepHoleObjectEquality(node, branchNode);
1517             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1518                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1519             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1520                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
// A child already proven to be null/undefined reduces == to a null/undef test
// on the other child.
1521             else if (!needsTypeCheck(node->child1(), SpecOther))
1522                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1523             else if (!needsTypeCheck(node->child2(), SpecOther))
1524                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1526                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1530             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// Children are consumed here because the fused branch won't do it.
1534         use(node->child1());
1535         use(node->child2());
1536         m_indexInBlock = branchIndexInBlock;
1537         m_currentNode = branchNode;
// Records in the variable-event stream that a node's value has come alive, so
// OSR exit can reconstruct it. Nodes without a virtual register are skipped.
1543 void SpeculativeJIT::noticeOSRBirth(Node* node)
1545     if (!node->hasVirtualRegister())
1548     VirtualRegister virtualRegister = node->virtualRegister();
1549     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1551     info.noticeOSRBirth(*m_stream, node, virtualRegister);
// Compiles a MovHint: no code is generated; it only tells the OSR exit
// machinery that the child's value now logically lives in the hinted local.
1554 void SpeculativeJIT::compileMovHint(Node* node)
1556     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1558     Node* child = node->child1().node();
1559     noticeOSRBirth(child);
1561     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Aborts compilation of the current block: plants an abort trap with the
// reason and the last generated node, and clears register-allocation state.
// m_compileOkay is reset to true so subsequent blocks don't re-trigger bail.
1564 void SpeculativeJIT::bail(AbortReason reason)
1566     if (verboseCompilationEnabled())
1567         dataLog("Bailing compilation.\n");
1568     m_compileOkay = true;
1569     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1570     clearGenerationInfo();
// Generates machine code for m_block: records the block head label, emits an
// abort for CFA-unreachable blocks, replays the variables-at-head state into
// the OSR variable-event stream, then runs the abstract interpreter in
// lockstep with per-node codegen (compile(m_currentNode)), bailing if either
// the abstract state becomes invalid or a node fails to compile.
// NOTE(review): listing is elided — early returns, some braces, and parts of
// the setLocal/verbose-logging statements are not visible here.
1573 void SpeculativeJIT::compileCurrentBlock()
1575     ASSERT(m_compileOkay);
1580     ASSERT(m_block->isReachable);
1582     m_jit.blockHeads()[m_block->index] = m_jit.label();
1584     if (!m_block->intersectionOfCFAHasVisited) {
1585         // Don't generate code for basic blocks that are unreachable according to CFA.
1586         // But to be sure that nobody has generated a jump to this block, drop in a
1588         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1592     m_stream->appendAndLog(VariableEvent::reset());
1594     m_jit.jitAssertHasValidCallFrame();
1595     m_jit.jitAssertTagsInPlace();
1596     m_jit.jitAssertArgumentCountSane();
1599     m_state.beginBasicBlock(m_block);
// Tell the OSR exit machinery where each live local starts out.
1601     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1602         int operand = m_block->variablesAtHead.operandForIndex(i);
1603         Node* node = m_block->variablesAtHead[i];
1605             continue; // No need to record dead SetLocal's.
1607         VariableAccessData* variable = node->variableAccessData();
1609         if (!node->refCount())
1610             continue; // No need to record dead SetLocal's.
1611         format = dataFormatFor(variable->flushFormat());
1612         m_stream->appendAndLog(
1613             VariableEvent::setLocal(
1614                 VirtualRegister(operand),
1615                 variable->machineLocal(),
1619     m_origin = NodeOrigin();
// Main per-node loop: abstract-interpret and generate code node by node.
1621     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1622         m_currentNode = m_block->at(m_indexInBlock);
1624         // We may have hit a contradiction that the CFA was aware of but that the JIT
1625         // didn't cause directly.
1626         if (!m_state.isValid()) {
1627             bail(DFGBailedAtTopOfBlock);
1631         m_interpreter.startExecuting();
1632         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1633         m_jit.setForNode(m_currentNode);
1634         m_origin = m_currentNode->origin;
1635         if (validationEnabled())
1636             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1637         m_lastGeneratedNode = m_currentNode->op();
1639         ASSERT(m_currentNode->shouldGenerate());
1641         if (verboseCompilationEnabled()) {
1643                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1644                 (int)m_currentNode->index(),
1645                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1649         if (Options::validateDFGExceptionHandling() && mayExit(m_jit.graph(), m_currentNode) != DoesNotExit)
1650             m_jit.jitReleaseAssertNoException();
1652         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1654         compile(m_currentNode);
1656         if (belongsInMinifiedGraph(m_currentNode->op()))
1657             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1659 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1660         m_jit.clearRegisterAllocationOffsets();
1663         if (!m_compileOkay) {
1664             bail(DFGBailedAtEndOfNode);
1668         // Make sure that the abstract state is rematerialized for the next node.
1669         m_interpreter.executeEffects(m_indexInBlock);
1672     // Perform the most basic verification that children have been used correctly.
1673     if (!ASSERT_DISABLED) {
1674         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1675             GenerationInfo& info = m_generationInfo[index];
1676             RELEASE_ASSERT(!info.alive());
1681 // If we are making type predictions about our arguments then
1682 // we need to check that they are correct on function entry.
// For each parameter with a non-JSValue flush format, emits a speculation
// check against the argument slot on the stack. The two switch bodies below
// are the 64-bit (tagTypeNumber/tagMask register) and 32-bit (explicit tag
// word) encodings of the same checks; the '#if USE(JSVALUE64)/#else' that
// separates them is elided from this listing.
1683 void SpeculativeJIT::checkArgumentTypes()
1685     ASSERT(!m_currentNode);
1686     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1688     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1689         Node* node = m_jit.graph().m_arguments[i];
1691             // The argument is dead. We don't do any checks for such arguments.
1695         ASSERT(node->op() == SetArgument);
1696         ASSERT(node->shouldGenerate());
1698         VariableAccessData* variableAccessData = node->variableAccessData();
1699         FlushFormat format = variableAccessData->flushFormat();
// FlushedJSValue can hold anything; no check needed.
1701         if (format == FlushedJSValue)
1704         VirtualRegister virtualRegister = variableAccessData->local();
1706         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// 64-bit encoding: int32 values compare >= tagTypeNumber.
1710         case FlushedInt32: {
1711             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1714         case FlushedBoolean: {
1715             GPRTemporary temp(this);
1716             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
// XOR with ValueFalse maps false->0, true->1; any other bits mean non-boolean.
1717             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1718             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
// Cell check: no tag-mask bits set.
1722             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1726             RELEASE_ASSERT_NOT_REACHED();
// 32-bit encoding: compare the explicit tag word of the argument slot.
1731         case FlushedInt32: {
1732             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1735         case FlushedBoolean: {
1736             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1740             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1744             RELEASE_ASSERT_NOT_REACHED();
1750     m_origin = NodeOrigin();
// Top-level driver: checks argument type speculations once at entry, then
// compiles every block of the graph in index order.
1753 bool SpeculativeJIT::compile()
1755     checkArgumentTypes();
1757     ASSERT(!m_currentNode);
1758     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1759         m_jit.setForBlockIndex(blockIndex);
1760         m_block = m_jit.graph().block(blockIndex);
1761         compileCurrentBlock();
// Collects the code label of every OSR-target block's head into
// m_osrEntryHeads (in block-index order), for linkOSREntries to consume.
1767 void SpeculativeJIT::createOSREntries()
1769     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1770         BasicBlock* block = m_jit.graph().block(blockIndex);
1773         if (!block->isOSRTarget)
1776         // Currently we don't have OSR entry trampolines. We could add them
1778         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// After final link, resolves each recorded OSR entry head against the link
// buffer and registers it with the JIT. The entry heads must pair 1:1 with
// OSR-target blocks, in the same order createOSREntries appended them.
1782 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1784     unsigned osrEntryIndex = 0;
1785     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1786         BasicBlock* block = m_jit.graph().block(blockIndex);
1789         if (!block->isOSRTarget)
1791         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1793     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1795     if (verboseCompilationEnabled()) {
1796         DumpContext dumpContext;
1797         dataLog("OSR Entries:\n");
1798         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1799             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1800         if (!dumpContext.isEmpty())
1801             dumpContext.dump(WTF::dataFile());
// Stores a double into a contiguous-doubles butterfly. Speculates the value is
// a full real number (impure NaN would corrupt the NaN-boxing invariant),
// then either stores directly (PutByValAlias: bounds already known good) or
// performs an in-bounds/grow/out-of-bounds dance with a slow-path call to
// operationPutDoubleByValBeyondArrayBounds{Strict,NonStrict}.
// NOTE(review): listing is elided — some braces and the slowPathCall wrapper
// around the out-of-bounds generator are not visible here.
1805 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1807     Edge child3 = m_jit.graph().varArgChild(node, 2);
1808     Edge child4 = m_jit.graph().varArgChild(node, 3);
1810     ArrayMode arrayMode = node->arrayMode();
1812     GPRReg baseReg = base.gpr();
1813     GPRReg propertyReg = property.gpr();
1815     SpeculateDoubleOperand value(this, child3);
1817     FPRReg valueReg = value.fpr();
// DoubleNotEqualOrUnordered against itself == NaN check.
1820         JSValueRegs(), child3, SpecFullRealNumber,
1822             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1827     StorageOperand storage(this, child4);
1828     GPRReg storageReg = storage.gpr();
1830     if (node->op() == PutByValAlias) {
1831         // Store the value to the array.
1832         GPRReg propertyReg = property.gpr();
1833         FPRReg valueReg = value.fpr();
1834         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1836         noResult(m_currentNode);
1840     GPRTemporary temporary;
1841     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1843     MacroAssembler::Jump slowCase;
1845     if (arrayMode.isInBounds()) {
// InBounds mode: index >= publicLength is a speculation failure, not growth.
1847             OutOfBounds, JSValueRegs(), 0,
1848             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1850         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
// Beyond vectorLength: must take the slow path (possible reallocation).
1852         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1854         if (!arrayMode.isOutOfBounds())
1855             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Within vectorLength but past publicLength: bump publicLength to index+1.
1857         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1858         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1860         inBounds.link(&m_jit);
1863     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1870     if (arrayMode.isOutOfBounds()) {
1871         addSlowPathGenerator(
1874                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1875                 NoResult, baseReg, propertyReg, valueReg));
1878     noResult(m_currentNode, UseChildrenCalledExplicitly);
// Compiles String.prototype.charCodeAt with a proven-in-bounds index: bounds
// check against JSString::length, then load the character as an int32 from
// either the 8-bit or 16-bit string storage depending on the StringImpl flags.
1881 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1883     SpeculateCellOperand string(this, node->child1());
1884     SpeculateStrictInt32Operand index(this, node->child2());
1885     StorageOperand storage(this, node->child3());
1887     GPRReg stringReg = string.gpr();
1888     GPRReg indexReg = index.gpr();
1889     GPRReg storageReg = storage.gpr();
1891     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1893     // unsigned comparison so we can filter out negative indices and indices that are too large
1894     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1896     GPRTemporary scratch(this);
1897     GPRReg scratchReg = scratch.gpr();
// scratchReg first holds the StringImpl*, then is overwritten with the char.
1899     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1901     // Load the character into scratchReg
1902     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1904     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1905     JITCompiler::Jump cont8Bit = m_jit.jump();
1907     is16Bit.link(&m_jit);
1909     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1911     cont8Bit.link(&m_jit);
1913     int32Result(scratchReg, m_currentNode);
// Compiles string[index]: loads the character (8- or 16-bit path) and maps it
// to a singleton JSString via VM::smallStrings; characters >= 0x100 take a
// slow path (operationSingleCharacterString). Out-of-bounds handling depends
// on whether the string prototype chain is "sane" (no indexed accessors): if
// so, an out-of-bounds read can simply produce undefined via the sane-chain
// slow path; otherwise it must call operationGetByValStringInt.
// NOTE(review): listing is elided — several #else/#endif arms of the
// JSVALUE64/JSVALUE32_64 splits and some slowPathCall wrappers are missing.
1916 void SpeculativeJIT::compileGetByValOnString(Node* node)
1918     SpeculateCellOperand base(this, node->child1());
1919     SpeculateStrictInt32Operand property(this, node->child2());
1920     StorageOperand storage(this, node->child3());
1921     GPRReg baseReg = base.gpr();
1922     GPRReg propertyReg = property.gpr();
1923     GPRReg storageReg = storage.gpr();
1925     GPRTemporary scratch(this);
1926     GPRReg scratchReg = scratch.gpr();
1927 #if USE(JSVALUE32_64)
// 32-bit: the out-of-bounds result is a full JSValue, so a tag register is
// needed; it is allocated lazily only when out-of-bounds is possible.
1928     GPRTemporary resultTag;
1929     GPRReg resultTagReg = InvalidGPRReg;
1930     if (node->arrayMode().isOutOfBounds()) {
1931         GPRTemporary realResultTag(this);
1932         resultTag.adopt(realResultTag);
1933         resultTagReg = resultTag.gpr();
1937     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1939     // unsigned comparison so we can filter out negative indices and indices that are too large
1940     JITCompiler::Jump outOfBounds = m_jit.branch32(
1941         MacroAssembler::AboveOrEqual, propertyReg,
1942         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1943     if (node->arrayMode().isInBounds())
1944         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1946     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1948     // Load the character into scratchReg
1949     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1951     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1952     JITCompiler::Jump cont8Bit = m_jit.jump();
1954     is16Bit.link(&m_jit);
1956     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Only chars < 0x100 have precomputed singleton strings.
1958     JITCompiler::Jump bigCharacter =
1959         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1961     // 8 bit string values don't need the isASCII check.
1962     cont8Bit.link(&m_jit);
// Index into smallStrings.singleCharacterStrings(): char * sizeof(void*).
1964     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1965     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1966     m_jit.loadPtr(scratchReg, scratchReg);
1968     addSlowPathGenerator(
1970             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1972     if (node->arrayMode().isOutOfBounds()) {
1973 #if USE(JSVALUE32_64)
1974         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1977         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1978         bool prototypeChainIsSane = false;
1979         if (globalObject->stringPrototypeChainIsSane()) {
1980             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1981             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1982             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1983             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1984             // indexed properties either.
1985             // https://bugs.webkit.org/show_bug.cgi?id=144668
1986             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1987             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
// Re-read after installing watchpoints: the chain could have been mutated.
1988             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
1990         if (prototypeChainIsSane) {
1991             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1992             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1995             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1996                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1998             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1999                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2000                 baseReg, propertyReg));
2004             addSlowPathGenerator(
2006                     outOfBounds, this, operationGetByValStringInt,
2007                     scratchReg, baseReg, propertyReg));
2009             addSlowPathGenerator(
2011                     outOfBounds, this, operationGetByValStringInt,
2012                     resultTagReg, scratchReg, baseReg, propertyReg));
2017         jsValueResult(scratchReg, m_currentNode);
2019         jsValueResult(resultTagReg, scratchReg, m_currentNode);
// In-bounds-only mode: the result is always a cell (a JSString).
2022     cellResult(scratchReg, m_currentNode);
// Compiles String.fromCharCode with a single argument. UntypedUse: plain call
// to operationStringFromCharCodeUntyped. Int32 path: look up the singleton
// string in smallStrings for codes <= 0xff; larger codes or a not-yet-created
// singleton (null entry) go to the operationStringFromCharCode slow path.
2025 void SpeculativeJIT::compileFromCharCode(Node* node)
2027     Edge& child = node->child1();
2028     if (child.useKind() == UntypedUse) {
2029         JSValueOperand opr(this, child);
2030         JSValueRegs oprRegs = opr.jsValueRegs();
// 64-bit vs 32-bit result register setup (the #if lines are elided here).
2032         GPRTemporary result(this);
2033         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2035         GPRTemporary resultTag(this);
2036         GPRTemporary resultPayload(this);
2037         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2040         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2041         m_jit.exceptionCheck();
2043         jsValueResult(resultRegs, node);
2047     SpeculateStrictInt32Operand property(this, child);
2048     GPRReg propertyReg = property.gpr();
2049     GPRTemporary smallStrings(this);
2050     GPRTemporary scratch(this);
2051     GPRReg scratchReg = scratch.gpr();
2052     GPRReg smallStringsReg = smallStrings.gpr();
2054     JITCompiler::JumpList slowCases;
2055     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2056     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2057     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null table entry means the singleton hasn't been materialized yet.
2059     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2060     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2061     cellResult(scratchReg, m_currentNode);
// Classifies how a node's value is currently materialized, for ToInt32
// codegen: already an integer, a JSValue needing dynamic dispatch, or a
// definitely-wrong format (boolean/cell) that terminates speculation.
2064 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2066     VirtualRegister virtualRegister = node->virtualRegister();
2067     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2069     switch (info.registerFormat()) {
2070     case DataFormatStorage:
// Storage pointers should never flow into ToInt32.
2071         RELEASE_ASSERT_NOT_REACHED();
2073     case DataFormatBoolean:
2074     case DataFormatCell:
// These can never convert; kill this speculative path.
2075         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2076         return GeneratedOperandTypeUnknown;
2078     case DataFormatNone:
2079     case DataFormatJSCell:
2081     case DataFormatJSBoolean:
2082     case DataFormatJSDouble:
2083         return GeneratedOperandJSValue;
2085     case DataFormatJSInt32:
2086     case DataFormatInt32:
2087         return GeneratedOperandInteger;
2090         RELEASE_ASSERT_NOT_REACHED();
2091         return GeneratedOperandTypeUnknown;
// Lowers the ValueToInt32 node: converts the child value to an int32 following
// the ECMAScript ToInt32 semantics, choosing a fast path based on the child's
// use kind and generated format. NOTE(review): this view elides several
// structural lines (case labels, #if USE(JSVALUE64) guards, braces); the code
// below contains both the 64-bit (single-GPR JSValue) and 32-bit (tag/payload)
// variants of the generic path.
2095 void SpeculativeJIT::compileValueToInt32(Node* node)
2097 switch (node->child1().useKind()) {
// Int52 input (64-bit only, per the #endif below): truncation is just a
// zero-extension of the low 32 bits.
2100 SpeculateStrictInt52Operand op1(this, node->child1());
2101 GPRTemporary result(this, Reuse, op1);
2102 GPRReg op1GPR = op1.gpr();
2103 GPRReg resultGPR = result.gpr();
2104 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2105 int32Result(resultGPR, node, DataFormatInt32);
2108 #endif // USE(JSVALUE64)
// Double input: try an in-line truncation; fall back to the toInt32 C helper
// on failure. The helper cannot throw, hence CheckNotNeeded.
2110 case DoubleRepUse: {
2111 GPRTemporary result(this);
2112 SpeculateDoubleOperand op1(this, node->child1());
2113 FPRReg fpr = op1.fpr();
2114 GPRReg gpr = result.gpr();
2115 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2117 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2119 int32Result(gpr, node);
// Generic path: dispatch on how the operand was generated.
2125 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
// Already an int32 — just move it into the result register.
2126 case GeneratedOperandInteger: {
2127 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2128 GPRTemporary result(this, Reuse, op1);
2129 m_jit.move(op1.gpr(), result.gpr());
2130 int32Result(result.gpr(), node, op1.format());
// Boxed JSValue — 64-bit variant first (NaN-boxed encoding, single GPR).
2133 case GeneratedOperandJSValue: {
2134 GPRTemporary result(this);
2136 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2138 GPRReg gpr = op1.gpr();
2139 GPRReg resultGpr = result.gpr();
2140 FPRTemporary tempFpr(this);
2141 FPRReg fpr = tempFpr.fpr();
// Values >= tagTypeNumber are boxed int32s in the 64-bit encoding.
2143 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2144 JITCompiler::JumpList converted;
// NumberUse: speculate the value is a number; otherwise handle booleans too.
2146 if (node->child1().useKind() == NumberUse) {
2148 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2150 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2152 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2155 JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2157 // It's not a cell: so true turns into 1 and all else turns into 0.
2158 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2159 converted.append(m_jit.jump());
2161 isNumber.link(&m_jit);
2164 // First, if we get here we have a double encoded as a JSValue
2165 unboxDouble(gpr, resultGpr, fpr);
// Double -> int32 via the C helper, with all live registers spilled around the call.
2167 silentSpillAllRegisters(resultGpr);
2168 callOperation(toInt32, resultGpr, fpr);
2169 silentFillAllRegisters(resultGpr);
2171 converted.append(m_jit.jump());
2173 isInteger.link(&m_jit);
2174 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2176 converted.link(&m_jit);
// 32-bit (tag/payload) variant of the boxed-JSValue path starts here.
2178 Node* childNode = node->child1().node();
2179 VirtualRegister virtualRegister = childNode->virtualRegister();
2180 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2182 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2184 GPRReg payloadGPR = op1.payloadGPR();
2185 GPRReg resultGpr = result.gpr();
2187 JITCompiler::JumpList converted;
// If the format already proves int32, the payload is the answer.
2189 if (info.registerFormat() == DataFormatJSInt32)
2190 m_jit.move(payloadGPR, resultGpr);
2192 GPRReg tagGPR = op1.tagGPR();
2193 FPRTemporary tempFpr(this);
2194 FPRReg fpr = tempFpr.fpr();
2195 FPRTemporary scratch(this);
2197 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2199 if (node->child1().useKind() == NumberUse) {
2201 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2203 MacroAssembler::AboveOrEqual, tagGPR,
2204 TrustedImm32(JSValue::LowestTag)));
// Tags below LowestTag denote doubles in the 32-bit encoding.
2206 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2209 op1.jsValueRegs(), node->child1(), ~SpecCell,
2210 m_jit.branchIfCell(op1.jsValueRegs()));
2212 // It's not a cell: so true turns into 1 and all else turns into 0.
2213 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2214 m_jit.move(TrustedImm32(0), resultGpr);
2215 converted.append(m_jit.jump());
2217 isBoolean.link(&m_jit);
// Boolean payload is already 0 or 1.
2218 m_jit.move(payloadGPR, resultGpr);
2219 converted.append(m_jit.jump());
2221 isNumber.link(&m_jit);
2224 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2226 silentSpillAllRegisters(resultGpr);
2227 callOperation(toInt32, resultGpr, fpr);
2228 silentFillAllRegisters(resultGpr);
2230 converted.append(m_jit.jump());
2232 isInteger.link(&m_jit);
2233 m_jit.move(payloadGPR, resultGpr);
2235 converted.link(&m_jit);
2238 int32Result(resultGpr, node);
// Unknown format means speculation already failed — compilation must be dead.
2241 case GeneratedOperandTypeUnknown:
2242 RELEASE_ASSERT(!m_compileOkay);
2245 RELEASE_ASSERT_NOT_REACHED();
2250 ASSERT(!m_compileOkay);
2255 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2257 if (doesOverflow(node->arithMode())) {
2258 if (enableInt52()) {
2259 SpeculateInt32Operand op1(this, node->child1());
2260 GPRTemporary result(this, Reuse, op1);
2261 m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2262 strictInt52Result(result.gpr(), node);
2265 SpeculateInt32Operand op1(this, node->child1());
2266 FPRTemporary result(this);
2268 GPRReg inputGPR = op1.gpr();
2269 FPRReg outputFPR = result.fpr();
2271 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2273 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2274 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2275 positive.link(&m_jit);
2277 doubleResult(outputFPR, node);
2281 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2283 SpeculateInt32Operand op1(this, node->child1());
2284 GPRTemporary result(this);
2286 m_jit.move(op1.gpr(), result.gpr());
2288 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2290 int32Result(result.gpr(), node, op1.format());
// Lowers DoubleAsInt32: converts a double to an int32, OSR-exiting if the
// conversion is lossy (and, per arithMode, on negative zero).
2293 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2295 SpeculateDoubleOperand op1(this, node->child1());
2296 FPRTemporary scratch(this);
2297 GPRTemporary result(this);
2299 FPRReg valueFPR = op1.fpr();
2300 FPRReg scratchFPR = scratch.fpr();
2301 GPRReg resultGPR = result.gpr();
2303 JITCompiler::JumpList failureCases;
// This node only exists in checked form.
2304 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2305 m_jit.branchConvertDoubleToInt32(
2306 valueFPR, resultGPR, failureCases, scratchFPR,
2307 shouldCheckNegativeZero(node->arithMode()));
// Any failure (inexact, out of range, or -0 when checked) triggers an OSR exit.
2308 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2310 int32Result(resultGPR, node);
// Lowers DoubleRep: produces the double representation of the child value.
// Handles RealNumberUse (number with no impure NaN), NumberUse/NotCellUse
// (generic JSValue with optional undefined/null/boolean coercion), and Int52.
// NOTE(review): this view elides many structural lines (case labels,
// #if USE(JSVALUE64) guards, braces); both the 64-bit and 32-bit JSValue
// decodings appear below.
2313 void SpeculativeJIT::compileDoubleRep(Node* node)
2315 switch (node->child1().useKind()) {
2316 case RealNumberUse: {
2317 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2318 FPRTemporary result(this);
2320 JSValueRegs op1Regs = op1.jsValueRegs();
2321 FPRReg resultFPR = result.fpr();
// 64-bit: unbox without asserting it is a double; NaN check below validates.
2324 GPRTemporary temp(this);
2325 GPRReg tempGPR = temp.gpr();
2326 m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
// 32-bit: unbox via tag/payload.
2328 FPRTemporary temp(this);
2329 FPRReg tempFPR = temp.fpr();
2330 unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
// If the unboxed value compares equal to itself it was a real (non-NaN)
// double; otherwise fall through to check for an int32.
2333 JITCompiler::Jump done = m_jit.branchDouble(
2334 JITCompiler::DoubleEqual, resultFPR, resultFPR);
2337 op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2338 m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2342 doubleResult(resultFPR, node);
2348 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
// If abstract interpretation proves int32, a straight conversion suffices.
2350 SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2351 if (isInt32Speculation(possibleTypes)) {
2352 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2353 FPRTemporary result(this);
2354 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2355 doubleResult(result.fpr(), node);
// Generic JSValue path — 64-bit NaN-boxed variant first.
2359 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2360 FPRTemporary result(this);
2363 GPRTemporary temp(this);
2365 GPRReg op1GPR = op1.gpr();
2366 GPRReg tempGPR = temp.gpr();
2367 FPRReg resultFPR = result.fpr();
2368 JITCompiler::JumpList done;
2370 JITCompiler::Jump isInteger = m_jit.branch64(
2371 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
// NotCellUse additionally coerces undefined -> NaN, null/false -> 0, true -> 1.
2373 if (node->child1().useKind() == NotCellUse) {
2374 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2375 JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2377 static const double zero = 0;
2378 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2380 JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2381 done.append(isNull);
// Anything else non-boolean at this point must not be a cell.
2383 DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2384 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2386 JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2387 static const double one = 1;
2388 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2389 done.append(m_jit.jump());
2390 done.append(isFalse);
2392 isUndefined.link(&m_jit);
2393 static const double NaN = PNaN;
2394 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2395 done.append(m_jit.jump());
2397 isNumber.link(&m_jit);
2398 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2400 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2401 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2404 unboxDouble(op1GPR, tempGPR, resultFPR);
2405 done.append(m_jit.jump());
2407 isInteger.link(&m_jit);
2408 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2410 #else // USE(JSVALUE64) -> this is the 32_64 case
2411 FPRTemporary temp(this);
2413 GPRReg op1TagGPR = op1.tagGPR();
2414 GPRReg op1PayloadGPR = op1.payloadGPR();
2415 FPRReg tempFPR = temp.fpr();
2416 FPRReg resultFPR = result.fpr();
2417 JITCompiler::JumpList done;
2419 JITCompiler::Jump isInteger = m_jit.branch32(
2420 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2422 if (node->child1().useKind() == NotCellUse) {
2423 JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2424 JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2426 static const double zero = 0;
2427 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2429 JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2430 done.append(isNull);
2432 DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
// Boolean: payload bit selects 0.0 (false, already loaded) or 1.0 (true).
2434 JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2435 static const double one = 1;
2436 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2437 done.append(m_jit.jump());
2438 done.append(isFalse);
2440 isUndefined.link(&m_jit);
2441 static const double NaN = PNaN;
2442 m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2443 done.append(m_jit.jump());
2445 isNumber.link(&m_jit);
2446 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2448 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2449 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2452 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2453 done.append(m_jit.jump());
2455 isInteger.link(&m_jit);
2456 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2458 #endif // USE(JSVALUE64)
2460 doubleResult(resultFPR, node);
// Int52 input: a direct 64-bit integer -> double conversion.
2466 SpeculateStrictInt52Operand value(this, node->child1());
2467 FPRTemporary result(this);
2469 GPRReg valueGPR = value.gpr();
2470 FPRReg resultFPR = result.fpr();
2472 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2474 doubleResult(resultFPR, node);
2477 #endif // USE(JSVALUE64)
2480 RELEASE_ASSERT_NOT_REACHED();
// Lowers ValueRep: boxes a machine-level representation (double or Int52) back
// into a JSValue.
2485 void SpeculativeJIT::compileValueRep(Node* node)
2487 switch (node->child1().useKind()) {
2488 case DoubleRepUse: {
2489 SpeculateDoubleOperand value(this, node->child1());
2490 JSValueRegsTemporary result(this);
2492 FPRReg valueFPR = value.fpr();
2493 JSValueRegs resultRegs = result.regs();
2495 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2496 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2497 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2498 // local was purified.
2499 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2500 m_jit.purifyNaN(valueFPR);
2502 boxDouble(valueFPR, resultRegs);
2504 jsValueResult(resultRegs, node);
// Int52 input (64-bit only, per the #endif below): box via boxInt52.
2510 SpeculateStrictInt52Operand value(this, node->child1());
2511 GPRTemporary result(this);
2513 GPRReg valueGPR = value.gpr();
2514 GPRReg resultGPR = result.gpr();
2516 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2518 jsValueResult(resultGPR, node);
2521 #endif // USE(JSVALUE64)
2524 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the byte range — presumably [0, 255] with rounding, as
// used for Uint8ClampedArray stores; the body is not visible in this view, so
// confirm against the definition. NOTE(review): TODO verify semantics.
2529 static double clampDoubleToByte(double d)
// Clamps the int32 in `result` to [0, 255] in place (for clamped byte stores):
// negative values become 0, values above 255 become 255.
2539 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2541 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2542 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
// Out of range and not > 255 means negative: clamp to 0.
2543 jit.xorPtr(result, result);
2544 MacroAssembler::Jump clamped = jit.jump();
2546 jit.move(JITCompiler::TrustedImm32(255), result);
2548 inBounds.link(&jit);
// Clamps a double in `source` to a byte value in `result`: NaN and values
// <= 0 become 0, values > 255 become 255, otherwise rounds (add 0.5, truncate).
2551 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2553 // Unordered compare so we pick up NaN
2554 static const double zero = 0;
2555 static const double byteMax = 255;
2556 static const double half = 0.5;
2557 jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2558 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2559 jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2560 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2562 jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2563 // FIXME: This should probably just use a floating point round!
2564 // https://bugs.webkit.org/show_bug.cgi?id=72054
2565 jit.addDouble(source, scratch);
2566 jit.truncateDoubleToInt32(scratch, result);
2567 MacroAssembler::Jump truncatedInt = jit.jump();
2569 tooSmall.link(&jit);
2570 jit.xorPtr(result, result);
2571 MacroAssembler::Jump zeroed = jit.jump();
2574 jit.move(JITCompiler::TrustedImm32(255), result);
2576 truncatedInt.link(&jit);
2581 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2583 if (node->op() == PutByValAlias)
2584 return JITCompiler::Jump();
2585 JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2586 m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2588 uint32_t length = view->length();
2589 Node* indexNode = m_jit.graph().child(node, 1).node();
2590 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2591 return JITCompiler::Jump();
2592 return m_jit.branch32(
2593 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2595 return m_jit.branch32(
2596 MacroAssembler::AboveOrEqual, indexGPR,
2597 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a speculative (OSR-exiting) typed-array bounds check; a no-op when the
// check was statically elided.
2600 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2602 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2605 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2608 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2610 ASSERT(isInt(type));
2612 SpeculateCellOperand base(this, node->child1());
2613 SpeculateStrictInt32Operand property(this, node->child2());
2614 StorageOperand storage(this, node->child3());
2616 GPRReg baseReg = base.gpr();
2617 GPRReg propertyReg = property.gpr();
2618 GPRReg storageReg = storage.gpr();
2620 GPRTemporary result(this);
2621 GPRReg resultReg = result.gpr();
2623 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2625 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2626 switch (elementSize(type)) {
2629 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2631 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2635 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2637 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2640 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2645 if (elementSize(type) < 4 || isSigned(type)) {
2646 int32Result(resultReg, node);
2650 ASSERT(elementSize(type) == 4 && !isSigned(type));
2651 if (node->shouldSpeculateInt32()) {
2652 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2653 int32Result(resultReg, node);
2658 if (node->shouldSpeculateAnyInt()) {
2659 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2660 strictInt52Result(resultReg, node);
2665 FPRTemporary fresult(this);
2666 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2667 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2668 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2669 positive.link(&m_jit);
2670 doubleResult(fresult.fpr(), node);
// Lowers PutByVal on an integer typed array: materializes the value as an
// int32 GPR (constant-folding, clamping for Uint8Clamped, double truncation,
// or Int52 clamp as needed), then bounds-checks and stores with the element
// width. Out-of-bounds stores are silently skipped unless the array mode is
// in-bounds, in which case they OSR-exit. NOTE(review): several case labels
// and #if guards are elided in this view.
2673 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2675 ASSERT(isInt(type));
2677 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2678 GPRReg storageReg = storage.gpr();
2680 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2683 GPRReg valueGPR = InvalidGPRReg;
// Constant value: fold the clamp/ToInt32 at compile time.
2685 if (valueUse->isConstant()) {
2686 JSValue jsValue = valueUse->asJSValue();
2687 if (!jsValue.isNumber()) {
2688 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2692 double d = jsValue.asNumber();
2693 if (isClamped(type)) {
2694 ASSERT(elementSize(type) == 1);
2695 d = clampDoubleToByte(d);
2697 GPRTemporary scratch(this);
2698 GPRReg scratchReg = scratch.gpr();
2699 m_jit.move(Imm32(toInt32(d)), scratchReg);
2700 value.adopt(scratch);
2701 valueGPR = scratchReg;
2703 switch (valueUse.useKind()) {
// Int32 value: copy and clamp to byte range if this is a clamped array.
2705 SpeculateInt32Operand valueOp(this, valueUse);
2706 GPRTemporary scratch(this);
2707 GPRReg scratchReg = scratch.gpr();
2708 m_jit.move(valueOp.gpr(), scratchReg);
2709 if (isClamped(type)) {
2710 ASSERT(elementSize(type) == 1);
2711 compileClampIntegerToByte(m_jit, scratchReg);
2713 value.adopt(scratch);
2714 valueGPR = scratchReg;
// Int52 value (64-bit only): inline 64-bit clamp to [0, 255] when clamped.
2720 SpeculateStrictInt52Operand valueOp(this, valueUse);
2721 GPRTemporary scratch(this);
2722 GPRReg scratchReg = scratch.gpr();
2723 m_jit.move(valueOp.gpr(), scratchReg);
2724 if (isClamped(type)) {
2725 ASSERT(elementSize(type) == 1);
2726 MacroAssembler::Jump inBounds = m_jit.branch64(
2727 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2728 MacroAssembler::Jump tooBig = m_jit.branch64(
2729 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2730 m_jit.move(TrustedImm32(0), scratchReg);
2731 MacroAssembler::Jump clamped = m_jit.jump();
2732 tooBig.link(&m_jit);
2733 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2734 clamped.link(&m_jit);
2735 inBounds.link(&m_jit);
2737 value.adopt(scratch);
2738 valueGPR = scratchReg;
2741 #endif // USE(JSVALUE64)
2743 case DoubleRepUse: {
// Clamped byte store from a double: round-and-clamp helper does it all.
2744 if (isClamped(type)) {
2745 ASSERT(elementSize(type) == 1);
2746 SpeculateDoubleOperand valueOp(this, valueUse);
2747 GPRTemporary result(this);
2748 FPRTemporary floatScratch(this);
2749 FPRReg fpr = valueOp.fpr();
2750 GPRReg gpr = result.gpr();
2751 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2752 value.adopt(result);
// Unclamped double: NaN stores 0; otherwise truncate, with toInt32 slow path.
2755 SpeculateDoubleOperand valueOp(this, valueUse);
2756 GPRTemporary result(this);
2757 FPRReg fpr = valueOp.fpr();
2758 GPRReg gpr = result.gpr();
2759 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2760 m_jit.xorPtr(gpr, gpr);
2761 MacroAssembler::Jump fixed = m_jit.jump();
2762 notNaN.link(&m_jit);
2764 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2765 fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2767 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2770 value.adopt(result);
2777 RELEASE_ASSERT_NOT_REACHED();
// The value register must not alias base/property/storage.
2782 ASSERT_UNUSED(valueGPR, valueGPR != property);
2783 ASSERT(valueGPR != base);
2784 ASSERT(valueGPR != storageReg);
2785 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
// In-bounds mode: out-of-bounds is a speculation failure, not a skipped store.
2786 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2787 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2788 outOfBounds = MacroAssembler::Jump();
2791 switch (elementSize(type)) {
2793 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2796 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2799 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
// Out-of-bounds (non-speculative) stores jump here and do nothing.
2804 if (outOfBounds.isSet())
2805 outOfBounds.link(&m_jit);
// Lowers GetByVal on a float typed array: bounds-check, load a float32 or
// float64 element, and produce a double result (float32 is widened).
2809 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2811 ASSERT(isFloat(type));
2813 SpeculateCellOperand base(this, node->child1());
2814 SpeculateStrictInt32Operand property(this, node->child2());
2815 StorageOperand storage(this, node->child3());
2817 GPRReg baseReg = base.gpr();
2818 GPRReg propertyReg = property.gpr();
2819 GPRReg storageReg = storage.gpr();
2821 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2823 FPRTemporary result(this);
2824 FPRReg resultReg = result.fpr();
2825 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2826 switch (elementSize(type)) {
// Float32Array: load then widen to double in place.
2828 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2829 m_jit.convertFloatToDouble(resultReg, resultReg);
2832 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2836 RELEASE_ASSERT_NOT_REACHED();
2839 doubleResult(resultReg, node);
// Lowers PutByVal on a float typed array: bounds-check (OSR-exit in in-bounds
// mode, otherwise silently skip), then store as float32 (narrowed) or float64.
2842 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2844 ASSERT(isFloat(type));
2846 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2847 GPRReg storageReg = storage.gpr();
2849 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2850 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2852 SpeculateDoubleOperand valueOp(this, valueUse);
2853 FPRTemporary scratch(this);
2854 FPRReg valueFPR = valueOp.fpr();
2855 FPRReg scratchFPR = scratch.fpr();
2857 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2859 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2860 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2861 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2862 outOfBounds = MacroAssembler::Jump();
2865 switch (elementSize(type)) {
// Float32Array: narrow to float in a scratch FPR before storing.
2867 m_jit.moveDouble(valueFPR, scratchFPR);
2868 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2869 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2873 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2876 RELEASE_ASSERT_NOT_REACHED();
2878 if (outOfBounds.isSet())
2879 outOfBounds.link(&m_jit);
// Emits the core `instanceof` check for a cell value: walks the value's
// prototype chain comparing against prototypeReg, producing a boxed boolean in
// scratchReg. Proxy objects bail to operationDefaultHasInstance. NOTE(review):
// the #if USE(JSVALUE64) guards separating the boxed/unboxed boolean encodings
// are elided in this view.
2883 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2885 // Check that prototype is an object.
2886 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2888 // Initialize scratchReg with the value being checked.
2889 m_jit.move(valueReg, scratchReg);
2891 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2892 MacroAssembler::Label loop(&m_jit);
// Proxies have custom hasInstance behavior; defer to the runtime.
2893 MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2894 MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2895 m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2896 m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2897 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
// Continue while the prototype is still a cell (64-bit) / non-null (32-bit).
2899 m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2901 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2904 // No match - result is false.
2906 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2908 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2910 MacroAssembler::JumpList doneJumps;
2911 doneJumps.append(m_jit.jump());
2913 performDefaultHasInstance.link(&m_jit);
2914 silentSpillAllRegisters(scratchReg);
2915 callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg);
2916 silentFillAllRegisters(scratchReg);
2917 m_jit.exceptionCheck();
// Re-box the runtime's raw boolean result (64-bit encoding).
2919 m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2921 doneJumps.append(m_jit.jump());
2923 isInstance.link(&m_jit);
2925 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2927 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2930 doneJumps.link(&m_jit);
// Lowers CheckTypeInfoFlags: OSR-exits unless the cell's type-info flags have
// all the bits named by the node's typeInfoOperand set.
2933 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2935 SpeculateCellOperand base(this, node->child1());
2937 GPRReg baseGPR = base.gpr();
2939 speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
// Lowers InstanceOf: with an UntypedUse child, non-cell values short-circuit to
// false; otherwise both operands are speculated as cells and the prototype-walk
// helper does the work.
2944 void SpeculativeJIT::compileInstanceOf(Node* node)
2946 if (node->child1().useKind() == UntypedUse) {
2947 // It might not be a cell. Speculate less aggressively.
2948 // Or: it might only be used once (i.e. by us), so we get zero benefit
2949 // from speculating any more aggressively than we absolutely need to.
2951 JSValueOperand value(this, node->child1());
2952 SpeculateCellOperand prototype(this, node->child2());
2953 GPRTemporary scratch(this);
2954 GPRTemporary scratch2(this);
2956 GPRReg prototypeReg = prototype.gpr();
2957 GPRReg scratchReg = scratch.gpr();
2958 GPRReg scratch2Reg = scratch2.gpr();
// A non-cell LHS is never an instance of anything: result is false.
2960 MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2961 GPRReg valueReg = value.jsValueRegs().payloadGPR();
2962 moveFalseTo(scratchReg);
2964 MacroAssembler::Jump done = m_jit.jump();
2966 isCell.link(&m_jit);
2968 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2972 blessedBooleanResult(scratchReg, node);
// Cell-speculated path: both operands are known cells.
2976 SpeculateCellOperand value(this, node->child1());
2977 SpeculateCellOperand prototype(this, node->child2());
2979 GPRTemporary scratch(this);
2980 GPRTemporary scratch2(this);
2982 GPRReg valueReg = value.gpr();
2983 GPRReg prototypeReg = prototype.gpr();
2984 GPRReg scratchReg = scratch.gpr();
2985 GPRReg scratch2Reg = scratch2.gpr();
2987 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2989 blessedBooleanResult(scratchReg, node);
// Emits an untyped (JSValue x JSValue) bitwise op via the given snippet
// generator, falling back to snippetSlowPathFunction when either operand is
// known not to be a number or when the snippet's fast path fails.
// NOTE(review): #if USE(JSVALUE64) guards around the single-GPR vs tag/payload
// result registers are elided in this view.
2992 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
2993 void SpeculativeJIT::emitUntypedBitOp(Node* node)
2995 Edge& leftChild = node->child1();
2996 Edge& rightChild = node->child2();
// If either side is provably not a number, skip the snippet entirely and
// call the runtime operation directly.
2998 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2999 JSValueOperand left(this, leftChild);
3000 JSValueOperand right(this, rightChild);
3001 JSValueRegs leftRegs = left.jsValueRegs();
3002 JSValueRegs rightRegs = right.jsValueRegs();
3004 GPRTemporary result(this);
3005 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3007 GPRTemporary resultTag(this);
3008 GPRTemporary resultPayload(this);
3009 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3012 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3013 m_jit.exceptionCheck();
3015 jsValueResult(resultRegs, node);
// Snippet path: operands are lazily materialized because one may be constant.
3019 Optional<JSValueOperand> left;
3020 Optional<JSValueOperand> right;
3022 JSValueRegs leftRegs;
3023 JSValueRegs rightRegs;
3026 GPRTemporary result(this);
3027 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3028 GPRTemporary scratch(this);
3029 GPRReg scratchGPR = scratch.gpr();
3031 GPRTemporary resultTag(this);
3032 GPRTemporary resultPayload(this);
3033 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
// On 32-bit, the result tag register doubles as the scratch.
3034 GPRReg scratchGPR = resultTag.gpr();
3037 SnippetOperand leftOperand;
3038 SnippetOperand rightOperand;
3040 // The snippet generator does not support both operands being constant. If the left
3041 // operand is already const, we'll ignore the right operand's constness.
3042 if (leftChild->isInt32Constant())
3043 leftOperand.setConstInt32(leftChild->asInt32());
3044 else if (rightChild->isInt32Constant())
3045 rightOperand.setConstInt32(rightChild->asInt32());
3047 RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3049 if (!leftOperand.isConst()) {
3050 left = JSValueOperand(this, leftChild);
3051 leftRegs = left->jsValueRegs();
3053 if (!rightOperand.isConst()) {
3054 right = JSValueOperand(this, rightChild);
3055 rightRegs = right->jsValueRegs();
3058 SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3059 gen.generateFastPath(m_jit);
3061 ASSERT(gen.didEmitFastPath());
3062 gen.endJumpList().append(m_jit.jump());
3064 gen.slowPathJumpList().link(&m_jit);
3065 silentSpillAllRegisters(resultRegs);
// A constant operand was never given registers; materialize it into the
// (already spilled) result registers for the slow-path call.
3067 if (leftOperand.isConst()) {
3068 leftRegs = resultRegs;
3069 m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3070 } else if (rightOperand.isConst()) {
3071 rightRegs = resultRegs;
3072 m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3075 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3077 silentFillAllRegisters(resultRegs);
3078 m_jit.exceptionCheck();
3080 gen.endJumpList().link(&m_jit);
3081 jsValueResult(resultRegs, node);
// Lowers BitAnd/BitOr/BitXor: dispatches untyped operands to the snippet-based
// emitUntypedBitOp, and int32-speculated operands to direct machine ops with
// constant folding on either side. NOTE(review): the case labels selecting the
// op in the untyped switch are elided in this view.
3084 void SpeculativeJIT::compileBitwiseOp(Node* node)
3086 NodeType op = node->op();
3087 Edge& leftChild = node->child1();
3088 Edge& rightChild = node->child2();
3090 if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3093 emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3096 emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3099 emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3102 RELEASE_ASSERT_NOT_REACHED();
// Int32 fast paths: fold a constant operand into the instruction when possible.
3106 if (leftChild->isInt32Constant()) {
3107 SpeculateInt32Operand op2(this, rightChild);
3108 GPRTemporary result(this, Reuse, op2);
3110 bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3112 int32Result(result.gpr(), node);
3114 } else if (rightChild->isInt32Constant()) {
3115 SpeculateInt32Operand op1(this, leftChild);
3116 GPRTemporary result(this, Reuse, op1);
3118 bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3120 int32Result(result.gpr(), node);
// Both operands in registers.
3123 SpeculateInt32Operand op1(this, leftChild);
3124 SpeculateInt32Operand op2(this, rightChild);
3125 GPRTemporary result(this, Reuse, op1, op2);
3127 GPRReg reg1 = op1.gpr();
3128 GPRReg reg2 = op2.gpr();
3129 bitOp(op, reg1, reg2, result.gpr());
3131 int32Result(result.gpr(), node);
// Emits the untyped (JSValue-operand) path for right-shift nodes, covering
// both signed (BitRShift, >>) and unsigned (BitURShift, >>>) variants.
// NOTE(review): this dump is line-sampled; braces and what are presumably
// #if USE(JSVALUE64)/#else/#endif directives are elided (note the two
// alternative resultRegs/scratch declarations below) -- confirm against the
// full file.
3135 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
// Select the slow-path C function and the snippet generator's shift type
// from the node op.
3137 J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3138 ? operationValueBitRShift : operationValueBitURShift;
3139 JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3140 ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3142 Edge& leftChild = node->child1();
3143 Edge& rightChild = node->child2();
// If either operand is known not to be a number, the snippet fast path
// cannot succeed: go straight to the slow-path call with no inline code.
3145 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3146 JSValueOperand left(this, leftChild);
3147 JSValueOperand right(this, rightChild);
3148 JSValueRegs leftRegs = left.jsValueRegs();
3149 JSValueRegs rightRegs = right.jsValueRegs();
// 64-bit variant: one GPR holds the whole result JSValue.
3151 GPRTemporary result(this);
3152 JSValueRegs resultRegs = JSValueRegs(result.gpr());
// 32-bit variant: result is a tag/payload register pair.
3154 GPRTemporary resultTag(this);
3155 GPRTemporary resultPayload(this);
3156 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3159 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3160 m_jit.exceptionCheck();
3162 jsValueResult(resultRegs, node);
// Snippet path. Operands are held in Optionals because a constant operand
// is folded into the SnippetOperand and never loaded into registers.
3166 Optional<JSValueOperand> left;
3167 Optional<JSValueOperand> right;
3169 JSValueRegs leftRegs;
3170 JSValueRegs rightRegs;
3172 FPRTemporary leftNumber(this);
3173 FPRReg leftFPR = leftNumber.fpr();
// 64-bit variant: dedicated GPR scratch; no FPR scratch needed.
3176 GPRTemporary result(this);
3177 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3178 GPRTemporary scratch(this);
3179 GPRReg scratchGPR = scratch.gpr();
3180 FPRReg scratchFPR = InvalidFPRReg;
// 32-bit variant: the result tag register doubles as the GPR scratch, and
// an FPR scratch is required.
3182 GPRTemporary resultTag(this);
3183 GPRTemporary resultPayload(this);
3184 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3185 GPRReg scratchGPR = resultTag.gpr();
3186 FPRTemporary fprScratch(this);
3187 FPRReg scratchFPR = fprScratch.fpr();
3190 SnippetOperand leftOperand;
3191 SnippetOperand rightOperand;
3193 // The snippet generator does not support both operands being constant. If the left
3194 // operand is already const, we'll ignore the right operand's constness.
3195 if (leftChild->isInt32Constant())
3196 leftOperand.setConstInt32(leftChild->asInt32());
3197 else if (rightChild->isInt32Constant())
3198 rightOperand.setConstInt32(rightChild->asInt32());
3200 RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
// Only non-constant operands occupy registers.
3202 if (!leftOperand.isConst()) {
3203 left = JSValueOperand(this, leftChild);
3204 leftRegs = left->jsValueRegs();
3206 if (!rightOperand.isConst()) {
3207 right = JSValueOperand(this, rightChild);
3208 rightRegs = right->jsValueRegs();
3211 JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3212 leftFPR, scratchGPR, scratchFPR, shiftType);
3213 gen.generateFastPath(m_jit);
3215 ASSERT(gen.didEmitFastPath());
3216 gen.endJumpList().append(m_jit.jump());
// Slow path: spill live registers, then materialize any constant operand
// into the (now free) result registers so the C call sees real JSValues.
3218 gen.slowPathJumpList().link(&m_jit);
3219 silentSpillAllRegisters(resultRegs);
3221 if (leftOperand.isConst()) {
3222 leftRegs = resultRegs;
3223 m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3224 } else if (rightOperand.isConst()) {
3225 rightRegs = resultRegs;
3226 m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3229 callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3231 silentFillAllRegisters(resultRegs);
3232 m_jit.exceptionCheck();
3234 gen.endJumpList().link(&m_jit);
3235 jsValueResult(resultRegs, node);
// Emits code for a shift node (left shift via the snippet generator,
// right shifts via emitUntypedRightShiftBitOp, int32-speculated fast paths
// otherwise). NOTE(review): this dump is line-sampled -- braces and the
// switch labels selecting between the untyped cases are elided.
3239 void SpeculativeJIT::compileShiftOp(Node* node)
3241 NodeType op = node->op();
3242 Edge& leftChild = node->child1();
3243 Edge& rightChild = node->child2();
// Untyped operands go to the generic snippet paths; the elided switch
// presumably routes BitLShift to the first call and the right-shift ops to
// the second -- confirm against the full file.
3245 if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3248 emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3252 emitUntypedRightShiftBitOp(node);
3255 RELEASE_ASSERT_NOT_REACHED();
// Fast path: constant shift amount. Masking with 0x1f matches JS shift
// semantics (shift count taken modulo 32).
3259 if (rightChild->isInt32Constant()) {
3260 SpeculateInt32Operand op1(this, leftChild);
3261 GPRTemporary result(this, Reuse, op1);
3263 shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3265 int32Result(result.gpr(), node);
3267 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
// Hence the result temporary may only reuse op1 (the value being shifted),
// never op2 (the shift amount).
3268 SpeculateInt32Operand op1(this, leftChild);
3269 SpeculateInt32Operand op2(this, rightChild);
3270 GPRTemporary result(this, Reuse, op1);
3272 GPRReg reg1 = op1.gpr();
3273 GPRReg reg2 = op2.gpr();
3274 shiftOp(op, reg1, reg2, result.gpr());
3276 int32Result(result.gpr(), node);
3280 void SpeculativeJIT::compileValueAdd(Node* node)
3282 Edge& leftChild = node->child1();
3283 Edge& rightChild = node->child2();
3285 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3286 JSValueOperand left(this, leftChild);
3287 JSValueOperand right(this, rightChild);
3288 JSValueRegs leftRegs = left.jsValueRegs();
3289 JSValueRegs rightRegs = right.jsValueRegs();
3291 GPRTemporary result(this);
3292 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3294 GPRTemporary resultTag(this);
3295 GPRTemporary resultPayload(this);
3296 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3299 callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3300 m_jit.exceptionCheck();
3302 jsValueResult(resultRegs, node);
3306 Optional<JSValueOperand> left;
3307 Optional<JSValueOperand> right;
3309 JSValueRegs leftRegs;
3310 JSValueRegs rightRegs;
3312 FPRTemporary leftNumber(this);
3313 FPRTemporary rightNumber(this);
3314 FPRReg leftFPR = leftNumber.fpr();
3315 FPRReg rightFPR = rightNumber.fpr();
3318 GPRTemporary result(this);
3319 JSValueRegs resultRegs = JSValueRegs(result.gpr());
3320 GPRTemporary scratch(this);
3321 GPRReg scratchGPR = scratch.gpr();
3322 FPRReg scratchFPR = InvalidFPRReg;
3324 GPRTemporary resultTag(this);
3325 GPRTemporary resultPayload(this);
3326 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3327 GPRReg scratchGPR = resultTag.gpr();
3328 FPRTemporary fprScratch(this);
3329 FPRReg scratchFPR = fprScratch.fpr();
3332 SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3333 SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3335 // The snippet generator does not support both operands being constant. If the left
3336 // operand is already const, we'll ignore the right operand's constness.
3337 if (leftChild->isInt32Constant())
3338 leftOperand.setConstInt32(leftChild->asInt32());
3339 else if (rightChild->isInt32Constant())
3340 rightOperand.setConstInt32(rightChild->asInt32());
3342 ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3344 if (!leftOperand.isConst()) {
3345 left = JSValueOperand(this, leftChild);
3346 leftRegs = left->jsValueRegs();
3348 if (!rightOperand.isConst()) {
3349 right = JSValueOperand(this, rightChild);
3350 rightRegs = right->jsValueRegs();
3353 JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftR