2 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGMayExit.h"
38 #include "DFGOSRExitFuzz.h"
39 #include "DFGSaneStringGetByValSlowPathGenerator.h"
40 #include "DFGSlowPathGenerator.h"
41 #include "DFGSnippetParams.h"
42 #include "DirectArguments.h"
43 #include "JITAddGenerator.h"
44 #include "JITBitAndGenerator.h"
45 #include "JITBitOrGenerator.h"
46 #include "JITBitXorGenerator.h"
47 #include "JITDivGenerator.h"
48 #include "JITLeftShiftGenerator.h"
49 #include "JITMulGenerator.h"
50 #include "JITRightShiftGenerator.h"
51 #include "JITSubGenerator.h"
52 #include "JSAsyncFunction.h"
53 #include "JSAsyncGeneratorFunction.h"
54 #include "JSCInlines.h"
55 #include "JSFixedArray.h"
56 #include "JSGeneratorFunction.h"
57 #include "JSLexicalEnvironment.h"
58 #include "LinkBuffer.h"
59 #include "RegExpConstructor.h"
60 #include "ScopedArguments.h"
61 #include "ScratchRegisterAllocator.h"
62 #include "SuperSampler.h"
63 #include <wtf/BitVector.h>
65 #include <wtf/MathExtras.h>
67 namespace JSC { namespace DFG {
// Constructor: binds this code generator to the JITCompiler, sizes the
// per-virtual-register generation info from the graph's frame register count,
// and wires up the abstract-interpreter state plus the variable event stream /
// minified graph stored on the JIT code object (used later for OSR exit).
// NOTE(review): several member initializers appear elided from this chunk
// (embedded numbering jumps 69 -> 73 -> 75) — confirm against the full file.
69 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
73 , m_lastGeneratedNode(LastNodeType)
75 , m_generationInfo(m_jit.graph().frameRegisterCount())
76 , m_state(m_jit.graph())
77 , m_interpreter(m_jit.graph(), m_state)
78 , m_stream(&jit.jitCode()->variableEventStream)
79 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
// Destructor. Body not visible in this chunk (lines elided).
83 SpeculativeJIT::~SpeculativeJIT()
// Emits inline code that allocates a JSFinalObject with the given structure
// plus its butterfly (out-of-line property storage and, when the structure has
// indexed properties, an indexing header + vector of vectorLength slots).
// On the fast path resultGPR holds the object and storageGPR the butterfly;
// all failures fall through to a CallArrayAllocatorSlowPathGenerator.
// NOTE(review): interior lines are elided in this chunk (e.g. the `size`
// declaration before line 106 and several braces/#if directives) — the
// annotations below are based on the visible statements only.
87 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
89 IndexingType indexingType = structure->indexingType();
90 bool hasIndexingHeader = hasIndexedProperties(indexingType);
92 unsigned inlineCapacity = structure->inlineCapacity();
93 unsigned outOfLineCapacity = structure->outOfLineCapacity();
95 GPRTemporary scratch(this);
96 GPRTemporary scratch2(this);
97 GPRReg scratchGPR = scratch.gpr();
98 GPRReg scratch2GPR = scratch2.gpr();
// Round the requested vector length up to the allocator-friendly size.
100 ASSERT(vectorLength >= numElements);
101 vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
103 JITCompiler::JumpList slowCases;
// Total butterfly size: indexed storage (if any) + out-of-line properties.
106 if (hasIndexingHeader)
107 size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
108 size += outOfLineCapacity * sizeof(JSValue);
// Null storage pointer by default; overwritten when a butterfly is needed.
110 m_jit.move(TrustedImmPtr(0), storageGPR);
// Butterflies live in the JSValue gigacage auxiliary space.
113 if (MarkedAllocator* allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorFor(size)) {
114 m_jit.move(TrustedImmPtr(allocator), scratchGPR);
115 m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
// Advance storageGPR past the pre-header so it points at the butterfly
// (the addPtr consuming this immediate is elided from this chunk).
118 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
121 if (hasIndexingHeader)
122 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
// No suitable allocator: take the slow path.
124 slowCases.append(m_jit.jump());
// Now allocate the object cell itself.
127 size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
128 MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorFor(allocationSize);
130 m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
131 emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
132 m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
134 slowCases.append(m_jit.jump());
136 // I want a slow path that also loads out the storage pointer, and that's
137 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
138 // of work for a very small piece of functionality. :-/
139 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
140 slowCases, this, operationNewRawObject, resultGPR, storageGPR,
141 structure, vectorLength));
// Clear the unused tail of the indexed vector so the GC never sees garbage:
// PNaN for double shapes, the empty JSValue otherwise. The 64-bit and
// 32-bit encodings are handled separately (the #if/#else lines are elided).
143 if (numElements < vectorLength && LIKELY(!hasUndecided(structure->indexingType()))) {
145 if (hasDouble(structure->indexingType()))
146 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
148 m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
149 for (unsigned i = numElements; i < vectorLength; ++i)
150 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
// 32-bit variant: store tag and payload halves separately.
152 EncodedValueDescriptor value;
153 if (hasDouble(structure->indexingType()))
154 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
156 value.asInt64 = JSValue::encode(JSValue());
157 for (unsigned i = numElements; i < vectorLength; ++i) {
158 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
159 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
164 if (hasIndexingHeader)
165 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
167 m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
// Fence so concurrent GC marking observes fully-initialized storage.
169 m_jit.mutatorFence(*m_jit.vm());
// Loads the argument count for the given (possibly inlined) call frame into
// lengthGPR. For a non-varargs inline frame the count is a compile-time
// constant; otherwise it is loaded from the frame's argument-count slot and
// decremented by one to exclude `this` (the else/brace lines are elided here).
172 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
174 if (inlineCallFrame && !inlineCallFrame->isVarargs())
175 m_jit.move(TrustedImm32(inlineCallFrame->argumentCountIncludingThis - !includeThis), lengthGPR);
177 VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
178 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
// Subtract `this` when the caller asked for arguments only.
180 m_jit.sub32(TrustedImm32(1), lengthGPR);
// Convenience overload: resolves the CodeOrigin to its inline call frame.
184 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
186 emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
// Materializes the callee for the given code origin into calleeGPR.
// Inlined closure calls recover the callee from its stack slot; inlined
// non-closure calls use the known constant cell; otherwise the callee is
// loaded from the machine call frame. (Some load/else lines are elided.)
189 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
191 if (origin.inlineCallFrame) {
192 if (origin.inlineCallFrame->isClosureCall) {
194 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
198 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
202 m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
// Computes the address of the first argument of the frame identified by
// `origin` into startGPR, as a byte offset from the call frame register.
// (The addPtr call wrapping these operands is elided from this chunk.)
205 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
209 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
210 GPRInfo::callFrameRegister, startGPR);
// OSR-exit fuzzing support: emits code that bumps the global check counter
// and returns a jump that is taken when the configured fuzz point is reached
// (fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter options). Returns an unset
// jump when fuzzing is disabled or not applicable to this code block.
213 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
215 if (!Options::useOSRExitFuzz()
216 || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
217 || !doOSRExitFuzzing())
218 return MacroAssembler::Jump();
220 MacroAssembler::Jump result;
// regT0 is saved/restored around the counter update so the caller's value
// survives regardless of which path executes.
222 m_jit.pushToSave(GPRInfo::regT0);
223 m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
224 m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
225 m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
226 unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
227 unsigned at = Options::fireOSRExitFuzzAt();
228 if (at || atOrAfter) {
// Branch condition depends on which option is set: Below for "at or
// after", NotEqual for an exact firing point. (The `threshold`
// declaration and if/else lines are elided from this chunk.)
230 MacroAssembler::RelationalCondition condition;
232 threshold = atOrAfter;
233 condition = MacroAssembler::Below;
236 condition = MacroAssembler::NotEqual;
238 MacroAssembler::Jump ok = m_jit.branch32(
239 condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
240 m_jit.popToRestore(GPRInfo::regT0);
// This jump is the "fire the fuzzed exit" path returned to the caller.
241 result = m_jit.jump();
244 m_jit.popToRestore(GPRInfo::regT0);
// Records an OSR exit taken via `jumpToFail`. When exit fuzzing is active the
// fuzz jump is merged into the same exit. The exit captures the value-profile
// recovery method and the current variable-event-stream index so the runtime
// can reconstruct bytecode state. (The early-out and else lines are elided.)
249 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
253 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
254 if (fuzzJump.isSet()) {
255 JITCompiler::JumpList jumpsToFail;
256 jumpsToFail.append(fuzzJump);
257 jumpsToFail.append(jumpToFail);
258 m_jit.appendExitInfo(jumpsToFail);
260 m_jit.appendExitInfo(jumpToFail);
261 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
// JumpList variant of the speculation check above: all jumps in the list
// (plus the fuzz jump, when set) target the same recorded OSR exit.
264 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
268 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
269 if (fuzzJump.isSet()) {
270 JITCompiler::JumpList myJumpsToFail;
271 myJumpsToFail.append(jumpsToFail);
272 myJumpsToFail.append(fuzzJump);
273 m_jit.appendExitInfo(myJumpsToFail);
275 m_jit.appendExitInfo(jumpsToFail);
276 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
// Registers an OSR exit with no jump yet and returns a placeholder the caller
// links later. An empty placeholder is returned on the (elided) early-out
// path at line 282 — presumably when compilation has already failed; confirm
// against the full file.
279 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
282 return OSRExitJumpPlaceholder();
283 unsigned index = m_jit.jitCode()->osrExit.size();
284 m_jit.appendExitInfo();
285 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
286 return OSRExitJumpPlaceholder(index);
// Edge convenience forwarder for the placeholder-returning overload.
289 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
291 return speculationCheck(kind, jsValueSource, nodeUse.node());
// Edge convenience forwarder for the single-Jump overload.
294 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
296 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
// Edge convenience forwarder for the JumpList overload.
299 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
301 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
// Speculation check that additionally records a SpeculationRecovery, whose
// index is stored with the OSR exit so register state can be repaired before
// exiting. (The guard lines before 308 are elided from this chunk.)
304 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
308 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
309 m_jit.appendExitInfo(jumpToFail);
310 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
// Edge convenience forwarder for the recovery-carrying overload.
313 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
315 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
// Emits an invalidation point: an UncountableInvalidation OSR exit whose
// replacement source is a watchpoint label, so the code can later be patched
// to jump to the exit when a watchpoint fires.
318 void SpeculativeJIT::emitInvalidationPoint(Node* node)
322 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
323 m_jit.jitCode()->appendOSRExit(OSRExit(
324 UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
325 this, m_stream->size()));
326 info.m_replacementSource = m_jit.watchpointLabel();
327 ASSERT(info.m_replacementSource.isSet());
// Marks compilation as failed and emits a crash for a node the DFG proved
// unreachable, tagging the abort with the node's opcode for diagnostics.
331 void SpeculativeJIT::unreachable(Node* node)
333 m_compileOkay = false;
334 m_jit.abortWithReason(DFGUnreachableNode, node->op());
// Unconditionally exits: records a speculation check on an always-taken jump,
// then stops further code generation for this compilation.
337 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
341 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
342 m_compileOkay = false;
343 if (verboseCompilationEnabled())
344 dataLog("Bailing compilation.\n");
// Edge convenience forwarder.
347 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
349 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
// Type check: narrows the abstract-interpreter proof for `edge` to the types
// that pass through, and records an OSR exit on the failure jump.
352 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
354 ASSERT(needsTypeCheck(edge, typesPassedThrough));
355 m_interpreter.filter(edge, typesPassedThrough);
356 speculationCheck(exitKind, source, edge.node(), jumpToFail);
// Returns the set of registers currently in use: every GPR/FPR held by the
// register banks, plus the registers stubs may not use. (The `result`
// declaration and the set-insertion lines are elided from this chunk.)
359 RegisterSet SpeculativeJIT::usedRegisters()
363 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
364 GPRReg gpr = GPRInfo::toRegister(i);
365 if (m_gprs.isInUse(gpr))
368 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
369 FPRReg fpr = FPRInfo::toRegister(i);
370 if (m_fprs.isInUse(fpr))
374 result.merge(RegisterSet::stubUnavailableRegisters());
// Queues a slow-path generator to be emitted after the fast paths.
379 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
381 m_slowPathGenerators.append(WTFMove(slowPathGenerator));
// Lambda form: captures the current node and stream index so the slow path
// is later generated with the correct OSR-exit context.
384 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
386 m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
// Emits all queued slow paths after main-path generation, mapping each to its
// originating code origin in the PC-to-CodeOrigin map. For lambdas, restores
// the node/stream-index context saved when the lambda was queued, and resets
// the out-of-line stream index afterwards.
389 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
391 for (auto& slowPathGenerator : m_slowPathGenerators) {
392 pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
393 slowPathGenerator->generate(this);
395 for (auto& slowPathLambda : m_slowPathLambdas) {
396 Node* currentNode = slowPathLambda.currentNode;
397 m_currentNode = currentNode;
398 m_outOfLineStreamIndex = slowPathLambda.streamIndex;
399 pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
400 slowPathLambda.generator();
401 m_outOfLineStreamIndex = std::nullopt;
// Resets all per-virtual-register generation info and both register banks to
// their default (empty) state.
405 void SpeculativeJIT::clearGenerationInfo()
407 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
408 m_generationInfo[i] = GenerationInfo();
409 m_gprs = RegisterBank<GPRInfo>();
410 m_fprs = RegisterBank<FPRInfo>();
// Builds a plan describing how to silently spill a GPR before a call and
// refill it afterwards, based on the value's current DataFormat and whether
// it is a constant. "Silent" means the spill/fill does not change the
// recorded generation info. NOTE(review): this chunk elides many lines,
// including most #if USE(JSVALUE64)/#else directives and closing braces —
// the 64-bit and 32-bit variants below are interleaved without their guards.
413 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
415 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
416 Node* node = info.node();
417 DataFormat registerFormat = info.registerFormat();
418 ASSERT(registerFormat != DataFormatNone);
419 ASSERT(registerFormat != DataFormatDouble);
421 SilentSpillAction spillAction;
422 SilentFillAction fillAction;
// --- Spill action selection ---
424 if (!info.needsSpill())
425 spillAction = DoNothingForSpill;
428 ASSERT(info.gpr() == source);
429 if (registerFormat == DataFormatInt32)
430 spillAction = Store32Payload;
431 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
432 spillAction = StorePtr;
433 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
434 spillAction = Store64;
436 ASSERT(registerFormat & DataFormatJS);
437 spillAction = Store64;
439 #elif USE(JSVALUE32_64)
// 32-bit: a JS value may live in a tag/payload register pair.
440 if (registerFormat & DataFormatJS) {
441 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
442 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
444 ASSERT(info.gpr() == source);
445 spillAction = Store32Payload;
// --- Fill action selection ---
450 if (registerFormat == DataFormatInt32) {
451 ASSERT(info.gpr() == source);
452 ASSERT(isJSInt32(info.registerFormat()));
453 if (node->hasConstant()) {
454 ASSERT(node->isInt32Constant());
455 fillAction = SetInt32Constant;
457 fillAction = Load32Payload;
458 } else if (registerFormat == DataFormatBoolean) {
// Unboxed booleans are not expected here on this (elided) configuration.
460 RELEASE_ASSERT_NOT_REACHED();
461 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
462 fillAction = DoNothingForFill;
464 #elif USE(JSVALUE32_64)
465 ASSERT(info.gpr() == source);
466 if (node->hasConstant()) {
467 ASSERT(node->isBooleanConstant());
468 fillAction = SetBooleanConstant;
470 fillAction = Load32Payload;
472 } else if (registerFormat == DataFormatCell) {
473 ASSERT(info.gpr() == source);
474 if (node->hasConstant()) {
475 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
476 node->asCell(); // To get the assertion.
477 fillAction = SetCellConstant;
480 fillAction = LoadPtr;
482 fillAction = Load32Payload;
485 } else if (registerFormat == DataFormatStorage) {
486 ASSERT(info.gpr() == source);
487 fillAction = LoadPtr;
488 } else if (registerFormat == DataFormatInt52) {
// Int52 refill may need a shift depending on the spilled representation.
489 if (node->hasConstant())
490 fillAction = SetInt52Constant;
491 else if (info.spillFormat() == DataFormatInt52)
493 else if (info.spillFormat() == DataFormatStrictInt52)
494 fillAction = Load64ShiftInt52Left;
495 else if (info.spillFormat() == DataFormatNone)
498 RELEASE_ASSERT_NOT_REACHED();
499 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
500 fillAction = Load64; // Make GCC happy.
503 } else if (registerFormat == DataFormatStrictInt52) {
504 if (node->hasConstant())
505 fillAction = SetStrictInt52Constant;
506 else if (info.spillFormat() == DataFormatInt52)
507 fillAction = Load64ShiftInt52Right;
508 else if (info.spillFormat() == DataFormatStrictInt52)
510 else if (info.spillFormat() == DataFormatNone)
513 RELEASE_ASSERT_NOT_REACHED();
514 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
515 fillAction = Load64; // Make GCC happy.
// Remaining case: boxed JS values.
519 ASSERT(registerFormat & DataFormatJS);
521 ASSERT(info.gpr() == source);
522 if (node->hasConstant()) {
523 if (node->isCellConstant())
524 fillAction = SetTrustedJSConstant;
526 fillAction = SetJSConstant;
527 } else if (info.spillFormat() == DataFormatInt32) {
528 ASSERT(registerFormat == DataFormatJSInt32);
529 fillAction = Load32PayloadBoxInt;
// 32-bit: refill tag or payload register individually.
533 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
534 if (node->hasConstant())
535 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
536 else if (info.payloadGPR() == source)
537 fillAction = Load32Payload;
538 else { // Fill the Tag
539 switch (info.spillFormat()) {
540 case DataFormatInt32:
541 ASSERT(registerFormat == DataFormatJSInt32);
542 fillAction = SetInt32Tag;
545 ASSERT(registerFormat == DataFormatJSCell);
546 fillAction = SetCellTag;
548 case DataFormatBoolean:
549 ASSERT(registerFormat == DataFormatJSBoolean);
550 fillAction = SetBooleanTag;
553 fillAction = Load32Tag;
560 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// FPR counterpart of silentSavePlanForGPR: doubles are either rematerialized
// from a constant or reloaded from their spill slot. (#if USE(JSVALUE64)
// guard lines and braces are elided from this chunk.)
563 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
565 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
566 Node* node = info.node();
567 ASSERT(info.registerFormat() == DataFormatDouble);
569 SilentSpillAction spillAction;
570 SilentFillAction fillAction;
572 if (!info.needsSpill())
573 spillAction = DoNothingForSpill;
575 ASSERT(!node->hasConstant());
576 ASSERT(info.spillFormat() == DataFormatNone);
577 ASSERT(info.fpr() == source);
578 spillAction = StoreDouble;
// 64-bit fill selection.
582 if (node->hasConstant()) {
583 node->asNumber(); // To get the assertion.
584 fillAction = SetDoubleConstant;
586 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
587 fillAction = LoadDouble;
589 #elif USE(JSVALUE32_64)
// 32-bit fill selection mirrors the 64-bit logic.
590 ASSERT(info.registerFormat() == DataFormatDouble);
591 if (node->hasConstant()) {
592 node->asNumber(); // To get the assertion.
593 fillAction = SetDoubleConstant;
595 fillAction = LoadDouble;
598 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Executes a spill plan: stores the register to the node's stack slot using
// the width the plan selected. (case labels between the stores are elided.)
601 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
603 switch (plan.spillAction()) {
604 case DoNothingForSpill:
607 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
610 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
613 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
617 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
621 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
624 RELEASE_ASSERT_NOT_REACHED();
// Executes a fill plan: rematerializes a constant into the register or
// reloads it from the stack slot, applying Int52 shifts / int32 boxing as the
// plan dictates. NOTE(review): many `case`/`break` lines and the
// #if USE(JSVALUE64)/32_64 guards are elided from this chunk.
628 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan)
630 switch (plan.fillAction()) {
631 case DoNothingForFill:
633 case SetInt32Constant:
634 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
// Int52 constants are kept left-shifted by int52ShiftAmount in registers.
637 case SetInt52Constant:
638 m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
640 case SetStrictInt52Constant:
641 m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
643 #endif // USE(JSVALUE64)
644 case SetBooleanConstant:
645 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
647 case SetCellConstant:
648 ASSERT(plan.node()->constant()->value().isCell());
649 m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
652 case SetTrustedJSConstant:
653 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
656 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
658 case SetDoubleConstant:
659 m_jit.moveDouble(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), plan.fpr());
// Reload an int32 payload and re-box it as a JSValue (64-bit encoding).
661 case Load32PayloadBoxInt:
662 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
663 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
665 case Load32PayloadConvertToInt52:
666 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
667 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
668 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
670 case Load32PayloadSignExtend:
671 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
672 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
// 32-bit tag/payload constant fills.
675 case SetJSConstantTag:
676 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
678 case SetJSConstantPayload:
679 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
682 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
685 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
688 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
690 case SetDoubleConstant:
691 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
695 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
698 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
701 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
705 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
707 case Load64ShiftInt52Right:
708 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
709 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
711 case Load64ShiftInt52Left:
712 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
713 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
717 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
720 RELEASE_ASSERT_NOT_REACHED();
// Emits a branch taken when the indexing type in tempGPR does not match the
// expected shape for the given array class. Array classes check the IsArray
// bit as well; PossiblyArray masks it out. tempGPR is clobbered.
// (Several case labels/lines between 727 and 736 are elided.)
724 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
726 switch (arrayMode.arrayClass()) {
727 case Array::OriginalArray: {
729 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
730 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
736 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
737 return m_jit.branch32(
738 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
740 case Array::NonArray:
741 case Array::OriginalNonArray:
742 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
743 return m_jit.branch32(
744 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
746 case Array::PossiblyArray:
747 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
748 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
751 RELEASE_ASSERT_NOT_REACHED();
752 return JITCompiler::Jump();
// JumpList variant: handles simple shape modes via the single-shape helper,
// and (Slow)ArrayStorage modes with a range check over the two storage
// shapes. JSArray modes additionally require the IsArray bit. tempGPR is
// clobbered. (Several case labels and result.append(...) wrappers are
// elided from this chunk.)
755 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
757 JITCompiler::JumpList result;
759 switch (arrayMode.type()) {
762 case Array::Contiguous:
763 case Array::Undecided:
764 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, arrayMode.shapeMask());
766 case Array::ArrayStorage:
767 case Array::SlowPutArrayStorage: {
768 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
770 if (arrayMode.isJSArray()) {
// SlowPut accepts either storage shape: check the IsArray bit, then
// range-check [ArrayStorageShape, SlowPutArrayStorageShape].
771 if (arrayMode.isSlowPut()) {
774 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
775 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
776 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
779 MacroAssembler::Above, tempGPR,
780 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
783 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
785 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
// Non-JSArray path: same shape checks without the IsArray bit.
788 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
789 if (arrayMode.isSlowPut()) {
790 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
793 MacroAssembler::Above, tempGPR,
794 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
798 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Emits the CheckArray node: verifies the base cell matches the node's
// ArrayMode. Shape-based modes check the indexing type byte; arguments and
// typed-array modes check the cell type; otherwise falls through to a
// ClassInfo comparison via the loaded structure. No conversion is performed
// here (that is Arrayify's job). (case labels/breaks are elided throughout.)
809 void SpeculativeJIT::checkArray(Node* node)
811 ASSERT(node->arrayMode().isSpecific());
812 ASSERT(!node->arrayMode().doesConversion());
814 SpeculateCellOperand base(this, node->child1());
815 GPRReg baseReg = base.gpr();
// If abstract interpretation already proved the mode, emit nothing.
817 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
818 noResult(m_currentNode);
822 const ClassInfo* expectedClassInfo = 0;
824 switch (node->arrayMode().type()) {
825 case Array::AnyTypedArray:
827 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
831 case Array::Contiguous:
832 case Array::Undecided:
833 case Array::ArrayStorage:
834 case Array::SlowPutArrayStorage: {
835 GPRTemporary temp(this);
836 GPRReg tempGPR = temp.gpr();
837 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
839 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
840 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
842 noResult(m_currentNode);
845 case Array::DirectArguments:
846 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
847 noResult(m_currentNode);
849 case Array::ScopedArguments:
850 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
851 noResult(m_currentNode);
// Typed-array modes map to a cell type via typeForTypedArrayType.
854 speculateCellTypeWithoutTypeFiltering(
855 node->child1(), baseReg,
856 typeForTypedArrayType(node->arrayMode().typedArrayType()));
857 noResult(m_currentNode);
// Fallback: compare the structure's ClassInfo pointer.
861 RELEASE_ASSERT(expectedClassInfo);
863 GPRTemporary temp(this);
864 GPRTemporary temp2(this);
865 m_jit.emitLoadStructure(*m_jit.vm(), baseReg, temp.gpr(), temp2.gpr());
867 BadType, JSValueSource::unboxedCell(baseReg), node,
869 MacroAssembler::NotEqual,
870 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
871 TrustedImmPtr(expectedClassInfo)));
873 noResult(m_currentNode);
// Emits Arrayify(ToStructure): fast-path checks whether the base already has
// the desired structure (ArrayifyToStructure) or indexing shape, and routes
// failures to an ArrayifySlowPathGenerator that performs the conversion.
// The structure temporary is only allocated for non-ArrayifyToStructure ops.
876 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
878 ASSERT(node->arrayMode().doesConversion());
880 GPRTemporary temp(this);
881 GPRTemporary structure;
882 GPRReg tempGPR = temp.gpr();
883 GPRReg structureGPR = InvalidGPRReg;
885 if (node->op() != ArrayifyToStructure) {
886 GPRTemporary realStructure(this);
887 structure.adopt(realStructure);
888 structureGPR = structure.gpr();
891 // We can skip all that comes next if we already have array storage.
892 MacroAssembler::JumpList slowPath;
894 if (node->op() == ArrayifyToStructure) {
// Weak structure compare against the target structure ID.
895 slowPath.append(m_jit.branchWeakStructure(
896 JITCompiler::NotEqual,
897 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
// Else branch: load the indexing byte and check the wanted shape
// (the load8 call wrapping line 901 is elided from this chunk).
901 MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
903 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
906 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
907 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
909 noResult(m_currentNode);
// Node-level entry point: extracts the base (and optional property index)
// operands, then delegates to the register-level arrayify above.
912 void SpeculativeJIT::arrayify(Node* node)
914 ASSERT(node->arrayMode().isSpecific());
916 SpeculateCellOperand base(this, node->child1());
918 if (!node->child2()) {
919 arrayify(node, base.gpr(), InvalidGPRReg);
923 SpeculateInt32Operand property(this, node->child2());
925 arrayify(node, base.gpr(), property.gpr());
// Returns a GPR holding the storage pointer for `edge`. A spilled storage
// value is reloaded from its slot; a value that was never storage is filled
// as a cell and the cell pointer is returned. (return statements and the
// default case are partly elided from this chunk.)
928 GPRReg SpeculativeJIT::fillStorage(Edge edge)
930 VirtualRegister virtualRegister = edge->virtualRegister();
931 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
933 switch (info.registerFormat()) {
934 case DataFormatNone: {
935 if (info.spillFormat() == DataFormatStorage) {
936 GPRReg gpr = allocate();
937 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
938 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
939 info.fillStorage(*m_stream, gpr);
943 // Must be a cell; fill it as a cell and then return the pointer.
944 return fillSpeculateCell(edge);
947 case DataFormatStorage: {
948 GPRReg gpr = info.gpr();
954 return fillSpeculateCell(edge);
// Releases the node's child uses. Var-args nodes iterate the var-arg child
// list; fixed-arity nodes use child1/2/3 in order. (The use(childN) calls
// and early returns between the visible lines are elided from this chunk.)
958 void SpeculativeJIT::useChildren(Node* node)
960 if (node->flags() & NodeHasVarArgs) {
961 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
962 if (!!m_jit.graph().m_varArgChildren[childIdx])
963 use(m_jit.graph().m_varArgChildren[childIdx]);
966 Edge child1 = node->child1();
968 ASSERT(!node->child2() && !node->child3());
973 Edge child2 = node->child2();
975 ASSERT(!node->child3());
980 Edge child3 = node->child3();
// Compiles TryGetById with a polymorphic inline cache (AccessType::TryGet).
// CellUse skips the not-cell check; the untyped path branches around the IC
// for non-cells. (case labels and flushRegisters() lines are elided.)
987 void SpeculativeJIT::compileTryGetById(Node* node)
989 switch (node->child1().useKind()) {
991 SpeculateCellOperand base(this, node->child1());
992 JSValueRegsTemporary result(this, Reuse, base);
994 JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
995 JSValueRegs resultRegs = result.regs();
999 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1001 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
// Untyped base: the IC's slow path also handles the not-cell case.
1006 JSValueOperand base(this, node->child1());
1007 JSValueRegsTemporary result(this, Reuse, base);
1009 JSValueRegs baseRegs = base.jsValueRegs();
1010 JSValueRegs resultRegs = result.regs();
1014 JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1016 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1018 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1023 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
// Compiles the `in` operator. When the property is a constant atomic string,
// emits a patchable structure-stub-info based inline cache backed by
// operationInOptimize; otherwise calls the generic operationGenericIn.
// (flushRegisters(), the callOperation wrapper around lines 1085-1087, and
// several braces are elided from this chunk.)
1028 void SpeculativeJIT::compileIn(Node* node)
1030 SpeculateCellOperand base(this, node->child1());
1031 GPRReg baseGPR = base.gpr();
1033 if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1034 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1035 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1037 GPRTemporary result(this);
1038 GPRReg resultGPR = result.gpr();
1040 use(node->child2());
// The patchable jump is the IC entry; `done` is where patched code rejoins.
1042 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1043 MacroAssembler::Label done = m_jit.label();
1045 // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1046 // we can cast it to const AtomicStringImpl* safely.
1047 auto slowPath = slowPathCall(
1048 jump.m_jump, this, operationInOptimize,
1049 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1050 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
// Fill in the stub info so the IC patcher knows the register assignment.
1052 stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1053 stubInfo->codeOrigin = node->origin.semantic;
1054 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1055 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1056 stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1057 #if USE(JSVALUE32_64)
1058 stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1059 stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1060 stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1062 stubInfo->patch.usedRegisters = usedRegisters();
1064 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1065 addSlowPathGenerator(WTFMove(slowPath));
1069 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: call out to operationGenericIn with the key value.
1074 JSValueOperand key(this, node->child2());
1075 JSValueRegs regs = key.jsValueRegs();
1077 GPRFlushedCallResult result(this);
1078 GPRReg resultGPR = result.gpr();
1085 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1087 m_jit.exceptionCheck();
1088 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles DeleteById by calling into the runtime; the result is a raw
// (unblessed) boolean indicating whether the deletion succeeded.
1091 void SpeculativeJIT::compileDeleteById(Node* node)
1093     JSValueOperand value(this, node->child1());
1094     GPRFlushedCallResult result(this);
1096     JSValueRegs valueRegs = value.jsValueRegs();
1097     GPRReg resultGPR = result.gpr();
1102     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
// Delete can throw (e.g. strict-mode failure on a non-configurable property).
1103     m_jit.exceptionCheck();
1105     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles DeleteByVal: like compileDeleteById but the property key is a
// runtime value, so both base and key are passed to the runtime call.
1108 void SpeculativeJIT::compileDeleteByVal(Node* node)
1110     JSValueOperand base(this, node->child1());
1111     JSValueOperand key(this, node->child2());
1112     GPRFlushedCallResult result(this);
1114     JSValueRegs baseRegs = base.jsValueRegs();
1115     JSValueRegs keyRegs = key.jsValueRegs();
1116     GPRReg resultGPR = result.gpr();
1122     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
// The runtime delete may throw, e.g. from key coercion or strict-mode failure.
1123     m_jit.exceptionCheck();
1125     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles PushWithScope (the scope push for a `with` statement). If the
// object child is already proven to be an object, the non-throwing variant
// of the runtime call is used; otherwise the generic variant runs toObject()
// and can therefore throw.
1128 void SpeculativeJIT::compilePushWithScope(Node* node)
1130     SpeculateCellOperand currentScope(this, node->child1());
1131     GPRReg currentScopeGPR = currentScope.gpr();
1133     GPRFlushedCallResult result(this);
1134     GPRReg resultGPR = result.gpr();
1136     auto objectEdge = node->child2();
1137     if (objectEdge.useKind() == ObjectUse) {
1138         SpeculateCellOperand object(this, objectEdge);
1139         GPRReg objectGPR = object.gpr();
1140         speculateObject(objectEdge, objectGPR);
1143         callOperation(operationPushWithScopeObject, resultGPR, currentScopeGPR, objectGPR);
1144         // No exception check here as we did not have to call toObject().
1146         ASSERT(objectEdge.useKind() == UntypedUse);
1147         JSValueOperand object(this, objectEdge);
1148         JSValueRegs objectRegs = object.jsValueRegs();
1151         callOperation(operationPushWithScope, resultGPR, currentScopeGPR, objectRegs);
// Untyped path performs toObject() in the runtime, which can throw.
1152         m_jit.exceptionCheck();
1155     cellResult(resultGPR, node);
// Emits a generic (non-speculative) comparison. If the immediately following
// node is a Branch consuming this compare (detectPeepHoleBranch), the compare
// and branch are fused and the block index is advanced past the branch;
// returns true in that case, false when a standalone compare was emitted.
1158 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1160     unsigned branchIndexInBlock = detectPeepHoleBranch();
1161     if (branchIndexInBlock != UINT_MAX) {
1162         Node* branchNode = m_block->at(branchIndexInBlock);
// Fusion is only valid when nothing else references the compare's result.
1164         ASSERT(node->adjustedRefCount() == 1);
1166         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
// Skip the branch node; it has been consumed by the fused code.
1168         m_indexInBlock = branchIndexInBlock;
1169         m_currentNode = branchNode;
1174     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
// Strict-equality counterpart of nonSpeculativeCompare: fuses with a
// following Branch when possible (returning true), otherwise emits a
// standalone strict-equality test. `invert` flips the comparison sense.
1179 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1181     unsigned branchIndexInBlock = detectPeepHoleBranch();
1182     if (branchIndexInBlock != UINT_MAX) {
1183         Node* branchNode = m_block->at(branchIndexInBlock);
// Fusion requires the compare to have no other users.
1185         ASSERT(node->adjustedRefCount() == 1);
1187         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
// Advance past the consumed branch node.
1189         m_indexInBlock = branchIndexInBlock;
1190         m_currentNode = branchNode;
1195     nonSpeculativeNonPeepholeStrictEq(node, invert);
// Debug helper: maps a DataFormat enum value to a printable name.
// The table must stay in sync with the DataFormat enum ordering.
1200 static const char* dataFormatString(DataFormat format)
1202     // These values correspond to the DataFormat enum.
1203     const char* strings[] = {
1221     return strings[format];
// Debug dump of the allocator state: GPR/FPR banks and per-virtual-register
// GenerationInfo (register format, spill format, and assigned register).
1224 void SpeculativeJIT::dump(const char* label)
1227     dataLogF("<%s>\n", label);
1229     dataLogF("  gprs:\n");
1231     dataLogF("  fprs:\n");
1233     dataLogF("  VirtualRegisters:\n");
1234     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1235         GenerationInfo& info = m_generationInfo[i];
1237             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1239             dataLogF("    % 3d:[__][__]", i);
1240         if (info.registerFormat() == DataFormatDouble)
1241             dataLogF(":fpr%d\n", info.fpr());
1242         else if (info.registerFormat() != DataFormatNone
1243 #if USE(JSVALUE32_64)
// On 32-bit, JS-format values occupy a register pair, so the single-GPR
// printout below does not apply to them.
1244             && !(info.registerFormat() & DataFormatJS)
1247             ASSERT(info.gpr() != InvalidGPRReg);
1248             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1253     dataLogF("</%s>\n", label);
// GPRTemporary constructors. A GPRTemporary owns a general-purpose register
// for the duration of a node's code generation.
// Default: holds no register (m_gpr stays InvalidGPRReg).
1256 GPRTemporary::GPRTemporary()
1258     , m_gpr(InvalidGPRReg)
// Allocate any free GPR from the jit's register bank.
1262 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1264     , m_gpr(InvalidGPRReg)
1266     m_gpr = m_jit->allocate();
// Allocate a specific GPR (used when a fixed register is required, e.g. for
// calling conventions or instruction operand constraints).
1269 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1271     , m_gpr(InvalidGPRReg)
1273     m_gpr = m_jit->allocate(specific);
1276 #if USE(JSVALUE32_64)
// 32-bit only: try to reuse one word (tag or payload, selected by `which`)
// of a JSValueOperand whose node is no longer needed; otherwise allocate.
1277 GPRTemporary::GPRTemporary(
1278     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1280     , m_gpr(InvalidGPRReg)
1282     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1283         m_gpr = m_jit->reuse(op1.gpr(which));
1285         m_gpr = m_jit->allocate();
1287 #endif // USE(JSVALUE32_64)
// JSValueRegsTemporary: owns the register(s) needed to hold a JSValue —
// one GPR on 64-bit, a tag/payload GPR pair on 32-bit. The paired
// definitions below are the 64-bit and 32-bit variants of each member;
// the #if USE(JSVALUE64)/#else lines are presumed between them — confirm
// against the full source.
1289 JSValueRegsTemporary::JSValueRegsTemporary() { }
1291 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
// 64-bit reuse variant: the word selector is irrelevant (single register).
1302 template<typename T>
1303 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1304     : m_gpr(jit, Reuse, operand)
// 32-bit reuse variant: reuse the operand's register for whichever word the
// caller asked for; allocate a fresh register for the other word.
1308 template<typename T>
1309 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1311     if (resultWord == PayloadWord) {
1312         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1313         m_tagGPR = GPRTemporary(jit);
1315         m_payloadGPR = GPRTemporary(jit);
1316         m_tagGPR = GPRTemporary(jit, Reuse, operand);
// 64-bit: reuse the JSValueOperand's single register.
1322 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1324     m_gpr = GPRTemporary(jit, Reuse, operand);
// 32-bit: reuse both words when possible, otherwise allocate a fresh pair.
1327 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1329     if (jit->canReuse(operand.node())) {
1330         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1331         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1333         m_payloadGPR = GPRTemporary(jit);
1334         m_tagGPR = GPRTemporary(jit);
1339 JSValueRegsTemporary::~JSValueRegsTemporary() { }
// Expose the held register(s) as a JSValueRegs.
1341 JSValueRegs JSValueRegsTemporary::regs()
1344     return JSValueRegs(m_gpr.gpr());
1346     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
// Transfers ownership of another GPRTemporary's register into this one.
// `this` must currently hold no register; `other` is left empty.
1350 void GPRTemporary::adopt(GPRTemporary& other)
1353     ASSERT(m_gpr == InvalidGPRReg);
1354     ASSERT(other.m_jit);
1355     ASSERT(other.m_gpr != InvalidGPRReg);
1356     m_jit = other.m_jit;
1357     m_gpr = other.m_gpr;
1359     other.m_gpr = InvalidGPRReg;
// FPRTemporary: owns a floating-point register during node code generation.
// Move constructor: steals the other temporary's register and jit pointer.
1362 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1364     ASSERT(other.m_jit);
1365     ASSERT(other.m_fpr != InvalidFPRReg);
1366     m_jit = other.m_jit;
1367     m_fpr = other.m_fpr;
1369     other.m_jit = nullptr;
// Allocate any free FPR.
1372 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1374     , m_fpr(InvalidFPRReg)
1376     m_fpr = m_jit->fprAllocate();
// Reuse op1's register if its node has no further uses; otherwise allocate.
1379 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1381     , m_fpr(InvalidFPRReg)
1383     if (m_jit->canReuse(op1.node()))
1384         m_fpr = m_jit->reuse(op1.fpr());
1386         m_fpr = m_jit->fprAllocate();
// Two-operand variant: prefer reusing either operand's register; the third
// case handles both operands being the same node held in one register.
1389 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1391     , m_fpr(InvalidFPRReg)
1393     if (m_jit->canReuse(op1.node()))
1394         m_fpr = m_jit->reuse(op1.fpr());
1395     else if (m_jit->canReuse(op2.node()))
1396         m_fpr = m_jit->reuse(op2.fpr());
1397     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1398         m_fpr = m_jit->reuse(op1.fpr());
1400         m_fpr = m_jit->fprAllocate();
1403 #if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand may hold a double; reuse its FPR if possible.
1404 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1406     , m_fpr(InvalidFPRReg)
1408     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1409         m_fpr = m_jit->reuse(op1.fpr());
1411         m_fpr = m_jit->fprAllocate();
// Fused double-compare-and-branch. If the taken block is the fall-through
// successor, invert the condition and swap targets so the emitted branch
// jumps only on the (new) taken case and falls through otherwise.
1415 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1417     BasicBlock* taken = branchNode->branchData()->taken.block;
1418     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1420     if (taken == nextBlock()) {
1421         condition = MacroAssembler::invert(condition);
1422         std::swap(taken, notTaken);
1425     SpeculateDoubleOperand op1(this, node->child1());
1426     SpeculateDoubleOperand op2(this, node->child2());
1428     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-equality-and-branch: both children are speculated to be
// objects, then compared by pointer identity. Two checking regimes exist:
// when the masquerades-as-undefined watchpoint is valid we only need
// is-object checks; otherwise we must also reject MasqueradesAsUndefined
// objects, whose == semantics differ (they can equal undefined/null).
1432 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1434     BasicBlock* taken = branchNode->branchData()->taken.block;
1435     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1437     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// Invert and swap when the taken block is the fall-through successor.
1439     if (taken == nextBlock()) {
1440         condition = MacroAssembler::NotEqual;
1441         BasicBlock* tmp = taken;
1446     SpeculateCellOperand op1(this, node->child1());
1447     SpeculateCellOperand op2(this, node->child2());
1449     GPRReg op1GPR = op1.gpr();
1450     GPRReg op2GPR = op2.gpr();
1452     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
// Only emit a type check when abstract interpretation hasn't already
// proven the child to be an object.
1453         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1455                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1457         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1459                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
// Watchpoint invalid: additionally OSR-exit if either object has the
// MasqueradesAsUndefined type-info flag set.
1462         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1464                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1465                 m_jit.branchIfNotObject(op1GPR));
1467         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1469                 MacroAssembler::NonZero,
1470                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1471                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1473         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1475                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1476                 m_jit.branchIfNotObject(op2GPR));
1478         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1480                 MacroAssembler::NonZero,
1481                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1482                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Objects compare equal iff they are the same cell pointer.
1485     branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean-compare-and-branch. Constant operands are folded into
// immediate forms of branch32 where possible.
1489 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1491     BasicBlock* taken = branchNode->branchData()->taken.block;
1492     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1494     // The branch instruction will branch to the taken block.
1495     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1496     if (taken == nextBlock()) {
1497         condition = JITCompiler::invert(condition);
1498         BasicBlock* tmp = taken;
// Constant on the left, right, or neither — pick the matching branch form.
1503     if (node->child1()->isInt32Constant()) {
1504         int32_t imm = node->child1()->asInt32();
1505         SpeculateBooleanOperand op2(this, node->child2());
1506         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1507     } else if (node->child2()->isInt32Constant()) {
1508         SpeculateBooleanOperand op1(this, node->child1());
1509         int32_t imm = node->child2()->asInt32();
1510         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1512         SpeculateBooleanOperand op1(this, node->child1());
1513         SpeculateBooleanOperand op2(this, node->child2());
1514         branch32(condition, op1.gpr(), op2.gpr(), taken);
// Compiles StringSlice. Fast paths handled inline: an empty slice yields the
// shared empty string, and a one-character slice of a non-rope 8/16-bit
// string is looked up in the single-character string table. Everything else
// (rope strings, longer slices) goes to the operationStringSubstr slow path.
1520 void SpeculativeJIT::compileStringSlice(Node* node)
1522     SpeculateCellOperand string(this, node->child1());
1523     GPRTemporary startIndex(this);
1524     GPRTemporary temp(this);
1525     GPRTemporary temp2(this);
1527     GPRReg stringGPR = string.gpr();
1528     GPRReg startIndexGPR = startIndex.gpr();
1529     GPRReg tempGPR = temp.gpr();
1530     GPRReg temp2GPR = temp2.gpr();
1532     speculateString(node->child1(), stringGPR);
// temp2 = string length; clamp start (child2) and optional end (child3)
// into [0, length] via emitPopulateSliceIndex.
1535         m_jit.load32(JITCompiler::Address(stringGPR, JSString::offsetOfLength()), temp2GPR);
1537         emitPopulateSliceIndex(node->child2(), temp2GPR, startIndexGPR);
1539         emitPopulateSliceIndex(node->child3(), temp2GPR, tempGPR);
// No end argument: the end index defaults to the string length.
1541         m_jit.move(temp2GPR, tempGPR);
1544     CCallHelpers::JumpList doneCases;
1545     CCallHelpers::JumpList slowCases;
// Empty slice (start >= end): result is the shared empty string.
1547     auto nonEmptyCase = m_jit.branch32(MacroAssembler::Below, startIndexGPR, tempGPR);
1548     m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(&vm())), tempGPR);
1549     doneCases.append(m_jit.jump());
1551     nonEmptyCase.link(&m_jit);
1552     m_jit.sub32(startIndexGPR, tempGPR); // the size of the sliced string.
// Only length-1 slices are handled inline; others take the slow path.
1553     slowCases.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(1)));
// A null StringImpl means the string is a rope — slow path.
1555     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), temp2GPR);
1556     slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, temp2GPR));
1558     m_jit.loadPtr(MacroAssembler::Address(temp2GPR, StringImpl::dataOffset()), tempGPR);
1560     // Load the character into scratchReg
1561     m_jit.zeroExtend32ToPtr(startIndexGPR, startIndexGPR);
1562     auto is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(temp2GPR, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1564     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesOne, 0), tempGPR);
1565     auto cont8Bit = m_jit.jump();
1567     is16Bit.link(&m_jit);
1568     m_jit.load16(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesTwo, 0), tempGPR);
// Characters >= 0x100 are not in the single-character table; call out.
1570     auto bigCharacter = m_jit.branch32(MacroAssembler::AboveOrEqual, tempGPR, TrustedImm32(0x100));
1572     // 8 bit string values don't need the isASCII check.
1573     cont8Bit.link(&m_jit);
// Index into vm->smallStrings.singleCharacterStrings(): scale by pointer size.
1575     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), tempGPR);
1576     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), tempGPR);
1577     m_jit.loadPtr(tempGPR, tempGPR);
1579     addSlowPathGenerator(
1581             bigCharacter, this, operationSingleCharacterString, tempGPR, tempGPR));
1583     addSlowPathGenerator(
1585             slowCases, this, operationStringSubstr, tempGPR, stringGPR, startIndexGPR, tempGPR));
1587     doneCases.link(&m_jit);
1588     cellResult(tempGPR, node);
// Compiles ToLowerCase. The inline fast path scans an 8-bit, non-rope string
// and bails to operationToLowerCase as soon as it sees a non-ASCII character
// or an uppercase A-Z (i.e. anything that would actually need lowering, or
// that the inline loop cannot handle). If the scan completes, the string is
// already lowercase and is returned unchanged. The slow-path call receives
// the index reached so far, so the runtime can resume from there.
1591 void SpeculativeJIT::compileToLowerCase(Node* node)
1593     ASSERT(node->op() == ToLowerCase);
1594     SpeculateCellOperand string(this, node->child1());
1595     GPRTemporary temp(this);
1596     GPRTemporary index(this);
1597     GPRTemporary charReg(this);
1598     GPRTemporary length(this);
1600     GPRReg stringGPR = string.gpr();
1601     GPRReg tempGPR = temp.gpr();
1602     GPRReg indexGPR = index.gpr();
1603     GPRReg charGPR = charReg.gpr();
1604     GPRReg lengthGPR = length.gpr();
1606     speculateString(node->child1(), stringGPR);
1608     CCallHelpers::JumpList slowPath;
1610     m_jit.move(TrustedImmPtr(0), indexGPR);
// Rope strings (null StringImpl) go straight to the slow path.
1612     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1613     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
// 16-bit strings are not handled inline.
1615     slowPath.append(m_jit.branchTest32(
1616         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1617         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1618     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1619     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1621     auto loopStart = m_jit.label();
1622     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1623     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
// Non-ASCII byte: slow path.
1624     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
// (char - 'A') <= ('Z' - 'A') unsigned means the char is uppercase A-Z.
1625     m_jit.sub32(TrustedImm32('A'), charGPR);
1626     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1628     m_jit.add32(TrustedImm32(1), indexGPR);
1629     m_jit.jump().linkTo(loopStart, &m_jit);
1631     slowPath.link(&m_jit);
1632     silentSpillAllRegisters(lengthGPR);
1633     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1634     silentFillAllRegisters();
1635     m_jit.exceptionCheck();
1636     auto done = m_jit.jump();
1638     loopDone.link(&m_jit);
// Fast path succeeded: the original string is already lowercase.
1639     m_jit.move(stringGPR, lengthGPR);
1642     cellResult(lengthGPR, node);
// Fused Int32-compare-and-branch; mirrors compilePeepHoleBooleanBranch but
// with SpeculateInt32Operand operands.
1645 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1647     BasicBlock* taken = branchNode->branchData()->taken.block;
1648     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1650     // The branch instruction will branch to the taken block.
1651     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1652     if (taken == nextBlock()) {
1653         condition = JITCompiler::invert(condition);
1654         BasicBlock* tmp = taken;
// Fold a constant operand (left or right) into an immediate branch form.
1659     if (node->child1()->isInt32Constant()) {
1660         int32_t imm = node->child1()->asInt32();
1661         SpeculateInt32Operand op2(this, node->child2());
1662         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1663     } else if (node->child2()->isInt32Constant()) {
1664         SpeculateInt32Operand op1(this, node->child1());
1665         int32_t imm = node->child2()->asInt32();
1666         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1668         SpeculateInt32Operand op1(this, node->child1());
1669         SpeculateInt32Operand op2(this, node->child2());
1670         branch32(condition, op1.gpr(), op2.gpr(), taken);
1676 // Returns true if the compare is fused with a subsequent branch.
// Central dispatcher for compare+branch fusion: when the next node is a
// Branch consuming this compare, pick the type-specialized peephole
// implementation based on the compare's use kinds, mark both children used,
// and skip the branch node. Falls through to return-false (elided here) when
// no fusion happens.
1677 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1679     // Fused compare & branch.
1680     unsigned branchIndexInBlock = detectPeepHoleBranch();
1681     if (branchIndexInBlock != UINT_MAX) {
1682         Node* branchNode = m_block->at(branchIndexInBlock);
1684         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1685         // so can be no intervening nodes to also reference the compare.
1686         ASSERT(node->adjustedRefCount() == 1);
1688         if (node->isBinaryUseKind(Int32Use))
1689             compilePeepHoleInt32Branch(node, branchNode, condition);
// Int52 fusion is 64-bit only (guarded by USE(JSVALUE64), elided here).
1691         else if (node->isBinaryUseKind(Int52RepUse))
1692             compilePeepHoleInt52Branch(node, branchNode, condition);
1693 #endif // USE(JSVALUE64)
1694         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1695             // Use non-peephole comparison, for now.
1697         } else if (node->isBinaryUseKind(DoubleRepUse))
1698             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1699         else if (node->op() == CompareEq) {
// CompareEq admits additional specializations beyond the generic conditions.
1700             if (node->isBinaryUseKind(BooleanUse))
1701                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1702             else if (node->isBinaryUseKind(SymbolUse))
1703                 compilePeepHoleSymbolEquality(node, branchNode);
1704             else if (node->isBinaryUseKind(ObjectUse))
1705                 compilePeepHoleObjectEquality(node, branchNode);
1706             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1707                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1708             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1709                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
// If one side is statically known to be null/undefined ("Other"), the
// compare reduces to a null-or-undefined test on the other side.
1710             else if (!needsTypeCheck(node->child1(), SpecOther))
1711                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1712             else if (!needsTypeCheck(node->child2(), SpecOther))
1713                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1715                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1719             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// Children were consumed by the fused code; advance past the branch node.
1723         use(node->child1());
1724         use(node->child2());
1725         m_indexInBlock = branchIndexInBlock;
1726         m_currentNode = branchNode;
// Records in the variable-event stream that a node's value has become
// live for OSR exit purposes (no-op for nodes without a virtual register).
1732 void SpeculativeJIT::noticeOSRBirth(Node* node)
1734     if (!node->hasVirtualRegister())
1737     VirtualRegister virtualRegister = node->virtualRegister();
1738     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1740     info.noticeOSRBirth(*m_stream, node, virtualRegister);
// Compiles a MovHint: no machine code, just an OSR-exit bookkeeping event
// that maps the child's value to the hinted bytecode local.
1743 void SpeculativeJIT::compileMovHint(Node* node)
1745     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1747     Node* child = node->child1().node();
1748     noticeOSRBirth(child);
1750     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Aborts code generation for the current compilation unit: emits an abort
// trap with the given reason and clears all generation info. Note that
// m_compileOkay is set to true here so subsequent blocks can still be
// "compiled" (into unreachable aborts) without cascading failures.
1753 void SpeculativeJIT::bail(AbortReason reason)
1755     if (verboseCompilationEnabled())
1756         dataLog("Bailing compilation.\n");
1757     m_compileOkay = true;
1758     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1759     clearGenerationInfo();
// Generates machine code for m_block: emits the block head label, handles
// CFA-unreachable blocks and catch entrypoints, replays the variables-at-head
// state into the OSR event stream, then compiles each node in order while
// stepping the in-JIT abstract interpreter alongside.
1762 void SpeculativeJIT::compileCurrentBlock()
1764     ASSERT(m_compileOkay);
1769     ASSERT(m_block->isReachable);
1771     m_jit.blockHeads()[m_block->index] = m_jit.label();
1773     if (!m_block->intersectionOfCFAHasVisited) {
1774         // Don't generate code for basic blocks that are unreachable according to CFA.
1775         // But to be sure that nobody has generated a jump to this block, drop in a
1777         m_jit.abortWithReason(DFGUnreachableBasicBlock);
// Catch entrypoints are entered from the OSR machinery with a bare frame:
// reset the stack pointer and rematerialize callee-saves/tag registers.
1781     if (m_block->isCatchEntrypoint) {
1782         m_jit.addPtr(CCallHelpers::TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1783         m_jit.emitSaveCalleeSaves();
1784         m_jit.emitMaterializeTagCheckRegisters();
1785         m_jit.emitPutToCallFrameHeader(m_jit.codeBlock(), CallFrameSlot::codeBlock);
1788     m_stream->appendAndLog(VariableEvent::reset());
1790     m_jit.jitAssertHasValidCallFrame();
1791     m_jit.jitAssertTagsInPlace();
1792     m_jit.jitAssertArgumentCountSane();
1795     m_state.beginBasicBlock(m_block);
// Record the flush state of every live variable at block head so OSR exit
// can reconstruct bytecode locals.
1797     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1798         int operand = m_block->variablesAtHead.operandForIndex(i);
1799         Node* node = m_block->variablesAtHead[i];
1801             continue; // No need to record dead SetLocal's.
1803         VariableAccessData* variable = node->variableAccessData();
1805         if (!node->refCount())
1806             continue; // No need to record dead SetLocal's.
1807         format = dataFormatFor(variable->flushFormat());
1808         m_stream->appendAndLog(
1809             VariableEvent::setLocal(
1810                 VirtualRegister(operand),
1811                 variable->machineLocal(),
1815     m_origin = NodeOrigin();
// Main per-node code generation loop.
1817     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1818         m_currentNode = m_block->at(m_indexInBlock);
1820         // We may have hit a contradiction that the CFA was aware of but that the JIT
1821         // didn't cause directly.
1822         if (!m_state.isValid()) {
1823             bail(DFGBailedAtTopOfBlock);
1827         m_interpreter.startExecuting();
1828         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1829         m_jit.setForNode(m_currentNode);
1830         m_origin = m_currentNode->origin;
// Under validation, only allow exits where mayExit agrees they can happen.
1831         if (validationEnabled())
1832             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1833         m_lastGeneratedNode = m_currentNode->op();
1835         ASSERT(m_currentNode->shouldGenerate());
1837         if (verboseCompilationEnabled()) {
1839                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1840                 (int)m_currentNode->index(),
1841                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1845         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1846             m_jit.jitReleaseAssertNoException(*m_jit.vm());
1848         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1850         compile(m_currentNode);
1852         if (belongsInMinifiedGraph(m_currentNode->op()))
1853             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1855 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1856         m_jit.clearRegisterAllocationOffsets();
1859         if (!m_compileOkay) {
1860             bail(DFGBailedAtEndOfNode);
1864         // Make sure that the abstract state is rematerialized for the next node.
1865         m_interpreter.executeEffects(m_indexInBlock);
1868     // Perform the most basic verification that children have been used correctly.
1869     if (!ASSERT_DISABLED) {
1870         for (auto& info : m_generationInfo)
1871             RELEASE_ASSERT(!info.alive());
1875 // If we are making type predictions about our arguments then
1876 // we need to check that they are correct on function entry.
// Emits entry-time speculation checks for the declared flush format of each
// argument (from the root block's SetArgument nodes). FlushedJSValue needs
// no check. The first case group is the 64-bit encoding (value-register
// comparisons against tag constants in registers); the second is the 32-bit
// encoding (tag-word comparisons) — the #if USE(JSVALUE64)/#else lines are
// presumed between them, elided from this view.
1877 void SpeculativeJIT::checkArgumentTypes()
1879     ASSERT(!m_currentNode);
1880     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1882     auto& arguments = m_jit.graph().m_rootToArguments.find(m_jit.graph().block(0))->value;
1883     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1884         Node* node = arguments[i];
1886             // The argument is dead. We don't do any checks for such arguments.
1890         ASSERT(node->op() == SetArgument);
1891         ASSERT(node->shouldGenerate());
1893         VariableAccessData* variableAccessData = node->variableAccessData();
1894         FlushFormat format = variableAccessData->flushFormat();
1896         if (format == FlushedJSValue)
1899         VirtualRegister virtualRegister = variableAccessData->local();
1901         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// 64-bit checks: int32 values are >= the tagTypeNumber bias.
1905         case FlushedInt32: {
1906             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
// Boolean: xor with ValueFalse must leave only the low bit possibly set.
1909         case FlushedBoolean: {
1910             GPRTemporary temp(this);
1911             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1912             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1913             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
// Cell: no tag bits may be set.
1917             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1921             RELEASE_ASSERT_NOT_REACHED();
// 32-bit checks: compare the tag word directly.
1926         case FlushedInt32: {
1927             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1930         case FlushedBoolean: {
1931             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1935             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1939             RELEASE_ASSERT_NOT_REACHED();
1945     m_origin = NodeOrigin();
// Top-level driver: checks argument types once, then compiles every basic
// block in index order.
1948 bool SpeculativeJIT::compile()
1950     checkArgumentTypes();
1952     ASSERT(!m_currentNode);
1953     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1954         m_jit.setForBlockIndex(blockIndex);
1955         m_block = m_jit.graph().block(blockIndex);
1956         compileCurrentBlock();
// Collects the head labels of all OSR-target and catch-entrypoint blocks,
// in block-index order, for later linking in linkOSREntries().
1962 void SpeculativeJIT::createOSREntries()
1964     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1965         BasicBlock* block = m_jit.graph().block(blockIndex);
1968         if (block->isOSRTarget || block->isCatchEntrypoint) {
1969             // Currently we don't have OSR entry trampolines. We could add them
1971             m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// Resolves the entry heads gathered by createOSREntries() against the final
// LinkBuffer. Catch entrypoints additionally record the flush format of each
// argument so OSR entry from the catch machinery can box/unbox correctly.
// The iteration order must match createOSREntries(), which the trailing
// osrEntryIndex assertion verifies.
1976 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1978     unsigned osrEntryIndex = 0;
1979     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1980         BasicBlock* block = m_jit.graph().block(blockIndex);
1983         if (!block->isOSRTarget && !block->isCatchEntrypoint)
1985         if (block->isCatchEntrypoint) {
1986             auto& argumentsVector = m_jit.graph().m_rootToArguments.find(block)->value;
1987             Vector<FlushFormat> argumentFormats;
1988             argumentFormats.reserveInitialCapacity(argumentsVector.size());
1989             for (Node* setArgument : argumentsVector) {
1991                     FlushFormat flushFormat = setArgument->variableAccessData()->flushFormat();
1992                     ASSERT(flushFormat == FlushedInt32 || flushFormat == FlushedCell || flushFormat == FlushedBoolean || flushFormat == FlushedJSValue);
1993                     argumentFormats.uncheckedAppend(flushFormat);
// Null SetArgument entries (elided condition above) record DeadFlush.
1995                     argumentFormats.uncheckedAppend(DeadFlush);
1997             m_jit.noticeCatchEntrypoint(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer, WTFMove(argumentFormats));
1999             ASSERT(block->isOSRTarget);
2000             m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
2004     m_jit.jitCode()->finalizeOSREntrypoints();
2005     m_jit.jitCode()->common.finalizeCatchEntrypoints();
2007     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
2009     if (verboseCompilationEnabled()) {
2010         DumpContext dumpContext;
2011         dataLog("OSR Entries:\n");
2012         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
2013             dataLog("    ", inContext(entryData, &dumpContext), "\n");
2014         if (!dumpContext.isEmpty())
2015             dumpContext.dump(WTF::dataFile());
// Compiles CheckTraps under the polling-traps option: test the VM's
// needTrapHandling flag and call operationHandleTraps on the slow path.
// The GPRTemporary is only there to give the slow-path call a result slot.
2019 void SpeculativeJIT::compileCheckTraps(Node*)
2021     ASSERT(Options::usePollingTraps());
2022     GPRTemporary unused(this);
2023     GPRReg unusedGPR = unused.gpr();
2025     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
2026         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
2028     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
// Compiles PutByVal/PutByValAlias into a contiguous-double array. The value
// child is checked to be a "full real number" (DoubleNotEqualOrUnordered
// against itself rejects NaN, preserving the array's double purity).
// PutByValAlias skips all bounds checks; otherwise in-bounds stores are
// checked and out-of-bounds stores either OSR-exit or grow the public
// length and fall back to the runtime beyond vector length.
2031 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
2033     Edge child3 = m_jit.graph().varArgChild(node, 2);
2034     Edge child4 = m_jit.graph().varArgChild(node, 3);
2036     ArrayMode arrayMode = node->arrayMode();
2038     GPRReg baseReg = base.gpr();
2039     GPRReg propertyReg = property.gpr();
2041     SpeculateDoubleOperand value(this, child3);
2043     FPRReg valueReg = value.fpr();
// NaN check: valueReg != valueReg (or unordered) is true only for NaN.
2046         JSValueRegs(), child3, SpecFullRealNumber,
2048             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
2053     StorageOperand storage(this, child4);
2054     GPRReg storageReg = storage.gpr();
2056     if (node->op() == PutByValAlias) {
2057         // Store the value to the array.
2058         GPRReg propertyReg = property.gpr();
2059         FPRReg valueReg = value.fpr();
2060         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2062         noResult(m_currentNode);
2066     GPRTemporary temporary;
2067     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2069     MacroAssembler::Jump slowCase;
2071     if (arrayMode.isInBounds()) {
// In-bounds mode: any index >= publicLength is a speculation failure.
2073             OutOfBounds, JSValueRegs(), 0,
2074             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2076         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2078         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2080         if (!arrayMode.isOutOfBounds())
2081             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Store within vector length but beyond public length: bump publicLength.
2083         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2084         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2086         inBounds.link(&m_jit);
2089     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2096     if (arrayMode.isOutOfBounds()) {
// Beyond vector length: defer to the runtime, respecting strictness.
2097         addSlowPathGenerator(
2100                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2101                 NoResult, baseReg, propertyReg, valueReg));
2104     noResult(m_currentNode, UseChildrenCalledExplicitly);
// Emits code for GetCharCodeAt: reads the UTF-16 code unit at child2's index from
// the string in child1 (storage in child3) and produces it as an int32 result.
// NOTE(review): some original lines (braces) are elided in this excerpt.
2107 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2109 SpeculateCellOperand string(this, node->child1());
2110 SpeculateStrictInt32Operand index(this, node->child2());
2111 StorageOperand storage(this, node->child3());
2113 GPRReg stringReg = string.gpr();
2114 GPRReg indexReg = index.gpr();
2115 GPRReg storageReg = storage.gpr();
2117 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2119 // unsigned comparison so we can filter out negative indices and indices that are too large
2120 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2122 GPRTemporary scratch(this);
2123 GPRReg scratchReg = scratch.gpr();
// Load the StringImpl pointer so we can test its 8-bit/16-bit flag.
2125 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2127 // Load the character into scratchReg
2128 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
// 8-bit (Latin-1) path: one byte per character.
2130 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2131 JITCompiler::Jump cont8Bit = m_jit.jump();
2133 is16Bit.link(&m_jit);
// 16-bit (UTF-16) path: two bytes per character.
2135 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2137 cont8Bit.link(&m_jit);
2139 int32Result(scratchReg, m_currentNode);
// Emits code for GetByVal on a JSString: loads the single-character string at the
// given index from the small-strings table, with slow paths for non-ASCII
// characters and (when the array mode allows it) out-of-bounds access.
// NOTE(review): some original lines (braces, #else/#endif arms) are elided in this
// excerpt, so adjacent JSVALUE64/JSVALUE32_64 statements may belong to different
// preprocessor branches.
2142 void SpeculativeJIT::compileGetByValOnString(Node* node)
2144 SpeculateCellOperand base(this, node->child1());
2145 SpeculateStrictInt32Operand property(this, node->child2());
2146 StorageOperand storage(this, node->child3());
2147 GPRReg baseReg = base.gpr();
2148 GPRReg propertyReg = property.gpr();
2149 GPRReg storageReg = storage.gpr();
2151 GPRTemporary scratch(this);
2152 GPRReg scratchReg = scratch.gpr();
2153 #if USE(JSVALUE32_64)
// On 32-bit, an out-of-bounds-capable access produces a full JSValue, so a tag
// register is allocated only in that case.
2154 GPRTemporary resultTag;
2155 GPRReg resultTagReg = InvalidGPRReg;
2156 if (node->arrayMode().isOutOfBounds()) {
2157 GPRTemporary realResultTag(this);
2158 resultTag.adopt(realResultTag);
2159 resultTagReg = resultTag.gpr();
2163 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2165 // unsigned comparison so we can filter out negative indices and indices that are too large
2166 JITCompiler::Jump outOfBounds = m_jit.branch32(
2167 MacroAssembler::AboveOrEqual, propertyReg,
2168 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
// In-bounds-only modes treat out-of-bounds as a speculation failure (OSR exit).
2169 if (node->arrayMode().isInBounds())
2170 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2172 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2174 // Load the character into scratchReg
2175 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2177 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2178 JITCompiler::Jump cont8Bit = m_jit.jump();
2180 is16Bit.link(&m_jit);
2182 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Characters >= 0x100 are not in the single-character-strings table; take a slow path.
2184 JITCompiler::Jump bigCharacter =
2185 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2187 // 8 bit string values don't need the isASCII check.
2188 cont8Bit.link(&m_jit);
// Index into VM::smallStrings.singleCharacterStrings() (an array of JSString*):
// shift by pointer-size log2, add the table base, then load the JSString*.
2190 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2191 m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2192 m_jit.loadPtr(scratchReg, scratchReg);
2194 addSlowPathGenerator(
2196 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2198 if (node->arrayMode().isOutOfBounds()) {
2199 #if USE(JSVALUE32_64)
2200 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2203 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2204 bool prototypeChainIsSane = false;
// If String.prototype's chain has no indexed accessors, an OOB read trivially
// yields undefined; watch the relevant structures so this stays valid.
2205 if (globalObject->stringPrototypeChainIsSane()) {
2206 // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2207 // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2208 // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2209 // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2210 // indexed properties either.
2211 // https://bugs.webkit.org/show_bug.cgi?id=144668
2212 m_jit.graph().registerAndWatchStructureTransition(globalObject->stringPrototype()->structure());
2213 m_jit.graph().registerAndWatchStructureTransition(globalObject->objectPrototype()->structure());
// Re-check after registering watchpoints, in case the chain changed meanwhile.
2214 prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2216 if (prototypeChainIsSane) {
2218 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2219 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2221 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2222 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2223 baseReg, propertyReg));
// Non-sane chain: fall back to the generic string get-by-val operation.
2227 addSlowPathGenerator(
2229 outOfBounds, this, operationGetByValStringInt,
2230 scratchReg, baseReg, propertyReg));
2232 addSlowPathGenerator(
2234 outOfBounds, this, operationGetByValStringInt,
2235 JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
// OOB-capable mode yields a JSValue; in-bounds mode yields a cell (the JSString).
2240 jsValueResult(scratchReg, m_currentNode);
2242 jsValueResult(resultTagReg, scratchReg, m_currentNode);
2245 cellResult(scratchReg, m_currentNode);
// Emits code for StringFromCharCode. Untyped input goes straight to the runtime
// operation; int32 input uses the VM's single-character-strings table with a slow
// path for codes >= 0xff or missing table entries.
// NOTE(review): some original lines (braces, #else/#endif arms) are elided in this excerpt.
2248 void SpeculativeJIT::compileFromCharCode(Node* node)
2250 Edge& child = node->child1();
2251 if (child.useKind() == UntypedUse) {
2252 JSValueOperand opr(this, child);
2253 JSValueRegs oprRegs = opr.jsValueRegs();
// 64-bit result registers vs. the 32-bit tag/payload pair below — these belong
// to different USE(JSVALUE64)/USE(JSVALUE32_64) preprocessor branches.
2255 GPRTemporary result(this);
2256 JSValueRegs resultRegs = JSValueRegs(result.gpr());
2258 GPRTemporary resultTag(this);
2259 GPRTemporary resultPayload(this);
2260 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2263 callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2264 m_jit.exceptionCheck();
2266 jsValueResult(resultRegs, node);
2270 SpeculateStrictInt32Operand property(this, child);
2271 GPRReg propertyReg = property.gpr();
2272 GPRTemporary smallStrings(this);
2273 GPRTemporary scratch(this);
2274 GPRReg scratchReg = scratch.gpr();
2275 GPRReg smallStringsReg = smallStrings.gpr();
2277 JITCompiler::JumpList slowCases;
// Codes >= 0xff fall off the fast path (table covers small character codes only).
2278 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2279 m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2280 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null table entry means the string hasn't been materialized yet — slow path.
2282 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2283 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2284 cellResult(scratchReg, m_currentNode);
// Classifies how the operand for a ToInt32 was generated: already an integer,
// a JSValue that needs unboxing, or an unusable format (in which case speculative
// execution is terminated).
// NOTE(review): some original lines (braces, a couple of case labels) are elided
// in this excerpt.
2287 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2289 VirtualRegister virtualRegister = node->virtualRegister();
2290 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2292 switch (info.registerFormat()) {
2293 case DataFormatStorage:
2294 RELEASE_ASSERT_NOT_REACHED();
// Booleans/cells can't be converted on this path: bail out of speculation.
2296 case DataFormatBoolean:
2297 case DataFormatCell:
2298 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2299 return GeneratedOperandTypeUnknown;
2301 case DataFormatNone:
2302 case DataFormatJSCell:
2304 case DataFormatJSBoolean:
2305 case DataFormatJSDouble:
2306 return GeneratedOperandJSValue;
2308 case DataFormatJSInt32:
2309 case DataFormatInt32:
2310 return GeneratedOperandInteger;
2313 RELEASE_ASSERT_NOT_REACHED();
2314 return GeneratedOperandTypeUnknown;
// Emits code for ValueToInt32, dispatching on the child's use kind: Int52 and
// double operands are converted directly; JSValue operands are unboxed inline
// with a runtime call (operationToInt32) for the general double case.
// NOTE(review): many original lines (braces, case labels, #if/#else/#endif arms)
// are elided in this excerpt; the two large JSValue sections are the JSVALUE64
// and JSVALUE32_64 variants respectively.
2318 void SpeculativeJIT::compileValueToInt32(Node* node)
2320 switch (node->child1().useKind()) {
// Int52 case (64-bit only): truncate by zero-extending the low 32 bits.
2323 SpeculateStrictInt52Operand op1(this, node->child1());
2324 GPRTemporary result(this, Reuse, op1);
2325 GPRReg op1GPR = op1.gpr();
2326 GPRReg resultGPR = result.gpr();
2327 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2328 int32Result(resultGPR, node, DataFormatInt32);
2331 #endif // USE(JSVALUE64)
2333 case DoubleRepUse: {
2334 GPRTemporary result(this);
2335 SpeculateDoubleOperand op1(this, node->child1());
2336 FPRReg fpr = op1.fpr();
2337 GPRReg gpr = result.gpr();
// Fast path: hardware truncation; fall back to the runtime ToInt32 on failure.
2338 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2340 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2341 hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2343 int32Result(gpr, node);
2349 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2350 case GeneratedOperandInteger: {
// Already an int32: just move it.
2351 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2352 GPRTemporary result(this, Reuse, op1);
2353 m_jit.move(op1.gpr(), result.gpr());
2354 int32Result(result.gpr(), node, op1.format());
2357 case GeneratedOperandJSValue: {
2358 GPRTemporary result(this);
// ---- JSVALUE64 variant ----
2360 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2362 GPRReg gpr = op1.gpr();
2363 GPRReg resultGpr = result.gpr();
2364 FPRTemporary tempFpr(this);
2365 FPRReg fpr = tempFpr.fpr();
// Values >= tagTypeNumberRegister are boxed int32s under NaN boxing.
2367 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2368 JITCompiler::JumpList converted;
2370 if (node->child1().useKind() == NumberUse) {
2372 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2374 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2376 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2379 JSValueRegs(gpr), node->child1(), ~SpecCellCheck, m_jit.branchIfCell(JSValueRegs(gpr)));
2381 // It's not a cell: so true turns into 1 and all else turns into 0.
2382 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2383 converted.append(m_jit.jump());
2385 isNumber.link(&m_jit);
2388 // First, if we get here we have a double encoded as a JSValue
2389 unboxDouble(gpr, resultGpr, fpr);
2391 silentSpillAllRegisters(resultGpr);
2392 callOperation(operationToInt32, resultGpr, fpr);
2393 silentFillAllRegisters();
2395 converted.append(m_jit.jump());
2397 isInteger.link(&m_jit);
2398 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2400 converted.link(&m_jit);
// ---- JSVALUE32_64 variant ----
2402 Node* childNode = node->child1().node();
2403 VirtualRegister virtualRegister = childNode->virtualRegister();
2404 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2406 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2408 GPRReg payloadGPR = op1.payloadGPR();
2409 GPRReg resultGpr = result.gpr();
2411 JITCompiler::JumpList converted;
// If the operand is already known to be a boxed int32, the payload is the answer.
2413 if (info.registerFormat() == DataFormatJSInt32)
2414 m_jit.move(payloadGPR, resultGpr);
2416 GPRReg tagGPR = op1.tagGPR();
2417 FPRTemporary tempFpr(this);
2418 FPRReg fpr = tempFpr.fpr();
2419 FPRTemporary scratch(this);
2421 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2423 if (node->child1().useKind() == NumberUse) {
2425 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2427 MacroAssembler::AboveOrEqual, tagGPR,
2428 TrustedImm32(JSValue::LowestTag)));
// Tags below LowestTag encode doubles on 32-bit.
2430 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2433 op1.jsValueRegs(), node->child1(), ~SpecCell,
2434 m_jit.branchIfCell(op1.jsValueRegs()));
2436 // It's not a cell: so true turns into 1 and all else turns into 0.
2437 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2438 m_jit.move(TrustedImm32(0), resultGpr);
2439 converted.append(m_jit.jump());
2441 isBoolean.link(&m_jit);
// Boolean payload is already 0 or 1.
2442 m_jit.move(payloadGPR, resultGpr);
2443 converted.append(m_jit.jump());
2445 isNumber.link(&m_jit);
2448 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2450 silentSpillAllRegisters(resultGpr);
2451 callOperation(operationToInt32, resultGpr, fpr);
2452 silentFillAllRegisters();
2454 converted.append(m_jit.jump());
2456 isInteger.link(&m_jit);
2457 m_jit.move(payloadGPR, resultGpr);
2459 converted.link(&m_jit);
2462 int32Result(resultGpr, node);
2465 case GeneratedOperandTypeUnknown:
2466 RELEASE_ASSERT(!m_compileOkay);
2469 RELEASE_ASSERT_NOT_REACHED();
2474 ASSERT(!m_compileOkay);
// Emits code for UInt32ToNumber. If overflow is allowed, the value is widened to
// Int52 (when available) or converted to a double with a +2^32 fixup for negative
// bit patterns; otherwise a negative result is a speculation failure.
// NOTE(review): some original lines (braces) are elided in this excerpt.
2479 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2481 if (doesOverflow(node->arithMode())) {
2482 if (enableInt52()) {
// Zero-extension reinterprets the int32 bits as an unsigned value in Int52 range.
2483 SpeculateInt32Operand op1(this, node->child1());
2484 GPRTemporary result(this, Reuse, op1);
2485 m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2486 strictInt52Result(result.gpr(), node);
2489 SpeculateInt32Operand op1(this, node->child1());
2490 FPRTemporary result(this);
2492 GPRReg inputGPR = op1.gpr();
2493 FPRReg outputFPR = result.fpr();
2495 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
// A negative int32 bit pattern represents value + 2^32 when read as unsigned.
2497 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2498 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2499 positive.link(&m_jit);
2501 doubleResult(outputFPR, node);
2505 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2507 SpeculateInt32Operand op1(this, node->child1());
2508 GPRTemporary result(this);
2510 m_jit.move(op1.gpr(), result.gpr());
// Overflow-checked mode: a sign bit means the unsigned value doesn't fit int32.
2512 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2514 int32Result(result.gpr(), node, op1.format());
// Emits code for DoubleAsInt32: converts a double to int32, OSR-exiting if the
// conversion is lossy (and, per arith mode, on negative zero).
2517 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2519 SpeculateDoubleOperand op1(this, node->child1());
2520 FPRTemporary scratch(this);
2521 GPRTemporary result(this);
2523 FPRReg valueFPR = op1.fpr();
2524 FPRReg scratchFPR = scratch.fpr();
2525 GPRReg resultGPR = result.gpr();
2527 JITCompiler::JumpList failureCases;
2528 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2529 m_jit.branchConvertDoubleToInt32(
2530 valueFPR, resultGPR, failureCases, scratchFPR,
2531 shouldCheckNegativeZero(node->arithMode()));
// Any failed conversion (non-integral, out of range, or -0 if checked) exits.
2532 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2534 int32Result(resultGPR, node);
// Emits code for DoubleRep: produces a double representation of the child.
// RealNumberUse unboxes a double or converts a boxed int32; Number/NotCell use
// kinds additionally map undefined->NaN, null->0, booleans->0/1 on the NotCell
// path; Int52 converts directly.
// NOTE(review): many original lines (braces, case labels, #if/#else/#endif arms)
// are elided in this excerpt; the two large JSValue sections are the JSVALUE64
// and JSVALUE32_64 variants respectively.
2537 void SpeculativeJIT::compileDoubleRep(Node* node)
2539 switch (node->child1().useKind()) {
2540 case RealNumberUse: {
2541 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2542 FPRTemporary result(this);
2544 JSValueRegs op1Regs = op1.jsValueRegs();
2545 FPRReg resultFPR = result.fpr();
// 64-bit unbox (GPR temp) vs. 32-bit unbox (FPR temp) — different #if branches.
2548 GPRTemporary temp(this);
2549 GPRReg tempGPR = temp.gpr();
2550 m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2552 FPRTemporary temp(this);
2553 FPRReg tempFPR = temp.fpr();
2554 unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
// A self-equal result means the unbox produced a real double (not NaN garbage).
2557 JITCompiler::Jump done = m_jit.branchDouble(
2558 JITCompiler::DoubleEqual, resultFPR, resultFPR);
// Otherwise the value must be a boxed int32; anything else fails speculation.
2561 op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2562 m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2566 doubleResult(resultFPR, node);
2572 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2574 SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2575 if (isInt32Speculation(possibleTypes)) {
// Proven int32: a straight conversion suffices.
2576 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2577 FPRTemporary result(this);
2578 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2579 doubleResult(result.fpr(), node);
2583 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2584 FPRTemporary result(this);
// ---- JSVALUE64 variant ----
2587 GPRTemporary temp(this);
2589 GPRReg op1GPR = op1.gpr();
2590 GPRReg tempGPR = temp.gpr();
2591 FPRReg resultFPR = result.fpr();
2592 JITCompiler::JumpList done;
2594 JITCompiler::Jump isInteger = m_jit.branch64(
2595 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2597 if (node->child1().useKind() == NotCellUse) {
2598 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2599 JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
// null -> 0.0
2601 static const double zero = 0;
2602 m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2604 JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2605 done.append(isNull);
2607 DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCellCheck,
2608 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
// false -> 0.0 (resultFPR already holds zero), true -> 1.0
2610 JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2611 static const double one = 1;
2612 m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2613 done.append(m_jit.jump());
2614 done.append(isFalse);
// undefined -> NaN
2616 isUndefined.link(&m_jit);
2617 static const double NaN = PNaN;
2618 m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2619 done.append(m_jit.jump());
2621 isNumber.link(&m_jit);
2622 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2624 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2625 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2628 unboxDouble(op1GPR, tempGPR, resultFPR);
2629 done.append(m_jit.jump());
2631 isInteger.link(&m_jit);
2632 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2634 #else // USE(JSVALUE64) -> this is the 32_64 case
2635 FPRTemporary temp(this);
2637 GPRReg op1TagGPR = op1.tagGPR();
2638 GPRReg op1PayloadGPR = op1.payloadGPR();
2639 FPRReg tempFPR = temp.fpr();
2640 FPRReg resultFPR = result.fpr();
2641 JITCompiler::JumpList done;
2643 JITCompiler::Jump isInteger = m_jit.branch32(
2644 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2646 if (node->child1().useKind() == NotCellUse) {
2647 JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2648 JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2650 static const double zero = 0;
2651 m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2653 JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2654 done.append(isNull);
2656 DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2658 JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2659 static const double one = 1;
2660 m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2661 done.append(m_jit.jump());
2662 done.append(isFalse);
2664 isUndefined.link(&m_jit);
2665 static const double NaN = PNaN;
2666 m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2667 done.append(m_jit.jump());
2669 isNumber.link(&m_jit);
2670 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2672 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2673 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2676 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2677 done.append(m_jit.jump());
2679 isInteger.link(&m_jit);
2680 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2682 #endif // USE(JSVALUE64)
2684 doubleResult(resultFPR, node);
// Int52 case (64-bit only): direct int64 -> double conversion.
2690 SpeculateStrictInt52Operand value(this, node->child1());
2691 FPRTemporary result(this);
2693 GPRReg valueGPR = value.gpr();
2694 FPRReg resultFPR = result.fpr();
2696 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2698 doubleResult(resultFPR, node);
2701 #endif // USE(JSVALUE64)
2704 RELEASE_ASSERT_NOT_REACHED();
// Emits code for ValueRep: boxes a DoubleRep or Int52Rep value back into a JSValue.
// NOTE(review): some original lines (braces, case labels) are elided in this excerpt.
2709 void SpeculativeJIT::compileValueRep(Node* node)
2711 switch (node->child1().useKind()) {
2712 case DoubleRepUse: {
2713 SpeculateDoubleOperand value(this, node->child1());
2714 JSValueRegsTemporary result(this);
2716 FPRReg valueFPR = value.fpr();
2717 JSValueRegs resultRegs = result.regs();
2719 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2720 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2721 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2722 // local was purified.
// purifyNaN canonicalizes any NaN bit pattern so boxing can't produce a corrupt tag.
2723 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2724 m_jit.purifyNaN(valueFPR);
2726 boxDouble(valueFPR, resultRegs);
2728 jsValueResult(resultRegs, node);
// Int52 case (64-bit only).
2734 SpeculateStrictInt52Operand value(this, node->child1());
2735 GPRTemporary result(this);
2737 GPRReg valueGPR = value.gpr();
2738 GPRReg resultGPR = result.gpr();
2740 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2742 jsValueResult(resultGPR, node);
2745 #endif // USE(JSVALUE64)
2748 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the 0..255 byte range (used for Uint8ClampedArray stores).
// NOTE(review): the body of this helper is elided from this excerpt.
2753 static double clampDoubleToByte(double d)
// Emits code that clamps the int32 in `result` to 0..255 in place:
// values <= 0xff pass through, negatives become 0, values > 0xff become 255.
// NOTE(review): the tooBig/clamped link lines are elided in this excerpt.
2763 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2765 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
// Unsigned BelowOrEqual failed, so the value is either negative or > 0xff.
2766 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2767 jit.xorPtr(result, result);
2768 MacroAssembler::Jump clamped = jit.jump();
2770 jit.move(JITCompiler::TrustedImm32(255), result);
2772 inBounds.link(&jit);
// Emits code that clamps the double in `source` to a 0..255 integer in `result`,
// rounding by adding 0.5 then truncating. NaN and values <= 0 clamp to 0;
// values > 255 clamp to 255.
// NOTE(review): the tooBig link line is elided in this excerpt.
2775 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2777 // Unordered compare so we pick up NaN
2778 static const double zero = 0;
2779 static const double byteMax = 255;
2780 static const double half = 0.5;
2781 jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2782 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2783 jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2784 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2786 jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2787 // FIXME: This should probably just use a floating point round!
2788 // https://bugs.webkit.org/show_bug.cgi?id=72054
2789 jit.addDouble(source, scratch);
2790 jit.truncateDoubleToInt32(scratch, result);
2791 MacroAssembler::Jump truncatedInt = jit.jump();
2793 tooSmall.link(&jit);
2794 jit.xorPtr(result, result);
2795 MacroAssembler::Jump zeroed = jit.jump();
2798 jit.move(JITCompiler::TrustedImm32(255), result);
2800 truncatedInt.link(&jit);
// Returns the jump taken when a typed-array index is out of bounds, or an unset
// Jump when the access is statically known to be in bounds (PutByValAlias, or a
// foldable view with a constant in-range index).
// NOTE(review): some original lines (braces) are elided in this excerpt.
2805 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2807 if (node->op() == PutByValAlias)
2808 return JITCompiler::Jump();
// If abstract interpretation pinned the base to a concrete view, compare against
// its known length instead of loading the length at runtime.
2809 JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2810 m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2812 uint32_t length = view->length();
2813 Node* indexNode = m_jit.graph().child(node, 1).node();
2814 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2815 return JITCompiler::Jump();
2816 return m_jit.branch32(
2817 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
// Generic path: compare against the view's length field at runtime.
2819 return m_jit.branch32(
2820 MacroAssembler::AboveOrEqual, indexGPR,
2821 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a typed-array bounds check that OSR-exits on out-of-bounds access.
// No check is emitted when jumpForTypedArrayOutOfBounds proves it unnecessary.
2824 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2826 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2829 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// On the out-of-bounds path of a typed-array store, checks whether the view has
// been neutered (wasteful mode with a null vector) and exits if so; returns the
// jump that rejoins the in-bounds path, or an unset Jump if there was no
// out-of-bounds jump to handle.
2832 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2834 JITCompiler::Jump done;
2835 if (outOfBounds.isSet()) {
2836 done = m_jit.jump();
// In-bounds-only modes treat out-of-bounds as a speculation failure outright.
2837 if (node->arrayMode().isInBounds())
2838 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2840 outOfBounds.link(&m_jit);
// Only WastefulTypedArray views can have been neutered out from under us.
2842 JITCompiler::Jump notWasteful = m_jit.branch32(
2843 MacroAssembler::NotEqual,
2844 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2845 TrustedImm32(WastefulTypedArray));
2847 JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2848 MacroAssembler::Zero,
2849 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2850 speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2851 notWasteful.link(&m_jit);
// Loads one element from an integer typed array into resultReg, sign- or
// zero-extending to 32 bits according to the element size and signedness.
// NOTE(review): case labels and the default branch are elided in this excerpt;
// adjacent load pairs are the signed/unsigned variants per element size.
2857 void SpeculativeJIT::loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
2859 switch (elementSize(type)) {
2862 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2864 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2868 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2870 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2873 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Produces the result of an integer typed-array load. Small or signed elements
// fit in int32 directly; unsigned 32-bit values may need Int52 or a double with
// a +2^32 fixup when the sign bit is set.
// NOTE(review): some original lines (braces) are elided in this excerpt.
2880 void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
2882 if (elementSize(type) < 4 || isSigned(type)) {
2883 int32Result(resultReg, node);
2887 ASSERT(elementSize(type) == 4 && !isSigned(type));
// Speculate int32: fail if the unsigned value has its top bit set.
2888 if (node->shouldSpeculateInt32() && canSpeculate) {
2889 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2890 int32Result(resultReg, node);
// Int52 comfortably holds any uint32.
2895 if (node->shouldSpeculateAnyInt()) {
2896 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2897 strictInt52Result(resultReg, node);
// Fall back to a double; negative bit patterns represent value + 2^32.
2902 FPRTemporary fresult(this);
2903 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2904 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2905 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2906 positive.link(&m_jit);
2907 doubleResult(fresult.fpr(), node);
// Emits code for GetByVal on an integer typed array: bounds check, element load,
// then result materialization via setIntTypedArrayLoadResult.
2910 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2912 ASSERT(isInt(type));
2914 SpeculateCellOperand base(this, node->child1());
2915 SpeculateStrictInt32Operand property(this, node->child2());
2916 StorageOperand storage(this, node->child3());
2918 GPRReg baseReg = base.gpr();
2919 GPRReg propertyReg = property.gpr();
2920 GPRReg storageReg = storage.gpr();
2922 GPRTemporary result(this);
2923 GPRReg resultReg = result.gpr();
2925 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2927 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2928 loadFromIntTypedArray(storageReg, propertyReg, resultReg, type);
// GetByVal (unlike atomics) may speculate int32 on uint32 loads.
2929 bool canSpeculate = true;
2930 setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
// Materializes the value operand for an integer typed-array store into `value`,
// clamping to 0..255 when `isClamped` (Uint8ClampedArray). Handles constants,
// Int32Use, Int52 (64-bit), and DoubleRep inputs; doubles that fail inline
// truncation are routed to `slowPathCases`.
// NOTE(review): many original lines (braces, case labels, #if/#else/#endif arms)
// are elided in this excerpt; the return value's meaning is not visible here —
// TODO confirm against the full source.
2933 bool SpeculativeJIT::getIntTypedArrayStoreOperand(
2934 GPRTemporary& value,
2936 #if USE(JSVALUE32_64)
2937 GPRTemporary& propertyTag,
2938 GPRTemporary& valueTag,
2940 Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
// A constant is usable inline only if its speculated type fits the edge's filter.
2942 bool isAppropriateConstant = false;
2943 if (valueUse->isConstant()) {
2944 JSValue jsValue = valueUse->asJSValue();
2945 SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2946 SpeculatedType actualType = speculationFromValue(jsValue);
2947 isAppropriateConstant = (expectedType | actualType) == expectedType;
2950 if (isAppropriateConstant) {
2951 JSValue jsValue = valueUse->asJSValue();
2952 if (!jsValue.isNumber()) {
2953 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
// Fold the clamp at compile time for constant inputs.
2956 double d = jsValue.asNumber();
2958 d = clampDoubleToByte(d);
2959 GPRTemporary scratch(this);
2960 GPRReg scratchReg = scratch.gpr();
2961 m_jit.move(Imm32(toInt32(d)), scratchReg);
2962 value.adopt(scratch);
2964 switch (valueUse.useKind()) {
// Int32 input: copy, optionally clamping to byte range.
2966 SpeculateInt32Operand valueOp(this, valueUse);
2967 GPRTemporary scratch(this);
2968 GPRReg scratchReg = scratch.gpr();
2969 m_jit.move(valueOp.gpr(), scratchReg);
2971 compileClampIntegerToByte(m_jit, scratchReg);
2972 value.adopt(scratch);
// Int52 input (64-bit only): inline clamp to 0..255 via 64-bit compares.
2978 SpeculateStrictInt52Operand valueOp(this, valueUse);
2979 GPRTemporary scratch(this);
2980 GPRReg scratchReg = scratch.gpr();
2981 m_jit.move(valueOp.gpr(), scratchReg);
2983 MacroAssembler::Jump inBounds = m_jit.branch64(
2984 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2985 MacroAssembler::Jump tooBig = m_jit.branch64(
2986 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2987 m_jit.move(TrustedImm32(0), scratchReg);
2988 MacroAssembler::Jump clamped = m_jit.jump();
2989 tooBig.link(&m_jit);
2990 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2991 clamped.link(&m_jit);
2992 inBounds.link(&m_jit);
2994 value.adopt(scratch);
2997 #endif // USE(JSVALUE64)
2999 case DoubleRepUse: {
3000 RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
// Clamped double store: round-and-clamp to a byte entirely inline.
3002 SpeculateDoubleOperand valueOp(this, valueUse);
3003 GPRTemporary result(this);
3004 FPRTemporary floatScratch(this);
3005 FPRReg fpr = valueOp.fpr();
3006 GPRReg gpr = result.gpr();
3007 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
3008 value.adopt(result);
3010 #if USE(JSVALUE32_64)
// Unclamped double store on 32-bit needs tag registers for the slow-path JSValues.
3011 GPRTemporary realPropertyTag(this);
3012 propertyTag.adopt(realPropertyTag);
3013 GPRReg propertyTagGPR = propertyTag.gpr();
3015 GPRTemporary realValueTag(this);
3016 valueTag.adopt(realValueTag);
3017 GPRReg valueTagGPR = valueTag.gpr();
3019 SpeculateDoubleOperand valueOp(this, valueUse);
3020 GPRTemporary result(this);
3021 FPRReg fpr = valueOp.fpr();
3022 GPRReg gpr = result.gpr();
// NaN stores 0; otherwise try an inline truncation, falling back to the slow path.
3023 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
3024 m_jit.xorPtr(gpr, gpr);
3025 MacroAssembler::JumpList fixed(m_jit.jump());
3026 notNaN.link(&m_jit);
3028 fixed.append(m_jit.branchTruncateDoubleToInt32(
3029 fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
// Slow path: box the property and value as JSValues for the C call.
3032 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
3033 boxDouble(fpr, gpr);
3035 UNUSED_PARAM(property);
3036 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
3037 boxDouble(fpr, valueTagGPR, gpr);
3039 slowPathCases.append(m_jit.jump());
3042 value.adopt(result);
3048 RELEASE_ASSERT_NOT_REACHED();
// Emits code for PutByVal into an integer typed array: materializes the store
// operand (with clamping for Uint8Clamped), bounds-checks, performs the sized
// store, handles neutered-view detection on the OOB path, and registers slow
// paths for double values that could not be truncated inline.
// NOTE(review): some original lines (braces, #else/#endif arms, early-return
// handling of getIntTypedArrayStoreOperand's result) are elided in this excerpt.
3055 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3057 ASSERT(isInt(type));
3059 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3060 GPRReg storageReg = storage.gpr();
3062 Edge valueUse = m_jit.graph().varArgChild(node, 2);
3065 #if USE(JSVALUE32_64)
3066 GPRTemporary propertyTag;
3067 GPRTemporary valueTag;
3070 JITCompiler::JumpList slowPathCases;
3072 bool result = getIntTypedArrayStoreOperand(
3074 #if USE(JSVALUE32_64)
3075 propertyTag, valueTag,
3077 valueUse, slowPathCases, isClamped(type));
3083 GPRReg valueGPR = value.gpr();
3084 #if USE(JSVALUE32_64)
3085 GPRReg propertyTagGPR = propertyTag.gpr();
3086 GPRReg valueTagGPR = valueTag.gpr();
// The store operand must not alias the base/index/storage registers.
3089 ASSERT_UNUSED(valueGPR, valueGPR != property);
3090 ASSERT(valueGPR != base);
3091 ASSERT(valueGPR != storageReg);
3092 JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
// Store with the width matching the element size.
3094 switch (elementSize(type)) {
3096 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
3099 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
3102 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3108 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
// Slow paths for doubles that failed inline truncation: call the generic
// put-by-val operations (64-bit passes raw GPRs; 32-bit passes tag/payload pairs).
3112 if (!slowPathCases.empty()) {
3114 if (node->op() == PutByValDirect) {
3115 addSlowPathGenerator(slowPathCall(
3116 slowPathCases, this,
3117 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
3118 NoResult, base, property, valueGPR));
3120 addSlowPathGenerator(slowPathCall(
3121 slowPathCases, this,
3122 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
3123 NoResult, base, property, valueGPR));
3125 #else // not USE(JSVALUE64)
3126 if (node->op() == PutByValDirect) {
3127 addSlowPathGenerator(slowPathCall(
3128 slowPathCases, this,
3129 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3130 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3132 addSlowPathGenerator(slowPathCall(
3133 slowPathCases, this,
3134 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3135 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
// Emit a GetByVal load from a Float32Array or Float64Array. The result is
// always produced as a double (Float32 loads are widened), via doubleResult().
3143 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3145 ASSERT(isFloat(type));
3147 SpeculateCellOperand base(this, node->child1());
3148 SpeculateStrictInt32Operand property(this, node->child2());
3149 StorageOperand storage(this, node->child3());
3151 GPRReg baseReg = base.gpr();
3152 GPRReg propertyReg = property.gpr();
3153 GPRReg storageReg = storage.gpr();
    // The array-mode checks (structure/type) must already have been emitted;
    // this function only does the bounds check and the load.
3155 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3157 FPRTemporary result(this);
3158 FPRReg resultReg = result.fpr();
3159 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3160 switch (elementSize(type)) {
    // Float32: load 4 bytes, then widen to double for the result.
3162 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3163 m_jit.convertFloatToDouble(resultReg, resultReg);
    // Float64: load 8 bytes directly.
3166 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3170 RELEASE_ASSERT_NOT_REACHED();
3173 doubleResult(resultReg, node);
// Emit a PutByVal store into a Float32Array or Float64Array. The incoming
// value is a speculated double; Float32 stores narrow it through a scratch
// FPR. Out-of-bounds stores are no-ops, except that a neutered (detached)
// array is still detected via jumpForTypedArrayIsNeuteredIfOutOfBounds.
3176 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3178 ASSERT(isFloat(type));
    // varArgChild(node, 3) is the typed array's backing storage vector.
3180 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3181 GPRReg storageReg = storage.gpr();
3183 Edge baseUse = m_jit.graph().varArgChild(node, 0);
3184 Edge valueUse = m_jit.graph().varArgChild(node, 2);
3186 SpeculateDoubleOperand valueOp(this, valueUse);
3187 FPRTemporary scratch(this);
3188 FPRReg valueFPR = valueOp.fpr();
3189 FPRReg scratchFPR = scratch.fpr();
    // Array-mode checks on the base must already have been emitted upstream.
3191 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3193 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3195 switch (elementSize(type)) {
    // Float32: narrow double -> float in the scratch register, then store
    // 4 bytes. NOTE(review): the moveDouble before convertDoubleToFloat looks
    // redundant since the convert overwrites scratchFPR — confirm against the
    // full source whether it exists for a target-specific reason.
3197 m_jit.moveDouble(valueFPR, scratchFPR);
3198 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3199 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
    // Float64: store the double directly, 8 bytes.
3203 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3206 RELEASE_ASSERT_NOT_REACHED();
    // Detached-array check for the out-of-bounds path.
3209 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
// GetByVal where the base is speculated to be an Object and the property a
// String: no inline fast path — speculate on both operands, then call out to
// operationGetByValObjectString and box the returned JSValue.
3215 void SpeculativeJIT::compileGetByValForObjectWithString(Node* node)
3217 SpeculateCellOperand arg1(this, node->child1());
3218 SpeculateCellOperand arg2(this, node->child2());
3220 GPRReg arg1GPR = arg1.gpr();
3221 GPRReg arg2GPR = arg2.gpr();
    // OSR-exit if the speculations fail.
3223 speculateObject(node->child1(), arg1GPR);
3224 speculateString(node->child2(), arg2GPR);
3226 GPRFlushedCallResult resultPayload(this);
3227 GPRReg resultPayloadGPR = resultPayload.gpr();
    // 64-bit: the whole JSValue fits in the payload register.
3229 JSValueRegs resultRegs(resultPayloadGPR);
    // 32-bit: a second call-result register carries the tag.
3231 GPRFlushedCallResult2 resultTag(this);
3232 GPRReg resultTagGPR = resultTag.gpr();
3233 JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3237 callOperation(operationGetByValObjectString, extractResult(resultRegs), arg1GPR, arg2GPR);
    // The C++ operation can throw (e.g. getters); check for exceptions.
3238 m_jit.exceptionCheck();
3240 jsValueResult(resultRegs, node);
// GetByVal where the base is speculated to be an Object and the property a
// Symbol. Mirrors compileGetByValForObjectWithString, but speculates Symbol
// and calls operationGetByValObjectSymbol.
3243 void SpeculativeJIT::compileGetByValForObjectWithSymbol(Node* node)
3245 SpeculateCellOperand arg1(this, node->child1());
3246 SpeculateCellOperand arg2(this, node->child2());
3248 GPRReg arg1GPR = arg1.gpr();
3249 GPRReg arg2GPR = arg2.gpr();
    // OSR-exit if the speculations fail.
3251 speculateObject(node->child1(), arg1GPR);
3252 speculateSymbol(node->child2(), arg2GPR);
3254 GPRFlushedCallResult resultPayload(this);
3255 GPRReg resultPayloadGPR = resultPayload.gpr();
    // 64-bit: the whole JSValue fits in the payload register.
3257 JSValueRegs resultRegs(resultPayloadGPR);
    // 32-bit: a second call-result register carries the tag.
3259 GPRFlushedCallResult2 resultTag(this);
3260 GPRReg resultTagGPR = resultTag.gpr();
3261 JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3265 callOperation(operationGetByValObjectSymbol, extractResult(resultRegs), arg1GPR, arg2GPR);
    // The C++ operation can throw; check for exceptions.
3266 m_jit.exceptionCheck();
3268 jsValueResult(resultRegs, node);
// PutByVal where the base is a cell and the property is speculated to be a
// String: always goes through the C++ operation, with the strict-mode variant
// chosen from the node's semantic origin.
3271 void SpeculativeJIT::compilePutByValForCellWithString(Node* node, Edge& child1, Edge& child2, Edge& child3)
3273 SpeculateCellOperand arg1(this, child1);
3274 SpeculateCellOperand arg2(this, child2);
3275 JSValueOperand arg3(this, child3);
3277 GPRReg arg1GPR = arg1.gpr();
3278 GPRReg arg2GPR = arg2.gpr();
3279 JSValueRegs arg3Regs = arg3.jsValueRegs();
    // OSR-exit if the property is not actually a string.
3281 speculateString(child2, arg2GPR);
3284 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellStringStrict : operationPutByValCellStringNonStrict, arg1GPR, arg2GPR, arg3Regs);
    // Setters / proxies reached via the operation can throw.
3285 m_jit.exceptionCheck();
// PutByVal where the base is a cell and the property is speculated to be a
// Symbol. Mirrors compilePutByValForCellWithString with the Symbol operations.
3290 void SpeculativeJIT::compilePutByValForCellWithSymbol(Node* node, Edge& child1, Edge& child2, Edge& child3)
3292 SpeculateCellOperand arg1(this, child1);
3293 SpeculateCellOperand arg2(this, child2);
3294 JSValueOperand arg3(this, child3);
3296 GPRReg arg1GPR = arg1.gpr();
3297 GPRReg arg2GPR = arg2.gpr();
3298 JSValueRegs arg3Regs = arg3.jsValueRegs();
    // OSR-exit if the property is not actually a symbol.
3300 speculateSymbol(child2, arg2GPR);
3303 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellSymbolStrict : operationPutByValCellSymbolNonStrict, arg1GPR, arg2GPR, arg3Regs);
    // Setters / proxies reached via the operation can throw.
3304 m_jit.exceptionCheck();
3309 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg, GPRReg scratch3Reg)
3311 // Check that prototype is an object.
3312 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3314 // Initialize scratchReg with the value being checked.
3315 m_jit.move(valueReg, scratchReg);
3317 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3318 MacroAssembler::Label loop(&m_jit);
3319 MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3320 MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3321 m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratch3Reg, scratch2Reg);
3323 m_jit.load64(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset()), scratch3Reg);
3324 auto hasMonoProto = m_jit.branchTest64(JITCompiler::NonZero, scratch3Reg);
3325 m_jit.load64(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset)), scratch3Reg);
3326 hasMonoProto.link(&m_jit);
3327 m_jit.move(scratch3Reg, scratchReg);
3329 m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + TagOffset), scratch2Reg);
3330 m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + PayloadOffset), scratch3Reg);
3331 auto hasMonoProto = m_jit.branch32(CCallHelpers::NotEqual, scratch2Reg, TrustedImm32(JSValue::EmptyValueTag));
3332 m_jit.load32(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset) + PayloadOffset), scratch3Reg);
3333 hasMonoProto.link(&m_jit);
3334 m_jit.move(scratch3Reg, scratchReg);
3337 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3339 m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3341 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3344 // No match - result is false.
3346 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);