2 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGMayExit.h"
38 #include "DFGOSRExitFuzz.h"
39 #include "DFGSaneStringGetByValSlowPathGenerator.h"
40 #include "DFGSlowPathGenerator.h"
41 #include "DFGSnippetParams.h"
42 #include "DirectArguments.h"
43 #include "JITAddGenerator.h"
44 #include "JITBitAndGenerator.h"
45 #include "JITBitOrGenerator.h"
46 #include "JITBitXorGenerator.h"
47 #include "JITDivGenerator.h"
48 #include "JITLeftShiftGenerator.h"
49 #include "JITMulGenerator.h"
50 #include "JITRightShiftGenerator.h"
51 #include "JITSubGenerator.h"
52 #include "JSAsyncFunction.h"
53 #include "JSAsyncGeneratorFunction.h"
54 #include "JSCInlines.h"
55 #include "JSFixedArray.h"
56 #include "JSGeneratorFunction.h"
57 #include "JSLexicalEnvironment.h"
58 #include "LinkBuffer.h"
59 #include "RegExpConstructor.h"
60 #include "ScopedArguments.h"
61 #include "ScratchRegisterAllocator.h"
62 #include "SuperSampler.h"
63 #include "WeakMapImpl.h"
64 #include <wtf/BitVector.h>
66 #include <wtf/MathExtras.h>
// NOTE(review): this listing is an elided excerpt of DFGSpeculativeJIT.cpp;
// braces, preprocessor guards, and some statements from the original file are
// not visible here. Comments below describe only what the visible lines show.
68 namespace JSC { namespace DFG {
// Constructor: binds this speculative code generator to the JITCompiler's
// graph. Visible initializers set up the abstract state (m_state), the
// abstract interpreter driven by that state, the variable event stream and
// minified graph used for OSR exit reconstruction, and per-virtual-register
// generation info sized to the graph's frame register count. Other members
// of the initializer list are not visible in this excerpt.
70 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
74 , m_lastGeneratedNode(LastNodeType)
76 , m_generationInfo(m_jit.graph().frameRegisterCount())
77 , m_state(m_jit.graph())
78 , m_interpreter(m_jit.graph(), m_state)
79 , m_stream(&jit.jitCode()->variableEventStream)
80 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
// Destructor (body not visible in this excerpt).
84 SpeculativeJIT::~SpeculativeJIT()
// Inline-allocates a JSFinalObject together with (optionally) its butterfly
// storage, without calling out to the runtime on the fast path.
//   resultGPR    - receives the newly allocated object.
//   structure    - the (registered) structure the object is created with; its
//                  inline/out-of-line capacities and indexing type drive the
//                  allocation sizes below.
//   storageGPR   - receives the butterfly pointer (or null if no storage is
//                  allocated on the fast path).
//   numElements  - number of elements the caller will initialize itself.
//   vectorLength - requested indexed-storage vector length (>= numElements).
// Any fast-path failure falls through to a shared slow path that calls
// operationNewRawObject.
88 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
90 IndexingType indexingType = structure->indexingType();
91 bool hasIndexingHeader = hasIndexedProperties(indexingType);
93 unsigned inlineCapacity = structure->inlineCapacity();
94 unsigned outOfLineCapacity = structure->outOfLineCapacity();
96 GPRTemporary scratch(this);
97 GPRTemporary scratch2(this);
98 GPRReg scratchGPR = scratch.gpr();
99 GPRReg scratch2GPR = scratch2.gpr();
101 ASSERT(vectorLength >= numElements);
// Round the vector length up to the allocator-friendly size for this structure.
102 vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
104 JITCompiler::JumpList slowCases;
// Butterfly size: indexed storage + indexing header (only if indexed
// properties exist) plus the out-of-line property storage.
107 if (hasIndexingHeader)
108 size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
109 size += outOfLineCapacity * sizeof(JSValue);
// Null storage by default; only overwritten if we allocate a butterfly below.
111 m_jit.move(TrustedImmPtr(0), storageGPR);
// Fast-path butterfly allocation out of the JSValue gigacage auxiliary space,
// if a suitable allocator exists for this size.
114 if (MarkedAllocator* allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists)) {
115 m_jit.move(TrustedImmPtr(allocator), scratchGPR);
116 m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
// Advance past the pre-header so storageGPR points at the butterfly proper.
// (The addPtr consuming this immediate is not visible in this excerpt.)
119 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
122 if (hasIndexingHeader)
123 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
// No allocator available for this size: take the slow path unconditionally.
125 slowCases.append(m_jit.jump());
// Fast-path allocation of the JSFinalObject cell itself.
128 size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
129 MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
131 m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
132 uint32_t mask = Butterfly::computeIndexingMaskForVectorLength(vectorLength);
133 emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, TrustedImm32(mask), scratch2GPR, slowCases);
134 m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
136 slowCases.append(m_jit.jump());
138 // I want a slow path that also loads out the storage pointer, and that's
139 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
140 // of work for a very small piece of functionality. :-/
141 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
142 slowCases, this, operationNewRawObject, resultGPR, storageGPR,
143 structure, vectorLength));
// Pre-fill the uninitialized tail of the vector (numElements..vectorLength)
// so the GC never sees garbage: PNaN for double arrays, the empty JSValue
// otherwise. The two loops below are presumably the USE(JSVALUE64) and
// USE(JSVALUE32_64) variants — the #if/#else guards are not visible here.
145 if (numElements < vectorLength && LIKELY(!hasUndecided(structure->indexingType()))) {
147 if (hasDouble(structure->indexingType()))
148 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
150 m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
151 for (unsigned i = numElements; i < vectorLength; ++i)
152 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
// 32-bit variant: store tag and payload halves separately.
154 EncodedValueDescriptor value;
155 if (hasDouble(structure->indexingType()))
156 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
158 value.asInt64 = JSValue::encode(JSValue());
159 for (unsigned i = numElements; i < vectorLength; ++i) {
160 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
161 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
// Publish the number of caller-initialized elements as the public length.
166 if (hasIndexingHeader)
167 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
169 m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
// Fence so a concurrent GC never observes the object before its
// initialization stores.
171 m_jit.mutatorFence(*m_jit.vm());
// Materializes the argument count of the given call frame into lengthGPR.
// For a non-varargs inline call frame the count is a compile-time constant;
// otherwise it is loaded from the argument-count slot. When includeThis is
// false, `this` is excluded from the count (the sub32 below; its guarding
// condition is not visible in this excerpt).
174 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
176 if (inlineCallFrame && !inlineCallFrame->isVarargs())
177 m_jit.move(TrustedImm32(inlineCallFrame->argumentCountIncludingThis - !includeThis), lengthGPR)
179 VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
180 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
182 m_jit.sub32(TrustedImm32(1), lengthGPR);
// Convenience overload: resolves the code origin to its inline call frame
// and forwards to the InlineCallFrame* variant above.
186 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
188 emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
// Materializes the callee for the given code origin into calleeGPR.
// Inlined closure calls must recover the callee from its stack slot; inlined
// non-closure calls know the callee cell at compile time (weak pointer); a
// non-inlined origin loads the machine frame's callee slot.
191 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
193 if (origin.inlineCallFrame) {
194 if (origin.inlineCallFrame->isClosureCall) {
// Load the recovered callee from the inline frame's callee slot.
196 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
// Constant callee: embed it as a weak pointer into the generated code.
200 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
204 m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
// Computes the address of the first argument for the given code origin
// (call-frame-relative arguments start offset, in Register-sized slots)
// into startGPR. The addPtr consuming these operands is not visible in
// this excerpt.
207 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
211 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
212 GPRInfo::callFrameRegister, startGPR);
// OSR-exit fuzzing support: emits code that counts dynamic exit checks in
// g_numberOfOSRExitFuzzChecks and returns a jump that fires when the counter
// hits the configured fuzz thresholds (fireOSRExitFuzzAt /
// fireOSRExitFuzzAtOrAfter). Returns an unset Jump when fuzzing is disabled
// for this code block. regT0 is saved/restored around the counter update.
215 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
217 if (!Options::useOSRExitFuzz()
218 || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
219 || !doOSRExitFuzzing())
220 return MacroAssembler::Jump();
222 MacroAssembler::Jump result;
// Increment the global check counter (regT0 used as scratch).
224 m_jit.pushToSave(GPRInfo::regT0);
225 m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
226 m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
227 m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
228 unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
229 unsigned at = Options::fireOSRExitFuzzAt();
230 if (at || atOrAfter) {
// Pick the comparison: "at or after" fires for every count >= threshold
// (ok-branch uses Below); "at" fires only at the exact count (NotEqual).
// The threshold assignments' guarding if/else lines are partly missing
// from this excerpt.
232 MacroAssembler::RelationalCondition condition;
234 threshold = atOrAfter;
235 condition = MacroAssembler::Below;
238 condition = MacroAssembler::NotEqual;
240 MacroAssembler::Jump ok = m_jit.branch32(
241 condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
242 m_jit.popToRestore(GPRInfo::regT0);
// Threshold reached: this jump is the fuzz-induced exit.
243 result = m_jit.jump();
246 m_jit.popToRestore(GPRInfo::regT0);
// Registers an OSR exit taken when jumpToFail fires: records exit info for
// the jump (merged with the OSR-exit fuzz jump when fuzzing is active) and
// appends an OSRExit record capturing the exit kind, the value source, the
// value-profiling method for `node`, and the current variable-event stream
// position.
251 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
255 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
256 if (fuzzJump.isSet()) {
257 JITCompiler::JumpList jumpsToFail;
258 jumpsToFail.append(fuzzJump);
259 jumpsToFail.append(jumpToFail);
260 m_jit.appendExitInfo(jumpsToFail);
262 m_jit.appendExitInfo(jumpToFail);
263 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
// JumpList variant of the speculation check above: all jumps in jumpsToFail
// (plus the fuzz jump, when set) share one OSR exit record.
266 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
270 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
271 if (fuzzJump.isSet()) {
272 JITCompiler::JumpList myJumpsToFail;
273 myJumpsToFail.append(jumpsToFail);
274 myJumpsToFail.append(fuzzJump);
275 m_jit.appendExitInfo(myJumpsToFail);
277 m_jit.appendExitInfo(jumpsToFail);
278 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
// Registers an OSR exit whose triggering jump is supplied later: returns a
// placeholder indexing the new exit record so the caller can link a jump to
// it afterwards. The early return of an empty placeholder (line 284) is
// presumably guarded by a !m_compileOkay-style bail-out whose condition is
// not visible in this excerpt.
281 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
284 return OSRExitJumpPlaceholder();
285 unsigned index = m_jit.jitCode()->osrExit.size();
286 m_jit.appendExitInfo();
287 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
288 return OSRExitJumpPlaceholder(index);
// Edge-based convenience overloads: each unwraps the edge to its node and
// forwards to the corresponding Node* overload above.
291 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
293 return speculationCheck(kind, jsValueSource, nodeUse.node());
296 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
298 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
301 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
303 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
// Speculation check with a SpeculationRecovery: the recovery describes how
// to undo in-flight register mutations before exiting, and its index is
// stored in the OSRExit record. The Edge overload below forwards to it.
306 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
310 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
311 m_jit.appendExitInfo(jumpToFail);
312 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
315 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
317 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
// Emits an invalidation point: an UncountableInvalidation OSR exit with no
// triggering jump, plus a watchpoint label recorded as the exit's replacement
// source so the code can later be patched to jump to the exit when the code
// block is invalidated.
320 void SpeculativeJIT::emitInvalidationPoint(Node* node)
324 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
325 m_jit.jitCode()->appendOSRExit(OSRExit(
326 UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
327 this, m_stream->size()));
328 info.m_replacementSource = m_jit.watchpointLabel();
329 ASSERT(info.m_replacementSource.isSet());
// Marks the current node as unreachable: aborts at runtime with a
// DFGUnreachableNode reason and flags the compilation as not okay so no
// further code is generated for this path.
333 void SpeculativeJIT::unreachable(Node* node)
335 m_compileOkay = false;
336 m_jit.abortWithReason(DFGUnreachableNode, node->op());
// Terminates speculative execution on this path: emits an unconditional
// speculation check (so control always OSR-exits here) and stops further
// code generation by clearing m_compileOkay. The Edge overload forwards to
// the Node* one.
339 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
343 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
344 m_compileOkay = false;
345 if (verboseCompilationEnabled())
346 dataLog("Bailing compilation.\n");
349 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
351 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
// Type check: narrows the abstract-interpreter proof for `edge` to
// typesPassedThrough (so downstream code can rely on it) and registers
// jumpToFail as the OSR exit taken when the runtime value violates the check.
354 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
356 ASSERT(needsTypeCheck(edge, typesPassedThrough));
357 m_interpreter.filter(edge, typesPassedThrough);
358 speculationCheck(exitKind, source, edge.node(), jumpToFail);
// Returns the set of registers currently in use by the register allocator
// (every GPR/FPR marked in-use in the banks), merged with the registers the
// inline-cache stubs may not use. The per-register set-insertion lines are
// not visible in this excerpt.
361 RegisterSet SpeculativeJIT::usedRegisters()
365 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
366 GPRReg gpr = GPRInfo::toRegister(i);
367 if (m_gprs.isInUse(gpr))
370 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
371 FPRReg fpr = FPRInfo::toRegister(i);
372 if (m_fprs.isInUse(fpr))
376 result.merge(RegisterSet::stubUnavailableRegisters());
// Queues a slow-path generator object to be emitted after the main path
// (see runSlowPathGenerators below).
381 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
383 m_slowPathGenerators.append(WTFMove(slowPathGenerator));
// Lambda variant: also records the current node and variable-event stream
// index so the lambda runs with the correct exit state later.
386 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
388 m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
// Emits all queued slow paths. Generator objects carry their own origin;
// lambdas restore the node and out-of-line stream index they were queued
// with (so OSR exits inside the lambda see the right state), then reset the
// stream index afterwards. Each emission is recorded in the PC-to-code-origin
// map for profiling/debugging.
391 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
393 for (auto& slowPathGenerator : m_slowPathGenerators) {
394 pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
395 slowPathGenerator->generate(this);
397 for (auto& slowPathLambda : m_slowPathLambdas) {
398 Node* currentNode = slowPathLambda.currentNode;
399 m_currentNode = currentNode;
400 m_outOfLineStreamIndex = slowPathLambda.streamIndex;
401 pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
402 slowPathLambda.generator();
403 m_outOfLineStreamIndex = std::nullopt;
// Resets all per-virtual-register generation info and empties both register
// banks, returning the allocator to a pristine state.
407 void SpeculativeJIT::clearGenerationInfo()
409 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
410 m_generationInfo[i] = GenerationInfo();
411 m_gprs = RegisterBank<GPRInfo>();
412 m_fprs = RegisterBank<FPRInfo>();
// Computes a "silent" save/restore plan for the GPR `source` holding the
// value of virtual register `spillMe`: which store (if any) spills the value
// before a call, and which load/constant-materialization refills it after,
// chosen from the value's DataFormat and whether it is a constant. "Silent"
// means the register allocator's bookkeeping is not updated. Several
// #if USE(JSVALUE64)/#elif USE(JSVALUE32_64) guards and else-branches are
// not visible in this excerpt; the 64-bit and 32-bit cases appear interleaved
// below.
415 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
417 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
418 Node* node = info.node();
419 DataFormat registerFormat = info.registerFormat();
420 ASSERT(registerFormat != DataFormatNone);
421 ASSERT(registerFormat != DataFormatDouble);
423 SilentSpillAction spillAction;
424 SilentFillAction fillAction;
// --- Spill action selection: nothing if already spilled/constant-backed,
// otherwise a store sized to the register format.
426 if (!info.needsSpill())
427 spillAction = DoNothingForSpill;
430 ASSERT(info.gpr() == source);
431 if (registerFormat == DataFormatInt32)
432 spillAction = Store32Payload;
433 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
434 spillAction = StorePtr;
435 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
436 spillAction = Store64;
438 ASSERT(registerFormat & DataFormatJS);
439 spillAction = Store64;
441 #elif USE(JSVALUE32_64)
// 32-bit: a JS value spans two GPRs; store whichever half `source` holds.
442 if (registerFormat & DataFormatJS) {
443 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
444 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
446 ASSERT(info.gpr() == source);
447 spillAction = Store32Payload;
// --- Fill action selection, by register format.
452 if (registerFormat == DataFormatInt32) {
453 ASSERT(info.gpr() == source);
454 ASSERT(isJSInt32(info.registerFormat()));
455 if (node->hasConstant()) {
456 ASSERT(node->isInt32Constant());
457 fillAction = SetInt32Constant;
459 fillAction = Load32Payload;
460 } else if (registerFormat == DataFormatBoolean) {
// On 64-bit, booleans are never held in this format here.
462 RELEASE_ASSERT_NOT_REACHED();
463 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
464 fillAction = DoNothingForFill;
466 #elif USE(JSVALUE32_64)
467 ASSERT(info.gpr() == source);
468 if (node->hasConstant()) {
469 ASSERT(node->isBooleanConstant());
470 fillAction = SetBooleanConstant;
472 fillAction = Load32Payload;
474 } else if (registerFormat == DataFormatCell) {
475 ASSERT(info.gpr() == source);
476 if (node->hasConstant()) {
477 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
478 node->asCell(); // To get the assertion.
479 fillAction = SetCellConstant;
482 fillAction = LoadPtr;
484 fillAction = Load32Payload;
487 } else if (registerFormat == DataFormatStorage) {
488 ASSERT(info.gpr() == source);
489 fillAction = LoadPtr;
490 } else if (registerFormat == DataFormatInt52) {
// Int52 refill: depends on how the value was spilled (shifted vs strict);
// some branches' fill assignments are not visible in this excerpt.
491 if (node->hasConstant())
492 fillAction = SetInt52Constant;
493 else if (info.spillFormat() == DataFormatInt52)
495 else if (info.spillFormat() == DataFormatStrictInt52)
496 fillAction = Load64ShiftInt52Left;
497 else if (info.spillFormat() == DataFormatNone)
500 RELEASE_ASSERT_NOT_REACHED();
501 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
502 fillAction = Load64; // Make GCC happy.
505 } else if (registerFormat == DataFormatStrictInt52) {
506 if (node->hasConstant())
507 fillAction = SetStrictInt52Constant;
508 else if (info.spillFormat() == DataFormatInt52)
509 fillAction = Load64ShiftInt52Right;
510 else if (info.spillFormat() == DataFormatStrictInt52)
512 else if (info.spillFormat() == DataFormatNone)
515 RELEASE_ASSERT_NOT_REACHED();
516 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
517 fillAction = Load64; // Make GCC happy.
// Remaining case: full JS value formats.
521 ASSERT(registerFormat & DataFormatJS);
523 ASSERT(info.gpr() == source);
524 if (node->hasConstant()) {
525 if (node->isCellConstant())
526 fillAction = SetTrustedJSConstant;
528 fillAction = SetJSConstant;
529 } else if (info.spillFormat() == DataFormatInt32) {
530 ASSERT(registerFormat == DataFormatJSInt32);
531 fillAction = Load32PayloadBoxInt;
// 32-bit JS value: refill the half of the value that `source` held.
535 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
536 if (node->hasConstant())
537 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
538 else if (info.payloadGPR() == source)
539 fillAction = Load32Payload;
540 else { // Fill the Tag
541 switch (info.spillFormat()) {
542 case DataFormatInt32:
543 ASSERT(registerFormat == DataFormatJSInt32);
544 fillAction = SetInt32Tag;
547 ASSERT(registerFormat == DataFormatJSCell);
548 fillAction = SetCellTag;
550 case DataFormatBoolean:
551 ASSERT(registerFormat == DataFormatJSBoolean);
552 fillAction = SetBooleanTag;
555 fillAction = Load32Tag;
562 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// FPR counterpart of silentSavePlanForGPR: the value is always a double, so
// the plan is StoreDouble (if a spill is needed) and either a constant
// re-materialization or a LoadDouble refill. The 64-bit/32-bit split below is
// presumably guarded by USE(JSVALUE64)/#elif — the first guard is not
// visible in this excerpt.
565 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
567 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
568 Node* node = info.node();
569 ASSERT(info.registerFormat() == DataFormatDouble);
571 SilentSpillAction spillAction;
572 SilentFillAction fillAction;
574 if (!info.needsSpill())
575 spillAction = DoNothingForSpill;
577 ASSERT(!node->hasConstant());
578 ASSERT(info.spillFormat() == DataFormatNone);
579 ASSERT(info.fpr() == source);
580 spillAction = StoreDouble;
584 if (node->hasConstant()) {
585 node->asNumber(); // To get the assertion.
586 fillAction = SetDoubleConstant;
588 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
589 fillAction = LoadDouble;
591 #elif USE(JSVALUE32_64)
592 ASSERT(info.registerFormat() == DataFormatDouble);
593 if (node->hasConstant()) {
594 node->asNumber(); // To get the assertion.
595 fillAction = SetDoubleConstant;
597 fillAction = LoadDouble;
600 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Executes the spill half of a silent save plan: stores the register to the
// node's stack slot according to the plan's action. Several `case` labels
// between the stores are not visible in this excerpt.
603 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
605 switch (plan.spillAction()) {
606 case DoNothingForSpill:
609 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
612 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
615 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
619 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
623 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
626 RELEASE_ASSERT_NOT_REACHED();
// Executes the fill half of a silent save plan: re-materializes the value
// into its register after a call, either from a constant or by reloading the
// stack slot, per the plan's action. Some `case` labels and #if guards are
// not visible in this excerpt.
630 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan)
632 switch (plan.fillAction()) {
633 case DoNothingForFill:
635 case SetInt32Constant:
636 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
// Int52 constants are stored left-shifted by int52ShiftAmount.
639 case SetInt52Constant:
640 m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
642 case SetStrictInt52Constant:
643 m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
645 #endif // USE(JSVALUE64)
646 case SetBooleanConstant:
647 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
649 case SetCellConstant:
650 ASSERT(plan.node()->constant()->value().isCell());
651 m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
654 case SetTrustedJSConstant:
655 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
658 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
// Reload an int32 payload and box it as a JS number via the tag register.
660 case SetDoubleConstant:
661 m_jit.moveDouble(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), plan.fpr());
663 case Load32PayloadBoxInt:
664 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
665 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
667 case Load32PayloadConvertToInt52:
668 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
669 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
670 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
672 case Load32PayloadSignExtend:
673 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
674 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
// 32-bit-only cases: materialize tag/payload halves separately.
677 case SetJSConstantTag:
678 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
680 case SetJSConstantPayload:
681 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
684 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
687 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
690 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
692 case SetDoubleConstant:
693 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
// Plain reloads from the stack slot, sized per format.
697 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
700 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
703 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
707 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
// Int52 reloads convert between shifted and strict representations.
709 case Load64ShiftInt52Right:
710 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
711 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
713 case Load64ShiftInt52Left:
714 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
715 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
719 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
722 RELEASE_ASSERT_NOT_REACHED();
// Emits a branch that fires when the indexing type in tempGPR does not match
// the expected `shape` for the given array class: OriginalArray/Array require
// the IsArray bit plus the shape; NonArray requires the shape without
// IsArray; PossiblyArray ignores the IsArray bit. tempGPR is clobbered.
// The Array::Array case's lines are not visible in this excerpt.
726 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
728 switch (arrayMode.arrayClass()) {
729 case Array::OriginalArray: {
731 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
732 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
738 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
739 return m_jit.branch32(
740 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
742 case Array::NonArray:
743 case Array::OriginalNonArray:
744 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
745 return m_jit.branch32(
746 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
748 case Array::PossiblyArray:
749 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
750 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
753 RELEASE_ASSERT_NOT_REACHED();
754 return JITCompiler::Jump();
// JumpList variant keyed on the full ArrayMode: simple shapes delegate to the
// single-shape helper above; ArrayStorage/SlowPutArrayStorage need range
// checks (ArrayStorageShape..SlowPutArrayStorageShape) and, for JS arrays,
// an IsArray-bit check. tempGPR is clobbered. Some case labels and
// `result.append(...)` call heads are not visible in this excerpt.
757 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
759 JITCompiler::JumpList result;
761 switch (arrayMode.type()) {
764 case Array::Contiguous:
765 case Array::Undecided:
766 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, arrayMode.shapeMask());
768 case Array::ArrayStorage:
769 case Array::SlowPutArrayStorage: {
770 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
772 if (arrayMode.isJSArray()) {
773 if (arrayMode.isSlowPut()) {
// JS array + slow-put: require IsArray, then require the shape to fall in
// [ArrayStorageShape, SlowPutArrayStorageShape] via a subtract-and-compare.
776 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
777 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
778 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
781 MacroAssembler::Above, tempGPR,
782 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
// JS array, fast put: exact match of IsArray | ArrayStorageShape.
785 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
787 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
// Non-array object: same shape checks without the IsArray requirement.
790 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
791 if (arrayMode.isSlowPut()) {
792 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
795 MacroAssembler::Above, tempGPR,
796 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
800 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// CheckArray: verifies that the base cell's storage matches the node's
// (already-specific, non-converting) ArrayMode. Indexed shapes check the
// cell's indexing byte via jumpSlowForUnwantedArrayMode; argument/typed-array
// modes check the cell's JSType; anything else falls back to a ClassInfo
// compare against the structure. If abstract interpretation already proved
// the mode, no code is emitted.
811 void SpeculativeJIT::checkArray(Node* node)
813 ASSERT(node->arrayMode().isSpecific());
814 ASSERT(!node->arrayMode().doesConversion());
816 SpeculateCellOperand base(this, node->child1());
817 GPRReg baseReg = base.gpr();
// Already proven by the abstract interpreter: nothing to check.
819 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
820 noResult(m_currentNode);
824 const ClassInfo* expectedClassInfo = 0;
826 switch (node->arrayMode().type()) {
827 case Array::AnyTypedArray:
829 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
// Indexed storage shapes: check the indexing-type byte.
833 case Array::Contiguous:
834 case Array::Undecided:
835 case Array::ArrayStorage:
836 case Array::SlowPutArrayStorage: {
837 GPRTemporary temp(this);
838 GPRReg tempGPR = temp.gpr();
839 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
841 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
842 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
844 noResult(m_currentNode);
// Arguments objects and typed arrays: check the JSType byte instead.
847 case Array::DirectArguments:
848 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
849 noResult(m_currentNode);
851 case Array::ScopedArguments:
852 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
853 noResult(m_currentNode);
856 speculateCellTypeWithoutTypeFiltering(
857 node->child1(), baseReg,
858 typeForTypedArrayType(node->arrayMode().typedArrayType()));
859 noResult(m_currentNode);
// Fallback: compare the structure's (poisoned) ClassInfo pointer.
863 RELEASE_ASSERT(expectedClassInfo);
865 GPRTemporary temp(this);
866 GPRTemporary temp2(this);
867 m_jit.emitLoadStructure(*m_jit.vm(), baseReg, temp.gpr(), temp2.gpr());
869 BadType, JSValueSource::unboxedCell(baseReg), node,
871 MacroAssembler::NotEqual,
872 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
873 TrustedImmPtr(PoisonedClassInfoPtr(expectedClassInfo).bits())));
875 noResult(m_currentNode);
// Arrayify: ensures the base object's storage matches the node's (converting)
// ArrayMode. The fast path checks the structure (ArrayifyToStructure) or the
// indexing byte; mismatches jump to an ArrayifySlowPathGenerator which calls
// into the runtime to convert the object's storage.
// propertyReg may be InvalidGPRReg when no index is involved.
878 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
880 ASSERT(node->arrayMode().doesConversion());
882 GPRTemporary temp(this);
883 GPRTemporary structure;
884 GPRReg tempGPR = temp.gpr();
885 GPRReg structureGPR = InvalidGPRReg;
// Only non-ArrayifyToStructure needs a register for the structure.
887 if (node->op() != ArrayifyToStructure) {
888 GPRTemporary realStructure(this);
889 structure.adopt(realStructure);
890 structureGPR = structure.gpr();
893 // We can skip all that comes next if we already have array storage.
894 MacroAssembler::JumpList slowPath;
896 if (node->op() == ArrayifyToStructure) {
// Weak-structure compare against the target structure's ID. (The expected
// structure operand of this branch is not visible in this excerpt.)
897 slowPath.append(m_jit.branchWeakStructure(
898 JITCompiler::NotEqual,
899 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
// Otherwise check the indexing-type byte against the wanted mode.
903 MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
905 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
908 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
909 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
911 noResult(m_currentNode);
// Node-only entry point: fills the base cell operand (and the optional int32
// index operand from child2, when present) and forwards to the three-argument
// arrayify above.
914 void SpeculativeJIT::arrayify(Node* node)
916 ASSERT(node->arrayMode().isSpecific());
918 SpeculateCellOperand base(this, node->child1());
920 if (!node->child2()) {
921 arrayify(node, base.gpr(), InvalidGPRReg);
925 SpeculateInt32Operand property(this, node->child2());
927 arrayify(node, base.gpr(), property.gpr());
// Fills `edge` as a storage pointer and returns the GPR holding it: reloads
// a spilled storage value from the stack, reuses the register if already
// filled as storage, and otherwise treats the value as a cell (fillSpeculateCell).
// Some case labels/returns are not visible in this excerpt.
930 GPRReg SpeculativeJIT::fillStorage(Edge edge)
932 VirtualRegister virtualRegister = edge->virtualRegister();
933 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
935 switch (info.registerFormat()) {
936 case DataFormatNone: {
937 if (info.spillFormat() == DataFormatStorage) {
938 GPRReg gpr = allocate();
939 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
940 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
941 info.fillStorage(*m_stream, gpr);
945 // Must be a cell; fill it as a cell and then return the pointer.
946 return fillSpeculateCell(edge);
949 case DataFormatStorage: {
950 GPRReg gpr = info.gpr();
956 return fillSpeculateCell(edge);
// Records a use of each of the node's children with the register allocator:
// var-args nodes iterate the graph's var-arg children list; otherwise the up
// to three fixed child edges are used in order (the `use(...)` calls for the
// fixed children are not visible in this excerpt).
960 void SpeculativeJIT::useChildren(Node* node)
962 if (node->flags() & NodeHasVarArgs) {
963 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
964 if (!!m_jit.graph().m_varArgChildren[childIdx])
965 use(m_jit.graph().m_varArgChildren[childIdx]);
968 Edge child1 = node->child1();
970 ASSERT(!node->child2() && !node->child3());
975 Edge child2 = node->child2();
977 ASSERT(!node->child3());
982 Edge child3 = node->child3();
// TryGetById: emits a cached get-by-id inline cache with AccessType::TryGet.
// CellUse skips the not-cell check entirely; UntypedUse branches around the
// IC for non-cell bases (the notCell jump is handed to cachedGetById as the
// slow-path trigger). Result is a boxed JS value.
989 void SpeculativeJIT::compileTryGetById(Node* node)
991 switch (node->child1().useKind()) {
993 SpeculateCellOperand base(this, node->child1());
994 JSValueRegsTemporary result(this, Reuse, base);
996 JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
997 JSValueRegs resultRegs = result.regs();
1001 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1003 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
// Untyped base: must first test for cell-ness.
1008 JSValueOperand base(this, node->child1());
1009 JSValueRegsTemporary result(this, Reuse, base);
1011 JSValueRegs baseRegs = base.jsValueRegs();
1012 JSValueRegs resultRegs = result.regs();
1016 JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1018 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1020 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1025 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
// Compiles the `in` operator. Fast case: when the property is a constant
// atomic JSString, emit a patchable structure-stub-info inline cache whose
// slow path calls operationInOptimize. Otherwise call operationGenericIn
// with the boxed key. Result is a blessed boolean.
1030 void SpeculativeJIT::compileIn(Node* node)
1032 SpeculateCellOperand base(this, node->child1());
1033 GPRReg baseGPR = base.gpr();
1035 if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1036 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1037 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1039 GPRTemporary result(this);
1040 GPRReg resultGPR = result.gpr();
1042 use(node->child2());
// The patchable jump is the IC's entry; `done` is the patch target after
// a successful inline hit.
1044 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1045 MacroAssembler::Label done = m_jit.label();
1047 // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1048 // we can cast it to const AtomicStringImpl* safely.
1049 auto slowPath = slowPathCall(
1050 jump.m_jump, this, operationInOptimize,
1051 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1052 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
// Fill in the stub info so the IC patching machinery knows the register
// assignments and call site.
1054 stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1055 stubInfo->codeOrigin = node->origin.semantic;
1056 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1057 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1058 stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1059 #if USE(JSVALUE32_64)
1060 stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1061 stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1062 stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1064 stubInfo->patch.usedRegisters = usedRegisters();
1066 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1067 addSlowPathGenerator(WTFMove(slowPath));
1071 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: call the runtime with the boxed key.
1076 JSValueOperand key(this, node->child2());
1077 JSValueRegs regs = key.jsValueRegs();
1079 GPRFlushedCallResult result(this);
1080 GPRReg resultGPR = result.gpr();
1087 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1089 m_jit.exceptionCheck();
1090 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Emits code for DeleteById: always a runtime call (no inline cache),
// producing an unboxed boolean (true if the property was deleted).
1093 void SpeculativeJIT::compileDeleteById(Node* node)
1095 JSValueOperand value(this, node->child1());
1096 GPRFlushedCallResult result(this);
1098 JSValueRegs valueRegs = value.jsValueRegs();
1099 GPRReg resultGPR = result.gpr();
1104 callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
// Delete can throw (e.g. strict-mode delete of a non-configurable property).
1105 m_jit.exceptionCheck();
1107 unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Emits code for DeleteByVal: like DeleteById but with a dynamic key;
// always a runtime call returning an unboxed boolean.
1110 void SpeculativeJIT::compileDeleteByVal(Node* node)
1112 JSValueOperand base(this, node->child1());
1113 JSValueOperand key(this, node->child2());
1114 GPRFlushedCallResult result(this);
1116 JSValueRegs baseRegs = base.jsValueRegs();
1117 JSValueRegs keyRegs = key.jsValueRegs();
1118 GPRReg resultGPR = result.gpr();
1124 callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1125 m_jit.exceptionCheck();
1127 unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Emits code for PushWithScope (entering a `with (obj) {...}` scope).
// If the object edge is already known to be an object we can skip the
// toObject() conversion and therefore skip the exception check.
1130 void SpeculativeJIT::compilePushWithScope(Node* node)
1132 SpeculateCellOperand currentScope(this, node->child1());
1133 GPRReg currentScopeGPR = currentScope.gpr();
1135 GPRFlushedCallResult result(this);
1136 GPRReg resultGPR = result.gpr();
1138 auto objectEdge = node->child2();
1139 if (objectEdge.useKind() == ObjectUse) {
1140 SpeculateCellOperand object(this, objectEdge);
1141 GPRReg objectGPR = object.gpr();
1142 speculateObject(objectEdge, objectGPR);
1145 callOperation(operationPushWithScopeObject, resultGPR, currentScopeGPR, objectGPR);
1146 // No exception check here as we did not have to call toObject().
1148 ASSERT(objectEdge.useKind() == UntypedUse);
1149 JSValueOperand object(this, objectEdge);
1150 JSValueRegs objectRegs = object.jsValueRegs();
1153 callOperation(operationPushWithScope, resultGPR, currentScopeGPR, objectRegs);
// Untyped path may run toObject(), which can throw.
1154 m_jit.exceptionCheck();
1157 cellResult(resultGPR, node);
// Compiles an untyped comparison. If the compare feeds directly into the next
// node's Branch (peephole detection), fuse compare+branch and advance the
// block cursor past the branch; returns whether fusion happened (true means
// the caller must not also emit the branch).
1160 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1162 unsigned branchIndexInBlock = detectPeepHoleBranch();
1163 if (branchIndexInBlock != UINT_MAX) {
1164 Node* branchNode = m_block->at(branchIndexInBlock);
// Fusion is only valid when the branch is the sole user of the compare.
1166 ASSERT(node->adjustedRefCount() == 1);
1168 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
// Skip ahead so the branch node is not compiled a second time.
1170 m_indexInBlock = branchIndexInBlock;
1171 m_currentNode = branchNode;
1176 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
// Strict-equality analogue of nonSpeculativeCompare: fuse with a following
// Branch when possible; |invert| flips the sense (for !==). Returns whether
// the peephole fusion was performed.
1181 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1183 unsigned branchIndexInBlock = detectPeepHoleBranch();
1184 if (branchIndexInBlock != UINT_MAX) {
1185 Node* branchNode = m_block->at(branchIndexInBlock);
1187 ASSERT(node->adjustedRefCount() == 1);
1189 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1191 m_indexInBlock = branchIndexInBlock;
1192 m_currentNode = branchNode;
1197 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Debug helper: maps a DataFormat enum value to a printable name.
// The table (elided in this listing) must stay in sync with the enum order.
1202 static const char* dataFormatString(DataFormat format)
1204 // These values correspond to the DataFormat enum.
1205 const char* strings[] = {
1223 return strings[format];
// Debug dump of the speculative JIT's register-allocation state: GPR/FPR
// banks and per-VirtualRegister GenerationInfo (register format, spill
// format, and which machine register holds the value, if any).
1226 void SpeculativeJIT::dump(const char* label)
1229 dataLogF("<%s>\n", label);
1231 dataLogF(" gprs:\n");
1233 dataLogF(" fprs:\n");
1235 dataLogF(" VirtualRegisters:\n");
1236 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1237 GenerationInfo& info = m_generationInfo[i];
1239 dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1241 dataLogF(" % 3d:[__][__]", i);
1242 if (info.registerFormat() == DataFormatDouble)
1243 dataLogF(":fpr%d\n", info.fpr());
1244 else if (info.registerFormat() != DataFormatNone
1245 #if USE(JSVALUE32_64)
// On 32-bit, full JS values live in a GPR pair, so info.gpr() alone
// would be ambiguous; skip those here.
1246 && !(info.registerFormat() & DataFormatJS)
1249 ASSERT(info.gpr() != InvalidGPRReg);
1250 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1255 dataLogF("</%s>\n", label);
// GPRTemporary constructors. A GPRTemporary is an RAII handle on a scratch
// GPR for the duration of a node's code generation.
// Default: no register held.
1258 GPRTemporary::GPRTemporary()
1260 , m_gpr(InvalidGPRReg)
// Allocate any free GPR.
1264 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1266 , m_gpr(InvalidGPRReg)
1268 m_gpr = m_jit->allocate();
// Allocate a specific GPR (needed when an operation requires a fixed reg).
1271 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1273 , m_gpr(InvalidGPRReg)
1275 m_gpr = m_jit->allocate(specific);
1278 #if USE(JSVALUE32_64)
// 32-bit only: try to reuse one word (tag or payload) of a JSValueOperand
// when the operand's node is no longer needed; otherwise allocate fresh.
1279 GPRTemporary::GPRTemporary(
1280 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1282 , m_gpr(InvalidGPRReg)
1284 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1285 m_gpr = m_jit->reuse(op1.gpr(which));
1287 m_gpr = m_jit->allocate();
1289 #endif // USE(JSVALUE32_64)
// JSValueRegsTemporary: RAII scratch registers for a full JSValue.
// On 64-bit this is one GPR (m_gpr); on 32-bit it is a tag/payload GPR pair.
// The paired definitions below are the per-word-size variants; the #if/#else
// lines separating them are elided in this extracted listing.
1291 JSValueRegsTemporary::JSValueRegsTemporary() { }
1293 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1304 template<typename T>
// 64-bit variant: reuse the operand's single register when possible.
1305 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1306 : m_gpr(jit, Reuse, operand)
1310 template<typename T>
// 32-bit variant: reuse the operand's register for whichever word the
// caller designates; allocate a fresh register for the other word.
1311 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1313 if (resultWord == PayloadWord) {
1314 m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1315 m_tagGPR = GPRTemporary(jit);
1317 m_payloadGPR = GPRTemporary(jit);
1318 m_tagGPR = GPRTemporary(jit, Reuse, operand);
// 64-bit: reuse a whole JSValueOperand register.
1324 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1326 m_gpr = GPRTemporary(jit, Reuse, operand);
// 32-bit: reuse both words of the operand when its node permits reuse;
// otherwise allocate two fresh registers.
1329 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1331 if (jit->canReuse(operand.node())) {
1332 m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1333 m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1335 m_payloadGPR = GPRTemporary(jit);
1336 m_tagGPR = GPRTemporary(jit);
1341 JSValueRegsTemporary::~JSValueRegsTemporary() { }
// Expose the held register(s) as a JSValueRegs (one GPR on 64-bit,
// tag+payload pair on 32-bit).
1343 JSValueRegs JSValueRegsTemporary::regs()
1346 return JSValueRegs(m_gpr.gpr());
1348 return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
// Transfers ownership of |other|'s register into this (move-like operation).
// |this| must currently be empty; |other| is left holding no register.
1352 void GPRTemporary::adopt(GPRTemporary& other)
1355 ASSERT(m_gpr == InvalidGPRReg);
1356 ASSERT(other.m_jit);
1357 ASSERT(other.m_gpr != InvalidGPRReg);
1358 m_jit = other.m_jit;
1359 m_gpr = other.m_gpr;
1361 other.m_gpr = InvalidGPRReg;
// Move constructor: steals |other|'s FPR; |other| is disarmed so its
// destructor won't release the register.
1364 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1366 ASSERT(other.m_jit);
1367 ASSERT(other.m_fpr != InvalidFPRReg);
1368 m_jit = other.m_jit;
1369 m_fpr = other.m_fpr;
1371 other.m_jit = nullptr;
// FPRTemporary constructors: RAII scratch FPR, preferring to reuse an
// operand's register when that operand's node is dead after this use.
1374 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1376 , m_fpr(InvalidFPRReg)
1378 m_fpr = m_jit->fprAllocate();
// Reuse op1's FPR if possible; otherwise allocate fresh.
1381 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1383 , m_fpr(InvalidFPRReg)
1385 if (m_jit->canReuse(op1.node()))
1386 m_fpr = m_jit->reuse(op1.fpr());
1388 m_fpr = m_jit->fprAllocate();
// Two-operand form: prefer reusing either operand; the final case handles
// both operands being the same node/register, reusable only jointly.
1391 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1393 , m_fpr(InvalidFPRReg)
1395 if (m_jit->canReuse(op1.node()))
1396 m_fpr = m_jit->reuse(op1.fpr());
1397 else if (m_jit->canReuse(op2.node()))
1398 m_fpr = m_jit->reuse(op2.fpr());
1399 else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1400 m_fpr = m_jit->reuse(op1.fpr());
1402 m_fpr = m_jit->fprAllocate();
1405 #if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand may hold an unboxed double; reuse that FPR.
1406 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1408 , m_fpr(InvalidFPRReg)
1410 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1411 m_fpr = m_jit->reuse(op1.fpr());
1413 m_fpr = m_jit->fprAllocate();
// Fused double-compare + branch. If the taken block is the fall-through
// successor, invert the condition and swap targets so the common case falls
// through without a jump.
1417 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1419 BasicBlock* taken = branchNode->branchData()->taken.block;
1420 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1422 if (taken == nextBlock()) {
1423 condition = MacroAssembler::invert(condition);
1424 std::swap(taken, notTaken);
1427 SpeculateDoubleOperand op1(this, node->child1());
1428 SpeculateDoubleOperand op2(this, node->child2());
1430 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-equality compare + branch. Both children are speculated to be
// objects. When the masquerades-as-undefined watchpoint is valid we only need
// is-object checks; otherwise we must additionally fail speculation for
// objects with the MasqueradesAsUndefined type-info flag (their == semantics
// differ). Finally, object identity is plain pointer equality.
1434 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1436 BasicBlock* taken = branchNode->branchData()->taken.block;
1437 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1439 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// Fall-through optimization: invert and swap when taken is next.
1441 if (taken == nextBlock()) {
1442 condition = MacroAssembler::NotEqual;
1443 BasicBlock* tmp = taken;
1448 SpeculateCellOperand op1(this, node->child1());
1449 SpeculateCellOperand op2(this, node->child2());
1451 GPRReg op1GPR = op1.gpr();
1452 GPRReg op2GPR = op2.gpr();
1454 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
// Only emit the object check if abstract interpretation hasn't already
// proven the child is an object.
1455 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1457 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1459 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1461 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
// Watchpoint invalid: also speculate that neither object masquerades
// as undefined.
1464 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1466 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1467 m_jit.branchIfNotObject(op1GPR));
1469 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1471 MacroAssembler::NonZero,
1472 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1473 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1475 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1477 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1478 m_jit.branchIfNotObject(op2GPR));
1480 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1482 MacroAssembler::NonZero,
1483 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1484 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Object equality is reference equality: compare the cell pointers.
1487 branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean-compare + branch, with constant-folding of an Int32-constant
// operand on either side so only one register needs to be loaded.
1491 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1493 BasicBlock* taken = branchNode->branchData()->taken.block;
1494 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1496 // The branch instruction will branch to the taken block.
1497 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1498 if (taken == nextBlock()) {
1499 condition = JITCompiler::invert(condition);
1500 BasicBlock* tmp = taken;
1505 if (node->child1()->isInt32Constant()) {
1506 int32_t imm = node->child1()->asInt32();
1507 SpeculateBooleanOperand op2(this, node->child2());
1508 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1509 } else if (node->child2()->isInt32Constant()) {
1510 SpeculateBooleanOperand op1(this, node->child1());
1511 int32_t imm = node->child2()->asInt32();
1512 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1514 SpeculateBooleanOperand op1(this, node->child1());
1515 SpeculateBooleanOperand op2(this, node->child2());
1516 branch32(condition, op1.gpr(), op2.gpr(), taken);
// Emits String.prototype.slice. Inline fast paths: (1) empty result -> the
// shared empty string; (2) a single-character result whose underlying
// StringImpl is resolved -> look up the VM's preallocated single-character
// string table (with an operationSingleCharacterString slow path for chars
// >= 0x100). Everything else (length != 1, rope strings) goes to
// operationStringSubstr.
1522 void SpeculativeJIT::compileStringSlice(Node* node)
1524 SpeculateCellOperand string(this, node->child1());
1525 GPRTemporary startIndex(this);
1526 GPRTemporary temp(this);
1527 GPRTemporary temp2(this);
1529 GPRReg stringGPR = string.gpr();
1530 GPRReg startIndexGPR = startIndex.gpr();
1531 GPRReg tempGPR = temp.gpr();
1532 GPRReg temp2GPR = temp2.gpr();
1534 speculateString(node->child1(), stringGPR);
// Clamp/normalize start (and optional end) indices against the string
// length held in temp2GPR.
1537 m_jit.load32(JITCompiler::Address(stringGPR, JSString::offsetOfLength()), temp2GPR);
1539 emitPopulateSliceIndex(node->child2(), temp2GPR, startIndexGPR);
1541 emitPopulateSliceIndex(node->child3(), temp2GPR, tempGPR);
// No end argument: end defaults to the string length.
1543 m_jit.move(temp2GPR, tempGPR);
1546 CCallHelpers::JumpList doneCases;
1547 CCallHelpers::JumpList slowCases;
// start >= end: result is the empty string.
1549 auto nonEmptyCase = m_jit.branch32(MacroAssembler::Below, startIndexGPR, tempGPR);
1550 m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(&vm())), tempGPR);
1551 doneCases.append(m_jit.jump());
1553 nonEmptyCase.link(&m_jit);
1554 m_jit.sub32(startIndexGPR, tempGPR); // the size of the sliced string.
1555 slowCases.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(1)));
// A null StringImpl means a rope; must resolve via the slow path.
1557 m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), temp2GPR);
1558 slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, temp2GPR));
1560 m_jit.loadPtr(MacroAssembler::Address(temp2GPR, StringImpl::dataOffset()), tempGPR);
1562 // Load the character into scratchReg
1563 m_jit.zeroExtend32ToPtr(startIndexGPR, startIndexGPR);
1564 auto is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(temp2GPR, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1566 m_jit.load8(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesOne, 0), tempGPR);
1567 auto cont8Bit = m_jit.jump();
1569 is16Bit.link(&m_jit);
1570 m_jit.load16(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesTwo, 0), tempGPR);
// Characters >= 0x100 are not in the single-character table.
1572 auto bigCharacter = m_jit.branch32(MacroAssembler::AboveOrEqual, tempGPR, TrustedImm32(0x100));
1574 // 8 bit string values don't need the isASCII check.
1575 cont8Bit.link(&m_jit);
// Index into smallStrings.singleCharacterStrings(): scale the char code
// by pointer size (shift 2 on 32-bit, 3 on 64-bit) and load the JSString*.
1577 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), tempGPR);
1578 m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), tempGPR);
1579 m_jit.loadPtr(tempGPR, tempGPR);
1581 addSlowPathGenerator(
1583 bigCharacter, this, operationSingleCharacterString, tempGPR, tempGPR));
1585 addSlowPathGenerator(
1587 slowCases, this, operationStringSubstr, tempGPR, stringGPR, startIndexGPR, tempGPR));
1589 doneCases.link(&m_jit);
1590 cellResult(tempGPR, node);
1593 void SpeculativeJIT::compileToLowerCase(Node* node)
1595 ASSERT(node->op() == ToLowerCase);
1596 SpeculateCellOperand string(this, node->child1());
1597 GPRTemporary temp(this);
1598 GPRTemporary index(this);
1599 GPRTemporary charReg(this);
1600 GPRTemporary length(this);
1602 GPRReg stringGPR = string.gpr();
1603 GPRReg tempGPR = temp.gpr();
1604 GPRReg indexGPR = index.gpr();
1605 GPRReg charGPR = charReg.gpr();
1606 GPRReg lengthGPR = length.gpr();
1608 speculateString(node->child1(), stringGPR);
1610 CCallHelpers::JumpList slowPath;
1612 m_jit.move(TrustedImmPtr(0), indexGPR);
1614 m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1615 slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1617 slowPath.append(m_jit.branchTest32(
1618 MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1619 MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1620 m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1621 m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1623 auto loopStart = m_jit.label();
1624 auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1625 m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1626 slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1627 m_jit.sub32(TrustedImm32('A'), charGPR);
1628 slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1630 m_jit.add32(TrustedImm32(1), indexGPR);
1631 m_jit.jump().linkTo(loopStart, &m_jit);
1633 slowPath.link(&m_jit);
1634 silentSpillAllRegisters(lengthGPR);
1635 callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1636 silentFillAllRegisters();
1637 m_jit.exceptionCheck();
1638 auto done = m_jit.jump();
1640 loopDone.link(&m_jit);
1641 m_jit.move(stringGPR, lengthGPR);
1644 cellResult(lengthGPR, node);
// Fused Int32-compare + branch; folds an Int32-constant operand on either
// side into an immediate. Mirrors compilePeepHoleBooleanBranch.
1647 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1649 BasicBlock* taken = branchNode->branchData()->taken.block;
1650 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1652 // The branch instruction will branch to the taken block.
1653 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1654 if (taken == nextBlock()) {
1655 condition = JITCompiler::invert(condition);
1656 BasicBlock* tmp = taken;
1661 if (node->child1()->isInt32Constant()) {
1662 int32_t imm = node->child1()->asInt32();
1663 SpeculateInt32Operand op2(this, node->child2());
1664 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1665 } else if (node->child2()->isInt32Constant()) {
1666 SpeculateInt32Operand op1(this, node->child1());
1667 int32_t imm = node->child2()->asInt32();
1668 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1670 SpeculateInt32Operand op1(this, node->child1());
1671 SpeculateInt32Operand op2(this, node->child2());
1672 branch32(condition, op1.gpr(), op2.gpr(), taken);
1678 // Returns true if the compare is fused with a subsequent branch.
// Central dispatcher for compare+branch fusion: picks the specialized
// peephole routine based on the compare's binary use kinds, falling back to
// the generic non-speculative peephole branch. On fusion, consumes the
// children explicitly and advances the block cursor past the branch node.
1679 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1681 // Fused compare & branch.
1682 unsigned branchIndexInBlock = detectPeepHoleBranch();
1683 if (branchIndexInBlock != UINT_MAX) {
1684 Node* branchNode = m_block->at(branchIndexInBlock);
1686 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1687 // so can be no intervening nodes to also reference the compare.
1688 ASSERT(node->adjustedRefCount() == 1);
1690 if (node->isBinaryUseKind(Int32Use))
1691 compilePeepHoleInt32Branch(node, branchNode, condition);
// Int52 fusion is 64-bit only (guarded by #if USE(JSVALUE64), elided here).
1693 else if (node->isBinaryUseKind(Int52RepUse))
1694 compilePeepHoleInt52Branch(node, branchNode, condition);
1695 #endif // USE(JSVALUE64)
1696 else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1697 // Use non-peephole comparison, for now.
1699 } else if (node->isBinaryUseKind(DoubleRepUse))
1700 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1701 else if (node->op() == CompareEq) {
1702 if (node->isBinaryUseKind(BooleanUse))
1703 compilePeepHoleBooleanBranch(node, branchNode, condition);
1704 else if (node->isBinaryUseKind(SymbolUse))
1705 compilePeepHoleSymbolEquality(node, branchNode);
1706 else if (node->isBinaryUseKind(ObjectUse))
1707 compilePeepHoleObjectEquality(node, branchNode);
// Mixed object / object-or-other comparisons: normalize so the
// proven-object child is passed first.
1708 else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1709 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1710 else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1711 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
// If one side is proven null/undefined, test only the other side.
1712 else if (!needsTypeCheck(node->child1(), SpecOther))
1713 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1714 else if (!needsTypeCheck(node->child2(), SpecOther))
1715 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1717 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1721 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// Children were not consumed by the helpers above; release them here.
1725 use(node->child1());
1726 use(node->child2());
1727 m_indexInBlock = branchIndexInBlock;
1728 m_currentNode = branchNode;
// Records in the variable-event stream that |node|'s value has become live
// for OSR exit purposes (no-op for nodes without a virtual register).
1734 void SpeculativeJIT::noticeOSRBirth(Node* node)
1736 if (!node->hasVirtualRegister())
1739 VirtualRegister virtualRegister = node->virtualRegister();
1740 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1742 info.noticeOSRBirth(*m_stream, node, virtualRegister);
// Emits no machine code: records a MovHint (value of child flows into the
// given unlinked local) in the OSR-exit variable-event stream.
1745 void SpeculativeJIT::compileMovHint(Node* node)
1747 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1749 Node* child = node->child1().node();
1750 noticeOSRBirth(child);
1752 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Gives up on compiling the current node: emits a crash (abortWithReason) at
// this point and clears allocation state. m_compileOkay is set back to true
// so the remainder of the graph can still be walked without re-bailing.
1755 void SpeculativeJIT::bail(AbortReason reason)
1757 if (verboseCompilationEnabled())
1758 dataLog("Bailing compilation.\n");
1759 m_compileOkay = true;
1760 m_jit.abortWithReason(reason, m_lastGeneratedNode);
1761 clearGenerationInfo();
// Generates code for m_block: emits the block head label, handles
// CFA-unreachable and catch-entrypoint preludes, replays the head variable
// state into the OSR event stream, then compiles each node in order while
// stepping the abstract interpreter alongside.
1764 void SpeculativeJIT::compileCurrentBlock()
1766 ASSERT(m_compileOkay)
1771 ASSERT(m_block->isReachable);
1773 m_jit.blockHeads()[m_block->index] = m_jit.label();
1775 if (!m_block->intersectionOfCFAHasVisited) {
1776 // Don't generate code for basic blocks that are unreachable according to CFA.
1777 // But to be sure that nobody has generated a jump to this block, drop in a
1779 m_jit.abortWithReason(DFGUnreachableBasicBlock);
// Catch entrypoints are entered from the runtime with only the call frame
// set up; restore SP, callee saves, tag registers, and the code block slot.
1783 if (m_block->isCatchEntrypoint) {
1784 m_jit.addPtr(CCallHelpers::TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1785 m_jit.emitSaveCalleeSaves();
1786 m_jit.emitMaterializeTagCheckRegisters();
1787 m_jit.emitPutToCallFrameHeader(m_jit.codeBlock(), CallFrameSlot::codeBlock);
1790 m_stream->appendAndLog(VariableEvent::reset());
1792 m_jit.jitAssertHasValidCallFrame();
1793 m_jit.jitAssertTagsInPlace();
1794 m_jit.jitAssertArgumentCountSane();
1797 m_state.beginBasicBlock(m_block);
// Replay variables live at block head into the OSR event stream so exits
// taken inside this block can reconstruct them.
1799 for (size_t i = m_block->variablesAtHead.size(); i--;) {
1800 int operand = m_block->variablesAtHead.operandForIndex(i);
1801 Node* node = m_block->variablesAtHead[i];
1803 continue; // No need to record dead SetLocal's.
1805 VariableAccessData* variable = node->variableAccessData();
1807 if (!node->refCount())
1808 continue; // No need to record dead SetLocal's.
1809 format = dataFormatFor(variable->flushFormat());
1810 m_stream->appendAndLog(
1811 VariableEvent::setLocal(
1812 VirtualRegister(operand),
1813 variable->machineLocal(),
1817 m_origin = NodeOrigin();
// Main per-node loop: run the abstract interpreter in lockstep with code
// generation so speculation decisions see up-to-date proofs.
1819 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1820 m_currentNode = m_block->at(m_indexInBlock);
1822 // We may have hit a contradiction that the CFA was aware of but that the JIT
1823 // didn't cause directly.
1824 if (!m_state.isValid()) {
1825 bail(DFGBailedAtTopOfBlock);
1829 m_interpreter.startExecuting();
1830 m_interpreter.executeKnownEdgeTypes(m_currentNode);
1831 m_jit.setForNode(m_currentNode);
1832 m_origin = m_currentNode->origin;
1833 if (validationEnabled())
1834 m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1835 m_lastGeneratedNode = m_currentNode->op();
1837 ASSERT(m_currentNode->shouldGenerate());
1839 if (verboseCompilationEnabled()) {
1841 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1842 (int)m_currentNode->index(),
1843 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1847 if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1848 m_jit.jitReleaseAssertNoException(*m_jit.vm());
1850 m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1852 compile(m_currentNode);
1854 if (belongsInMinifiedGraph(m_currentNode->op()))
1855 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1857 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1858 m_jit.clearRegisterAllocationOffsets();
1861 if (!m_compileOkay) {
1862 bail(DFGBailedAtEndOfNode);
1866 // Make sure that the abstract state is rematerialized for the next node.
1867 m_interpreter.executeEffects(m_indexInBlock);
1870 // Perform the most basic verification that children have been used correctly.
1871 if (!ASSERT_DISABLED) {
1872 for (auto& info : m_generationInfo)
1873 RELEASE_ASSERT(!info.alive());
1877 // If we are making type predictions about our arguments then
1878 // we need to check that they are correct on function entry.
// Emits per-argument type checks matching each argument's flush format.
// The two switch bodies below are the 64-bit (tagged-value compare) and
// 32-bit (tag-word compare) variants; the #if USE(JSVALUE64)/#else lines
// are elided in this extracted listing.
1879 void SpeculativeJIT::checkArgumentTypes()
1881 ASSERT(!m_currentNode);
1882 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1884 auto& arguments = m_jit.graph().m_rootToArguments.find(m_jit.graph().block(0))->value;
1885 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1886 Node* node = arguments[i];
1888 // The argument is dead. We don't do any checks for such arguments.
1892 ASSERT(node->op() == SetArgument);
1893 ASSERT(node->shouldGenerate());
1895 VariableAccessData* variableAccessData = node->variableAccessData();
1896 FlushFormat format = variableAccessData->flushFormat();
// FlushedJSValue accepts anything; no check needed.
1898 if (format == FlushedJSValue)
1901 VirtualRegister virtualRegister = variableAccessData->local();
1903 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// 64-bit checks: compare the full 64-bit encoded value against the
// number/boolean/cell tagging scheme.
1907 case FlushedInt32: {
1908 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1911 case FlushedBoolean: {
1912 GPRTemporary temp(this);
1913 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
// XOR with ValueFalse maps false->0 and true->1; anything else has
// bits outside the low bit set.
1914 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1915 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1919 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1923 RELEASE_ASSERT_NOT_REACHED();
// 32-bit checks: compare the tag word directly.
1928 case FlushedInt32: {
1929 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1932 case FlushedBoolean: {
1933 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1937 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1941 RELEASE_ASSERT_NOT_REACHED();
1947 m_origin = NodeOrigin();
// Top-level driver: emit entry argument checks, then compile every basic
// block in index order.
1950 bool SpeculativeJIT::compile()
1952 checkArgumentTypes();
1954 ASSERT(!m_currentNode);
1955 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1956 m_jit.setForBlockIndex(blockIndex);
1957 m_block = m_jit.graph().block(blockIndex);
1958 compileCurrentBlock();
// Collects the head labels of all OSR-target and catch-entrypoint blocks
// into m_osrEntryHeads for later linking.
1964 void SpeculativeJIT::createOSREntries()
1966 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1967 BasicBlock* block = m_jit.graph().block(blockIndex);
1970 if (block->isOSRTarget || block->isCatchEntrypoint) {
1971 // Currently we don't have OSR entry trampolines. We could add them
1973 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// After final code emission: resolve the entry heads gathered by
// createOSREntries against the LinkBuffer. Catch entrypoints additionally
// record the flush format of each argument so the runtime can box/unbox
// values correctly when entering at a catch block.
1978 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1980 unsigned osrEntryIndex = 0;
1981 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1982 BasicBlock* block = m_jit.graph().block(blockIndex);
1985 if (!block->isOSRTarget && !block->isCatchEntrypoint)
1987 if (block->isCatchEntrypoint) {
1988 auto& argumentsVector = m_jit.graph().m_rootToArguments.find(block)->value;
1989 Vector<FlushFormat> argumentFormats;
1990 argumentFormats.reserveInitialCapacity(argumentsVector.size());
1991 for (Node* setArgument : argumentsVector) {
1993 FlushFormat flushFormat = setArgument->variableAccessData()->flushFormat();
1994 ASSERT(flushFormat == FlushedInt32 || flushFormat == FlushedCell || flushFormat == FlushedBoolean || flushFormat == FlushedJSValue);
1995 argumentFormats.uncheckedAppend(flushFormat);
// Dead arguments carry no value; mark them DeadFlush.
1997 argumentFormats.uncheckedAppend(DeadFlush);
1999 m_jit.noticeCatchEntrypoint(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer, WTFMove(argumentFormats));
2001 ASSERT(block->isOSRTarget);
2002 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
2006 m_jit.jitCode()->finalizeOSREntrypoints();
2007 m_jit.jitCode()->common.finalizeCatchEntrypoints();
// Every collected entry head must have been consumed exactly once.
2009 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
2011 if (verboseCompilationEnabled()) {
2012 DumpContext dumpContext;
2013 dataLog("OSR Entries:\n");
2014 for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
2015 dataLog(" ", inContext(entryData, &dumpContext), "\n");
2016 if (!dumpContext.isEmpty())
2017 dumpContext.dump(WTF::dataFile());
// Polling-based VM trap check (used when signal-based traps are disabled):
// test the VM's need-trap-handling flag and call operationHandleTraps on the
// slow path.
2021 void SpeculativeJIT::compileCheckTraps(Node*)
2023 ASSERT(Options::usePollingTraps());
2024 GPRTemporary unused(this);
2025 GPRReg unusedGPR = unused.gpr();
2027 JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
2028 JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
2030 addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
// Emits PutByVal for a double-storage array. Speculates the stored value is a
// full real number (no NaN/impure values), then stores into the butterfly.
// PutByValAlias skips bounds checks entirely (a prior access proved them);
// otherwise emit in-bounds checks and, when the array mode allows out-of-
// bounds writes, a slow path that may grow the array.
2033 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
2035 Edge child3 = m_jit.graph().varArgChild(node, 2);
2036 Edge child4 = m_jit.graph().varArgChild(node, 3);
2038 ArrayMode arrayMode = node->arrayMode();
2040 GPRReg baseReg = base.gpr();
2041 GPRReg propertyReg = property.gpr();
2043 SpeculateDoubleOperand value(this, child3);
2045 FPRReg valueReg = value.fpr();
// NaN check: DoubleNotEqualOrUnordered against itself is true only for NaN.
2048 JSValueRegs(), child3, SpecFullRealNumber,
2050 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
2055 StorageOperand storage(this, child4);
2056 GPRReg storageReg = storage.gpr();
2058 if (node->op() == PutByValAlias) {
2059 // Store the value to the array.
2060 GPRReg propertyReg = property.gpr();
2061 FPRReg valueReg = value.fpr();
2062 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2064 noResult(m_currentNode);
2068 GPRTemporary temporary;
2069 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2071 MacroAssembler::Jump slowCase;
2073 if (arrayMode.isInBounds()) {
// In-bounds mode: index >= publicLength is a speculation failure.
2075 OutOfBounds, JSValueRegs(), 0,
2076 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2078 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
// Between publicLength and vectorLength: can store, but must bump
// publicLength. Beyond vectorLength: slow path (or exit if OOB writes
// are not allowed by the array mode).
2080 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2082 if (!arrayMode.isOutOfBounds())
2083 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
2085 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2086 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2088 inBounds.link(&m_jit);
2091 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2098 if (arrayMode.isOutOfBounds()) {
2099 addSlowPathGenerator(
// Strictness of the store picks the runtime fallback variant.
2102 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2103 NoResult, baseReg, propertyReg, valueReg));
2106 noResult(m_currentNode, UseChildrenCalledExplicitly);
// Emits code for GetCharCodeAt: loads the char code at a strict-int32 index of a
// resolved JSString. Index is bounds-checked against the string's length via an
// unsigned compare (catches negatives too), then the 8-bit or 16-bit payload is
// loaded depending on the StringImpl's is8Bit flag. Produces an int32 result.
// NOTE(review): listing has elided lines (switch braces etc.) — verify against full source.
2109 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2111 SpeculateCellOperand string(this, node->child1());
2112 SpeculateStrictInt32Operand index(this, node->child2());
2113 StorageOperand storage(this, node->child3());
2115 GPRReg stringReg = string.gpr();
2116 GPRReg indexReg = index.gpr();
2117 GPRReg storageReg = storage.gpr();
2119 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2121 // unsigned comparison so we can filter out negative indices and indices that are too large
2122 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2124 GPRTemporary scratch(this);
2125 GPRReg scratchReg = scratch.gpr();
2127 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
// Masks the (already bounds-checked) index with the StringImpl mask — this is the
// Spectre-mitigation index masking present in this era of the codebase.
2128 m_jit.and32(MacroAssembler::Address(scratchReg, StringImpl::maskOffset()), indexReg);
2130 // Load the character into scratchReg
2131 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2133 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2134 JITCompiler::Jump cont8Bit = m_jit.jump();
2136 is16Bit.link(&m_jit);
2138 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2140 cont8Bit.link(&m_jit);
2142 int32Result(scratchReg, m_currentNode);
// Emits code for GetByVal on a string (string[i]). In-bounds indices yield the
// single-character JSString from the VM's smallStrings table (with a slow path for
// char codes >= 0x100). Out-of-bounds behavior depends on whether the string
// prototype chain is "sane": if so, OOB loads can return undefined via a dedicated
// slow-path generator; otherwise a generic operation call is used. On JSVALUE32_64
// a separate tag register is materialized for the JSValue result.
// NOTE(review): listing has elided lines (#else/#endif partners, closing braces,
// slowPathCall openers) — verify control flow against the full source.
2145 void SpeculativeJIT::compileGetByValOnString(Node* node)
2147 SpeculateCellOperand base(this, node->child1());
2148 SpeculateStrictInt32Operand property(this, node->child2());
2149 StorageOperand storage(this, node->child3());
2150 GPRReg baseReg = base.gpr();
2151 GPRReg propertyReg = property.gpr();
2152 GPRReg storageReg = storage.gpr();
2154 GPRTemporary scratch(this);
2155 GPRReg scratchReg = scratch.gpr();
2156 #if USE(JSVALUE32_64)
// Tag register only needed when an OOB path may return a non-cell (undefined).
2157 GPRTemporary resultTag;
2158 GPRReg resultTagReg = InvalidGPRReg;
2159 if (node->arrayMode().isOutOfBounds()) {
2160 GPRTemporary realResultTag(this);
2161 resultTag.adopt(realResultTag);
2162 resultTagReg = resultTag.gpr();
2166 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2168 // unsigned comparison so we can filter out negative indices and indices that are too large
2169 JITCompiler::Jump outOfBounds = m_jit.branch32(
2170 MacroAssembler::AboveOrEqual, propertyReg,
2171 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2172 if (node->arrayMode().isInBounds())
2173 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2175 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
// Spectre-style index masking via the StringImpl mask, as in compileGetCharCodeAt.
2176 m_jit.and32(MacroAssembler::Address(scratchReg, StringImpl::maskOffset()), propertyReg);
2178 // Load the character into scratchReg
2179 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2181 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2182 JITCompiler::Jump cont8Bit = m_jit.jump();
2184 is16Bit.link(&m_jit);
2186 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Char codes >= 0x100 are not in the single-character string table; go slow.
2188 JITCompiler::Jump bigCharacter =
2189 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2191 // 8 bit string values don't need the isASCII check.
2192 cont8Bit.link(&m_jit);
// Index into smallStrings.singleCharacterStrings(): scale by pointer size (4 or 8).
2194 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2195 m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2196 m_jit.loadPtr(scratchReg, scratchReg);
2198 addSlowPathGenerator(
2200 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2202 if (node->arrayMode().isOutOfBounds()) {
2203 #if USE(JSVALUE32_64)
2204 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2207 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2208 bool prototypeChainIsSane = false;
2209 if (globalObject->stringPrototypeChainIsSane()) {
2210 // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2211 // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2212 // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2213 // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2214 // indexed properties either.
2215 // https://bugs.webkit.org/show_bug.cgi?id=144668
2216 m_jit.graph().registerAndWatchStructureTransition(globalObject->stringPrototype()->structure());
2217 m_jit.graph().registerAndWatchStructureTransition(globalObject->objectPrototype()->structure());
// Re-query after watching the structures: sanity may have changed concurrently.
2218 prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2220 if (prototypeChainIsSane) {
2222 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2223 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2225 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2226 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2227 baseReg, propertyReg));
2231 addSlowPathGenerator(
2233 outOfBounds, this, operationGetByValStringInt,
2234 scratchReg, baseReg, propertyReg));
2236 addSlowPathGenerator(
2238 outOfBounds, this, operationGetByValStringInt,
2239 JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2244 jsValueResult(scratchReg, m_currentNode);
2246 jsValueResult(resultTagReg, scratchReg, m_currentNode);
2249 cellResult(scratchReg, m_currentNode);
// Emits code for String.fromCharCode with one argument. UntypedUse operands call the
// generic operation; int32 operands use the VM's single-character string table, with
// a slow path for codes outside the table (branch at 0xff) or when the table entry
// is null (string not materialized yet).
// NOTE(review): listing has elided lines (braces, #if partners) — verify against full source.
2252 void SpeculativeJIT::compileFromCharCode(Node* node)
2254 Edge& child = node->child1();
2255 if (child.useKind() == UntypedUse) {
2256 JSValueOperand opr(this, child);
2257 JSValueRegs oprRegs = opr.jsValueRegs();
2259 GPRTemporary result(this);
2260 JSValueRegs resultRegs = JSValueRegs(result.gpr());
// 32_64 variant of the result registers (payload + tag).
2262 GPRTemporary resultTag(this);
2263 GPRTemporary resultPayload(this);
2264 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2267 callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2268 m_jit.exceptionCheck();
2270 jsValueResult(resultRegs, node);
2274 SpeculateStrictInt32Operand property(this, child);
2275 GPRReg propertyReg = property.gpr();
2276 GPRTemporary smallStrings(this);
2277 GPRTemporary scratch(this);
2278 GPRReg scratchReg = scratch.gpr();
2279 GPRReg smallStringsReg = smallStrings.gpr();
2281 JITCompiler::JumpList slowCases;
// Codes at/above 0xff take the slow path (unsigned compare also catches negatives).
2282 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2283 m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2284 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null table entry means the small string hasn't been created yet — go slow.
2286 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2287 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2288 cellResult(scratchReg, m_currentNode);
// Inspects the current DataFormat of a node's generated value to decide how
// ValueToInt32 should consume it: already an integer, a boxed JSValue that needs
// conversion, or a definitely-failing format (boolean/cell) which terminates
// speculative execution.
2291 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2293 VirtualRegister virtualRegister = node->virtualRegister();
2294 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2296 switch (info.registerFormat()) {
2297 case DataFormatStorage:
2298 RELEASE_ASSERT_NOT_REACHED();
// A raw boolean or cell can never convert on this path — bail out of speculation.
2300 case DataFormatBoolean:
2301 case DataFormatCell:
2302 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2303 return GeneratedOperandTypeUnknown;
2305 case DataFormatNone:
2306 case DataFormatJSCell:
2308 case DataFormatJSBoolean:
2309 case DataFormatJSDouble:
2310 return GeneratedOperandJSValue;
2312 case DataFormatJSInt32:
2313 case DataFormatInt32:
2314 return GeneratedOperandInteger;
2317 RELEASE_ASSERT_NOT_REACHED();
2318 return GeneratedOperandTypeUnknown;
// Emits code for ValueToInt32. Dispatches on the child's use kind:
//  - Int52RepUse (64-bit builds): truncate via zero-extension.
//  - DoubleRepUse: hardware truncation with a slow-path call on failure.
//  - JSValue paths: depending on the operand's current DataFormat, either move an
//    existing int32 through, or unbox/convert a boxed number (with separate
//    JSVALUE64 and JSVALUE32_64 sequences, including boolean handling on NotCell).
// NOTE(review): listing has elided lines (case labels, #if/#else partners, braces,
// DFG_TYPE_CHECK openers) — treat the visible structure as a partial view.
2322 void SpeculativeJIT::compileValueToInt32(Node* node)
2324 switch (node->child1().useKind()) {
2327 SpeculateStrictInt52Operand op1(this, node->child1());
2328 GPRTemporary result(this, Reuse, op1);
2329 GPRReg op1GPR = op1.gpr();
2330 GPRReg resultGPR = result.gpr();
// Int52 -> int32 is a simple low-32-bit truncation.
2331 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2332 int32Result(resultGPR, node, DataFormatInt32);
2335 #endif // USE(JSVALUE64)
2337 case DoubleRepUse: {
2338 GPRTemporary result(this);
2339 SpeculateDoubleOperand op1(this, node->child1());
2340 FPRReg fpr = op1.fpr();
2341 GPRReg gpr = result.gpr();
// Fast truncation; values that don't fit in int32 fall through to the C helper.
2342 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2344 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2345 hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2347 int32Result(gpr, node);
2353 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2354 case GeneratedOperandInteger: {
2355 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2356 GPRTemporary result(this, Reuse, op1);
2357 m_jit.move(op1.gpr(), result.gpr());
2358 int32Result(result.gpr(), node, op1.format());
2361 case GeneratedOperandJSValue: {
2362 GPRTemporary result(this);
// ---- JSVALUE64 path: single 64-bit register holds the boxed value. ----
2364 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2366 GPRReg gpr = op1.gpr();
2367 GPRReg resultGpr = result.gpr();
2368 FPRTemporary tempFpr(this);
2369 FPRReg fpr = tempFpr.fpr();
// On JSVALUE64, values >= tagTypeNumberRegister encode int32s.
2371 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2372 JITCompiler::JumpList converted;
2374 if (node->child1().useKind() == NumberUse) {
2376 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2378 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2380 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2383 JSValueRegs(gpr), node->child1(), ~SpecCellCheck, m_jit.branchIfCell(JSValueRegs(gpr)));
2385 // It's not a cell: so true turns into 1 and all else turns into 0.
2386 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2387 converted.append(m_jit.jump());
2389 isNumber.link(&m_jit);
2392 // First, if we get here we have a double encoded as a JSValue
2393 unboxDouble(gpr, resultGpr, fpr);
2395 silentSpillAllRegisters(resultGpr);
2396 callOperation(operationToInt32, resultGpr, fpr);
2397 silentFillAllRegisters();
2399 converted.append(m_jit.jump());
2401 isInteger.link(&m_jit);
2402 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2404 converted.link(&m_jit);
// ---- JSVALUE32_64 path: separate tag/payload registers. ----
2406 Node* childNode = node->child1().node();
2407 VirtualRegister virtualRegister = childNode->virtualRegister();
2408 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2410 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2412 GPRReg payloadGPR = op1.payloadGPR();
2413 GPRReg resultGpr = result.gpr();
2415 JITCompiler::JumpList converted;
2417 if (info.registerFormat() == DataFormatJSInt32)
2418 m_jit.move(payloadGPR, resultGpr);
2420 GPRReg tagGPR = op1.tagGPR();
2421 FPRTemporary tempFpr(this);
2422 FPRReg fpr = tempFpr.fpr();
2423 FPRTemporary scratch(this);
2425 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2427 if (node->child1().useKind() == NumberUse) {
2429 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2431 MacroAssembler::AboveOrEqual, tagGPR,
2432 TrustedImm32(JSValue::LowestTag)));
2434 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2437 op1.jsValueRegs(), node->child1(), ~SpecCell,
2438 m_jit.branchIfCell(op1.jsValueRegs()));
2440 // It's not a cell: so true turns into 1 and all else turns into 0.
2441 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2442 m_jit.move(TrustedImm32(0), resultGpr);
2443 converted.append(m_jit.jump());
2445 isBoolean.link(&m_jit);
// Boolean payload is already 0/1; reuse it directly as the int32 result.
2446 m_jit.move(payloadGPR, resultGpr);
2447 converted.append(m_jit.jump());
2449 isNumber.link(&m_jit);
2452 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2454 silentSpillAllRegisters(resultGpr);
2455 callOperation(operationToInt32, resultGpr, fpr);
2456 silentFillAllRegisters();
2458 converted.append(m_jit.jump());
2460 isInteger.link(&m_jit);
2461 m_jit.move(payloadGPR, resultGpr);
2463 converted.link(&m_jit);
2466 int32Result(resultGpr, node);
2469 case GeneratedOperandTypeUnknown:
2470 RELEASE_ASSERT(!m_compileOkay);
2473 RELEASE_ASSERT_NOT_REACHED();
2478 ASSERT(!m_compileOkay);
// Emits code for UInt32ToNumber. If overflow is allowed (DoesOverflow), produce an
// Int52 (when enabled) or a double — negative-as-signed inputs are fixed up by
// adding 2^32. Otherwise (CheckOverflow) speculate the value is non-negative and
// keep it as int32.
2483 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2485 if (doesOverflow(node->arithMode())) {
2486 if (enableInt52()) {
2487 SpeculateInt32Operand op1(this, node->child1());
2488 GPRTemporary result(this, Reuse, op1);
// Zero-extension reinterprets the int32 bits as an unsigned value in Int52 range.
2489 m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2490 strictInt52Result(result.gpr(), node);
2493 SpeculateInt32Operand op1(this, node->child1());
2494 FPRTemporary result(this);
2496 GPRReg inputGPR = op1.gpr();
2497 FPRReg outputFPR = result.fpr();
2499 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
// If the signed interpretation was negative, the unsigned value is input + 2^32.
2501 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2502 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2503 positive.link(&m_jit);
2505 doubleResult(outputFPR, node);
2509 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2511 SpeculateInt32Operand op1(this, node->child1());
2512 GPRTemporary result(this);
2514 m_jit.move(op1.gpr(), result.gpr());
// Negative means the uint32 doesn't fit in int32 — OSR exit.
2516 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2518 int32Result(result.gpr(), node, op1.format());
// Emits code for DoubleAsInt32: converts a double to int32, OSR-exiting on any
// inexact conversion (and on negative zero when the arith mode requires that check).
2521 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2523 SpeculateDoubleOperand op1(this, node->child1());
2524 FPRTemporary scratch(this);
2525 GPRTemporary result(this);
2527 FPRReg valueFPR = op1.fpr();
2528 FPRReg scratchFPR = scratch.fpr();
2529 GPRReg resultGPR = result.gpr();
2531 JITCompiler::JumpList failureCases;
// This node only exists in overflow-checking form.
2532 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2533 m_jit.branchConvertDoubleToInt32(
2534 valueFPR, resultGPR, failureCases, scratchFPR,
2535 shouldCheckNegativeZero(node->arithMode()));
2536 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2538 int32Result(resultGPR, node);
// Emits code for DoubleRep: produces a double representation of the child.
//  - RealNumberUse: unbox a double, or (if NaN, meaning it wasn't a boxed double)
//    type-check for int32 and convert.
//  - Number/NotCell paths: full ToNumber-style unboxing with separate JSVALUE64 and
//    JSVALUE32_64 sequences; NotCellUse additionally maps undefined->NaN,
//    null/false->0, true->1.
//  - Int52RepUse (64-bit builds): int64 -> double conversion.
// NOTE(review): listing has elided lines (case labels, #if/#else partners,
// DFG_TYPE_CHECK openers, braces) — treat the visible structure as a partial view.
2541 void SpeculativeJIT::compileDoubleRep(Node* node)
2543 switch (node->child1().useKind()) {
2544 case RealNumberUse: {
2545 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2546 FPRTemporary result(this);
2548 JSValueRegs op1Regs = op1.jsValueRegs();
2549 FPRReg resultFPR = result.fpr();
2552 GPRTemporary temp(this);
2553 GPRReg tempGPR = temp.gpr();
// Unconditionally unbox; a non-double encoding yields NaN, detected just below.
2554 m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2556 FPRTemporary temp(this);
2557 FPRReg tempFPR = temp.fpr();
2558 unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
// resultFPR == resultFPR is false only for NaN, i.e. the unbox "failed".
2561 JITCompiler::Jump done = m_jit.branchDouble(
2562 JITCompiler::DoubleEqual, resultFPR, resultFPR);
2565 op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2566 m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2570 doubleResult(resultFPR, node);
2576 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
// If abstract interpretation already proved int32, take the cheap conversion.
2578 SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2579 if (isInt32Speculation(possibleTypes)) {
2580 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2581 FPRTemporary result(this);
2582 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2583 doubleResult(result.fpr(), node);
2587 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2588 FPRTemporary result(this);
// ---- JSVALUE64 path ----
2591 GPRTemporary temp(this);
2593 GPRReg op1GPR = op1.gpr();
2594 GPRReg tempGPR = temp.gpr();
2595 FPRReg resultFPR = result.fpr();
2596 JITCompiler::JumpList done;
2598 JITCompiler::Jump isInteger = m_jit.branch64(
2599 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2601 if (node->child1().useKind() == NotCellUse) {
2602 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2603 JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
// null (and false, below) convert to +0.
2605 static const double zero = 0;
2606 m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2608 JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2609 done.append(isNull);
2611 DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCellCheck,
2612 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2614 JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2615 static const double one = 1;
2616 m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2617 done.append(m_jit.jump());
2618 done.append(isFalse);
// undefined converts to NaN.
2620 isUndefined.link(&m_jit);
2621 static const double NaN = PNaN;
2622 m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2623 done.append(m_jit.jump());
2625 isNumber.link(&m_jit);
2626 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2628 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2629 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2632 unboxDouble(op1GPR, tempGPR, resultFPR);
2633 done.append(m_jit.jump());
2635 isInteger.link(&m_jit);
2636 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2638 #else // USE(JSVALUE64) -> this is the 32_64 case
2639 FPRTemporary temp(this);
2641 GPRReg op1TagGPR = op1.tagGPR();
2642 GPRReg op1PayloadGPR = op1.payloadGPR();
2643 FPRReg tempFPR = temp.fpr();
2644 FPRReg resultFPR = result.fpr();
2645 JITCompiler::JumpList done;
2647 JITCompiler::Jump isInteger = m_jit.branch32(
2648 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2650 if (node->child1().useKind() == NotCellUse) {
2651 JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2652 JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2654 static const double zero = 0;
2655 m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2657 JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2658 done.append(isNull);
2660 DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2662 JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2663 static const double one = 1;
2664 m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2665 done.append(m_jit.jump());
2666 done.append(isFalse);
2668 isUndefined.link(&m_jit);
2669 static const double NaN = PNaN;
2670 m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2671 done.append(m_jit.jump());
2673 isNumber.link(&m_jit);
2674 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2676 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2677 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2680 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2681 done.append(m_jit.jump());
2683 isInteger.link(&m_jit);
2684 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2686 #endif // USE(JSVALUE64)
2688 doubleResult(resultFPR, node);
// Int52RepUse (64-bit builds): direct int64 -> double conversion.
2694 SpeculateStrictInt52Operand value(this, node->child1());
2695 FPRTemporary result(this);
2697 GPRReg valueGPR = value.gpr();
2698 FPRReg resultFPR = result.fpr();
2700 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2702 doubleResult(resultFPR, node);
2705 #endif // USE(JSVALUE64)
2708 RELEASE_ASSERT_NOT_REACHED();
// Emits code for ValueRep: boxes a machine-level representation back into a JSValue.
//  - DoubleRepUse: purify impure NaN if needed (canonical NaN bit pattern), then box.
//  - Int52RepUse (64-bit builds): box via boxInt52.
// NOTE(review): listing has elided lines (case labels, braces) — partial view.
2713 void SpeculativeJIT::compileValueRep(Node* node)
2715 switch (node->child1().useKind()) {
2716 case DoubleRepUse: {
2717 SpeculateDoubleOperand value(this, node->child1());
2718 JSValueRegsTemporary result(this);
2720 FPRReg valueFPR = value.fpr();
2721 JSValueRegs resultRegs = result.regs();
2723 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2724 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2725 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2726 // local was purified.
2727 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2728 m_jit.purifyNaN(valueFPR);
2730 boxDouble(valueFPR, resultRegs);
2732 jsValueResult(resultRegs, node);
2738 SpeculateStrictInt52Operand value(this, node->child1());
2739 GPRTemporary result(this);
2741 GPRReg valueGPR = value.gpr();
2742 GPRReg resultGPR = result.gpr();
2744 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2746 jsValueResult(resultGPR, node);
2749 #endif // USE(JSVALUE64)
2752 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the byte range for typed-array stores (used by the constant
// path of getIntTypedArrayStoreOperand below).
// NOTE(review): the body (original lines 2758-2766) is elided from this listing;
// consult the full source for the clamping/rounding semantics.
2757 static double clampDoubleToByte(double d)
// Clamps the int32 in `result` (in place) to [0, 255] for Uint8ClampedArray stores:
// values <= 0xff (unsigned — also catches 0..255) skip clamping, too-big values
// become 255, and the remaining (negative) values become 0.
// NOTE(review): lines linking `tooBig` and `clamped` (original 2773, 2775) are
// elided from this listing.
2767 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2769 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2770 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2771 jit.xorPtr(result, result);
2772 MacroAssembler::Jump clamped = jit.jump();
2774 jit.move(JITCompiler::TrustedImm32(255), result);
2776 inBounds.link(&jit);
// Clamps a double in `source` to a byte in `result` for Uint8ClampedArray stores:
// NaN and values <= 0 become 0, values > 255 become 255, otherwise round-half-up
// by adding 0.5 and truncating. (Note: ties-to-even is what the spec wants; see
// the FIXME below.)
// NOTE(review): lines linking `tooBig` and `zeroed` (original 2800-2801, 2805-2806)
// are elided from this listing.
2779 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2781 // Unordered compare so we pick up NaN
2782 static const double zero = 0;
2783 static const double byteMax = 255;
2784 static const double half = 0.5;
2785 jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2786 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2787 jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2788 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2790 jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2791 // FIXME: This should probably just use a floating point round!
2792 // https://bugs.webkit.org/show_bug.cgi?id=72054
2793 jit.addDouble(source, scratch);
2794 jit.truncateDoubleToInt32(scratch, result);
2795 MacroAssembler::Jump truncatedInt = jit.jump();
2797 tooSmall.link(&jit);
2798 jit.xorPtr(result, result);
2799 MacroAssembler::Jump zeroed = jit.jump();
2802 jit.move(JITCompiler::TrustedImm32(255), result);
2804 truncatedInt.link(&jit);
// Returns the jump taken when `indexGPR` is out of bounds for the typed array in
// `baseGPR`, or an unset Jump when no check is needed: PutByValAlias implies a
// prior check, and a foldable view with a constant in-range index proves safety
// statically. For a foldable view the length is compared as an immediate; otherwise
// it is loaded from the view object.
2809 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2811 if (node->op() == PutByValAlias)
2812 return JITCompiler::Jump();
2813 JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2814 m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2816 uint32_t length = view->length();
2817 Node* indexNode = m_jit.graph().child(node, 1).node();
// Constant index provably below the folded length: no runtime check at all.
2818 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2819 return JITCompiler::Jump();
2820 return m_jit.branch32(
2821 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2823 return m_jit.branch32(
2824 MacroAssembler::AboveOrEqual, indexGPR,
2825 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a typed-array bounds check as an OSR-exit speculation; a no-op when
// jumpForTypedArrayOutOfBounds determined no check is needed.
2828 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2830 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2833 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// On the out-of-bounds path of a typed-array access, checks whether the view has
// been neutered (detached): a WastefulTypedArray with a null vector pointer fails
// an Uncountable speculation. Returns the jump that skips this path when the access
// was in bounds (unset if there was no OOB jump to begin with).
2836 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2838 JITCompiler::Jump done;
2839 if (outOfBounds.isSet()) {
2840 done = m_jit.jump();
// In-bounds array modes treat OOB as a speculation failure outright.
2841 if (node->arrayMode().isInBounds())
2842 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2844 outOfBounds.link(&m_jit);
2846 JITCompiler::Jump notWasteful = m_jit.branch32(
2847 MacroAssembler::NotEqual,
2848 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2849 TrustedImm32(WastefulTypedArray));
// A null vector on a wasteful view means the backing buffer was detached.
2851 JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2852 MacroAssembler::Zero,
2853 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2854 speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2855 notWasteful.link(&m_jit);
// Loads one element of an integer typed array into `resultReg`, switching on the
// element size (1/2/4 bytes) and using sign- or zero-extending loads per the
// element type's signedness.
// NOTE(review): the case labels and sign/unsigned selection lines (original 2864-5,
// 2867, 2869-71, etc.) are elided from this listing.
2861 void SpeculativeJIT::loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
2863 switch (elementSize(type)) {
2866 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2868 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2872 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2874 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2877 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Publishes the result of an integer typed-array load. Small or signed elements fit
// int32 directly. Uint32 needs care: speculate int32 (non-negative) when profitable,
// else produce an Int52, else convert to double with a +2^32 fixup for values whose
// sign bit is set.
2884 void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
2886 if (elementSize(type) < 4 || isSigned(type)) {
2887 int32Result(resultReg, node);
2891 ASSERT(elementSize(type) == 4 && !isSigned(type));
2892 if (node->shouldSpeculateInt32() && canSpeculate) {
// Uint32 value with the high bit set doesn't fit int32 — OSR exit.
2893 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2894 int32Result(resultReg, node);
2899 if (node->shouldSpeculateAnyInt()) {
2900 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2901 strictInt52Result(resultReg, node);
2906 FPRTemporary fresult(this);
2907 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
// Signed interpretation was negative: unsigned value is result + 2^32.
2908 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2909 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2910 positive.link(&m_jit);
2911 doubleResult(fresult.fpr(), node);
// Emits code for GetByVal on an integer typed array: bounds check, element load,
// then result publication via setIntTypedArrayLoadResult.
2914 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2916 ASSERT(isInt(type));
2918 SpeculateCellOperand base(this, node->child1());
2919 SpeculateStrictInt32Operand property(this, node->child2());
2920 StorageOperand storage(this, node->child3());
2922 GPRReg baseReg = base.gpr();
2923 GPRReg propertyReg = property.gpr();
2924 GPRReg storageReg = storage.gpr();
2926 GPRTemporary result(this);
2927 GPRReg resultReg = result.gpr();
2929 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2931 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2932 loadFromIntTypedArray(storageReg, propertyReg, resultReg, type);
// GetByVal may speculate int32 for uint32 loads; Atomics callers pass false.
2933 bool canSpeculate = true;
2934 setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
// Materializes the value operand for an integer typed-array store into `value`,
// applying byte-clamping when `isClamped`. Handles: compile-time constants (folded
// and clamped statically, or terminating speculation for non-numbers), Int32 use,
// Int52 use (64-bit builds, with inline [0,255] clamping), and DoubleRep use
// (clamped via compileClampDoubleToByte, or truncated with a boxed-value slow path
// that appends to `slowPathCases`). On JSVALUE32_64, also materializes tag
// registers for the slow path. Returns whether code generation can proceed
// (presumably false on the terminate-speculation path — the elided return lines
// are not visible here; verify against the full source).
// NOTE(review): many lines are elided (case labels, braces, #if partners).
2937 bool SpeculativeJIT::getIntTypedArrayStoreOperand(
2938 GPRTemporary& value,
2940 #if USE(JSVALUE32_64)
2941 GPRTemporary& propertyTag,
2942 GPRTemporary& valueTag,
2944 Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
// A constant is only foldable if its speculated type fits the edge's expectation.
2946 bool isAppropriateConstant = false;
2947 if (valueUse->isConstant()) {
2948 JSValue jsValue = valueUse->asJSValue();
2949 SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2950 SpeculatedType actualType = speculationFromValue(jsValue);
2951 isAppropriateConstant = (expectedType | actualType) == expectedType;
2954 if (isAppropriateConstant) {
2955 JSValue jsValue = valueUse->asJSValue();
2956 if (!jsValue.isNumber()) {
2957 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2960 double d = jsValue.asNumber();
2962 d = clampDoubleToByte(d);
2963 GPRTemporary scratch(this);
2964 GPRReg scratchReg = scratch.gpr();
2965 m_jit.move(Imm32(toInt32(d)), scratchReg);
2966 value.adopt(scratch);
2968 switch (valueUse.useKind()) {
2970 SpeculateInt32Operand valueOp(this, valueUse);
2971 GPRTemporary scratch(this);
2972 GPRReg scratchReg = scratch.gpr();
2973 m_jit.move(valueOp.gpr(), scratchReg);
2975 compileClampIntegerToByte(m_jit, scratchReg);
2976 value.adopt(scratch);
// Int52 path (64-bit builds): inline clamp to [0, 255].
2982 SpeculateStrictInt52Operand valueOp(this, valueUse);
2983 GPRTemporary scratch(this);
2984 GPRReg scratchReg = scratch.gpr();
2985 m_jit.move(valueOp.gpr(), scratchReg);
2987 MacroAssembler::Jump inBounds = m_jit.branch64(
2988 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2989 MacroAssembler::Jump tooBig = m_jit.branch64(
2990 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2991 m_jit.move(TrustedImm32(0), scratchReg);
2992 MacroAssembler::Jump clamped = m_jit.jump();
2993 tooBig.link(&m_jit);
2994 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2995 clamped.link(&m_jit);
2996 inBounds.link(&m_jit);
2998 value.adopt(scratch);
3001 #endif // USE(JSVALUE64)
3003 case DoubleRepUse: {
3004 RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
3006 SpeculateDoubleOperand valueOp(this, valueUse);
3007 GPRTemporary result(this);
3008 FPRTemporary floatScratch(this);
3009 FPRReg fpr = valueOp.fpr();
3010 GPRReg gpr = result.gpr();
3011 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
3012 value.adopt(result);
3014 #if USE(JSVALUE32_64)
// Tag registers needed so the slow path can pass boxed property/value pairs.
3015 GPRTemporary realPropertyTag(this);
3016 propertyTag.adopt(realPropertyTag);
3017 GPRReg propertyTagGPR = propertyTag.gpr();
3019 GPRTemporary realValueTag(this);
3020 valueTag.adopt(realValueTag);
3021 GPRReg valueTagGPR = valueTag.gpr();
3023 SpeculateDoubleOperand valueOp(this, valueUse);
3024 GPRTemporary result(this);
3025 FPRReg fpr = valueOp.fpr();
3026 GPRReg gpr = result.gpr();
// NaN stores as 0 on this (unclamped) path.
3027 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
3028 m_jit.xorPtr(gpr, gpr);
3029 MacroAssembler::JumpList fixed(m_jit.jump());
3030 notNaN.link(&m_jit);
3032 fixed.append(m_jit.branchTruncateDoubleToInt32(
3033 fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
// Truncation failed: box the double (and property) so the generic slow path can run.
3036 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
3037 boxDouble(fpr, gpr);
3039 UNUSED_PARAM(property);
3040 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
3041 boxDouble(fpr, valueTagGPR, gpr);
3043 slowPathCases.append(m_jit.jump());
3046 value.adopt(result);
3052 RELEASE_ASSERT_NOT_REACHED();
// Emits code for PutByVal/PutByValDirect on an integer typed array: materializes
// the store operand (with clamping per the element type), bounds-checks, stores
// with the width matching elementSize, handles the neutered-if-OOB case, and wires
// any slow-path cases to the appropriate strict/non-strict operation (direct vs.
// normal, 64-bit vs. 32_64 calling conventions).
// NOTE(review): elided lines include the early-return after getIntTypedArrayStoreOperand,
// case labels in the size switch, and several #if/#else partners.
3059 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3061 ASSERT(isInt(type));
3063 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3064 GPRReg storageReg = storage.gpr();
3066 Edge valueUse = m_jit.graph().varArgChild(node, 2);
3069 #if USE(JSVALUE32_64)
3070 GPRTemporary propertyTag;
3071 GPRTemporary valueTag;
3074 JITCompiler::JumpList slowPathCases;
3076 bool result = getIntTypedArrayStoreOperand(
3078 #if USE(JSVALUE32_64)
3079 propertyTag, valueTag,
3081 valueUse, slowPathCases, isClamped(type));
3087 GPRReg valueGPR = value.gpr();
3088 #if USE(JSVALUE32_64)
3089 GPRReg propertyTagGPR = propertyTag.gpr();
3090 GPRReg valueTagGPR = valueTag.gpr();
// The value temporary must not alias the other operand registers.
3093 ASSERT_UNUSED(valueGPR, valueGPR != property);
3094 ASSERT(valueGPR != base);
3095 ASSERT(valueGPR != storageReg);
3096 JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3098 switch (elementSize(type)) {
3100 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
3103 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
3106 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3112 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3116 if (!slowPathCases.empty()) {
// 64-bit slow path: strictness picks the direct/normal operation variant.
3118 if (node->op() == PutByValDirect) {
3119 addSlowPathGenerator(slowPathCall(
3120 slowPathCases, this,
3121 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
3122 NoResult, base, property, valueGPR));
3124 addSlowPathGenerator(slowPathCall(
3125 slowPathCases, this,
3126 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
3127 NoResult, base, property, valueGPR));
3129 #else // not USE(JSVALUE64)
// 32_64 slow path: pass (tag, payload) pairs for property and value.
3130 if (node->op() == PutByValDirect) {
3131 addSlowPathGenerator(slowPathCall(
3132 slowPathCases, this,
3133 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3134 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3136 addSlowPathGenerator(slowPathCall(
3137 slowPathCases, this,
3138 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3139 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
// Emits the fast path for reading an element of a Float32Array/Float64Array.
// Bounds are checked with emitTypedArrayBoundsCheck (speculation failure on
// out-of-bounds); the loaded value is produced as an unboxed double.
// NOTE(review): `case` labels and closing braces of the switch were dropped
// from this excerpt.
3147 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3149 ASSERT(isFloat(type));
3151 SpeculateCellOperand base(this, node->child1());
3152 SpeculateStrictInt32Operand property(this, node->child2());
3153 StorageOperand storage(this, node->child3());
3155 GPRReg baseReg = base.gpr();
3156 GPRReg propertyReg = property.gpr();
3157 GPRReg storageReg = storage.gpr();
// The array mode's structure/type checks must have been emitted already.
3159 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3161 FPRTemporary result(this);
3162 FPRReg resultReg = result.fpr();
3163 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3164 switch (elementSize(type)) {
// 4-byte elements (Float32): load single precision, then widen to double,
// since DFG double values are always 64-bit.
3166 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3167 m_jit.convertFloatToDouble(resultReg, resultReg);
// 8-byte elements (Float64): load directly.
3170 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
// Any other element size is impossible for a float typed array.
3174 RELEASE_ASSERT_NOT_REACHED();
3177 doubleResult(resultReg, node);
// Emits the fast path for storing a double into a Float32Array/Float64Array.
// Out-of-bounds stores fall through as no-ops after a neutered-buffer check;
// no slow path is needed because any JS number can be stored losslessly
// (Float32 via an explicit narrowing conversion).
// NOTE(review): `case` labels, closing braces, and the trailing noResult()
// call were dropped from this excerpt.
3180 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3182 ASSERT(isFloat(type));
3184 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3185 GPRReg storageReg = storage.gpr();
3187 Edge baseUse = m_jit.graph().varArgChild(node, 0);
3188 Edge valueUse = m_jit.graph().varArgChild(node, 2);
3190 SpeculateDoubleOperand valueOp(this, valueUse);
3191 FPRTemporary scratch(this);
3192 FPRReg valueFPR = valueOp.fpr();
3193 FPRReg scratchFPR = scratch.fpr();
// The array mode's checks must already cover the base; baseUse is only read
// by this assertion, hence ASSERT_UNUSED.
3195 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3197 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3199 switch (elementSize(type)) {
// 4-byte elements (Float32): narrow double -> float in a scratch register,
// then store 32 bits. NOTE(review): the moveDouble before convertDoubleToFloat
// looks redundant since the convert overwrites scratchFPR — presumably benign;
// confirm against upstream WebKit.
3201 m_jit.moveDouble(valueFPR, scratchFPR);
3202 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3203 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
// 8-byte elements (Float64): store the double as-is.
3207 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3210 RELEASE_ASSERT_NOT_REACHED();
// Out-of-bounds stores still need the neutered (detached) buffer check.
3213 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
// GetByVal where the base is known to be an object and the property a string:
// speculate on both children, then call out to operationGetByValObjectString.
// No inline fast path — the result is a full JSValue in the call-result regs.
// NOTE(review): the two `JSValueRegs resultRegs` declarations below are the
// JSVALUE64 and JSVALUE32_64 arms of a `#if`/`#else` whose directives (and the
// flushRegisters() call) were dropped from this excerpt; only one arm compiles
// in any configuration.
3219 void SpeculativeJIT::compileGetByValForObjectWithString(Node* node)
3221 SpeculateCellOperand arg1(this, node->child1());
3222 SpeculateCellOperand arg2(this, node->child2());
3224 GPRReg arg1GPR = arg1.gpr();
3225 GPRReg arg2GPR = arg2.gpr();
// OSR-exit if either speculation fails.
3227 speculateObject(node->child1(), arg1GPR);
3228 speculateString(node->child2(), arg2GPR);
3230 GPRFlushedCallResult resultPayload(this);
3231 GPRReg resultPayloadGPR = resultPayload.gpr();
// 64-bit: payload register alone carries the JSValue.
3233 JSValueRegs resultRegs(resultPayloadGPR);
// 32-bit: separate tag register is needed.
3235 GPRFlushedCallResult2 resultTag(this);
3236 GPRReg resultTagGPR = resultTag.gpr();
3237 JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3241 callOperation(operationGetByValObjectString, extractResult(resultRegs), arg1GPR, arg2GPR);
// The operation can throw (e.g. a getter); check for a pending exception.
3242 m_jit.exceptionCheck();
3244 jsValueResult(resultRegs, node);
// GetByVal where the base is known to be an object and the property a symbol:
// speculate on both children, then call operationGetByValObjectSymbol.
// Mirrors compileGetByValForObjectWithString with a symbol speculation.
// NOTE(review): the duplicate `JSValueRegs resultRegs` declarations are the
// JSVALUE64/JSVALUE32_64 arms of a `#if`/`#else` whose directives (and the
// flushRegisters() call) were dropped from this excerpt.
3247 void SpeculativeJIT::compileGetByValForObjectWithSymbol(Node* node)
3249 SpeculateCellOperand arg1(this, node->child1());
3250 SpeculateCellOperand arg2(this, node->child2());
3252 GPRReg arg1GPR = arg1.gpr();
3253 GPRReg arg2GPR = arg2.gpr();
// OSR-exit if either speculation fails.
3255 speculateObject(node->child1(), arg1GPR);
3256 speculateSymbol(node->child2(), arg2GPR);
3258 GPRFlushedCallResult resultPayload(this);
3259 GPRReg resultPayloadGPR = resultPayload.gpr();
// 64-bit: payload register alone carries the JSValue.
3261 JSValueRegs resultRegs(resultPayloadGPR);
// 32-bit: separate tag register is needed.
3263 GPRFlushedCallResult2 resultTag(this);
3264 GPRReg resultTagGPR = resultTag.gpr();
3265 JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3269 callOperation(operationGetByValObjectSymbol, extractResult(resultRegs), arg1GPR, arg2GPR);
// The operation can throw (e.g. a getter); check for a pending exception.
3270 m_jit.exceptionCheck();
3272 jsValueResult(resultRegs, node);
// PutByVal where the base is a cell and the property a string: speculate the
// property as a string, then call the appropriate strict/sloppy runtime
// operation with the full JSValue to store.
// NOTE(review): the flushRegisters() call and trailing noResult()/closing
// brace appear to have been dropped from this excerpt.
3275 void SpeculativeJIT::compilePutByValForCellWithString(Node* node, Edge& child1, Edge& child2, Edge& child3)
3277 SpeculateCellOperand arg1(this, child1);
3278 SpeculateCellOperand arg2(this, child2);
3279 JSValueOperand arg3(this, child3);
3281 GPRReg arg1GPR = arg1.gpr();
3282 GPRReg arg2GPR = arg2.gpr();
3283 JSValueRegs arg3Regs = arg3.jsValueRegs();
// OSR-exit if the property is not actually a string.
3285 speculateString(child2, arg2GPR);
// Strict mode is resolved per code origin; the operation performs the
// complete put (including any setter calls).
3288 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellStringStrict : operationPutByValCellStringNonStrict, arg1GPR, arg2GPR, arg3Regs);
3289 m_jit.exceptionCheck();
// PutByVal where the base is a cell and the property a symbol: speculate the
// property as a symbol, then call the appropriate strict/sloppy runtime
// operation. Mirrors compilePutByValForCellWithString.
// NOTE(review): the flushRegisters() call and trailing noResult()/closing
// brace appear to have been dropped from this excerpt.
3294 void SpeculativeJIT::compilePutByValForCellWithSymbol(Node* node, Edge& child1, Edge& child2, Edge& child3)
3296 SpeculateCellOperand arg1(this, child1);
3297 SpeculateCellOperand arg2(this, child2);
3298 JSValueOperand arg3(this, child3);
3300 GPRReg arg1GPR = arg1.gpr();
3301 GPRReg arg2GPR = arg2.gpr();
3302 JSValueRegs arg3Regs = arg3.jsValueRegs();
// OSR-exit if the property is not actually a symbol.
3304 speculateSymbol(child2, arg2GPR);
// Strict mode is resolved per code origin; the operation performs the
// complete put (including any setter calls).
3307 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellSymbolStrict : operationPutByValCellSymbolNonStrict, arg1GPR, arg2GPR, arg3Regs);
3308 m_jit.exceptionCheck();
3313 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg, GPRReg scratch3Reg)
3315 // Check that prototype is an object.
3316 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3318 // Initialize scratchReg with the value being checked.
3319 m_jit.move(valueReg, scratchReg);
3321 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3322 MacroAssembler::Label loop(&m_jit);
3323 MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3324 MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3325 m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratch3Reg, scratch2Reg);
3327 m_jit.load64(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset()), scratch3Reg);
3328 auto hasMonoProto = m_jit.branchTest64(JITCompiler::NonZero, scratch3Reg);
3329 m_jit.load64(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset)), scratch3Reg);
3330 hasMonoProto.link(&m_jit);
3331 m_jit.move(scratch3Reg, scratchReg);
3333 m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + TagOffset), scratch2Reg);
3334 m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + PayloadOffset), scratch3Reg);
3335 auto hasMonoProto = m_jit.branch32(CCallHelpers::NotEqual, scratch2Reg, TrustedImm32(JSValue::EmptyValueTag));
3336 m_jit.load32(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset) + PayloadOffset), scratch3Reg);
3337 hasMonoProto.link(&m_jit);
3338 m_jit.move(scratch3Reg, scratchReg);
3341 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3343 m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3345 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);