2 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGMayExit.h"
38 #include "DFGOSRExitFuzz.h"
39 #include "DFGSaneStringGetByValSlowPathGenerator.h"
40 #include "DFGSlowPathGenerator.h"
41 #include "DFGSnippetParams.h"
42 #include "DirectArguments.h"
43 #include "JITAddGenerator.h"
44 #include "JITBitAndGenerator.h"
45 #include "JITBitOrGenerator.h"
46 #include "JITBitXorGenerator.h"
47 #include "JITDivGenerator.h"
48 #include "JITLeftShiftGenerator.h"
49 #include "JITMulGenerator.h"
50 #include "JITRightShiftGenerator.h"
51 #include "JITSubGenerator.h"
52 #include "JSAsyncFunction.h"
53 #include "JSAsyncGeneratorFunction.h"
54 #include "JSCInlines.h"
55 #include "JSFixedArray.h"
56 #include "JSGeneratorFunction.h"
57 #include "JSLexicalEnvironment.h"
58 #include "JSPropertyNameEnumerator.h"
59 #include "LinkBuffer.h"
60 #include "RegExpConstructor.h"
61 #include "ScopedArguments.h"
62 #include "ScratchRegisterAllocator.h"
63 #include "SuperSampler.h"
64 #include "WeakMapImpl.h"
65 #include <wtf/BitVector.h>
67 #include <wtf/MathExtras.h>
69 namespace JSC { namespace DFG {
71 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
75 , m_lastGeneratedNode(LastNodeType)
77 , m_generationInfo(m_jit.graph().frameRegisterCount())
78 , m_state(m_jit.graph())
79 , m_interpreter(m_jit.graph(), m_state)
80 , m_stream(&jit.jitCode()->variableEventStream)
81 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
85 SpeculativeJIT::~SpeculativeJIT()
89 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
91 IndexingType indexingType = structure->indexingType();
92 bool hasIndexingHeader = hasIndexedProperties(indexingType);
94 unsigned inlineCapacity = structure->inlineCapacity();
95 unsigned outOfLineCapacity = structure->outOfLineCapacity();
97 GPRTemporary scratch(this);
98 GPRTemporary scratch2(this);
99 GPRReg scratchGPR = scratch.gpr();
100 GPRReg scratch2GPR = scratch2.gpr();
102 ASSERT(vectorLength >= numElements);
103 vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
105 JITCompiler::JumpList slowCases;
108 if (hasIndexingHeader)
109 size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
110 size += outOfLineCapacity * sizeof(JSValue);
112 m_jit.move(TrustedImmPtr(0), storageGPR);
115 if (MarkedAllocator* allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists)) {
116 m_jit.move(TrustedImmPtr(allocator), scratchGPR);
117 m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
120 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
123 if (hasIndexingHeader)
124 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
126 slowCases.append(m_jit.jump());
129 size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
130 MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
132 m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
133 uint32_t mask = Butterfly::computeIndexingMaskForVectorLength(vectorLength);
134 emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, TrustedImm32(mask), scratch2GPR, slowCases);
135 m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
137 slowCases.append(m_jit.jump());
139 // I want a slow path that also loads out the storage pointer, and that's
140 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
141 // of work for a very small piece of functionality. :-/
142 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
143 slowCases, this, operationNewRawObject, resultGPR, storageGPR,
144 structure, vectorLength));
146 if (numElements < vectorLength && LIKELY(!hasUndecided(structure->indexingType()))) {
148 if (hasDouble(structure->indexingType()))
149 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
151 m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
152 for (unsigned i = numElements; i < vectorLength; ++i)
153 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
155 EncodedValueDescriptor value;
156 if (hasDouble(structure->indexingType()))
157 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
159 value.asInt64 = JSValue::encode(JSValue());
160 for (unsigned i = numElements; i < vectorLength; ++i) {
161 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
162 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
167 if (hasIndexingHeader)
168 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
170 m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
172 m_jit.mutatorFence(*m_jit.vm());
175 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
177 if (inlineCallFrame && !inlineCallFrame->isVarargs())
178 m_jit.move(TrustedImm32(inlineCallFrame->argumentCountIncludingThis - !includeThis), lengthGPR);
180 VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
181 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
183 m_jit.sub32(TrustedImm32(1), lengthGPR);
187 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
189 emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
192 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
194 if (origin.inlineCallFrame) {
195 if (origin.inlineCallFrame->isClosureCall) {
197 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
201 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
205 m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
208 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
212 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
213 GPRInfo::callFrameRegister, startGPR);
216 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
218 if (!Options::useOSRExitFuzz()
219 || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
220 || !doOSRExitFuzzing())
221 return MacroAssembler::Jump();
223 MacroAssembler::Jump result;
225 m_jit.pushToSave(GPRInfo::regT0);
226 m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
227 m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
228 m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
229 unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
230 unsigned at = Options::fireOSRExitFuzzAt();
231 if (at || atOrAfter) {
233 MacroAssembler::RelationalCondition condition;
235 threshold = atOrAfter;
236 condition = MacroAssembler::Below;
239 condition = MacroAssembler::NotEqual;
241 MacroAssembler::Jump ok = m_jit.branch32(
242 condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
243 m_jit.popToRestore(GPRInfo::regT0);
244 result = m_jit.jump();
247 m_jit.popToRestore(GPRInfo::regT0);
252 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
256 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
257 if (fuzzJump.isSet()) {
258 JITCompiler::JumpList jumpsToFail;
259 jumpsToFail.append(fuzzJump);
260 jumpsToFail.append(jumpToFail);
261 m_jit.appendExitInfo(jumpsToFail);
263 m_jit.appendExitInfo(jumpToFail);
264 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
267 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
271 JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
272 if (fuzzJump.isSet()) {
273 JITCompiler::JumpList myJumpsToFail;
274 myJumpsToFail.append(jumpsToFail);
275 myJumpsToFail.append(fuzzJump);
276 m_jit.appendExitInfo(myJumpsToFail);
278 m_jit.appendExitInfo(jumpsToFail);
279 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
282 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
285 return OSRExitJumpPlaceholder();
286 unsigned index = m_jit.jitCode()->osrExit.size();
287 m_jit.appendExitInfo();
288 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
289 return OSRExitJumpPlaceholder(index);
292 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
294 return speculationCheck(kind, jsValueSource, nodeUse.node());
297 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
299 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
302 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
304 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
307 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
311 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
312 m_jit.appendExitInfo(jumpToFail);
313 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
316 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
318 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
321 void SpeculativeJIT::emitInvalidationPoint(Node* node)
325 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
326 m_jit.jitCode()->appendOSRExit(OSRExit(
327 UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
328 this, m_stream->size()));
329 info.m_replacementSource = m_jit.watchpointLabel();
330 ASSERT(info.m_replacementSource.isSet());
334 void SpeculativeJIT::unreachable(Node* node)
336 m_compileOkay = false;
337 m_jit.abortWithReason(DFGUnreachableNode, node->op());
340 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
344 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
345 m_compileOkay = false;
346 if (verboseCompilationEnabled())
347 dataLog("Bailing compilation.\n");
350 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
352 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
355 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
357 ASSERT(needsTypeCheck(edge, typesPassedThrough));
358 m_interpreter.filter(edge, typesPassedThrough);
359 speculationCheck(exitKind, source, edge.node(), jumpToFail);
362 RegisterSet SpeculativeJIT::usedRegisters()
366 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
367 GPRReg gpr = GPRInfo::toRegister(i);
368 if (m_gprs.isInUse(gpr))
371 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
372 FPRReg fpr = FPRInfo::toRegister(i);
373 if (m_fprs.isInUse(fpr))
377 result.merge(RegisterSet::stubUnavailableRegisters());
382 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
384 m_slowPathGenerators.append(WTFMove(slowPathGenerator));
387 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
389 m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
392 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
394 for (auto& slowPathGenerator : m_slowPathGenerators) {
395 pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
396 slowPathGenerator->generate(this);
398 for (auto& slowPathLambda : m_slowPathLambdas) {
399 Node* currentNode = slowPathLambda.currentNode;
400 m_currentNode = currentNode;
401 m_outOfLineStreamIndex = slowPathLambda.streamIndex;
402 pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
403 slowPathLambda.generator();
404 m_outOfLineStreamIndex = std::nullopt;
408 void SpeculativeJIT::clearGenerationInfo()
410 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
411 m_generationInfo[i] = GenerationInfo();
412 m_gprs = RegisterBank<GPRInfo>();
413 m_fprs = RegisterBank<FPRInfo>();
416 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
418 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
419 Node* node = info.node();
420 DataFormat registerFormat = info.registerFormat();
421 ASSERT(registerFormat != DataFormatNone);
422 ASSERT(registerFormat != DataFormatDouble);
424 SilentSpillAction spillAction;
425 SilentFillAction fillAction;
427 if (!info.needsSpill())
428 spillAction = DoNothingForSpill;
431 ASSERT(info.gpr() == source);
432 if (registerFormat == DataFormatInt32)
433 spillAction = Store32Payload;
434 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
435 spillAction = StorePtr;
436 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
437 spillAction = Store64;
439 ASSERT(registerFormat & DataFormatJS);
440 spillAction = Store64;
442 #elif USE(JSVALUE32_64)
443 if (registerFormat & DataFormatJS) {
444 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
445 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
447 ASSERT(info.gpr() == source);
448 spillAction = Store32Payload;
453 if (registerFormat == DataFormatInt32) {
454 ASSERT(info.gpr() == source);
455 ASSERT(isJSInt32(info.registerFormat()));
456 if (node->hasConstant()) {
457 ASSERT(node->isInt32Constant());
458 fillAction = SetInt32Constant;
460 fillAction = Load32Payload;
461 } else if (registerFormat == DataFormatBoolean) {
463 RELEASE_ASSERT_NOT_REACHED();
464 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
465 fillAction = DoNothingForFill;
467 #elif USE(JSVALUE32_64)
468 ASSERT(info.gpr() == source);
469 if (node->hasConstant()) {
470 ASSERT(node->isBooleanConstant());
471 fillAction = SetBooleanConstant;
473 fillAction = Load32Payload;
475 } else if (registerFormat == DataFormatCell) {
476 ASSERT(info.gpr() == source);
477 if (node->hasConstant()) {
478 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
479 node->asCell(); // To get the assertion.
480 fillAction = SetCellConstant;
483 fillAction = LoadPtr;
485 fillAction = Load32Payload;
488 } else if (registerFormat == DataFormatStorage) {
489 ASSERT(info.gpr() == source);
490 fillAction = LoadPtr;
491 } else if (registerFormat == DataFormatInt52) {
492 if (node->hasConstant())
493 fillAction = SetInt52Constant;
494 else if (info.spillFormat() == DataFormatInt52)
496 else if (info.spillFormat() == DataFormatStrictInt52)
497 fillAction = Load64ShiftInt52Left;
498 else if (info.spillFormat() == DataFormatNone)
501 RELEASE_ASSERT_NOT_REACHED();
502 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
503 fillAction = Load64; // Make GCC happy.
506 } else if (registerFormat == DataFormatStrictInt52) {
507 if (node->hasConstant())
508 fillAction = SetStrictInt52Constant;
509 else if (info.spillFormat() == DataFormatInt52)
510 fillAction = Load64ShiftInt52Right;
511 else if (info.spillFormat() == DataFormatStrictInt52)
513 else if (info.spillFormat() == DataFormatNone)
516 RELEASE_ASSERT_NOT_REACHED();
517 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
518 fillAction = Load64; // Make GCC happy.
522 ASSERT(registerFormat & DataFormatJS);
524 ASSERT(info.gpr() == source);
525 if (node->hasConstant()) {
526 if (node->isCellConstant())
527 fillAction = SetTrustedJSConstant;
529 fillAction = SetJSConstant;
530 } else if (info.spillFormat() == DataFormatInt32) {
531 ASSERT(registerFormat == DataFormatJSInt32);
532 fillAction = Load32PayloadBoxInt;
536 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
537 if (node->hasConstant())
538 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
539 else if (info.payloadGPR() == source)
540 fillAction = Load32Payload;
541 else { // Fill the Tag
542 switch (info.spillFormat()) {
543 case DataFormatInt32:
544 ASSERT(registerFormat == DataFormatJSInt32);
545 fillAction = SetInt32Tag;
548 ASSERT(registerFormat == DataFormatJSCell);
549 fillAction = SetCellTag;
551 case DataFormatBoolean:
552 ASSERT(registerFormat == DataFormatJSBoolean);
553 fillAction = SetBooleanTag;
556 fillAction = Load32Tag;
563 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
566 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
568 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
569 Node* node = info.node();
570 ASSERT(info.registerFormat() == DataFormatDouble);
572 SilentSpillAction spillAction;
573 SilentFillAction fillAction;
575 if (!info.needsSpill())
576 spillAction = DoNothingForSpill;
578 ASSERT(!node->hasConstant());
579 ASSERT(info.spillFormat() == DataFormatNone);
580 ASSERT(info.fpr() == source);
581 spillAction = StoreDouble;
585 if (node->hasConstant()) {
586 node->asNumber(); // To get the assertion.
587 fillAction = SetDoubleConstant;
589 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
590 fillAction = LoadDouble;
592 #elif USE(JSVALUE32_64)
593 ASSERT(info.registerFormat() == DataFormatDouble);
594 if (node->hasConstant()) {
595 node->asNumber(); // To get the assertion.
596 fillAction = SetDoubleConstant;
598 fillAction = LoadDouble;
601 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
604 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
606 switch (plan.spillAction()) {
607 case DoNothingForSpill:
610 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
613 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
616 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
620 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
624 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
627 RELEASE_ASSERT_NOT_REACHED();
631 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan)
633 switch (plan.fillAction()) {
634 case DoNothingForFill:
636 case SetInt32Constant:
637 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
640 case SetInt52Constant:
641 m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
643 case SetStrictInt52Constant:
644 m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
646 #endif // USE(JSVALUE64)
647 case SetBooleanConstant:
648 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
650 case SetCellConstant:
651 ASSERT(plan.node()->constant()->value().isCell());
652 m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
655 case SetTrustedJSConstant:
656 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
659 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
661 case SetDoubleConstant:
662 m_jit.moveDouble(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), plan.fpr());
664 case Load32PayloadBoxInt:
665 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
666 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
668 case Load32PayloadConvertToInt52:
669 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
670 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
671 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
673 case Load32PayloadSignExtend:
674 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
675 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
678 case SetJSConstantTag:
679 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
681 case SetJSConstantPayload:
682 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
685 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
688 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
691 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
693 case SetDoubleConstant:
694 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
698 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
701 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
704 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
708 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
710 case Load64ShiftInt52Right:
711 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
712 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
714 case Load64ShiftInt52Left:
715 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
716 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
720 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
723 RELEASE_ASSERT_NOT_REACHED();
727 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
729 switch (arrayMode.arrayClass()) {
730 case Array::OriginalArray: {
732 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
733 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
739 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
740 return m_jit.branch32(
741 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
743 case Array::NonArray:
744 case Array::OriginalNonArray:
745 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
746 return m_jit.branch32(
747 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
749 case Array::PossiblyArray:
750 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
751 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
754 RELEASE_ASSERT_NOT_REACHED();
755 return JITCompiler::Jump();
758 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
760 JITCompiler::JumpList result;
762 switch (arrayMode.type()) {
765 case Array::Contiguous:
766 case Array::Undecided:
767 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, arrayMode.shapeMask());
769 case Array::ArrayStorage:
770 case Array::SlowPutArrayStorage: {
771 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
773 if (arrayMode.isJSArray()) {
774 if (arrayMode.isSlowPut()) {
777 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
778 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
779 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
782 MacroAssembler::Above, tempGPR,
783 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
786 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
788 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
791 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
792 if (arrayMode.isSlowPut()) {
793 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
796 MacroAssembler::Above, tempGPR,
797 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
801 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
812 void SpeculativeJIT::checkArray(Node* node)
814 ASSERT(node->arrayMode().isSpecific());
815 ASSERT(!node->arrayMode().doesConversion());
817 SpeculateCellOperand base(this, node->child1());
818 GPRReg baseReg = base.gpr();
820 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
821 noResult(m_currentNode);
825 const ClassInfo* expectedClassInfo = 0;
827 switch (node->arrayMode().type()) {
828 case Array::AnyTypedArray:
830 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
834 case Array::Contiguous:
835 case Array::Undecided:
836 case Array::ArrayStorage:
837 case Array::SlowPutArrayStorage: {
838 GPRTemporary temp(this);
839 GPRReg tempGPR = temp.gpr();
840 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
842 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
843 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
845 noResult(m_currentNode);
848 case Array::DirectArguments:
849 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
850 noResult(m_currentNode);
852 case Array::ScopedArguments:
853 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
854 noResult(m_currentNode);
857 speculateCellTypeWithoutTypeFiltering(
858 node->child1(), baseReg,
859 typeForTypedArrayType(node->arrayMode().typedArrayType()));
860 noResult(m_currentNode);
864 RELEASE_ASSERT(expectedClassInfo);
866 GPRTemporary temp(this);
867 GPRTemporary temp2(this);
868 m_jit.emitLoadStructure(*m_jit.vm(), baseReg, temp.gpr(), temp2.gpr());
870 BadType, JSValueSource::unboxedCell(baseReg), node,
872 MacroAssembler::NotEqual,
873 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
874 TrustedImmPtr(PoisonedClassInfoPtr(expectedClassInfo).bits())));
876 noResult(m_currentNode);
879 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
881 ASSERT(node->arrayMode().doesConversion());
883 GPRTemporary temp(this);
884 GPRTemporary structure;
885 GPRReg tempGPR = temp.gpr();
886 GPRReg structureGPR = InvalidGPRReg;
888 if (node->op() != ArrayifyToStructure) {
889 GPRTemporary realStructure(this);
890 structure.adopt(realStructure);
891 structureGPR = structure.gpr();
894 // We can skip all that comes next if we already have array storage.
895 MacroAssembler::JumpList slowPath;
897 if (node->op() == ArrayifyToStructure) {
898 slowPath.append(m_jit.branchWeakStructure(
899 JITCompiler::NotEqual,
900 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
904 MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
906 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
909 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
910 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
912 noResult(m_currentNode);
915 void SpeculativeJIT::arrayify(Node* node)
917 ASSERT(node->arrayMode().isSpecific());
919 SpeculateCellOperand base(this, node->child1());
921 if (!node->child2()) {
922 arrayify(node, base.gpr(), InvalidGPRReg);
926 SpeculateInt32Operand property(this, node->child2());
928 arrayify(node, base.gpr(), property.gpr());
931 GPRReg SpeculativeJIT::fillStorage(Edge edge)
933 VirtualRegister virtualRegister = edge->virtualRegister();
934 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
936 switch (info.registerFormat()) {
937 case DataFormatNone: {
938 if (info.spillFormat() == DataFormatStorage) {
939 GPRReg gpr = allocate();
940 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
941 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
942 info.fillStorage(*m_stream, gpr);
946 // Must be a cell; fill it as a cell and then return the pointer.
947 return fillSpeculateCell(edge);
950 case DataFormatStorage: {
951 GPRReg gpr = info.gpr();
957 return fillSpeculateCell(edge);
961 void SpeculativeJIT::useChildren(Node* node)
963 if (node->flags() & NodeHasVarArgs) {
964 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
965 if (!!m_jit.graph().m_varArgChildren[childIdx])
966 use(m_jit.graph().m_varArgChildren[childIdx]);
969 Edge child1 = node->child1();
971 ASSERT(!node->child2() && !node->child3());
976 Edge child2 = node->child2();
978 ASSERT(!node->child3());
983 Edge child3 = node->child3();
990 void SpeculativeJIT::compileTryGetById(Node* node)
992 switch (node->child1().useKind()) {
994 SpeculateCellOperand base(this, node->child1());
995 JSValueRegsTemporary result(this, Reuse, base);
997 JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
998 JSValueRegs resultRegs = result.regs();
1002 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1004 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1009 JSValueOperand base(this, node->child1());
1010 JSValueRegsTemporary result(this, Reuse, base);
1012 JSValueRegs baseRegs = base.jsValueRegs();
1013 JSValueRegs resultRegs = result.regs();
1017 JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1019 cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1021 jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1026 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
// Compiles the In node ("prop" in object). When the property name is a
// compile-time-constant atomic JSString, emits a patchable-jump inline cache
// backed by operationInOptimize; otherwise falls back to operationGenericIn.
1031 void SpeculativeJIT::compileIn(Node* node)
1033 SpeculateCellOperand base(this, node->child1());
1034 GPRReg baseGPR = base.gpr();
1036 if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1037 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1038 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1040 GPRTemporary result(this);
1041 GPRReg resultGPR = result.gpr();
// The constant-string child is consumed here; the result path uses
// UseChildrenCalledExplicitly below.
1043 use(node->child2());
// The patchable jump is the IC entry point; |done| marks where patched
// fast-path code should jump back to.
1045 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1046 MacroAssembler::Label done = m_jit.label();
1048 // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1049 // we can cast it to const AtomicStringImpl* safely.
1050 auto slowPath = slowPathCall(
1051 jump.m_jump, this, operationInOptimize,
1052 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1053 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
// Record the register layout in the stub info so the IC repatcher knows
// which registers hold the base and the result.
1055 stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1056 stubInfo->codeOrigin = node->origin.semantic;
1057 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1058 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1059 stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1060 #if USE(JSVALUE32_64)
1061 stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1062 stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1063 stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1065 stubInfo->patch.usedRegisters = usedRegisters();
1067 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1068 addSlowPathGenerator(WTFMove(slowPath));
1072 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: the key is not a known atomic string constant.
1077 JSValueOperand key(this, node->child2());
1078 JSValueRegs regs = key.jsValueRegs();
1080 GPRFlushedCallResult result(this);
1081 GPRReg resultGPR = result.gpr();
1088 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1090 m_jit.exceptionCheck();
1091 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles DeleteById by calling out to operationDeleteById; the runtime
// can throw (e.g. strict-mode delete failure), hence the exception check.
1094 void SpeculativeJIT::compileDeleteById(Node* node)
1096 JSValueOperand value(this, node->child1());
1097 GPRFlushedCallResult result(this);
1099 JSValueRegs valueRegs = value.jsValueRegs();
1100 GPRReg resultGPR = result.gpr();
1105 callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1106 m_jit.exceptionCheck();
// The operation returns a raw 0/1, not a boxed boolean.
1108 unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles DeleteByVal by calling out to operationDeleteByVal with the base
// and key as boxed values; mirrors compileDeleteById.
1111 void SpeculativeJIT::compileDeleteByVal(Node* node)
1113 JSValueOperand base(this, node->child1());
1114 JSValueOperand key(this, node->child2());
1115 GPRFlushedCallResult result(this);
1117 JSValueRegs baseRegs = base.jsValueRegs();
1118 JSValueRegs keyRegs = key.jsValueRegs();
1119 GPRReg resultGPR = result.gpr();
1125 callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1126 m_jit.exceptionCheck();
// Raw 0/1 result from the runtime, not a boxed boolean.
1128 unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Compiles PushWithScope: creates a `with`-statement scope. When the scoped
// value is proven to be an object, operationPushWithScopeObject is used and
// cannot throw; otherwise the generic operation must run toObject() and so
// needs an exception check.
1131 void SpeculativeJIT::compilePushWithScope(Node* node)
1133 SpeculateCellOperand currentScope(this, node->child1());
1134 GPRReg currentScopeGPR = currentScope.gpr();
1136 GPRFlushedCallResult result(this);
1137 GPRReg resultGPR = result.gpr();
1139 auto objectEdge = node->child2();
1140 if (objectEdge.useKind() == ObjectUse) {
1141 SpeculateCellOperand object(this, objectEdge);
1142 GPRReg objectGPR = object.gpr();
1143 speculateObject(objectEdge, objectGPR);
1146 callOperation(operationPushWithScopeObject, resultGPR, currentScopeGPR, objectGPR);
1147 // No exception check here as we did not have to call toObject().
1149 ASSERT(objectEdge.useKind() == UntypedUse);
1150 JSValueOperand object(this, objectEdge);
1151 JSValueRegs objectRegs = object.jsValueRegs();
1154 callOperation(operationPushWithScope, resultGPR, currentScopeGPR, objectRegs);
1155 m_jit.exceptionCheck();
1158 cellResult(resultGPR, node);
// Untyped compare. If the compare feeds directly into the next node's Branch
// (peephole detection), fuses compare+branch and skips ahead past the branch
// node; otherwise emits a standalone compare. Returns whether the branch was
// fused (return statements fall outside this excerpt).
1161 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1163 unsigned branchIndexInBlock = detectPeepHoleBranch();
1164 if (branchIndexInBlock != UINT_MAX) {
1165 Node* branchNode = m_block->at(branchIndexInBlock);
// Peephole only fires when nothing else references the compare result.
1167 ASSERT(node->adjustedRefCount() == 1);
1169 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
// Advance the block cursor so the fused branch is not compiled twice.
1171 m_indexInBlock = branchIndexInBlock;
1172 m_currentNode = branchNode;
1177 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
// Untyped strict equality; same peephole branch-fusion structure as
// nonSpeculativeCompare above. |invert| flips the comparison sense.
1182 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1184 unsigned branchIndexInBlock = detectPeepHoleBranch();
1185 if (branchIndexInBlock != UINT_MAX) {
1186 Node* branchNode = m_block->at(branchIndexInBlock);
1188 ASSERT(node->adjustedRefCount() == 1);
1190 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
// Skip over the fused branch node.
1192 m_indexInBlock = branchIndexInBlock;
1193 m_currentNode = branchNode;
1198 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Debug helper: maps a DataFormat enum value to a short printable name.
// The string table (elided in this excerpt) is indexed directly by |format|,
// so it must stay in sync with the DataFormat enum ordering.
1203 static const char* dataFormatString(DataFormat format)
1205 // These values correspond to the DataFormat enum.
1206 const char* strings[] = {
1224 return strings[format];
// Debug dump of the register allocation state: GPR/FPR banks and the
// per-virtual-register GenerationInfo (register format, spill format, and
// which machine register currently holds the value, if any).
1227 void SpeculativeJIT::dump(const char* label)
1230 dataLogF("<%s>\n", label);
1232 dataLogF(" gprs:\n");
1234 dataLogF(" fprs:\n");
1236 dataLogF(" VirtualRegisters:\n");
1237 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1238 GenerationInfo& info = m_generationInfo[i];
1240 dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1242 dataLogF(" % 3d:[__][__]", i);
1243 if (info.registerFormat() == DataFormatDouble)
1244 dataLogF(":fpr%d\n", info.fpr());
1245 else if (info.registerFormat() != DataFormatNone
// On 32-bit, JS-format values occupy a register pair, so the single-GPR
// printout below does not apply to them.
1246 #if USE(JSVALUE32_64)
1247 && !(info.registerFormat() & DataFormatJS)
1250 ASSERT(info.gpr() != InvalidGPRReg);
1251 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1256 dataLogF("</%s>\n", label);
// GPRTemporary constructors: RAII-style scratch general-purpose registers.
// Empty temporary; no register is allocated.
1259 GPRTemporary::GPRTemporary()
1261 , m_gpr(InvalidGPRReg)
// Allocates any free GPR.
1265 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1267 , m_gpr(InvalidGPRReg)
1269 m_gpr = m_jit->allocate();
// Allocates one specific GPR (for operations pinned to a fixed register).
1272 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1274 , m_gpr(InvalidGPRReg)
1276 m_gpr = m_jit->allocate(specific);
// 32-bit only: tries to reuse one word (tag or payload) of a JSValueOperand
// whose node will not be read again; falls back to a fresh allocation.
1279 #if USE(JSVALUE32_64)
1280 GPRTemporary::GPRTemporary(
1281 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1283 , m_gpr(InvalidGPRReg)
1285 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1286 m_gpr = m_jit->reuse(op1.gpr(which));
1288 m_gpr = m_jit->allocate();
1290 #endif // USE(JSVALUE32_64)
// JSValueRegsTemporary: scratch register set wide enough to hold a boxed
// JSValue — one GPR on 64-bit, a tag/payload pair on 32-bit. The paired
// definitions below are the 64-bit and 32-bit variants of each constructor
// (the #if USE(JSVALUE64)/#else guards are elided in this excerpt).
1292 JSValueRegsTemporary::JSValueRegsTemporary() { }
1294 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
// 64-bit: the requested result word is irrelevant — there is only one GPR.
1305 template<typename T>
1306 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1307 : m_gpr(jit, Reuse, operand)
// 32-bit: reuse the operand's register for whichever word the caller asked
// for, and allocate a fresh GPR for the other word.
1311 template<typename T>
1312 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1314 if (resultWord == PayloadWord) {
1315 m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1316 m_tagGPR = GPRTemporary(jit);
1318 m_payloadGPR = GPRTemporary(jit);
1319 m_tagGPR = GPRTemporary(jit, Reuse, operand);
// 64-bit: reuse the JSValueOperand's single GPR when possible.
1325 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1327 m_gpr = GPRTemporary(jit, Reuse, operand);
// 32-bit: reuse both words only if the whole operand is reusable; otherwise
// allocate a fresh pair.
1330 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1332 if (jit->canReuse(operand.node())) {
1333 m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1334 m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1336 m_payloadGPR = GPRTemporary(jit);
1337 m_tagGPR = GPRTemporary(jit);
1342 JSValueRegsTemporary::~JSValueRegsTemporary() { }
// Returns the held registers as a JSValueRegs (single GPR on 64-bit,
// tag+payload pair on 32-bit).
1344 JSValueRegs JSValueRegsTemporary::regs()
1347 return JSValueRegs(m_gpr.gpr());
1349 return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
// Transfers ownership of |other|'s allocated GPR into this (empty)
// temporary, leaving |other| holding InvalidGPRReg so its destructor
// will not release the register twice.
1353 void GPRTemporary::adopt(GPRTemporary& other)
1356 ASSERT(m_gpr == InvalidGPRReg);
1357 ASSERT(other.m_jit);
1358 ASSERT(other.m_gpr != InvalidGPRReg);
1359 m_jit = other.m_jit;
1360 m_gpr = other.m_gpr;
1362 other.m_gpr = InvalidGPRReg;
// FPRTemporary constructors: RAII-style scratch floating-point registers.
// Move constructor: steals |other|'s register; nulling other.m_jit disarms
// its destructor.
1365 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1367 ASSERT(other.m_jit);
1368 ASSERT(other.m_fpr != InvalidFPRReg);
1369 m_jit = other.m_jit;
1370 m_fpr = other.m_fpr;
1372 other.m_jit = nullptr;
// Allocates any free FPR.
1375 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1377 , m_fpr(InvalidFPRReg)
1379 m_fpr = m_jit->fprAllocate();
// Reuses op1's FPR when its node will not be read again.
1382 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1384 , m_fpr(InvalidFPRReg)
1386 if (m_jit->canReuse(op1.node()))
1387 m_fpr = m_jit->reuse(op1.fpr());
1389 m_fpr = m_jit->fprAllocate();
// Two-operand variant: prefers reusing op1, then op2; the third case covers
// both operands sharing one register that is only reusable jointly.
1392 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1394 , m_fpr(InvalidFPRReg)
1396 if (m_jit->canReuse(op1.node()))
1397 m_fpr = m_jit->reuse(op1.fpr());
1398 else if (m_jit->canReuse(op2.node()))
1399 m_fpr = m_jit->reuse(op2.fpr());
1400 else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1401 m_fpr = m_jit->reuse(op1.fpr());
1403 m_fpr = m_jit->fprAllocate();
// 32-bit only: a JSValueOperand can hold an unboxed double in an FPR;
// reuse that register when possible.
1406 #if USE(JSVALUE32_64)
1407 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1409 , m_fpr(InvalidFPRReg)
1411 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1412 m_fpr = m_jit->reuse(op1.fpr());
1414 m_fpr = m_jit->fprAllocate();
// Fused double compare + branch. If the taken block is the fall-through
// block, inverts the condition and swaps targets so the common path falls
// through without a jump.
1418 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1420 BasicBlock* taken = branchNode->branchData()->taken.block;
1421 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1423 if (taken == nextBlock()) {
1424 condition = MacroAssembler::invert(condition);
1425 std::swap(taken, notTaken);
1428 SpeculateDoubleOperand op1(this, node->child1());
1429 SpeculateDoubleOperand op2(this, node->child2());
1431 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-equality compare + branch. Both children are speculated to
// be objects; equality is then plain pointer identity (branchPtr).
1435 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1437 BasicBlock* taken = branchNode->branchData()->taken.block;
1438 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1440 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// Invert and swap targets so the taken block can fall through.
1442 if (taken == nextBlock()) {
1443 condition = MacroAssembler::NotEqual;
1444 BasicBlock* tmp = taken;
1449 SpeculateCellOperand op1(this, node->child1());
1450 SpeculateCellOperand op2(this, node->child2());
1452 GPRReg op1GPR = op1.gpr();
1453 GPRReg op2GPR = op2.gpr();
// Fast case: while the masquerades-as-undefined watchpoint holds, it is
// enough to check "is object"; no MasqueradesAsUndefined flag test needed.
1455 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1456 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1458 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1460 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1462 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
// Slow case: watchpoint fired, so additionally exit if either operand has
// the MasqueradesAsUndefined type-info flag set.
1465 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1467 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1468 m_jit.branchIfNotObject(op1GPR));
1470 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1472 MacroAssembler::NonZero,
1473 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1474 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1476 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1478 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1479 m_jit.branchIfNotObject(op2GPR));
1481 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1483 MacroAssembler::NonZero,
1484 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1485 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Objects compare equal iff they are the same cell pointer.
1488 branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean compare + branch, with immediate-operand specializations
// when either child is an int32 constant.
1492 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1494 BasicBlock* taken = branchNode->branchData()->taken.block;
1495 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1497 // The branch instruction will branch to the taken block.
1498 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1499 if (taken == nextBlock()) {
1500 condition = JITCompiler::invert(condition);
1501 BasicBlock* tmp = taken;
1506 if (node->child1()->isInt32Constant()) {
1507 int32_t imm = node->child1()->asInt32();
1508 SpeculateBooleanOperand op2(this, node->child2());
1509 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1510 } else if (node->child2()->isInt32Constant()) {
1511 SpeculateBooleanOperand op1(this, node->child1());
1512 int32_t imm = node->child2()->asInt32();
1513 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1515 SpeculateBooleanOperand op1(this, node->child1());
1516 SpeculateBooleanOperand op2(this, node->child2());
1517 branch32(condition, op1.gpr(), op2.gpr(), taken);
// Compiles StringSlice with two inline fast paths: (1) an empty slice
// returns the shared empty string; (2) a single-character 8/16-bit slice is
// looked up in the VM's single-character string table. Everything else
// (multi-char slices, rope strings, chars >= 0x100) goes to a slow path.
1523 void SpeculativeJIT::compileStringSlice(Node* node)
1525 SpeculateCellOperand string(this, node->child1());
1526 GPRTemporary startIndex(this);
1527 GPRTemporary temp(this);
1528 GPRTemporary temp2(this);
1530 GPRReg stringGPR = string.gpr();
1531 GPRReg startIndexGPR = startIndex.gpr();
1532 GPRReg tempGPR = temp.gpr();
1533 GPRReg temp2GPR = temp2.gpr();
1535 speculateString(node->child1(), stringGPR);
// temp2 = string length; start/end indices are clamped against it.
1538 m_jit.load32(JITCompiler::Address(stringGPR, JSString::offsetOfLength()), temp2GPR);
1540 emitPopulateSliceIndex(node->child2(), temp2GPR, startIndexGPR);
1542 emitPopulateSliceIndex(node->child3(), temp2GPR, tempGPR);
// No explicit end index: slice extends to the string's length.
1544 m_jit.move(temp2GPR, tempGPR);
1547 CCallHelpers::JumpList doneCases;
1548 CCallHelpers::JumpList slowCases;
// start >= end: result is the shared empty string.
1550 auto nonEmptyCase = m_jit.branch32(MacroAssembler::Below, startIndexGPR, tempGPR);
1551 m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(&vm())), tempGPR);
1552 doneCases.append(m_jit.jump());
1554 nonEmptyCase.link(&m_jit);
1555 m_jit.sub32(startIndexGPR, tempGPR); // the size of the sliced string.
// Only a length-1 slice is handled inline.
1556 slowCases.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(1)));
// A null StringImpl means the string is a rope — slow path resolves it.
1558 m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), temp2GPR);
1559 slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, temp2GPR));
1561 m_jit.loadPtr(MacroAssembler::Address(temp2GPR, StringImpl::dataOffset()), tempGPR);
1563 // Load the character into scratchReg
1564 m_jit.zeroExtend32ToPtr(startIndexGPR, startIndexGPR);
1565 auto is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(temp2GPR, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1567 m_jit.load8(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesOne, 0), tempGPR);
1568 auto cont8Bit = m_jit.jump();
1570 is16Bit.link(&m_jit);
1571 m_jit.load16(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesTwo, 0), tempGPR);
// Characters >= 0x100 are outside the single-character string table.
1573 auto bigCharacter = m_jit.branch32(MacroAssembler::AboveOrEqual, tempGPR, TrustedImm32(0x100));
1575 // 8 bit string values don't need the isASCII check.
1576 cont8Bit.link(&m_jit);
// Index into smallStrings.singleCharacterStrings(): scale the char code by
// the pointer size (4 or 8 bytes) and load the interned JSString*.
1578 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), tempGPR);
1579 m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), tempGPR);
1580 m_jit.loadPtr(tempGPR, tempGPR);
1582 addSlowPathGenerator(
1584 bigCharacter, this, operationSingleCharacterString, tempGPR, tempGPR));
1586 addSlowPathGenerator(
1588 slowCases, this, operationStringSubstr, tempGPR, stringGPR, startIndexGPR, tempGPR));
1590 doneCases.link(&m_jit);
1591 cellResult(tempGPR, node);
1594 void SpeculativeJIT::compileToLowerCase(Node* node)
1596 ASSERT(node->op() == ToLowerCase);
1597 SpeculateCellOperand string(this, node->child1());
1598 GPRTemporary temp(this);
1599 GPRTemporary index(this);
1600 GPRTemporary charReg(this);
1601 GPRTemporary length(this);
1603 GPRReg stringGPR = string.gpr();
1604 GPRReg tempGPR = temp.gpr();
1605 GPRReg indexGPR = index.gpr();
1606 GPRReg charGPR = charReg.gpr();
1607 GPRReg lengthGPR = length.gpr();
1609 speculateString(node->child1(), stringGPR);
1611 CCallHelpers::JumpList slowPath;
1613 m_jit.move(TrustedImmPtr(0), indexGPR);
1615 m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1616 slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1618 slowPath.append(m_jit.branchTest32(
1619 MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1620 MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1621 m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1622 m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1624 auto loopStart = m_jit.label();
1625 auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1626 m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1627 slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1628 m_jit.sub32(TrustedImm32('A'), charGPR);
1629 slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1631 m_jit.add32(TrustedImm32(1), indexGPR);
1632 m_jit.jump().linkTo(loopStart, &m_jit);
1634 slowPath.link(&m_jit);
1635 silentSpillAllRegisters(lengthGPR);
1636 callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1637 silentFillAllRegisters();
1638 m_jit.exceptionCheck();
1639 auto done = m_jit.jump();
1641 loopDone.link(&m_jit);
1642 m_jit.move(stringGPR, lengthGPR);
1645 cellResult(lengthGPR, node);
// Fused int32 compare + branch, with immediate-operand specializations when
// either child is an int32 constant. Structurally parallel to
// compilePeepHoleBooleanBranch above.
1648 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1650 BasicBlock* taken = branchNode->branchData()->taken.block;
1651 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1653 // The branch instruction will branch to the taken block.
1654 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1655 if (taken == nextBlock()) {
1656 condition = JITCompiler::invert(condition);
1657 BasicBlock* tmp = taken;
1662 if (node->child1()->isInt32Constant()) {
1663 int32_t imm = node->child1()->asInt32();
1664 SpeculateInt32Operand op2(this, node->child2());
1665 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1666 } else if (node->child2()->isInt32Constant()) {
1667 SpeculateInt32Operand op1(this, node->child1());
1668 int32_t imm = node->child2()->asInt32();
1669 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1671 SpeculateInt32Operand op1(this, node->child1());
1672 SpeculateInt32Operand op2(this, node->child2());
1673 branch32(condition, op1.gpr(), op2.gpr(), taken);
1679 // Returns true if the compare is fused with a subsequent branch.
// Central peephole dispatcher: when a compare node feeds directly into the
// next node's Branch, selects the type-specialized fused compare+branch
// helper based on the compare's binary use kinds, consumes both children
// explicitly, and advances the block cursor past the branch node.
1680 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1682 // Fused compare & branch.
1683 unsigned branchIndexInBlock = detectPeepHoleBranch();
1684 if (branchIndexInBlock != UINT_MAX) {
1685 Node* branchNode = m_block->at(branchIndexInBlock);
1687 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1688 // so can be no intervening nodes to also reference the compare.
1689 ASSERT(node->adjustedRefCount() == 1);
1691 if (node->isBinaryUseKind(Int32Use))
1692 compilePeepHoleInt32Branch(node, branchNode, condition);
// Int52 comparisons only exist on 64-bit (guard's #if is elided here).
1694 else if (node->isBinaryUseKind(Int52RepUse))
1695 compilePeepHoleInt52Branch(node, branchNode, condition);
1696 #endif // USE(JSVALUE64)
1697 else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1698 // Use non-peephole comparison, for now.
1700 } else if (node->isBinaryUseKind(DoubleRepUse))
1701 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
// CompareEq admits extra specializations: booleans, symbols, objects,
// object-vs-object-or-other, and null/undefined tests when one side is
// already proven to be SpecOther.
1702 else if (node->op() == CompareEq) {
1703 if (node->isBinaryUseKind(BooleanUse))
1704 compilePeepHoleBooleanBranch(node, branchNode, condition);
1705 else if (node->isBinaryUseKind(SymbolUse))
1706 compilePeepHoleSymbolEquality(node, branchNode);
1707 else if (node->isBinaryUseKind(ObjectUse))
1708 compilePeepHoleObjectEquality(node, branchNode);
1709 else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1710 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1711 else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1712 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1713 else if (!needsTypeCheck(node->child1(), SpecOther))
1714 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1715 else if (!needsTypeCheck(node->child2(), SpecOther))
1716 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1718 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1722 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// The fused helpers do not call useChildren; consume the edges here, then
// skip the branch node in the main compile loop.
1726 use(node->child1());
1727 use(node->child2());
1728 m_indexInBlock = branchIndexInBlock;
1729 m_currentNode = branchNode;
// Records in the variable-event stream that |node|'s value has become live
// for OSR exit purposes. Nodes without a virtual register have nothing to
// track.
1735 void SpeculativeJIT::noticeOSRBirth(Node* node)
1737 if (!node->hasVirtualRegister())
1740 VirtualRegister virtualRegister = node->virtualRegister();
1741 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1743 info.noticeOSRBirth(*m_stream, node, virtualRegister);
// Compiles a MovHint: no machine code is emitted; it only tells the OSR
// exit machinery that the child's value now logically lives in the hinted
// bytecode local.
1746 void SpeculativeJIT::compileMovHint(Node* node)
1748 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1750 Node* child = node->child1().node();
1751 noticeOSRBirth(child);
1753 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Aborts compilation of the current block by planting a crash (the code
// path is unreachable at runtime) and discarding register-allocation state.
1756 void SpeculativeJIT::bail(AbortReason reason)
1758 if (verboseCompilationEnabled())
1759 dataLog("Bailing compilation.\n");
// Reset to true so subsequent blocks don't immediately re-bail.
// NOTE(review): setting the flag to true after a bail looks intentional
// here, but confirm against how compileCurrentBlock consumes m_compileOkay.
1760 m_compileOkay = true;
1761 m_jit.abortWithReason(reason, m_lastGeneratedNode);
1762 clearGenerationInfo();
// Generates machine code for m_block: emits the block head label, handles
// CFA-unreachable blocks and catch entrypoints, seeds the OSR variable-event
// stream from variablesAtHead, then compiles each node in order while
// stepping the in-place abstract interpreter alongside code generation.
1765 void SpeculativeJIT::compileCurrentBlock()
1767 ASSERT(m_compileOkay);
1772 ASSERT(m_block->isReachable);
1774 m_jit.blockHeads()[m_block->index] = m_jit.label();
1776 if (!m_block->intersectionOfCFAHasVisited) {
1777 // Don't generate code for basic blocks that are unreachable according to CFA.
1778 // But to be sure that nobody has generated a jump to this block, drop in a
1780 m_jit.abortWithReason(DFGUnreachableBasicBlock);
// Catch entrypoints are entered from the runtime, so they must re-establish
// the stack pointer, callee saves, tag registers, and the code block slot.
1784 if (m_block->isCatchEntrypoint) {
1785 m_jit.addPtr(CCallHelpers::TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
1786 m_jit.emitSaveCalleeSaves();
1787 m_jit.emitMaterializeTagCheckRegisters();
1788 m_jit.emitPutToCallFrameHeader(m_jit.codeBlock(), CallFrameSlot::codeBlock);
1791 m_stream->appendAndLog(VariableEvent::reset());
1793 m_jit.jitAssertHasValidCallFrame();
1794 m_jit.jitAssertTagsInPlace();
1795 m_jit.jitAssertArgumentCountSane();
1798 m_state.beginBasicBlock(m_block);
// Tell the OSR exit machinery where each live local starts the block.
1800 for (size_t i = m_block->variablesAtHead.size(); i--;) {
1801 int operand = m_block->variablesAtHead.operandForIndex(i);
1802 Node* node = m_block->variablesAtHead[i];
1804 continue; // No need to record dead SetLocal's.
1806 VariableAccessData* variable = node->variableAccessData();
1808 if (!node->refCount())
1809 continue; // No need to record dead SetLocal's.
1810 format = dataFormatFor(variable->flushFormat());
1811 m_stream->appendAndLog(
1812 VariableEvent::setLocal(
1813 VirtualRegister(operand),
1814 variable->machineLocal(),
1818 m_origin = NodeOrigin();
// Main per-node loop: abstract interpretation and codegen run in lockstep.
1820 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1821 m_currentNode = m_block->at(m_indexInBlock);
1823 // We may have hit a contradiction that the CFA was aware of but that the JIT
1824 // didn't cause directly.
1825 if (!m_state.isValid()) {
1826 bail(DFGBailedAtTopOfBlock);
1830 m_interpreter.startExecuting();
1831 m_interpreter.executeKnownEdgeTypes(m_currentNode);
1832 m_jit.setForNode(m_currentNode);
1833 m_origin = m_currentNode->origin;
// During validation, only allow exits where the static mayExit analysis
// agrees an exit is possible.
1834 if (validationEnabled())
1835 m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1836 m_lastGeneratedNode = m_currentNode->op();
1838 ASSERT(m_currentNode->shouldGenerate());
1840 if (verboseCompilationEnabled()) {
1842 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1843 (int)m_currentNode->index(),
1844 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1848 if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1849 m_jit.jitReleaseAssertNoException(*m_jit.vm());
1851 m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1853 compile(m_currentNode);
1855 if (belongsInMinifiedGraph(m_currentNode->op()))
1856 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1858 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1859 m_jit.clearRegisterAllocationOffsets();
1862 if (!m_compileOkay) {
1863 bail(DFGBailedAtEndOfNode);
1867 // Make sure that the abstract state is rematerialized for the next node.
1868 m_interpreter.executeEffects(m_indexInBlock);
1871 // Perform the most basic verification that children have been used correctly.
1872 if (!ASSERT_DISABLED) {
1873 for (auto& info : m_generationInfo)
1874 RELEASE_ASSERT(!info.alive());
1878 // If we are making type predictions about our arguments then
1879 // we need to check that they are correct on function entry.
// Emits entry-time speculation checks for each predicted argument, reading
// the argument from its stack slot. The first switch below uses 64-bit
// tagged-value checks (branch64/tagTypeNumberRegister); the second uses
// 32-bit tag-word checks (tagFor/Int32Tag) — the #if USE(JSVALUE64) guards
// separating them are elided in this excerpt.
1880 void SpeculativeJIT::checkArgumentTypes()
1882 ASSERT(!m_currentNode);
1883 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1885 auto& arguments = m_jit.graph().m_rootToArguments.find(m_jit.graph().block(0))->value;
1886 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1887 Node* node = arguments[i];
1889 // The argument is dead. We don't do any checks for such arguments.
1893 ASSERT(node->op() == SetArgument);
1894 ASSERT(node->shouldGenerate());
1896 VariableAccessData* variableAccessData = node->variableAccessData();
1897 FlushFormat format = variableAccessData->flushFormat();
// FlushedJSValue accepts anything, so there is nothing to check.
1899 if (format == FlushedJSValue)
1902 VirtualRegister virtualRegister = variableAccessData->local();
1904 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// 64-bit checks against the boxed representation on the stack.
1908 case FlushedInt32: {
// A boxed int32 is >= tagTypeNumber; anything below is not an int32.
1909 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1912 case FlushedBoolean: {
// XOR with ValueFalse maps true/false to 1/0; any other bits => not a bool.
1913 GPRTemporary temp(this);
1914 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1915 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1916 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
// Cells have no tag bits set.
1920 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1924 RELEASE_ASSERT_NOT_REACHED();
// 32-bit checks compare the tag word of the stack slot directly.
1929 case FlushedInt32: {
1930 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1933 case FlushedBoolean: {
1934 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1938 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1942 RELEASE_ASSERT_NOT_REACHED();
1948 m_origin = NodeOrigin();
// Top-level driver: checks entry argument types, then compiles every basic
// block of the graph in index order.
1951 bool SpeculativeJIT::compile()
1953 checkArgumentTypes();
1955 ASSERT(!m_currentNode);
1956 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1957 m_jit.setForBlockIndex(blockIndex);
1958 m_block = m_jit.graph().block(blockIndex);
1959 compileCurrentBlock();
// Collects the head labels of all OSR-target and catch-entrypoint blocks,
// in block-index order. linkOSREntries consumes m_osrEntryHeads in exactly
// this order.
1965 void SpeculativeJIT::createOSREntries()
1967 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1968 BasicBlock* block = m_jit.graph().block(blockIndex);
1971 if (block->isOSRTarget || block->isCatchEntrypoint) {
1972 // Currently we don't have OSR entry trampolines. We could add them
1974 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// Finalizes OSR/catch entrypoints after code emission: walks blocks in the
// same order as createOSREntries, pairing each eligible block with the next
// recorded entry head. Catch entrypoints additionally record each argument's
// flush format so the runtime can materialize arguments on entry.
1979 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1981 unsigned osrEntryIndex = 0;
1982 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1983 BasicBlock* block = m_jit.graph().block(blockIndex);
1986 if (!block->isOSRTarget && !block->isCatchEntrypoint)
1988 if (block->isCatchEntrypoint) {
1989 auto& argumentsVector = m_jit.graph().m_rootToArguments.find(block)->value;
1990 Vector<FlushFormat> argumentFormats;
1991 argumentFormats.reserveInitialCapacity(argumentsVector.size());
1992 for (Node* setArgument : argumentsVector) {
1994 FlushFormat flushFormat = setArgument->variableAccessData()->flushFormat();
1995 ASSERT(flushFormat == FlushedInt32 || flushFormat == FlushedCell || flushFormat == FlushedBoolean || flushFormat == FlushedJSValue);
1996 argumentFormats.uncheckedAppend(flushFormat);
// DeadFlush marks arguments the compiled code never reads (this append is
// the else-branch of a condition elided from this excerpt).
1998 argumentFormats.uncheckedAppend(DeadFlush);
2000 m_jit.noticeCatchEntrypoint(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer, WTFMove(argumentFormats));
2002 ASSERT(block->isOSRTarget);
2003 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
2007 m_jit.jitCode()->finalizeOSREntrypoints();
2008 m_jit.jitCode()->common.finalizeCatchEntrypoints();
// Every recorded entry head must have been consumed exactly once.
2010 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
2012 if (verboseCompilationEnabled()) {
2013 DumpContext dumpContext;
2014 dataLog("OSR Entries:\n");
2015 for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
2016 dataLog(" ", inContext(entryData, &dumpContext), "\n");
2017 if (!dumpContext.isEmpty())
2018 dumpContext.dump(WTF::dataFile());
// Compiles CheckTraps for the polling-traps configuration: tests the VM's
// needTrapHandling flag and, when set, calls operationHandleTraps on the
// slow path. The GPRTemporary only reserves a scratch register for the
// slow-path call's result slot.
2022 void SpeculativeJIT::compileCheckTraps(Node*)
2024 ASSERT(Options::usePollingTraps());
2025 GPRTemporary unused(this);
2026 GPRReg unusedGPR = unused.gpr();
2028 JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
2029 JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
2031 addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
// Compiles PutByVal into a double-storage (ArrayWithDouble) butterfly.
// Speculates the stored value is a full real number (NaN is rejected via the
// self-compare below), then handles in-bounds stores, length-growing stores
// within vector capacity, and out-of-bounds stores via a slow-path call.
2034 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
2036 Edge child3 = m_jit.graph().varArgChild(node, 2);
2037 Edge child4 = m_jit.graph().varArgChild(node, 3);
2039 ArrayMode arrayMode = node->arrayMode();
2041 GPRReg baseReg = base.gpr();
2042 GPRReg propertyReg = property.gpr();
2044 SpeculateDoubleOperand value(this, child3);
2046 FPRReg valueReg = value.fpr();
// valueReg != valueReg (unordered) is true only for NaN, which must not be
// stored into double storage.
2049 JSValueRegs(), child3, SpecFullRealNumber,
2051 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
2056 StorageOperand storage(this, child4);
2057 GPRReg storageReg = storage.gpr();
// PutByValAlias: the bounds check was already performed by a dominating
// access, so store directly.
2059 if (node->op() == PutByValAlias) {
2060 // Store the value to the array.
2061 GPRReg propertyReg = property.gpr();
2062 FPRReg valueReg = value.fpr();
2063 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2065 noResult(m_currentNode);
2069 GPRTemporary temporary;
2070 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2072 MacroAssembler::Jump slowCase;
2074 if (arrayMode.isInBounds()) {
// In-bounds mode: an index past publicLength is a speculation failure.
2076 OutOfBounds, JSValueRegs(), 0,
2077 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2079 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
// Past publicLength but within vectorLength: grow publicLength in place.
2081 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2083 if (!arrayMode.isOutOfBounds())
2084 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
2086 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2087 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2089 inBounds.link(&m_jit);
2092 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2099 if (arrayMode.isOutOfBounds()) {
2100 addSlowPathGenerator(
// Strictness of the surrounding code decides which runtime op to use.
2103 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2104 NoResult, baseReg, propertyReg, valueReg));
2107 noResult(m_currentNode, UseChildrenCalledExplicitly);
// Emits String.prototype.charCodeAt for a proven JSString with resolved
// storage: bounds-checks the index against the string length, then loads the
// 8-bit or 16-bit character code depending on the StringImpl's is8Bit flag.
// Result is an int32.
2110 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2112 SpeculateCellOperand string(this, node->child1());
2113 SpeculateStrictInt32Operand index(this, node->child2());
2114 StorageOperand storage(this, node->child3());
2116 GPRReg stringReg = string.gpr();
2117 GPRReg indexReg = index.gpr();
2118 GPRReg storageReg = storage.gpr();
2120 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2122 // unsigned comparison so we can filter out negative indices and indices that are too large
2123 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2125 GPRTemporary scratch(this);
2126 GPRReg scratchReg = scratch.gpr();
2128 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
// NOTE(review): masking the index with StringImpl::maskOffset() looks like a
// Spectre-style index-masking mitigation — confirm against StringImpl.
2129 m_jit.and32(MacroAssembler::Address(scratchReg, StringImpl::maskOffset()), indexReg);
2131 // Load the character into scratchReg
2132 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2134 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2135 JITCompiler::Jump cont8Bit = m_jit.jump();
2137 is16Bit.link(&m_jit);
2139 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2141 cont8Bit.link(&m_jit);
2143 int32Result(scratchReg, m_currentNode);
// Emits string[index]: loads the character, maps codes < 0x100 through the
// VM's single-character string cache, and falls back to a slow path for
// larger code points. Out-of-bounds access is either a speculation failure
// (in-bounds array mode) or handled by a slow path; when the string prototype
// chain is sane, out-of-bounds reads can return undefined without a generic
// get-by-val call. Result is a cell (in-bounds mode) or a JSValue.
2146 void SpeculativeJIT::compileGetByValOnString(Node* node)
2148 SpeculateCellOperand base(this, node->child1());
2149 SpeculateStrictInt32Operand property(this, node->child2());
2150 StorageOperand storage(this, node->child3());
2151 GPRReg baseReg = base.gpr();
2152 GPRReg propertyReg = property.gpr();
2153 GPRReg storageReg = storage.gpr();
2155 GPRTemporary scratch(this);
2156 GPRReg scratchReg = scratch.gpr();
2157 #if USE(JSVALUE32_64)
// On 32-bit, an out-of-bounds-capable access produces a full JSValue, so a
// tag register is allocated only in that case.
2158 GPRTemporary resultTag;
2159 GPRReg resultTagReg = InvalidGPRReg;
2160 if (node->arrayMode().isOutOfBounds()) {
2161 GPRTemporary realResultTag(this);
2162 resultTag.adopt(realResultTag);
2163 resultTagReg = resultTag.gpr();
2167 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2169 // unsigned comparison so we can filter out negative indices and indices that are too large
2170 JITCompiler::Jump outOfBounds = m_jit.branch32(
2171 MacroAssembler::AboveOrEqual, propertyReg,
2172 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2173 if (node->arrayMode().isInBounds())
2174 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2176 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
// NOTE(review): index masking via StringImpl::maskOffset() — presumably a
// bounds-speculation hardening; confirm against StringImpl.
2177 m_jit.and32(MacroAssembler::Address(scratchReg, StringImpl::maskOffset()), propertyReg);
2179 // Load the character into scratchReg
2180 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2182 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2183 JITCompiler::Jump cont8Bit = m_jit.jump();
2185 is16Bit.link(&m_jit);
2187 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Characters >= 0x100 are not in the single-character cache; take a slow path.
2189 JITCompiler::Jump bigCharacter =
2190 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2192 // 8 bit string values don't need the isASCII check.
2193 cont8Bit.link(&m_jit);
// Index into vm->smallStrings.singleCharacterStrings(): scale the char code
// by pointer size (4 or 8 bytes) and load the cached JSString*.
2195 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2196 m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2197 m_jit.loadPtr(scratchReg, scratchReg);
2199 addSlowPathGenerator(
2201 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2203 if (node->arrayMode().isOutOfBounds()) {
2204 #if USE(JSVALUE32_64)
2205 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2208 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2209 bool prototypeChainIsSane = false;
2210 if (globalObject->stringPrototypeChainIsSane()) {
2211 // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2212 // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2213 // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2214 // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2215 // indexed properties either.
2216 // https://bugs.webkit.org/show_bug.cgi?id=144668
2217 m_jit.graph().registerAndWatchStructureTransition(globalObject->stringPrototype()->structure());
2218 m_jit.graph().registerAndWatchStructureTransition(globalObject->objectPrototype()->structure());
// Re-read after installing watchpoints, in case the chain changed meanwhile.
2219 prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2221 if (prototypeChainIsSane) {
2223 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2224 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2226 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2227 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2228 baseReg, propertyReg));
2232 addSlowPathGenerator(
2234 outOfBounds, this, operationGetByValStringInt,
2235 scratchReg, baseReg, propertyReg));
2237 addSlowPathGenerator(
2239 outOfBounds, this, operationGetByValStringInt,
2240 JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2245 jsValueResult(scratchReg, m_currentNode);
2247 jsValueResult(resultTagReg, scratchReg, m_currentNode);
2250 cellResult(scratchReg, m_currentNode);
// Emits String.fromCharCode(x) for a single argument. For an untyped child it
// simply calls the runtime. For an int32 child it tries the VM's cached
// single-character strings (codes <= 0xff with a non-null cache entry) and
// falls back to operationStringFromCharCode on a slow path.
2253 void SpeculativeJIT::compileFromCharCode(Node* node)
2255 Edge& child = node->child1();
2256 if (child.useKind() == UntypedUse) {
2257 JSValueOperand opr(this, child);
2258 JSValueRegs oprRegs = opr.jsValueRegs();
2261 JSValueRegsFlushedCallResult result(this);
2262 JSValueRegs resultRegs = result.regs();
2263 callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2264 m_jit.exceptionCheck();
2266 jsValueResult(resultRegs, node);
2270 SpeculateStrictInt32Operand property(this, child);
2271 GPRReg propertyReg = property.gpr();
2272 GPRTemporary smallStrings(this);
2273 GPRTemporary scratch(this);
2274 GPRReg scratchReg = scratch.gpr();
2275 GPRReg smallStringsReg = smallStrings.gpr();
2277 JITCompiler::JumpList slowCases;
// Codes above 0xff are not cached; note AboveOrEqual 0xff also sends 0xff
// itself to the slow path.
2278 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2279 m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2280 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null cache slot means the small string was never materialized.
2282 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2283 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2284 cellResult(scratchReg, m_currentNode);
// Classifies how a node's already-generated value should be consumed by
// ToInt32 codegen: as a machine integer, as a boxed JSValue, or not at all
// (a proven boolean/cell means the speculation is bound to fail, so execution
// is terminated).
2287 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2289 VirtualRegister virtualRegister = node->virtualRegister();
2290 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2292 switch (info.registerFormat()) {
2293 case DataFormatStorage:
// Storage pointers should never flow into ToInt32.
2294 RELEASE_ASSERT_NOT_REACHED();
2296 case DataFormatBoolean:
2297 case DataFormatCell:
// Unboxed booleans/cells can never satisfy the number speculation.
2298 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2299 return GeneratedOperandTypeUnknown;
2301 case DataFormatNone:
2302 case DataFormatJSCell:
2304 case DataFormatJSBoolean:
2305 case DataFormatJSDouble:
2306 return GeneratedOperandJSValue;
2308 case DataFormatJSInt32:
2309 case DataFormatInt32:
2310 return GeneratedOperandInteger;
2313 RELEASE_ASSERT_NOT_REACHED();
2314 return GeneratedOperandTypeUnknown;
// Emits the ValueToInt32 node: converts the child to an int32 per the
// ECMAScript ToInt32 semantics for the types being speculated. Fast paths
// exist for Int52, double-rep, and already-int32 values; boxed JSValues are
// unboxed and dispatched on tag (int32 / double / boolean / other), with
// doubles converted via the operationToInt32 runtime call.
2318 void SpeculativeJIT::compileValueToInt32(Node* node)
2320 switch (node->child1().useKind()) {
2323 SpeculateStrictInt52Operand op1(this, node->child1());
2324 GPRTemporary result(this, Reuse, op1);
2325 GPRReg op1GPR = op1.gpr();
2326 GPRReg resultGPR = result.gpr();
// Int52 -> int32 is just truncation to the low 32 bits.
2327 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2328 int32Result(resultGPR, node, DataFormatInt32);
2331 #endif // USE(JSVALUE64)
2333 case DoubleRepUse: {
2334 GPRTemporary result(this);
2335 SpeculateDoubleOperand op1(this, node->child1());
2336 FPRReg fpr = op1.fpr();
2337 GPRReg gpr = result.gpr();
// Fast truncation; doubles outside int32 range (or NaN) go to the slow call.
2338 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2340 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2341 hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2343 int32Result(gpr, node);
2349 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2350 case GeneratedOperandInteger: {
// Already an int32 in a register: just copy it.
2351 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2352 GPRTemporary result(this, Reuse, op1);
2353 m_jit.move(op1.gpr(), result.gpr());
2354 int32Result(result.gpr(), node, op1.format());
2357 case GeneratedOperandJSValue: {
2358 GPRTemporary result(this);
// 64-bit path: values >= tagTypeNumberRegister are boxed int32s.
2360 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2362 GPRReg gpr = op1.gpr();
2363 GPRReg resultGpr = result.gpr();
2364 FPRTemporary tempFpr(this);
2365 FPRReg fpr = tempFpr.fpr();
2367 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2368 JITCompiler::JumpList converted;
2370 if (node->child1().useKind() == NumberUse) {
// NumberUse: anything without the number-tag bits is a speculation failure.
2372 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2374 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2376 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2379 JSValueRegs(gpr), node->child1(), ~SpecCellCheck, m_jit.branchIfCell(JSValueRegs(gpr)));
2381 // It's not a cell: so true turns into 1 and all else turns into 0.
2382 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2383 converted.append(m_jit.jump());
2385 isNumber.link(&m_jit);
2388 // First, if we get here we have a double encoded as a JSValue
2389 unboxDouble(gpr, resultGpr, fpr);
2391 silentSpillAllRegisters(resultGpr);
2392 callOperation(operationToInt32, resultGpr, fpr);
2393 silentFillAllRegisters();
2395 converted.append(m_jit.jump());
2397 isInteger.link(&m_jit);
2398 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2400 converted.link(&m_jit);
// 32-bit (JSVALUE32_64) path: dispatch on the tag word instead.
2402 Node* childNode = node->child1().node();
2403 VirtualRegister virtualRegister = childNode->virtualRegister();
2404 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2406 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2408 GPRReg payloadGPR = op1.payloadGPR();
2409 GPRReg resultGpr = result.gpr();
2411 JITCompiler::JumpList converted;
2413 if (info.registerFormat() == DataFormatJSInt32)
2414 m_jit.move(payloadGPR, resultGpr);
2416 GPRReg tagGPR = op1.tagGPR();
2417 FPRTemporary tempFpr(this);
2418 FPRReg fpr = tempFpr.fpr();
2419 FPRTemporary scratch(this);
2421 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2423 if (node->child1().useKind() == NumberUse) {
// Tags >= LowestTag are non-double JSValues; fail the number speculation.
2425 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2427 MacroAssembler::AboveOrEqual, tagGPR,
2428 TrustedImm32(JSValue::LowestTag)));
2430 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2433 op1.jsValueRegs(), node->child1(), ~SpecCell,
2434 m_jit.branchIfCell(op1.jsValueRegs()));
2436 // It's not a cell: so true turns into 1 and all else turns into 0.
2437 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2438 m_jit.move(TrustedImm32(0), resultGpr);
2439 converted.append(m_jit.jump());
2441 isBoolean.link(&m_jit);
// Boolean payload is already 0 or 1.
2442 m_jit.move(payloadGPR, resultGpr);
2443 converted.append(m_jit.jump());
2445 isNumber.link(&m_jit);
2448 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2450 silentSpillAllRegisters(resultGpr);
2451 callOperation(operationToInt32, resultGpr, fpr);
2452 silentFillAllRegisters();
2454 converted.append(m_jit.jump());
2456 isInteger.link(&m_jit);
2457 m_jit.move(payloadGPR, resultGpr);
2459 converted.link(&m_jit);
2462 int32Result(resultGpr, node);
2465 case GeneratedOperandTypeUnknown:
// Only reachable after terminateSpeculativeExecution above.
2466 RELEASE_ASSERT(!m_compileOkay);
2469 RELEASE_ASSERT_NOT_REACHED();
2474 ASSERT(!m_compileOkay);
// Emits UInt32ToNumber: reinterprets an int32 as an unsigned value. If the
// arith mode tolerates "overflow" (top bit set), the result is widened to
// Int52 (when available) or to a double by adding 2^32 to negative inputs.
// Otherwise the top bit triggers an Overflow speculation failure and the
// value stays an int32.
2479 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2481 if (doesOverflow(node->arithMode())) {
2482 if (enableInt52()) {
2483 SpeculateInt32Operand op1(this, node->child1());
2484 GPRTemporary result(this, Reuse, op1);
// Zero-extension makes the 32-bit pattern a non-negative 52-bit integer.
2485 m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2486 strictInt52Result(result.gpr(), node);
2489 SpeculateInt32Operand op1(this, node->child1());
2490 FPRTemporary result(this);
2492 GPRReg inputGPR = op1.gpr();
2493 FPRReg outputFPR = result.fpr();
2495 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
// A negative int32 means the unsigned value is input + 2^32.
2497 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2498 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2499 positive.link(&m_jit);
2501 doubleResult(outputFPR, node);
2505 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2507 SpeculateInt32Operand op1(this, node->child1());
2508 GPRTemporary result(this);
2510 m_jit.move(op1.gpr(), result.gpr());
// Negative here means the unsigned value doesn't fit in int32: bail out.
2512 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2514 int32Result(result.gpr(), node, op1.format());
// Emits DoubleAsInt32: converts a double to an int32, speculating that the
// conversion is exact. Inexact conversions (and negative zero, when the arith
// mode requires distinguishing it) trigger an Overflow speculation failure.
2517 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2519 SpeculateDoubleOperand op1(this, node->child1());
2520 FPRTemporary scratch(this);
2521 GPRTemporary result(this);
2523 FPRReg valueFPR = op1.fpr();
2524 FPRReg scratchFPR = scratch.fpr();
2525 GPRReg resultGPR = result.gpr();
2527 JITCompiler::JumpList failureCases;
2528 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2529 m_jit.branchConvertDoubleToInt32(
2530 valueFPR, resultGPR, failureCases, scratchFPR,
2531 shouldCheckNegativeZero(node->arithMode()));
2532 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2534 int32Result(resultGPR, node);
// Emits DoubleRep: produces the double representation of the child. Handles
// RealNumberUse (unbox double, fall back to int32 with a real-number type
// check), NotCellUse/NumberUse on boxed values (including undefined -> NaN,
// null/false -> 0, true -> 1 in the NotCell case), and Int52. Both the
// JSVALUE64 and JSVALUE32_64 encodings are covered.
2537 void SpeculativeJIT::compileDoubleRep(Node* node)
2539 switch (node->child1().useKind()) {
2540 case RealNumberUse: {
2541 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2542 FPRTemporary result(this);
2544 JSValueRegs op1Regs = op1.jsValueRegs();
2545 FPRReg resultFPR = result.fpr();
2548 GPRTemporary temp(this);
2549 GPRReg tempGPR = temp.gpr();
// Unbox without asserting number-ness; NaN from a non-double is filtered below.
2550 m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2552 FPRTemporary temp(this);
2553 FPRReg tempFPR = temp.fpr();
2554 unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
// If the unboxed value equals itself it is a real double; otherwise the
// original must be an int32 (checked), which is then converted.
2557 JITCompiler::Jump done = m_jit.branchDouble(
2558 JITCompiler::DoubleEqual, resultFPR, resultFPR);
2561 op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2562 m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2566 doubleResult(resultFPR, node);
2572 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2574 SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2575 if (isInt32Speculation(possibleTypes)) {
// Abstract interpreter proved int32: plain conversion, no checks.
2576 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2577 FPRTemporary result(this);
2578 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2579 doubleResult(result.fpr(), node);
2583 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2584 FPRTemporary result(this);
2587 GPRTemporary temp(this);
2589 GPRReg op1GPR = op1.gpr();
2590 GPRReg tempGPR = temp.gpr();
2591 FPRReg resultFPR = result.fpr();
2592 JITCompiler::JumpList done;
2594 JITCompiler::Jump isInteger = m_jit.branch64(
2595 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2597 if (node->child1().useKind() == NotCellUse) {
// NotCellUse: map the non-number primitives to their ToNumber results.
2598 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2599 JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2601 static const double zero = 0;
2602 m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2604 JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2605 done.append(isNull);
2607 DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCellCheck,
2608 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2610 JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2611 static const double one = 1;
2612 m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2613 done.append(m_jit.jump());
2614 done.append(isFalse);
2616 isUndefined.link(&m_jit);
2617 static const double NaN = PNaN;
2618 m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2619 done.append(m_jit.jump());
2621 isNumber.link(&m_jit);
2622 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2624 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2625 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2628 unboxDouble(op1GPR, tempGPR, resultFPR);
2629 done.append(m_jit.jump());
2631 isInteger.link(&m_jit);
2632 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2634 #else // USE(JSVALUE64) -> this is the 32_64 case
2635 FPRTemporary temp(this);
2637 GPRReg op1TagGPR = op1.tagGPR();
2638 GPRReg op1PayloadGPR = op1.payloadGPR();
2639 FPRReg tempFPR = temp.fpr();
2640 FPRReg resultFPR = result.fpr();
2641 JITCompiler::JumpList done;
2643 JITCompiler::Jump isInteger = m_jit.branch32(
2644 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2646 if (node->child1().useKind() == NotCellUse) {
2647 JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2648 JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2650 static const double zero = 0;
2651 m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2653 JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2654 done.append(isNull);
2656 DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2658 JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2659 static const double one = 1;
2660 m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2661 done.append(m_jit.jump());
2662 done.append(isFalse);
2664 isUndefined.link(&m_jit);
2665 static const double NaN = PNaN;
2666 m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2667 done.append(m_jit.jump());
2669 isNumber.link(&m_jit);
2670 } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2672 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2673 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2676 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2677 done.append(m_jit.jump());
2679 isInteger.link(&m_jit);
2680 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2682 #endif // USE(JSVALUE64)
2684 doubleResult(resultFPR, node);
// Int52 case: a 52-bit integer always converts exactly via int64 -> double.
2690 SpeculateStrictInt52Operand value(this, node->child1());
2691 FPRTemporary result(this);
2693 GPRReg valueGPR = value.gpr();
2694 FPRReg resultFPR = result.fpr();
2696 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2698 doubleResult(resultFPR, node);
2701 #endif // USE(JSVALUE64)
2704 RELEASE_ASSERT_NOT_REACHED();
// Emits ValueRep: boxes a machine-representation number back into a JSValue.
// Doubles are purified of impure NaN bit patterns (when the type filter says
// they can occur) before boxing; Int52 values are boxed via boxInt52.
2709 void SpeculativeJIT::compileValueRep(Node* node)
2711 switch (node->child1().useKind()) {
2712 case DoubleRepUse: {
2713 SpeculateDoubleOperand value(this, node->child1());
2714 JSValueRegsTemporary result(this);
2716 FPRReg valueFPR = value.fpr();
2717 JSValueRegs resultRegs = result.regs();
2719 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2720 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2721 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2722 // local was purified.
2723 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2724 m_jit.purifyNaN(valueFPR);
2726 boxDouble(valueFPR, resultRegs);
2728 jsValueResult(resultRegs, node);
2734 SpeculateStrictInt52Operand value(this, node->child1());
2735 GPRTemporary result(this);
2737 GPRReg valueGPR = value.gpr();
2738 GPRReg resultGPR = result.gpr();
2740 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2742 jsValueResult(resultGPR, node);
2745 #endif // USE(JSVALUE64)
2748 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the [0, 255] byte range (Uint8ClampedArray semantics).
// NOTE(review): the body is not visible in this view — presumably it rounds
// to nearest and maps NaN to 0, matching compileClampDoubleToByte below;
// confirm against the full source.
2753 static double clampDoubleToByte(double d)
// Emits code that clamps the int32 in `result` to [0, 255] in place:
// values already in range pass through, values > 255 become 255, and the
// remaining (negative) values are zeroed.
2763 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2765 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2766 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
// Not in [0, 255] and not > 255 (signed): must be negative, so clamp to 0.
2767 jit.xorPtr(result, result);
2768 MacroAssembler::Jump clamped = jit.jump();
2770 jit.move(JITCompiler::TrustedImm32(255), result);
2772 inBounds.link(&jit);
// Emits code that clamps the double in `source` to a byte in `result`:
// NaN and values <= 0 become 0, values > 255 become 255, and in-range values
// are rounded by adding 0.5 and truncating. `scratch` is clobbered.
2775 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2777 // Unordered compare so we pick up NaN
2778 static const double zero = 0;
2779 static const double byteMax = 255;
2780 static const double half = 0.5;
2781 jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2782 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2783 jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2784 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2786 jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2787 // FIXME: This should probably just use a floating point round!
2788 // https://bugs.webkit.org/show_bug.cgi?id=72054
2789 jit.addDouble(source, scratch);
2790 jit.truncateDoubleToInt32(scratch, result);
2791 MacroAssembler::Jump truncatedInt = jit.jump();
2793 tooSmall.link(&jit);
2794 jit.xorPtr(result, result);
2795 MacroAssembler::Jump zeroed = jit.jump();
2798 jit.move(JITCompiler::TrustedImm32(255), result);
2800 truncatedInt.link(&jit);
// Returns a jump taken when the typed-array index is out of bounds, or an
// unset Jump when no check is needed: PutByValAlias has already proven
// bounds, and a foldable view with a constant in-range index needs no runtime
// check. With a foldable view the length is compared as an immediate;
// otherwise it is loaded from the view's length field.
2805 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2807 if (node->op() == PutByValAlias)
2808 return JITCompiler::Jump();
2809 JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2810 m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2812 uint32_t length = view->length();
2813 Node* indexNode = m_jit.graph().child(node, 1).node();
// Constant index provably below the folded length: no check required.
2814 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2815 return JITCompiler::Jump();
2816 return m_jit.branch32(
2817 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2819 return m_jit.branch32(
2820 MacroAssembler::AboveOrEqual, indexGPR,
2821 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a typed-array bounds check as a speculation failure: if
// jumpForTypedArrayOutOfBounds produced a jump, taking it causes OSR exit.
2824 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2826 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2829 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// Handles the out-of-bounds edge for a typed-array store. In-bounds array
// modes turn the out-of-bounds jump into a speculation failure; otherwise,
// for a wasteful-mode view, a null backing vector means the buffer was
// neutered (detached) and execution must bail out. Returns the jump that
// skips the out-of-bounds handling on the in-bounds path (unset if no
// out-of-bounds jump was supplied).
2832 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2834 JITCompiler::Jump done;
2835 if (outOfBounds.isSet()) {
2836 done = m_jit.jump();
2837 if (node->arrayMode().isInBounds())
2838 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2840 outOfBounds.link(&m_jit);
// Only WastefulTypedArray views can be neutered out from under us.
2842 JITCompiler::Jump notWasteful = m_jit.branch32(
2843 MacroAssembler::NotEqual,
2844 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2845 TrustedImm32(WastefulTypedArray));
2847 JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2848 MacroAssembler::Zero,
2849 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2850 speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2851 notWasteful.link(&m_jit);
// Loads one element from an integer typed array into resultReg, choosing the
// load width and sign/zero extension from the element size (the signed
// variants are the sign-extending loads).
2857 void SpeculativeJIT::loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
2859 switch (elementSize(type)) {
2862 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2864 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2868 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2870 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2873 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Publishes the result of an integer typed-array load. Everything except
// uint32 fits in int32 directly. For uint32: speculate int32 (failing on the
// top bit) when profiling allows, use Int52 when available, otherwise widen
// to a double by adding 2^32 to negative bit patterns.
2880 void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
2882 if (elementSize(type) < 4 || isSigned(type)) {
2883 int32Result(resultReg, node);
2887 ASSERT(elementSize(type) == 4 && !isSigned(type));
2888 if (node->shouldSpeculateInt32() && canSpeculate) {
// A set sign bit means the uint32 value exceeds INT32_MAX: bail out.
2889 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2890 int32Result(resultReg, node);
2895 if (node->shouldSpeculateAnyInt()) {
2896 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2897 strictInt52Result(resultReg, node);
2902 FPRTemporary fresult(this);
2903 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
// Negative bit pattern: the unsigned value is pattern + 2^32.
2904 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2905 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2906 positive.link(&m_jit);
2907 doubleResult(fresult.fpr(), node);
// Emits get-by-val on an integer typed array: bounds check (speculation
// failure on OOB), element load, then result publication via
// setIntTypedArrayLoadResult.
2910 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2912 ASSERT(isInt(type));
2914 SpeculateCellOperand base(this, node->child1());
2915 SpeculateStrictInt32Operand property(this, node->child2());
2916 StorageOperand storage(this, node->child3());
2918 GPRReg baseReg = base.gpr();
2919 GPRReg propertyReg = property.gpr();
2920 GPRReg storageReg = storage.gpr();
2922 GPRTemporary result(this);
2923 GPRReg resultReg = result.gpr();
2925 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2927 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2928 loadFromIntTypedArray(storageReg, propertyReg, resultReg, type);
2929 bool canSpeculate = true;
2930 setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
// Materializes the value operand for a store into an integer typed array,
// adopting the chosen register into `value`. Handles: appropriate numeric
// constants (clamped at compile time when isClamped), Int32 (with optional
// byte clamping), Int52 (with 64-bit clamping), and double-rep values
// (clamped, or truncated with a slow path for values that don't truncate;
// on 32-bit this also prepares tag registers for the slow-path call).
// NOTE(review): the return value's meaning (presumably "success") is not
// visible in this elided view — confirm against the full source.
2933 bool SpeculativeJIT::getIntTypedArrayStoreOperand(
2934 GPRTemporary& value,
2936 #if USE(JSVALUE32_64)
2937 GPRTemporary& propertyTag,
2938 GPRTemporary& valueTag,
2940 Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
2942 bool isAppropriateConstant = false;
2943 if (valueUse->isConstant()) {
// A constant is usable only if its speculated type fits the edge's filter.
2944 JSValue jsValue = valueUse->asJSValue();
2945 SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2946 SpeculatedType actualType = speculationFromValue(jsValue);
2947 isAppropriateConstant = (expectedType | actualType) == expectedType;
2950 if (isAppropriateConstant) {
2951 JSValue jsValue = valueUse->asJSValue();
2952 if (!jsValue.isNumber()) {
// Non-number constant can never satisfy the speculation: terminate.
2953 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2956 double d = jsValue.asNumber();
2958 d = clampDoubleToByte(d);
2959 GPRTemporary scratch(this);
2960 GPRReg scratchReg = scratch.gpr();
2961 m_jit.move(Imm32(toInt32(d)), scratchReg);
2962 value.adopt(scratch);
2964 switch (valueUse.useKind()) {
2966 SpeculateInt32Operand valueOp(this, valueUse);
2967 GPRTemporary scratch(this);
2968 GPRReg scratchReg = scratch.gpr();
2969 m_jit.move(valueOp.gpr(), scratchReg);
2971 compileClampIntegerToByte(m_jit, scratchReg);
2972 value.adopt(scratch);
2978 SpeculateStrictInt52Operand valueOp(this, valueUse);
2979 GPRTemporary scratch(this);
2980 GPRReg scratchReg = scratch.gpr();
2981 m_jit.move(valueOp.gpr(), scratchReg);
// 64-bit clamp of an Int52 to [0, 255], mirroring compileClampIntegerToByte.
2983 MacroAssembler::Jump inBounds = m_jit.branch64(
2984 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2985 MacroAssembler::Jump tooBig = m_jit.branch64(
2986 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2987 m_jit.move(TrustedImm32(0), scratchReg);
2988 MacroAssembler::Jump clamped = m_jit.jump();
2989 tooBig.link(&m_jit);
2990 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2991 clamped.link(&m_jit);
2992 inBounds.link(&m_jit);
2994 value.adopt(scratch);
2997 #endif // USE(JSVALUE64)
2999 case DoubleRepUse: {
3000 RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
3002 SpeculateDoubleOperand valueOp(this, valueUse);
3003 GPRTemporary result(this);
3004 FPRTemporary floatScratch(this);
3005 FPRReg fpr = valueOp.fpr();
3006 GPRReg gpr = result.gpr();
3007 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
3008 value.adopt(result);
3010 #if USE(JSVALUE32_64)
// 32-bit slow path needs boxed property/value; allocate tag registers now.
3011 GPRTemporary realPropertyTag(this);
3012 propertyTag.adopt(realPropertyTag);
3013 GPRReg propertyTagGPR = propertyTag.gpr();
3015 GPRTemporary realValueTag(this);
3016 valueTag.adopt(realValueTag);
3017 GPRReg valueTagGPR = valueTag.gpr();
3019 SpeculateDoubleOperand valueOp(this, valueUse);
3020 GPRTemporary result(this);
3021 FPRReg fpr = valueOp.fpr();
3022 GPRReg gpr = result.gpr();
// NaN stores as 0 in an integer typed array.
3023 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
3024 m_jit.xorPtr(gpr, gpr);
3025 MacroAssembler::JumpList fixed(m_jit.jump());
3026 notNaN.link(&m_jit);
3028 fixed.append(m_jit.branchTruncateDoubleToInt32(
3029 fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
// Truncation failed: box property and value for the runtime slow path.
3032 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
3033 boxDouble(fpr, gpr);
3035 UNUSED_PARAM(property);
3036 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
3037 boxDouble(fpr, valueTagGPR, gpr);
3039 slowPathCases.append(m_jit.jump());
3042 value.adopt(result);
3048 RELEASE_ASSERT_NOT_REACHED();
// Emits put-by-val into an integer typed array: materializes the value via
// getIntTypedArrayStoreOperand, bounds-checks (with neuter handling for
// out-of-bounds-capable modes), stores at the element width, and registers
// runtime slow paths (strict/non-strict, direct/ordinary) for values that
// could not be converted inline.
3055 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3057 ASSERT(isInt(type));
3059 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3060 GPRReg storageReg = storage.gpr();
3062 Edge valueUse = m_jit.graph().varArgChild(node, 2);
3065 #if USE(JSVALUE32_64)
3066 GPRTemporary propertyTag;
3067 GPRTemporary valueTag;
3070 JITCompiler::JumpList slowPathCases;
3072 bool result = getIntTypedArrayStoreOperand(
3074 #if USE(JSVALUE32_64)
3075 propertyTag, valueTag,
3077 valueUse, slowPathCases, isClamped(type));
3083 GPRReg valueGPR = value.gpr();
3084 #if USE(JSVALUE32_64)
3085 GPRReg propertyTagGPR = propertyTag.gpr();
3086 GPRReg valueTagGPR = valueTag.gpr();
// The value register must be distinct from base/property/storage, since the
// store below reads all of them.
3089 ASSERT_UNUSED(valueGPR, valueGPR != property);
3090 ASSERT(valueGPR != base);
3091 ASSERT(valueGPR != storageReg);
3092 JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3094 switch (elementSize(type)) {
3096 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
3099 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
3102 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3108 JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3112 if (!slowPathCases.empty()) {
// 64-bit slow paths take unboxed GPR arguments ...
3114 if (node->op() == PutByValDirect) {
3115 addSlowPathGenerator(slowPathCall(
3116 slowPathCases, this,
3117 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
3118 NoResult, base, property, valueGPR));
3120 addSlowPathGenerator(slowPathCall(
3121 slowPathCases, this,
3122 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
3123 NoResult, base, property, valueGPR));
3125 #else // not USE(JSVALUE64)
// ... while 32-bit slow paths pass tag/payload JSValueRegs pairs.
3126 if (node->op() == PutByValDirect) {
3127 addSlowPathGenerator(slowPathCall(
3128 slowPathCases, this,
3129 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3130 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3132 addSlowPathGenerator(slowPathCall(
3133 slowPathCases, this,
3134 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3135 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
// Emits machine code for a GetByVal load from a Float32/Float64 typed array.
// The result is always produced as a double (Float32 elements are widened).
3143 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3145     ASSERT(isFloat(type));
// child1 = typed-array cell, child2 = int32 index, child3 = backing store.
3147     SpeculateCellOperand base(this, node->child1());
3148     SpeculateStrictInt32Operand property(this, node->child2());
3149     StorageOperand storage(this, node->child3());
3151     GPRReg baseReg = base.gpr();
3152     GPRReg propertyReg = property.gpr();
3153     GPRReg storageReg = storage.gpr();
// The array-mode check (structure/type filtering) was already emitted upstream.
3155     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3157     FPRTemporary result(this);
3158     FPRReg resultReg = result.fpr();
3159     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
// Load scaled by element width; a 4-byte element is a float32 and must be
// widened to double before being handed to doubleResult below.
3160     switch (elementSize(type)) {
3162         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3163         m_jit.convertFloatToDouble(resultReg, resultReg);
3166         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
// Only 4- and 8-byte float elements exist; anything else is a compiler bug.
3170         RELEASE_ASSERT_NOT_REACHED();
3173     doubleResult(resultReg, node);
// Emits machine code for a PutByVal store into a Float32/Float64 typed array.
// The incoming value is a double; Float32 stores narrow it through a scratch FPR.
3176 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3178     ASSERT(isFloat(type));
// varArgChild(node, 3) is the backing-store edge of the PutByVal node.
3180     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3181     GPRReg storageReg = storage.gpr();
3183     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3184     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3186     SpeculateDoubleOperand valueOp(this, valueUse);
3187     FPRTemporary scratch(this);
3188     FPRReg valueFPR = valueOp.fpr();
3189     FPRReg scratchFPR = scratch.fpr();
// The array-mode check was already emitted upstream for the base edge.
3191     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
// Bounds check; the out-of-bounds jump is resolved after the store sequence.
3193     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3195     switch (elementSize(type)) {
// Narrow double -> float32 into the scratch register, then store 4 bytes.
// NOTE(review): the moveDouble looks redundant — convertDoubleToFloat
// immediately overwrites scratchFPR from valueFPR; possibly a leftover from a
// CPU-specific path hidden by the elided #if lines — confirm against full file.
3197         m_jit.moveDouble(valueFPR, scratchFPR);
3198         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3199         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3203         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
// Only 4- and 8-byte float elements exist; anything else is a compiler bug.
3206         RELEASE_ASSERT_NOT_REACHED();
// Out-of-bounds stores also need the neutered (detached) buffer check.
3209     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
// GetByVal where the base is speculated to be an object and the subscript a
// string: no inline fast path — after the type speculations, everything is
// delegated to the C++ helper operationGetByValObjectString.
3215 void SpeculativeJIT::compileGetByValForObjectWithString(Node* node)
3217     SpeculateCellOperand arg1(this, node->child1());
3218     SpeculateCellOperand arg2(this, node->child2());
3220     GPRReg arg1GPR = arg1.gpr();
3221     GPRReg arg2GPR = arg2.gpr();
// OSR-exit speculation checks: base must be an object, subscript a string.
3223     speculateObject(node->child1(), arg1GPR);
3224     speculateString(node->child2(), arg2GPR);
3227     JSValueRegsFlushedCallResult result(this);
3228     JSValueRegs resultRegs = result.regs();
3229     callOperation(operationGetByValObjectString, resultRegs, arg1GPR, arg2GPR);
// The C++ operation can throw (e.g. getters); check for a pending exception.
3230     m_jit.exceptionCheck();
3232     jsValueResult(resultRegs, node);
// GetByVal where the base is speculated to be an object and the subscript a
// symbol: mirrors compileGetByValForObjectWithString but speculates Symbol and
// delegates to operationGetByValObjectSymbol.
3235 void SpeculativeJIT::compileGetByValForObjectWithSymbol(Node* node)
3237     SpeculateCellOperand arg1(this, node->child1());
3238     SpeculateCellOperand arg2(this, node->child2());
3240     GPRReg arg1GPR = arg1.gpr();
3241     GPRReg arg2GPR = arg2.gpr();
// OSR-exit speculation checks: base must be an object, subscript a symbol.
3243     speculateObject(node->child1(), arg1GPR);
3244     speculateSymbol(node->child2(), arg2GPR);
3247     JSValueRegsFlushedCallResult result(this);
3248     JSValueRegs resultRegs = result.regs();
3249     callOperation(operationGetByValObjectSymbol, resultRegs, arg1GPR, arg2GPR);
// The C++ operation can throw (e.g. getters); check for a pending exception.
3250     m_jit.exceptionCheck();
3252     jsValueResult(resultRegs, node);
// PutByVal where the base is a cell and the subscript is speculated to be a
// string: delegates entirely to the C++ helper, choosing the strict/sloppy
// variant from the node's semantic origin.
3255 void SpeculativeJIT::compilePutByValForCellWithString(Node* node, Edge& child1, Edge& child2, Edge& child3)
3257     SpeculateCellOperand arg1(this, child1);
3258     SpeculateCellOperand arg2(this, child2);
3259     JSValueOperand arg3(this, child3);
3261     GPRReg arg1GPR = arg1.gpr();
3262     GPRReg arg2GPR = arg2.gpr();
3263     JSValueRegs arg3Regs = arg3.jsValueRegs();
// OSR-exit speculation check: subscript must be a string.
3265     speculateString(child2, arg2GPR);
3268     callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellStringStrict : operationPutByValCellStringNonStrict, arg1GPR, arg2GPR, arg3Regs);
// The C++ operation can throw (e.g. setters, strict-mode failure).
3269     m_jit.exceptionCheck();
// PutByVal where the base is a cell and the subscript is speculated to be a
// symbol: mirrors compilePutByValForCellWithString with the Symbol speculation
// and the symbol-subscript C++ helpers.
3274 void SpeculativeJIT::compilePutByValForCellWithSymbol(Node* node, Edge& child1, Edge& child2, Edge& child3)
3276     SpeculateCellOperand arg1(this, child1);
3277     SpeculateCellOperand arg2(this, child2);
3278     JSValueOperand arg3(this, child3);
3280     GPRReg arg1GPR = arg1.gpr();
3281     GPRReg arg2GPR = arg2.gpr();
3282     JSValueRegs arg3Regs = arg3.jsValueRegs();
// OSR-exit speculation check: subscript must be a symbol.
3284     speculateSymbol(child2, arg2GPR);
3287     callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellSymbolStrict : operationPutByValCellSymbolNonStrict, arg1GPR, arg2GPR, arg3Regs);
// The C++ operation can throw (e.g. setters, strict-mode failure).
3288     m_jit.exceptionCheck();
3293 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg, GPRReg scratch3Reg)
3295 // Check that prototype is an object.
3296 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3298 // Initialize scratchReg with the value being checked.
3299 m_jit.move(valueReg, scratchReg);
3301 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3302 MacroAssembler::Label loop(&m_jit);
3303 MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3304 MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3305 m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratch3Reg, scratch2Reg);
3307 m_jit.load64(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset()), scratch3Reg);
3308 auto hasMonoProto = m_jit.branchTest64(JITCompiler::NonZero, scratch3Reg);
3309 m_jit.load64(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset)), scratch3Reg);
3310 hasMonoProto.link(&m_jit);
3311 m_jit.move(scratch3Reg, scratchReg);
3313 m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + TagOffset), scratch2Reg);
3314 m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + PayloadOffset), scratch3Reg);
3315 auto hasMonoProto = m_jit.branch32(CCallHelpers::NotEqual, scratch2Reg, TrustedImm32(JSValue::EmptyValueTag));
3316 m_jit.load32(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset) + PayloadOffset), scratch3Reg);
3317 hasMonoProto.link(&m_jit);
3318 m_jit.move(scratch3Reg, scratchReg);
3321 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3323 m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3325 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3328 // No match - result is false.
3330 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3332 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3334 MacroAssembler::JumpList doneJumps;
3335 doneJumps.append(m_jit.jump());
3337 performDefaultHasInstance.link(&m_jit);
3338 silentSpillAllRegisters(scratchReg);
3339 callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg);
3340 silentFillAllRegisters();
3341 m_jit.exceptionCheck();
3343 m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3345 doneJumps.append(m_jit.jump());