2 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGSaneStringGetByValSlowPathGenerator.h"
38 #include "DFGSlowPathGenerator.h"
39 #include "LinkBuffer.h"
40 #include "JSCInlines.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "WriteBarrierBuffer.h"
43 #include <wtf/MathExtras.h>
45 namespace JSC { namespace DFG {
// SpeculativeJIT constructor: binds the code generator to the JITCompiler's
// graph, abstract state/interpreter, variable event stream and minified graph.
// NOTE(review): this extract is truncated — the leading member initializers
// and the constructor body are missing; verify against the upstream file.
47 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
51 , m_lastGeneratedNode(LastNodeType)
53 , m_generationInfo(m_jit.graph().frameRegisterCount())
54 , m_state(m_jit.graph())
55 , m_interpreter(m_jit.graph(), m_state)
56 , m_stream(&jit.jitCode()->variableEventStream)
57 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
58 , m_isCheckingArgumentTypes(false)
// Out-of-line destructor. NOTE(review): body not visible in this extract.
62 SpeculativeJIT::~SpeculativeJIT()
// Inline fast path for allocating a JSArray with the given structure and at
// least numElements slots. On success: resultGPR holds the new array and
// storageGPR its butterfly. Allocation failures branch to a slow path that
// calls operationNewArrayWithSize.
66 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
68 ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
70 GPRTemporary scratch(this);
71 GPRTemporary scratch2(this);
72 GPRReg scratchGPR = scratch.gpr();
73 GPRReg scratch2GPR = scratch2.gpr();
// Never allocate a vector smaller than the base vector length.
75 unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
77 JITCompiler::JumpList slowCases;
// Allocate butterfly storage (indexing header + vector), then rewind the
// pointer so storageGPR points at the butterfly (just past the header).
80 emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
81 m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
82 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
// Initialize the butterfly's public length and vector length.
84 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
85 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
// For double arrays, unused tail slots must hold PNaN (the hole value).
// NOTE(review): the #if USE(JSVALUE64)/#else scaffolding between the 64-bit
// store64 path and the 32-bit tag/payload path is missing from this extract.
87 if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
89 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR)
90 for (unsigned i = numElements; i < vectorLength; ++i)
91 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
93 EncodedValueDescriptor value;
94 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
95 for (unsigned i = numElements; i < vectorLength; ++i) {
96 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
97 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
102 // I want a slow path that also loads out the storage pointer, and that's
103 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
104 // of work for a very small piece of functionality. :-/
105 addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
106 slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
107 structure, numElements));
// Inline fast path for allocating an Arguments object for the current code
// origin, initializing its fields from the current call frame. Allocation
// failure appends to slowPath.
110 void SpeculativeJIT::emitAllocateArguments(GPRReg resultGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
112 Structure* structure = m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)->argumentsStructure();
// Size = inline register array offset + argumentCount * sizeof(JSValue).
114 m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
115 m_jit.mul32(TrustedImm32(sizeof(JSValue)), scratchGPR1, scratchGPR1);
116 m_jit.add32(TrustedImm32(Arguments::offsetOfInlineRegisterArray()), scratchGPR1);
117 emitAllocateVariableSizedJSObject<Arguments>(resultGPR, structure, scratchGPR1, scratchGPR1, scratchGPR2, slowPath);
118 m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfActivation()));
// numArguments excludes `this`, hence the subtract of 1.
121 m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
122 m_jit.sub32(TrustedImm32(1), scratchGPR1);
123 m_jit.store32(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfNumArguments()));
125 m_jit.store32(TrustedImm32(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfOverrodeLength()));
126 if (m_jit.isStrictModeFor(m_currentNode->origin.semantic))
127 m_jit.store8(TrustedImm32(1), MacroAssembler::Address(resultGPR, Arguments::offsetOfIsStrictMode()));
129 m_jit.storePtr(GPRInfo::callFrameRegister, MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisters()));
130 m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfSlowArgumentData()));
132 m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratchGPR1);
133 m_jit.storePtr(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfCallee()));
137 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
141 ASSERT(m_isCheckingArgumentTypes || m_canExit);
142 m_jit.appendExitInfo(jumpToFail);
143 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
146 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
150 ASSERT(m_isCheckingArgumentTypes || m_canExit);
151 m_jit.appendExitInfo(jumpsToFail);
152 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
155 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
158 return OSRExitJumpPlaceholder();
159 ASSERT(m_isCheckingArgumentTypes || m_canExit);
160 unsigned index = m_jit.jitCode()->osrExit.size();
161 m_jit.appendExitInfo();
162 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
163 return OSRExitJumpPlaceholder(index);
166 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
168 ASSERT(m_isCheckingArgumentTypes || m_canExit);
169 return speculationCheck(kind, jsValueSource, nodeUse.node());
172 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
174 ASSERT(m_isCheckingArgumentTypes || m_canExit);
175 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
178 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
180 ASSERT(m_isCheckingArgumentTypes || m_canExit);
181 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
184 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
188 ASSERT(m_isCheckingArgumentTypes || m_canExit);
189 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
190 m_jit.appendExitInfo(jumpToFail);
191 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
194 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
196 ASSERT(m_isCheckingArgumentTypes || m_canExit);
197 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
200 void SpeculativeJIT::emitInvalidationPoint(Node* node)
205 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
206 m_jit.jitCode()->appendOSRExit(OSRExit(
207 UncountableInvalidation, JSValueSource(),
208 m_jit.graph().methodOfGettingAValueProfileFor(node),
209 this, m_stream->size()));
210 info.m_replacementSource = m_jit.watchpointLabel();
211 ASSERT(info.m_replacementSource.isSet());
215 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
217 ASSERT(m_isCheckingArgumentTypes || m_canExit);
220 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
221 m_compileOkay = false;
222 if (verboseCompilationEnabled())
223 dataLog("Bailing compilation.\n");
226 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
228 ASSERT(m_isCheckingArgumentTypes || m_canExit);
229 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
232 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
234 ASSERT(needsTypeCheck(edge, typesPassedThrough));
235 m_interpreter.filter(edge, typesPassedThrough);
236 speculationCheck(BadType, source, edge.node(), jumpToFail);
239 RegisterSet SpeculativeJIT::usedRegisters()
243 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
244 GPRReg gpr = GPRInfo::toRegister(i);
245 if (m_gprs.isInUse(gpr))
248 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
249 FPRReg fpr = FPRInfo::toRegister(i);
250 if (m_fprs.isInUse(fpr))
254 result.merge(RegisterSet::specialRegisters());
259 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
261 m_slowPathGenerators.append(WTF::move(slowPathGenerator));
264 void SpeculativeJIT::runSlowPathGenerators()
266 for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
267 m_slowPathGenerators[i]->generate(this);
270 // On Windows we need to wrap fmod; on other platforms we can call it directly.
271 // On ARMv7 we assert that all function pointers have to low bit set (point to thumb code).
272 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
273 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
278 #define fmodAsDFGOperation fmod
281 void SpeculativeJIT::clearGenerationInfo()
283 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
284 m_generationInfo[i] = GenerationInfo();
285 m_gprs = RegisterBank<GPRInfo>();
286 m_fprs = RegisterBank<FPRInfo>();
// Build a plan describing how to silently spill (before a call) and refill
// (after it) the value living in GPR `source` for virtual register `spillMe`,
// without disturbing register allocation state. The plan depends on the
// value's current DataFormat and whether it is a constant.
// NOTE(review): the #if USE(JSVALUE64)/#else/#endif scaffolding and several
// else/brace lines are missing from this extract; the 64-bit and 32-bit
// branches appear interleaved below — verify against the upstream file.
289 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
291 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
292 Node* node = info.node();
293 DataFormat registerFormat = info.registerFormat();
294 ASSERT(registerFormat != DataFormatNone);
295 ASSERT(registerFormat != DataFormatDouble);
297 SilentSpillAction spillAction;
298 SilentFillAction fillAction;
// A value that is already spilled (or constant) needs no store.
300 if (!info.needsSpill())
301 spillAction = DoNothingForSpill;
304 ASSERT(info.gpr() == source);
305 if (registerFormat == DataFormatInt32)
306 spillAction = Store32Payload;
307 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
308 spillAction = StorePtr;
309 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
310 spillAction = Store64;
312 ASSERT(registerFormat & DataFormatJS);
313 spillAction = Store64;
315 #elif USE(JSVALUE32_64)
// 32-bit: a JS value spans a tag/payload register pair; spill only the half
// held by `source`.
316 if (registerFormat & DataFormatJS) {
317 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
318 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
320 ASSERT(info.gpr() == source);
321 spillAction = Store32Payload;
// Now choose the refill action, again by format and constant-ness.
326 if (registerFormat == DataFormatInt32) {
327 ASSERT(info.gpr() == source);
328 ASSERT(isJSInt32(info.registerFormat()));
329 if (node->hasConstant()) {
330 ASSERT(node->isInt32Constant());
331 fillAction = SetInt32Constant;
333 fillAction = Load32Payload;
334 } else if (registerFormat == DataFormatBoolean) {
// Unboxed booleans do not live in GPRs on 64-bit.
336 RELEASE_ASSERT_NOT_REACHED();
337 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
338 fillAction = DoNothingForFill;
340 #elif USE(JSVALUE32_64)
341 ASSERT(info.gpr() == source);
342 if (node->hasConstant()) {
343 ASSERT(node->isBooleanConstant());
344 fillAction = SetBooleanConstant;
346 fillAction = Load32Payload;
348 } else if (registerFormat == DataFormatCell) {
349 ASSERT(info.gpr() == source);
350 if (node->hasConstant()) {
351 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
352 node->asCell(); // To get the assertion.
353 fillAction = SetCellConstant;
356 fillAction = LoadPtr;
358 fillAction = Load32Payload;
361 } else if (registerFormat == DataFormatStorage) {
362 ASSERT(info.gpr() == source);
363 fillAction = LoadPtr;
364 } else if (registerFormat == DataFormatInt52) {
// Int52 may be spilled in shifted (Int52) or unshifted (StrictInt52) form;
// pick a refill that restores the shifted representation.
365 if (node->hasConstant())
366 fillAction = SetInt52Constant;
367 else if (info.spillFormat() == DataFormatInt52)
369 else if (info.spillFormat() == DataFormatStrictInt52)
370 fillAction = Load64ShiftInt52Left;
371 else if (info.spillFormat() == DataFormatNone)
374 RELEASE_ASSERT_NOT_REACHED();
375 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
376 fillAction = Load64; // Make GCC happy.
379 } else if (registerFormat == DataFormatStrictInt52) {
380 if (node->hasConstant())
381 fillAction = SetStrictInt52Constant;
382 else if (info.spillFormat() == DataFormatInt52)
383 fillAction = Load64ShiftInt52Right;
384 else if (info.spillFormat() == DataFormatStrictInt52)
386 else if (info.spillFormat() == DataFormatNone)
389 RELEASE_ASSERT_NOT_REACHED();
390 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
391 fillAction = Load64; // Make GCC happy.
// Remaining case: a (boxed) JS value.
395 ASSERT(registerFormat & DataFormatJS);
397 ASSERT(info.gpr() == source);
398 if (node->hasConstant()) {
399 if (node->isCellConstant())
400 fillAction = SetTrustedJSConstant;
402 fillAction = SetJSConstant;
403 } else if (info.spillFormat() == DataFormatInt32) {
404 ASSERT(registerFormat == DataFormatJSInt32);
405 fillAction = Load32PayloadBoxInt;
// 32-bit: refill only the half of the pair that `source` holds.
409 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
410 if (node->hasConstant())
411 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
412 else if (info.payloadGPR() == source)
413 fillAction = Load32Payload;
414 else { // Fill the Tag
415 switch (info.spillFormat()) {
416 case DataFormatInt32:
417 ASSERT(registerFormat == DataFormatJSInt32);
418 fillAction = SetInt32Tag;
421 ASSERT(registerFormat == DataFormatJSCell);
422 fillAction = SetCellTag;
424 case DataFormatBoolean:
425 ASSERT(registerFormat == DataFormatJSBoolean);
426 fillAction = SetBooleanTag;
429 fillAction = Load32Tag;
436 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Build a silent spill/fill plan for a double value living in FPR `source`.
// Doubles either need a StoreDouble spill (if not already spilled) and a
// LoadDouble refill, or a SetDoubleConstant refill for constants.
// NOTE(review): #if USE(JSVALUE64)/#endif scaffolding and some else/brace
// lines are missing from this extract — verify against the upstream file.
439 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
441 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
442 Node* node = info.node();
443 ASSERT(info.registerFormat() == DataFormatDouble);
445 SilentSpillAction spillAction;
446 SilentFillAction fillAction;
448 if (!info.needsSpill())
449 spillAction = DoNothingForSpill;
451 ASSERT(!node->hasConstant());
452 ASSERT(info.spillFormat() == DataFormatNone);
453 ASSERT(info.fpr() == source);
454 spillAction = StoreDouble;
458 if (node->hasConstant()) {
459 node->asNumber(); // To get the assertion.
460 fillAction = SetDoubleConstant;
462 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
463 fillAction = LoadDouble;
465 #elif USE(JSVALUE32_64)
466 ASSERT(info.registerFormat() == DataFormatDouble);
467 if (node->hasConstant()) {
468 node->asNumber(); // To get the assertion.
469 fillAction = SetDoubleConstant;
471 fillAction = LoadDouble;
474 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Execute the spill half of a SilentRegisterSavePlan: store the register to
// the value's stack slot using the store width chosen by the plan.
// NOTE(review): the case labels (Store32Tag, Store32Payload, StorePtr,
// Store64, StoreDouble) and break statements are missing from this extract;
// each store below corresponds to one spill action.
477 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
479 switch (plan.spillAction()) {
480 case DoNothingForSpill:
483 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
486 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
489 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
493 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
497 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
500 RELEASE_ASSERT_NOT_REACHED();
// Execute the fill half of a SilentRegisterSavePlan: rematerialize the value
// into its register after a call, either from a constant or from its stack
// slot. `canTrample` is a scratch GPR the fill may clobber (used on 64-bit
// to materialize double constants).
// NOTE(review): many case labels, break statements and #if USE(JSVALUE64)/
// #else boundaries are missing from this extract — verify against upstream.
504 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
506 #if USE(JSVALUE32_64)
507 UNUSED_PARAM(canTrample);
509 switch (plan.fillAction()) {
510 case DoNothingForFill:
512 case SetInt32Constant:
513 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
516 case SetInt52Constant:
517 m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
519 case SetStrictInt52Constant:
520 m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
522 #endif // USE(JSVALUE64)
523 case SetBooleanConstant:
524 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
526 case SetCellConstant:
527 m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
530 case SetTrustedJSConstant:
531 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
534 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
536 case SetDoubleConstant:
// 64-bit: materialize the double's bit pattern in a GPR, then move to FPR.
537 m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
538 m_jit.move64ToDouble(canTrample, plan.fpr());
540 case Load32PayloadBoxInt:
// Reload an int32 payload and re-box it as a JSValue.
541 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
542 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
544 case Load32PayloadConvertToInt52:
545 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
546 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
547 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
549 case Load32PayloadSignExtend:
550 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
551 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
554 case SetJSConstantTag:
555 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
557 case SetJSConstantPayload:
558 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
561 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
564 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
567 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
569 case SetDoubleConstant:
// 32-bit: load the double constant from the constant pool.
570 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
574 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
577 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
580 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
584 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
586 case Load64ShiftInt52Right:
587 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
588 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
590 case Load64ShiftInt52Left:
591 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
592 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
596 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
599 RELEASE_ASSERT_NOT_REACHED();
// Emit a check that the indexing type in tempGPR matches the given shape for
// this ArrayMode's array class; returns the jump taken on mismatch. tempGPR
// is clobbered (masked in place).
603 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
605 switch (arrayMode.arrayClass()) {
606 case Array::OriginalArray: {
// OriginalArray should have been handled by structure checks elsewhere.
608 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
609 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
// Array class: must be an array with exactly this shape.
615 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
616 return m_jit.branch32(
617 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
619 case Array::NonArray:
620 case Array::OriginalNonArray:
// Non-array: the IsArray bit must be clear, shape must match.
621 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
622 return m_jit.branch32(
623 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
625 case Array::PossiblyArray:
// Possibly-array: ignore the IsArray bit, check shape only.
626 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
627 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
630 RELEASE_ASSERT_NOT_REACHED();
631 return JITCompiler::Jump();
// Emit checks that the indexing type in tempGPR is acceptable for the given
// ArrayMode; returns the list of jumps taken on mismatch. For ArrayStorage
// modes the SlowPut variant accepts a range of shapes via an unsigned
// range-compare (sub + Above).
// NOTE(review): several result.append(...) wrapper lines, case labels and
// braces are missing from this extract — verify against the upstream file.
634 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
636 JITCompiler::JumpList result;
638 switch (arrayMode.type()) {
640 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
643 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
645 case Array::Contiguous:
646 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
648 case Array::ArrayStorage:
649 case Array::SlowPutArrayStorage: {
650 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
652 if (arrayMode.isJSArray()) {
653 if (arrayMode.isSlowPut()) {
// Must be an array...
656 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
// ...with a shape in [ArrayStorageShape, SlowPutArrayStorageShape].
657 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
658 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
661 MacroAssembler::Above, tempGPR,
662 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
665 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
667 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
670 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
671 if (arrayMode.isSlowPut()) {
672 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
675 MacroAssembler::Above, tempGPR,
676 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
680 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Compile a CheckArray node: verify at runtime that the base cell matches
// the node's ArrayMode (by indexing type, cell type, or ClassInfo), without
// performing any conversion. Skips the check entirely when the abstract
// state proves it already holds.
// NOTE(review): case labels, break/return statements and the early-return
// brace structure are missing from this extract — verify against upstream.
691 void SpeculativeJIT::checkArray(Node* node)
693 ASSERT(node->arrayMode().isSpecific());
694 ASSERT(!node->arrayMode().doesConversion());
696 SpeculateCellOperand base(this, node->child1());
697 GPRReg baseReg = base.gpr();
// Abstract interpretation already proved the array mode: nothing to emit.
699 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
700 noResult(m_currentNode);
704 const ClassInfo* expectedClassInfo = 0;
706 switch (node->arrayMode().type()) {
708 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
// Indexed-storage modes: check the indexing-type byte.
712 case Array::Contiguous:
713 case Array::ArrayStorage:
714 case Array::SlowPutArrayStorage: {
715 GPRTemporary temp(this);
716 GPRReg tempGPR = temp.gpr();
717 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
719 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
720 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
722 noResult(m_currentNode);
725 case Array::Arguments:
726 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ArgumentsType);
728 noResult(m_currentNode);
// Typed-array modes: check the cell's JSType.
731 speculateCellTypeWithoutTypeFiltering(
732 node->child1(), baseReg,
733 typeForTypedArrayType(node->arrayMode().typedArrayType()));
734 noResult(m_currentNode);
// Fallback: compare the structure's ClassInfo pointer.
738 RELEASE_ASSERT(expectedClassInfo);
740 GPRTemporary temp(this);
741 GPRTemporary temp2(this);
742 m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
744 BadType, JSValueSource::unboxedCell(baseReg), node,
746 MacroAssembler::NotEqual,
747 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
748 MacroAssembler::TrustedImmPtr(expectedClassInfo)));
750 noResult(m_currentNode);
// Compile Arrayify/ArrayifyToStructure: fast-path check that the base object
// already has the wanted structure/indexing shape; otherwise fall into an
// ArrayifySlowPathGenerator that converts the object's storage. propertyReg
// may be InvalidGPRReg when there is no index operand.
// NOTE(review): several lines (structure operand, load8 of the indexing
// type, braces) are missing from this extract — verify against upstream.
753 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
755 ASSERT(node->arrayMode().doesConversion());
757 GPRTemporary temp(this);
758 GPRTemporary structure;
759 GPRReg tempGPR = temp.gpr();
760 GPRReg structureGPR = InvalidGPRReg;
// Only the generic Arrayify needs a scratch register for the structure.
762 if (node->op() != ArrayifyToStructure) {
763 GPRTemporary realStructure(this);
764 structure.adopt(realStructure);
765 structureGPR = structure.gpr();
768 // We can skip all that comes next if we already have array storage.
769 MacroAssembler::JumpList slowPath;
771 if (node->op() == ArrayifyToStructure) {
772 slowPath.append(m_jit.branchWeakStructure(
773 JITCompiler::NotEqual,
774 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
778 MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
780 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
783 addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
784 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
786 noResult(m_currentNode);
789 void SpeculativeJIT::arrayify(Node* node)
791 ASSERT(node->arrayMode().isSpecific());
793 SpeculateCellOperand base(this, node->child1());
795 if (!node->child2()) {
796 arrayify(node, base.gpr(), InvalidGPRReg);
800 SpeculateInt32Operand property(this, node->child2());
802 arrayify(node, base.gpr(), property.gpr());
805 GPRReg SpeculativeJIT::fillStorage(Edge edge)
807 VirtualRegister virtualRegister = edge->virtualRegister();
808 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
810 switch (info.registerFormat()) {
811 case DataFormatNone: {
812 if (info.spillFormat() == DataFormatStorage) {
813 GPRReg gpr = allocate();
814 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
815 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
816 info.fillStorage(*m_stream, gpr);
820 // Must be a cell; fill it as a cell and then return the pointer.
821 return fillSpeculateCell(edge);
824 case DataFormatStorage: {
825 GPRReg gpr = info.gpr();
831 return fillSpeculateCell(edge);
835 void SpeculativeJIT::useChildren(Node* node)
837 if (node->flags() & NodeHasVarArgs) {
838 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
839 if (!!m_jit.graph().m_varArgChildren[childIdx])
840 use(m_jit.graph().m_varArgChildren[childIdx]);
843 Edge child1 = node->child1();
845 ASSERT(!node->child2() && !node->child3());
850 Edge child2 = node->child2();
852 ASSERT(!node->child3());
857 Edge child3 = node->child3();
// Compile an In node. Fast case: when the property is a constant atomic
// string, emit a patchable inline cache (InRecord) backed by an
// operationInOptimize slow path. Otherwise: flush and make a generic
// operationGenericIn call.
// NOTE(review): several lines (flushRegisters, use(), callOperation wrapper,
// braces/returns) are missing from this extract — verify against upstream.
864 void SpeculativeJIT::compileIn(Node* node)
866 SpeculateCellOperand base(this, node->child2());
867 GPRReg baseGPR = base.gpr();
869 if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
870 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
871 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
873 GPRTemporary result(this);
874 GPRReg resultGPR = result.gpr();
// The patchable jump initially always takes the slow path; the IC patches
// it once it has observed a structure.
878 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
879 MacroAssembler::Label done = m_jit.label();
881 auto slowPath = slowPathCall(
882 jump.m_jump, this, operationInOptimize,
883 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
884 string->tryGetValueImpl());
// Record the registers/origin the IC repatcher needs.
886 stubInfo->codeOrigin = node->origin.semantic;
887 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
888 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
889 stubInfo->patch.usedRegisters = usedRegisters();
890 stubInfo->patch.spillMode = NeedToSpill;
892 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
893 addSlowPathGenerator(WTF::move(slowPath));
897 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: call out with the boxed key.
902 JSValueOperand key(this, node->child1());
903 JSValueRegs regs = key.jsValueRegs();
905 GPRFlushedCallResult result(this);
906 GPRReg resultGPR = result.gpr();
913 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
915 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
918 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
920 unsigned branchIndexInBlock = detectPeepHoleBranch();
921 if (branchIndexInBlock != UINT_MAX) {
922 Node* branchNode = m_block->at(branchIndexInBlock);
924 ASSERT(node->adjustedRefCount() == 1);
926 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
928 m_indexInBlock = branchIndexInBlock;
929 m_currentNode = branchNode;
934 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
939 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
941 unsigned branchIndexInBlock = detectPeepHoleBranch();
942 if (branchIndexInBlock != UINT_MAX) {
943 Node* branchNode = m_block->at(branchIndexInBlock);
945 ASSERT(node->adjustedRefCount() == 1);
947 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
949 m_indexInBlock = branchIndexInBlock;
950 m_currentNode = branchNode;
955 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Map a DataFormat enum value to its printable name, for dump().
// NOTE(review): the string-table entries are missing from this extract;
// the array must stay in one-to-one correspondence with the DataFormat enum.
960 static const char* dataFormatString(DataFormat format)
962 // These values correspond to the DataFormat enum.
963 const char* strings[] = {
981 return strings[format];
// Debug dump of the current register-allocation state: both register banks
// and the per-virtual-register GenerationInfo table, wrapped in <label> tags.
// NOTE(review): the bank dump calls, the has-node conditional, and several
// #endif/brace lines are missing from this extract — verify against upstream.
984 void SpeculativeJIT::dump(const char* label)
987 dataLogF("<%s>\n", label);
989 dataLogF("  gprs:\n");
991 dataLogF("  fprs:\n");
993 dataLogF("  VirtualRegisters:\n");
994 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
995 GenerationInfo& info = m_generationInfo[i];
997 dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
999 dataLogF("    % 3d:[__][__]", i);
1000 if (info.registerFormat() == DataFormatDouble)
1001 dataLogF(":fpr%d\n", info.fpr());
1002 else if (info.registerFormat() != DataFormatNone
1003 #if USE(JSVALUE32_64)
1004 && !(info.registerFormat() & DataFormatJS)
1007 ASSERT(info.gpr() != InvalidGPRReg);
1008 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1013 dataLogF("</%s>\n", label);
1016 GPRTemporary::GPRTemporary()
1018 , m_gpr(InvalidGPRReg)
1022 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1024 , m_gpr(InvalidGPRReg)
1026 m_gpr = m_jit->allocate();
1029 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1031 , m_gpr(InvalidGPRReg)
1033 m_gpr = m_jit->allocate(specific);
1036 #if USE(JSVALUE32_64)
1037 GPRTemporary::GPRTemporary(
1038 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1040 , m_gpr(InvalidGPRReg)
1042 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1043 m_gpr = m_jit->reuse(op1.gpr(which));
1045 m_gpr = m_jit->allocate();
1047 #endif // USE(JSVALUE32_64)
// Default-construct with no registers allocated.
1049 JSValueRegsTemporary::JSValueRegsTemporary() { }
1051 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
// Out-of-line destructor; the member GPRTemporaries release their registers.
1061 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1063 JSValueRegs JSValueRegsTemporary::regs()
1066 return JSValueRegs(m_gpr.gpr());
1068 return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1072 void GPRTemporary::adopt(GPRTemporary& other)
1075 ASSERT(m_gpr == InvalidGPRReg);
1076 ASSERT(other.m_jit);
1077 ASSERT(other.m_gpr != InvalidGPRReg);
1078 m_jit = other.m_jit;
1079 m_gpr = other.m_gpr;
1081 other.m_gpr = InvalidGPRReg;
// FPRTemporary: scoped ownership of a floating-point register. The operand-
// taking constructors recycle the operand's FPR when its node can be reused
// (no remaining uses), avoiding an extra allocation; otherwise they allocate
// a fresh FPR. NOTE(review): else-branches and closing braces are elided from
// this listing; visible code is kept byte-identical.
1084 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1086 , m_fpr(InvalidFPRReg)
1088 m_fpr = m_jit->fprAllocate();
1091 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1093 , m_fpr(InvalidFPRReg)
1095 if (m_jit->canReuse(op1.node()))
1096 m_fpr = m_jit->reuse(op1.fpr());
1098 m_fpr = m_jit->fprAllocate();
// Two-operand form: prefer reusing op1's register, then op2's, then allocate.
1101 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1103 , m_fpr(InvalidFPRReg)
1105 if (m_jit->canReuse(op1.node()))
1106 m_fpr = m_jit->reuse(op1.fpr());
1107 else if (m_jit->canReuse(op2.node()))
1108 m_fpr = m_jit->reuse(op2.fpr());
1110 m_fpr = m_jit->fprAllocate();
1113 #if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand holds an FPR only when it carries an unboxed
// double, so reuse is possible only in that case.
1114 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1116 , m_fpr(InvalidFPRReg)
1118 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1119 m_fpr = m_jit->reuse(op1.fpr());
1121 m_fpr = m_jit->fprAllocate();
// Emit a fused double-compare-and-branch for a comparison node that is
// immediately followed by a Branch node (peephole fusion: the compare's
// boolean result is never materialized).
1125 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1127 BasicBlock* taken = branchNode->branchData()->taken.block;
1128 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1130 SpeculateDoubleOperand op1(this, node->child1());
1131 SpeculateDoubleOperand op2(this, node->child2());
// Branch to taken on the condition; the fall-through jump to notTaken is
// elided from this listing.
1133 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-identity compare-and-branch (CompareEq over ObjectUse edges).
// Speculates that both children are objects (cells that are not strings) and
// then compares the cell pointers directly.
1137 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1139 BasicBlock* taken = branchNode->branchData()->taken.block;
1140 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1142 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// If the taken block is the fall-through, invert the condition and swap the
// targets so the common case falls through (swap lines elided here).
1144 if (taken == nextBlock()) {
1145 condition = MacroAssembler::NotEqual;
1146 BasicBlock* tmp = taken;
1151 SpeculateCellOperand op1(this, node->child1());
1152 SpeculateCellOperand op2(this, node->child2());
1154 GPRReg op1GPR = op1.gpr();
1155 GPRReg op2GPR = op2.gpr();
// Fast path: while the masquerades-as-undefined watchpoint holds, it suffices
// to check each cell is not a string (i.e. its structure is not the VM's
// string structure) when abstract interpretation can't already prove object.
1157 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1158 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1160 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1161 m_jit.branchStructurePtr(
1162 MacroAssembler::Equal,
1163 MacroAssembler::Address(op1GPR, JSCell::structureIDOffset()),
1164 m_jit.vm()->stringStructure.get()));
1166 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1168 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1169 m_jit.branchStructurePtr(
1170 MacroAssembler::Equal,
1171 MacroAssembler::Address(op2GPR, JSCell::structureIDOffset()),
1172 m_jit.vm()->stringStructure.get()));
// Slow path (watchpoint invalidated): load each cell's Structure and, in
// addition to the not-a-string speculation, fail if the cell has the
// MasqueradesAsUndefined type-info flag set.
1175 GPRTemporary structure(this);
1176 GPRTemporary temp(this);
1177 GPRReg structureGPR = structure.gpr();
1179 m_jit.emitLoadStructure(op1GPR, structureGPR, temp.gpr());
1180 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1182 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1184 MacroAssembler::Equal,
1186 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1188 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1190 MacroAssembler::NonZero,
1191 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1192 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1194 m_jit.emitLoadStructure(op2GPR, structureGPR, temp.gpr());
1195 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1197 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1199 MacroAssembler::Equal,
1201 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1203 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1205 MacroAssembler::NonZero,
1206 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1207 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Object identity: compare the two cell pointers and branch (the trailing
// jump to notTaken is elided from this listing).
1210 branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean compare-and-branch. Constant-folds one side when either child
// is a boolean constant, comparing against its encoded JSValue bits.
1214 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1216 BasicBlock* taken = branchNode->branchData()->taken.block;
1217 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1219 // The branch instruction will branch to the taken block.
1220 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1221 if (taken == nextBlock()) {
1222 condition = JITCompiler::invert(condition);
1223 BasicBlock* tmp = taken;
// (The taken/notTaken swap lines are elided from this listing.)
1228 if (node->child1()->isBooleanConstant()) {
1229 bool imm = node->child1()->asBoolean();
1230 SpeculateBooleanOperand op2(this, node->child2());
1231 branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1232 } else if (node->child2()->isBooleanConstant()) {
1233 SpeculateBooleanOperand op1(this, node->child1());
1234 bool imm = node->child2()->asBoolean();
1235 branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
// General case: both operands in registers.
1237 SpeculateBooleanOperand op1(this, node->child1());
1238 SpeculateBooleanOperand op2(this, node->child2());
1239 branch32(condition, op1.gpr(), op2.gpr(), taken);
// Fused int32 compare-and-branch; mirrors the boolean version above but with
// raw int32 immediates when either child is an Int32 constant.
1245 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1247 BasicBlock* taken = branchNode->branchData()->taken.block;
1248 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1250 // The branch instruction will branch to the taken block.
1251 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1252 if (taken == nextBlock()) {
1253 condition = JITCompiler::invert(condition);
1254 BasicBlock* tmp = taken;
// (The taken/notTaken swap lines are elided from this listing.)
1259 if (node->child1()->isInt32Constant()) {
1260 int32_t imm = node->child1()->asInt32();
1261 SpeculateInt32Operand op2(this, node->child2());
1262 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1263 } else if (node->child2()->isInt32Constant()) {
1264 SpeculateInt32Operand op1(this, node->child1());
1265 int32_t imm = node->child2()->asInt32();
1266 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
// General case: both operands in registers.
1268 SpeculateInt32Operand op1(this, node->child1());
1269 SpeculateInt32Operand op2(this, node->child2());
1270 branch32(condition, op1.gpr(), op2.gpr(), taken);
1276 // Returns true if the compare is fused with a subsequent branch.
// Dispatcher for compare/branch fusion: if the very next live node in the
// block is a Branch consuming this compare, emit a fused compare-and-branch
// specialized on the compare's use kinds and skip the Branch node.
1277 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1279 // Fused compare & branch.
1280 unsigned branchIndexInBlock = detectPeepHoleBranch();
1281 if (branchIndexInBlock != UINT_MAX) {
1282 Node* branchNode = m_block->at(branchIndexInBlock);
1284 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1285 // so can be no intervening nodes to also reference the compare.
1286 ASSERT(node->adjustedRefCount() == 1);
// Select the specialized peephole by operand use kinds. The Int52 case is
// guarded by an elided #if USE(JSVALUE64) (note the #endif below).
1288 if (node->isBinaryUseKind(Int32Use))
1289 compilePeepHoleInt32Branch(node, branchNode, condition);
1291 else if (node->isBinaryUseKind(Int52RepUse))
1292 compilePeepHoleInt52Branch(node, branchNode, condition);
1293 #endif // USE(JSVALUE64)
1294 else if (node->isBinaryUseKind(DoubleRepUse))
1295 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1296 else if (node->op() == CompareEq) {
1297 if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1298 // Use non-peephole comparison, for now.
// (A "return false" for the string case is elided from this listing.)
1301 if (node->isBinaryUseKind(BooleanUse))
1302 compilePeepHoleBooleanBranch(node, branchNode, condition);
1303 else if (node->isBinaryUseKind(ObjectUse))
1304 compilePeepHoleObjectEquality(node, branchNode);
1305 else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1306 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1307 else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1308 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
// Fallback: generic (non-speculative) fused branch calling `operation`.
1310 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1314 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// Mark both children used, then advance code generation past the consumed
// Branch node so it is not compiled a second time.
1318 use(node->child1());
1319 use(node->child2());
1320 m_indexInBlock = branchIndexInBlock;
1321 m_currentNode = branchNode;
// Inform the OSR-exit value-recovery stream that this node's value has come
// into existence, so exits after this point can recover it. No-op for nodes
// without a virtual register.
1327 void SpeculativeJIT::noticeOSRBirth(Node* node)
1329 if (!node->hasVirtualRegister())
1332 VirtualRegister virtualRegister = node->virtualRegister();
1333 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1335 info.noticeOSRBirth(*m_stream, node, virtualRegister);
// Record a MovHint: the child node's value is now the live value of the given
// bytecode local, for OSR-exit reconstruction. Generates no machine code.
1338 void SpeculativeJIT::compileMovHint(Node* node)
1340 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1342 Node* child = node->child1().node();
// The child may first become OSR-visible here.
1343 noticeOSRBirth(child);
1345 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
// Give up generating meaningful code at the current point: emit a breakpoint-
// style abort with the given reason and reset per-value register state.
1348 void SpeculativeJIT::bail(AbortReason reason)
1350 if (verboseCompilationEnabled())
1351 dataLog("Bailing compilation.\n");
// NOTE(review): setting m_compileOkay back to true looks intentional -- the
// abort instruction below makes the code past this point unreachable, so
// compilation is re-armed to continue emitting (dead) code -- but confirm
// against upstream before relying on it.
1352 m_compileOkay = true;
1353 m_jit.abortWithReason(reason, m_lastGeneratedNode);
1354 clearGenerationInfo();
// Generate machine code for m_block: set up the block head label, replay the
// variable events for OSR, then compile each node in order while stepping the
// abstract interpreter in lock-step.
1357 void SpeculativeJIT::compileCurrentBlock()
1359 ASSERT(m_compileOkay);
// (An early return for a null m_block is elided from this listing.)
1364 ASSERT(m_block->isReachable);
// Record where this block's code begins, for jump linking and OSR entry.
1366 m_jit.blockHeads()[m_block->index] = m_jit.label();
1368 if (!m_block->intersectionOfCFAHasVisited) {
1369 // Don't generate code for basic blocks that are unreachable according to CFA.
1370 // But to be sure that nobody has generated a jump to this block, drop in a
1372 m_jit.abortWithReason(DFGUnreachableBasicBlock);
1376 m_stream->appendAndLog(VariableEvent::reset());
// Debug-build sanity checks on the frame at block entry.
1378 m_jit.jitAssertHasValidCallFrame();
1379 m_jit.jitAssertTagsInPlace();
1380 m_jit.jitAssertArgumentCountSane();
1383 m_state.beginBasicBlock(m_block);
// Replay head-of-block variable state into the OSR event stream so exits in
// this block know where each live local is flushed.
1385 for (size_t i = m_block->variablesAtHead.size(); i--;) {
1386 int operand = m_block->variablesAtHead.operandForIndex(i);
1387 Node* node = m_block->variablesAtHead[i];
1389 continue; // No need to record dead SetLocal's.
1391 VariableAccessData* variable = node->variableAccessData();
1393 if (!node->refCount())
1394 continue; // No need to record dead SetLocal's.
1395 format = dataFormatFor(variable->flushFormat());
1396 m_stream->appendAndLog(
1397 VariableEvent::setLocal(
1398 VirtualRegister(operand),
1399 variable->machineLocal(),
1403 m_codeOriginForExitTarget = CodeOrigin();
1404 m_codeOriginForExitProfile = CodeOrigin();
// Main per-node code generation loop.
1406 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1407 m_currentNode = m_block->at(m_indexInBlock);
1409 // We may have hit a contradiction that the CFA was aware of but that the JIT
1410 // didn't cause directly.
1411 if (!m_state.isValid()) {
1412 bail(DFGBailedAtTopOfBlock);
1416 if (ASSERT_DISABLED)
1417 m_canExit = true; // Essentially disable the assertions.
1419 m_canExit = mayExit(m_jit.graph(), m_currentNode);
1421 m_interpreter.startExecuting();
1422 m_jit.setForNode(m_currentNode);
// OSR exits taken while compiling this node report these origins.
1423 m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1424 m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1425 m_lastGeneratedNode = m_currentNode->op();
// Dead nodes still need bookkeeping (MovHint/ZombieHint/minified graph);
// several case labels in this switch are elided from this listing.
1426 if (!m_currentNode->shouldGenerate()) {
1427 switch (m_currentNode->op()) {
1429 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1433 RELEASE_ASSERT_NOT_REACHED();
1437 compileMovHint(m_currentNode);
1441 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
1446 if (belongsInMinifiedGraph(m_currentNode->op()))
1447 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1452 if (verboseCompilationEnabled()) {
1454 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1455 (int)m_currentNode->index(),
1456 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
// Live node: emit its code.
1460 compile(m_currentNode);
1462 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1463 m_jit.clearRegisterAllocationOffsets();
1466 if (!m_compileOkay) {
1467 bail(DFGBailedAtEndOfNode);
1471 if (belongsInMinifiedGraph(m_currentNode->op())) {
1472 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1473 noticeOSRBirth(m_currentNode);
1477 // Make sure that the abstract state is rematerialized for the next node.
1478 m_interpreter.executeEffects(m_indexInBlock);
1481 // Perform the most basic verification that children have been used correctly.
1482 if (!ASSERT_DISABLED) {
1483 for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1484 GenerationInfo& info = m_generationInfo[index];
1485 RELEASE_ASSERT(!info.alive());
1490 // If we are making type predictions about our arguments then
1491 // we need to check that they are correct on function entry.
// Emit entry-time type checks for each argument that the graph speculates a
// non-JSValue flush format for; a failed check triggers an OSR exit at
// bytecode origin 0. The two switch bodies correspond to the 64-bit and
// 32-bit value encodings (the surrounding #if/#else lines are elided).
1492 void SpeculativeJIT::checkArgumentTypes()
1494 ASSERT(!m_currentNode);
1495 m_isCheckingArgumentTypes = true;
1496 m_codeOriginForExitTarget = CodeOrigin(0);
1497 m_codeOriginForExitProfile = CodeOrigin(0);
1499 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1500 Node* node = m_jit.graph().m_arguments[i];
1502 // The argument is dead. We don't do any checks for such arguments.
1506 ASSERT(node->op() == SetArgument);
1507 ASSERT(node->shouldGenerate());
1509 VariableAccessData* variableAccessData = node->variableAccessData();
1510 FlushFormat format = variableAccessData->flushFormat();
// A fully generic JSValue needs no check.
1512 if (format == FlushedJSValue)
1515 VirtualRegister virtualRegister = variableAccessData->local();
1517 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// 64-bit encoding: int32s live at/above the tagTypeNumber range; booleans are
// ValueFalse/ValueTrue (differing only in bit 0); cells have no tag bits set.
1521 case FlushedInt32: {
1522 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1525 case FlushedBoolean: {
1526 GPRTemporary temp(this);
1527 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1528 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1529 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1533 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1537 RELEASE_ASSERT_NOT_REACHED();
// 32-bit encoding: check the tag word against the expected tag constant.
1542 case FlushedInt32: {
1543 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1546 case FlushedBoolean: {
1547 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1551 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1555 RELEASE_ASSERT_NOT_REACHED();
1560 m_isCheckingArgumentTypes = false;
// Top-level driver: check argument type speculations, then compile every
// basic block in index order. (The trailing link/return lines are elided
// from this listing.)
1563 bool SpeculativeJIT::compile()
1565 checkArgumentTypes();
1567 ASSERT(!m_currentNode);
1568 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1569 m_jit.setForBlockIndex(blockIndex);
1570 m_block = m_jit.graph().block(blockIndex);
1571 compileCurrentBlock();
// Collect the code labels of all OSR-target blocks, in block order, so they
// can be registered with the link buffer later (see linkOSREntries).
1577 void SpeculativeJIT::createOSREntries()
1579 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1580 BasicBlock* block = m_jit.graph().block(blockIndex);
1583 if (!block->isOSRTarget)
1586 // Currently we don't have OSR entry trampolines. We could add them
1588 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// Resolve the labels gathered by createOSREntries() against the finalized
// link buffer, registering each OSR-target block as an entry point. The two
// loops must visit OSR targets in the same order (checked by the ASSERT).
1592 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1594 unsigned osrEntryIndex = 0;
1595 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1596 BasicBlock* block = m_jit.graph().block(blockIndex);
1599 if (!block->isOSRTarget)
1601 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1603 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
// PutByVal into a double-typed array butterfly. Speculates the stored value
// is a "full real number" (no impure NaN), handles in-bounds stores directly,
// and emits out-of-bounds / length-growing paths per the node's ArrayMode.
1606 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1608 Edge child3 = m_jit.graph().varArgChild(node, 2);
1609 Edge child4 = m_jit.graph().varArgChild(node, 3);
1611 ArrayMode arrayMode = node->arrayMode();
1613 GPRReg baseReg = base.gpr();
1614 GPRReg propertyReg = property.gpr();
1616 SpeculateDoubleOperand value(this, child3);
1618 FPRReg valueReg = value.fpr();
// Speculate the double is not NaN-with-payload: a NaN compares unordered to
// itself, which the branch below detects.
1621 JSValueRegs(), child3, SpecFullRealNumber,
1623 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1628 StorageOperand storage(this, child4);
1629 GPRReg storageReg = storage.gpr();
// PutByValAlias: bounds already proven by a prior access; just store.
1631 if (node->op() == PutByValAlias) {
1632 // Store the value to the array.
1633 GPRReg propertyReg = property.gpr();
1634 FPRReg valueReg = value.fpr();
1635 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1637 noResult(m_currentNode);
1641 GPRTemporary temporary;
1642 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1644 MacroAssembler::Jump slowCase;
// In-bounds mode: index must be below publicLength or we OSR-exit. Otherwise
// allow stores up to vectorLength, growing publicLength as needed.
1646 if (arrayMode.isInBounds()) {
1648 OutOfBounds, JSValueRegs(), 0,
1649 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1651 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1653 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1655 if (!arrayMode.isOutOfBounds())
1656 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Index is within vector capacity but past publicLength: bump the length.
1658 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1659 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1661 inBounds.link(&m_jit);
1664 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
// Out-of-bounds mode: fall back to a C++ operation (strict/sloppy variants).
1671 if (arrayMode.isOutOfBounds()) {
1672 addSlowPathGenerator(
1675 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1676 NoResult, baseReg, propertyReg, valueReg));
1679 noResult(m_currentNode, UseChildrenCalledExplicitly);
// String.prototype.charCodeAt fast path: bounds-check the index against the
// string length, then load the 8-bit or 16-bit code unit from the string's
// character storage.
1682 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1684 SpeculateCellOperand string(this, node->child1());
1685 SpeculateStrictInt32Operand index(this, node->child2());
1686 StorageOperand storage(this, node->child3());
1688 GPRReg stringReg = string.gpr();
1689 GPRReg indexReg = index.gpr();
1690 GPRReg storageReg = storage.gpr();
1692 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1694 // unsigned comparison so we can filter out negative indices and indices that are too large
1695 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1697 GPRTemporary scratch(this);
1698 GPRReg scratchReg = scratch.gpr();
// scratchReg first holds the StringImpl*, then is overwritten with the char.
1700 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1702 // Load the character into scratchReg
1703 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1705 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1706 JITCompiler::Jump cont8Bit = m_jit.jump();
1708 is16Bit.link(&m_jit);
1710 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1712 cont8Bit.link(&m_jit);
1714 int32Result(scratchReg, m_currentNode);
// GetByVal on a string: load the code unit at the index and map it to a
// single-character JSString via the VM's small-strings table. Characters
// >= 0x100 and out-of-bounds indices go to slow paths. The 32-bit value
// representation additionally needs a tag register (elided #if blocks pair
// the 64-bit/32-bit variants throughout).
1717 void SpeculativeJIT::compileGetByValOnString(Node* node)
1719 SpeculateCellOperand base(this, node->child1());
1720 SpeculateStrictInt32Operand property(this, node->child2());
1721 StorageOperand storage(this, node->child3());
1722 GPRReg baseReg = base.gpr();
1723 GPRReg propertyReg = property.gpr();
1724 GPRReg storageReg = storage.gpr();
1726 GPRTemporary scratch(this);
1727 GPRReg scratchReg = scratch.gpr();
1728 #if USE(JSVALUE32_64)
// 32-bit only: a tag register is needed when the result can be a non-cell
// (out-of-bounds loads may return undefined).
1729 GPRTemporary resultTag;
1730 GPRReg resultTagReg = InvalidGPRReg;
1731 if (node->arrayMode().isOutOfBounds()) {
1732 GPRTemporary realResultTag(this);
1733 resultTag.adopt(realResultTag);
1734 resultTagReg = resultTag.gpr();
1738 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1740 // unsigned comparison so we can filter out negative indices and indices that are too large
1741 JITCompiler::Jump outOfBounds = m_jit.branch32(
1742 MacroAssembler::AboveOrEqual, propertyReg,
1743 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1744 if (node->arrayMode().isInBounds())
1745 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
// scratchReg first holds the StringImpl*, then the loaded code unit.
1747 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1749 // Load the character into scratchReg
1750 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1752 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1753 JITCompiler::Jump cont8Bit = m_jit.jump();
1755 is16Bit.link(&m_jit);
1757 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Only code units below 0x100 are in the small-strings table.
1759 JITCompiler::Jump bigCharacter =
1760 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1762 // 8 bit string values don't need the isASCII check.
1763 cont8Bit.link(&m_jit);
// Index into the singleCharacterStrings table (pointer-sized entries).
1765 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1766 m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1767 m_jit.loadPtr(scratchReg, scratchReg);
1769 addSlowPathGenerator(
1771 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1773 if (node->arrayMode().isOutOfBounds()) {
1774 #if USE(JSVALUE32_64)
1775 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
// If the String prototype chain is sane (no indexed accessors), an
// out-of-bounds read can only produce undefined, allowing a cheaper
// specialized slow path; otherwise call the generic operation.
1778 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1779 if (globalObject->stringPrototypeChainIsSane()) {
1781 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1782 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1784 addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1785 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1786 baseReg, propertyReg));
1790 addSlowPathGenerator(
1792 outOfBounds, this, operationGetByValStringInt,
1793 scratchReg, baseReg, propertyReg));
1795 addSlowPathGenerator(
1797 outOfBounds, this, operationGetByValStringInt,
1798 resultTagReg, scratchReg, baseReg, propertyReg));
// Out-of-bounds mode yields a generic JSValue; in-bounds yields a cell.
1803 jsValueResult(scratchReg, m_currentNode);
1805 jsValueResult(resultTagReg, scratchReg, m_currentNode);
1808 cellResult(scratchReg, m_currentNode);
// String.fromCharCode fast path: for code units <= 0xff (exclusive of the
// 0xff boundary per the branch below), fetch the interned single-character
// string from the VM's small-strings table; otherwise call the C++ operation.
1811 void SpeculativeJIT::compileFromCharCode(Node* node)
1813 SpeculateStrictInt32Operand property(this, node->child1());
1814 GPRReg propertyReg = property.gpr();
1815 GPRTemporary smallStrings(this);
1816 GPRTemporary scratch(this);
1817 GPRReg scratchReg = scratch.gpr();
1818 GPRReg smallStringsReg = smallStrings.gpr();
1820 JITCompiler::JumpList slowCases;
1821 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1822 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1823 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null table entry means the small string was never materialized; take the
// slow path to create it.
1825 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1826 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1827 cellResult(scratchReg, m_currentNode);
// Classify how a node's value is currently materialized, for ValueToInt32:
// already an integer, a generic JSValue needing conversion, or a format that
// contradicts the speculation (in which case execution is terminated).
1830 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1832 VirtualRegister virtualRegister = node->virtualRegister();
1833 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1835 switch (info.registerFormat()) {
1836 case DataFormatStorage:
// Storage pointers can never reach ValueToInt32.
1837 RELEASE_ASSERT_NOT_REACHED();
// Unboxed boolean/cell contradicts the number speculation: bail out.
1839 case DataFormatBoolean:
1840 case DataFormatCell:
1841 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1842 return GeneratedOperandTypeUnknown;
1844 case DataFormatNone:
1845 case DataFormatJSCell:
1847 case DataFormatJSBoolean:
1848 case DataFormatJSDouble:
1849 return GeneratedOperandJSValue;
1851 case DataFormatJSInt32:
1852 case DataFormatInt32:
1853 return GeneratedOperandInteger;
1856 RELEASE_ASSERT_NOT_REACHED();
1857 return GeneratedOperandTypeUnknown;
// ValueToInt32: convert the child to an int32 per the ECMAScript ToInt32
// semantics supported by its use kind. Int52 truncates; DoubleRep truncates
// with a slow-path call for non-truncatable values; NumberUse/NotCellUse on a
// generic JSValue dispatches on the current data format. The 64-bit and
// 32-bit JSValue encodings each have their own GeneratedOperandJSValue body
// (the separating #if/#else lines are elided from this listing).
1861 void SpeculativeJIT::compileValueToInt32(Node* node)
1863 switch (node->child1().useKind()) {
// Int52: truncation to int32 is just taking the low 32 bits.
1866 SpeculateStrictInt52Operand op1(this, node->child1());
1867 GPRTemporary result(this, Reuse, op1);
1868 GPRReg op1GPR = op1.gpr();
1869 GPRReg resultGPR = result.gpr();
1870 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1871 int32Result(resultGPR, node, DataFormatInt32);
1874 #endif // USE(JSVALUE64)
1876 case DoubleRepUse: {
1877 GPRTemporary result(this);
1878 SpeculateDoubleOperand op1(this, node->child1());
1879 FPRReg fpr = op1.fpr();
1880 GPRReg gpr = result.gpr();
// Try a direct hardware truncation; values it cannot represent exactly fall
// back to the C toInt32 helper on the slow path.
1881 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1883 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1885 int32Result(gpr, node);
// Generic JSValue cases: dispatch on how the operand is currently generated.
1891 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1892 case GeneratedOperandInteger: {
1893 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1894 GPRTemporary result(this, Reuse, op1);
1895 m_jit.move(op1.gpr(), result.gpr());
1896 int32Result(result.gpr(), node, op1.format());
1899 case GeneratedOperandJSValue: {
1900 GPRTemporary result(this);
// --- 64-bit encoding ---
1902 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1904 GPRReg gpr = op1.gpr();
1905 GPRReg resultGpr = result.gpr();
1906 FPRTemporary tempFpr(this);
1907 FPRReg fpr = tempFpr.fpr();
// Values at/above tagTypeNumber are boxed int32s.
1909 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1910 JITCompiler::JumpList converted;
// NumberUse: speculate the value is a number; otherwise (NotCellUse) handle
// booleans/undefined/null inline and speculate only not-cell.
1912 if (node->child1().useKind() == NumberUse) {
1914 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1916 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1918 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1921 JSValueRegs(gpr), node->child1(), ~SpecCell, branchIsCell(JSValueRegs(gpr)));
1923 // It's not a cell: so true turns into 1 and all else turns into 0.
1924 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1925 converted.append(m_jit.jump());
1927 isNumber.link(&m_jit);
1930 // First, if we get here we have a double encoded as a JSValue
1931 m_jit.move(gpr, resultGpr);
1932 unboxDouble(resultGpr, fpr);
// Call the C toInt32 helper, preserving live registers around the call.
1934 silentSpillAllRegisters(resultGpr);
1935 callOperation(toInt32, resultGpr, fpr);
1936 silentFillAllRegisters(resultGpr);
1938 converted.append(m_jit.jump());
1940 isInteger.link(&m_jit);
1941 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1943 converted.link(&m_jit);
// --- 32-bit encoding ---
1945 Node* childNode = node->child1().node();
1946 VirtualRegister virtualRegister = childNode->virtualRegister();
1947 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1949 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1951 GPRReg payloadGPR = op1.payloadGPR();
1952 GPRReg resultGpr = result.gpr();
1954 JITCompiler::JumpList converted;
// Already known to be a boxed int32: the payload is the answer.
1956 if (info.registerFormat() == DataFormatJSInt32)
1957 m_jit.move(payloadGPR, resultGpr);
1959 GPRReg tagGPR = op1.tagGPR();
1960 FPRTemporary tempFpr(this);
1961 FPRReg fpr = tempFpr.fpr();
1962 FPRTemporary scratch(this);
1964 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
1966 if (node->child1().useKind() == NumberUse) {
1968 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
1970 MacroAssembler::AboveOrEqual, tagGPR,
1971 TrustedImm32(JSValue::LowestTag)));
// Tags below LowestTag are the high word of a double.
1973 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
1976 op1.jsValueRegs(), node->child1(), ~SpecCell,
1977 branchIsCell(op1.jsValueRegs()));
1979 // It's not a cell: so true turns into 1 and all else turns into 0.
1980 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
1981 m_jit.move(TrustedImm32(0), resultGpr);
1982 converted.append(m_jit.jump());
1984 isBoolean.link(&m_jit);
// Boolean payload is already 0 or 1.
1985 m_jit.move(payloadGPR, resultGpr);
1986 converted.append(m_jit.jump());
1988 isNumber.link(&m_jit);
1991 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
1993 silentSpillAllRegisters(resultGpr);
1994 callOperation(toInt32, resultGpr, fpr);
1995 silentFillAllRegisters(resultGpr);
1997 converted.append(m_jit.jump());
1999 isInteger.link(&m_jit);
2000 m_jit.move(payloadGPR, resultGpr);
2002 converted.link(&m_jit);
2005 int32Result(resultGpr, node);
// Contradiction already detected; code generation was terminated.
2008 case GeneratedOperandTypeUnknown:
2009 RELEASE_ASSERT(!m_compileOkay);
2012 RELEASE_ASSERT_NOT_REACHED();
2017 ASSERT(!m_compileOkay);
// UInt32ToNumber: reinterpret an int32 as an unsigned value. If the node may
// overflow int32 range, always produce a double (adding 2^32 to negative
// inputs); otherwise speculate the value is non-negative and keep it int32.
2022 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2024 if (doesOverflow(node->arithMode())) {
2025 // We know that this sometimes produces doubles. So produce a double every
2026 // time. This at least allows subsequent code to not have weird conditionals.
2028 SpeculateInt32Operand op1(this, node->child1());
2029 FPRTemporary result(this);
2031 GPRReg inputGPR = op1.gpr();
2032 FPRReg outputFPR = result.fpr();
2034 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
// A negative int32 bit-pattern represents unsigned value + 2^32.
2036 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2037 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2038 positive.link(&m_jit);
2040 doubleResult(outputFPR, node);
2044 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2046 SpeculateInt32Operand op1(this, node->child1());
2047 GPRTemporary result(this);
2049 m_jit.move(op1.gpr(), result.gpr());
// OSR-exit if the value is negative (would not fit as an unsigned in int32).
2051 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2053 int32Result(result.gpr(), node, op1.format());
// DoubleAsInt32: convert a double to int32, OSR-exiting if the conversion is
// lossy (and, per arith mode, if the input is negative zero).
2056 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2058 SpeculateDoubleOperand op1(this, node->child1());
2059 FPRTemporary scratch(this);
2060 GPRTemporary result(this);
2062 FPRReg valueFPR = op1.fpr();
2063 FPRReg scratchFPR = scratch.fpr();
2064 GPRReg resultGPR = result.gpr();
2066 JITCompiler::JumpList failureCases;
// This node only exists in overflow-checking mode.
2067 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2068 m_jit.branchConvertDoubleToInt32(
2069 valueFPR, resultGPR, failureCases, scratchFPR,
2070 shouldCheckNegativeZero(node->arithMode()));
2071 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2073 int32Result(resultGPR, node);
// DoubleRep: produce an unboxed double from the child. A JSValue child is
// speculated to be a number and unboxed (or int32-converted); an Int52 child
// is converted directly. The 64-bit and 32-bit JSValue unboxing sequences are
// both present (the separating #if/#else is visible at line "2116").
2076 void SpeculativeJIT::compileDoubleRep(Node* node)
2078 switch (node->child1().useKind()) {
2080 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
// If abstract interpretation proves the value is an int32, skip the tag
// checks and just convert.
2082 if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2083 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2084 FPRTemporary result(this);
2085 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2086 doubleResult(result.fpr(), node);
2090 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2091 FPRTemporary result(this);
// --- 64-bit encoding ---
2094 GPRTemporary temp(this);
2096 GPRReg op1GPR = op1.gpr();
2097 GPRReg tempGPR = temp.gpr();
2098 FPRReg resultFPR = result.fpr();
// Values at/above tagTypeNumber are boxed int32s.
2100 JITCompiler::Jump isInteger = m_jit.branch64(
2101 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2103 if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2105 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2106 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2109 m_jit.move(op1GPR, tempGPR);
2110 unboxDouble(tempGPR, resultFPR);
2111 JITCompiler::Jump done = m_jit.jump();
2113 isInteger.link(&m_jit);
2114 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2116 #else // USE(JSVALUE64) -> this is the 32_64 case
2117 FPRTemporary temp(this);
2119 GPRReg op1TagGPR = op1.tagGPR();
2120 GPRReg op1PayloadGPR = op1.payloadGPR();
2121 FPRReg tempFPR = temp.fpr();
2122 FPRReg resultFPR = result.fpr();
2124 JITCompiler::Jump isInteger = m_jit.branch32(
2125 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
// Tags at/above LowestTag are non-double, non-int32 values: not a number.
2127 if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2129 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2130 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2133 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2134 JITCompiler::Jump done = m_jit.jump();
2136 isInteger.link(&m_jit);
2137 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2139 #endif // USE(JSVALUE64)
2141 doubleResult(resultFPR, node);
// Int52 (64-bit only; the guarding #if is elided): direct int64->double.
2147 SpeculateStrictInt52Operand value(this, node->child1());
2148 FPRTemporary result(this);
2150 GPRReg valueGPR = value.gpr();
2151 FPRReg resultFPR = result.fpr();
2153 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2155 doubleResult(resultFPR, node);
2158 #endif // USE(JSVALUE64)
2161 RELEASE_ASSERT_NOT_REACHED();
// Boxes an unboxed representation (double or Int52) back into a JSValue.
// NOTE(review): case labels/braces elided in this view.
2166 void SpeculativeJIT::compileValueRep(Node* node)
2168 switch (node->child1().useKind()) {
2169 case DoubleRepUse: {
2170 SpeculateDoubleOperand value(this, node->child1());
2171 JSValueRegsTemporary result(this);
2173 FPRReg valueFPR = value.fpr();
2174 JSValueRegs resultRegs = result.regs();
2176 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2177 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2178 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2179 // local was purified.
// Canonicalize any impure NaN bit pattern before boxing so it cannot be
// mistaken for a tagged value in the JSVALUE64 encoding.
2180 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2181 m_jit.purifyNaN(valueFPR);
2183 boxDouble(valueFPR, resultRegs);
2185 jsValueResult(resultRegs, node);
// Int52 case (64-bit only): box from the strict (unshifted) representation.
2191 SpeculateStrictInt52Operand value(this, node->child1());
2192 GPRTemporary result(this);
2194 GPRReg valueGPR = value.gpr();
2195 GPRReg resultGPR = result.gpr();
2197 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2199 jsValueResult(resultGPR, node);
2202 #endif // USE(JSVALUE64)
2205 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the Uint8Clamped range [0, 255]; presumably mirrors the
// JIT-emitted compileClampDoubleToByte below (body elided in this view —
// TODO confirm rounding/NaN handling against it).
2210 static double clampDoubleToByte(double d)
// Emits code that clamps the int32 in `result` into [0, 255] in place, for
// Uint8ClampedArray stores. NOTE(review): some label/brace lines elided.
2220 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
// Unsigned BelowOrEqual 0xff catches 0..255; the signed GreaterThan then
// separates "too big" from "negative" among the remaining values.
2222 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2223 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
// Negative: clamp to 0 (xor is a compact zeroing idiom).
2224 jit.xorPtr(result, result);
2225 MacroAssembler::Jump clamped = jit.jump();
// Too big: clamp to 255.
2227 jit.move(JITCompiler::TrustedImm32(255), result);
2229 inBounds.link(&jit);
// Emits code that clamps the double in `source` to an integer in [0, 255],
// leaving it in `result`. NaN clamps to 0; rounding is round-half-up via
// adding 0.5 before truncation. NOTE(review): some label lines elided.
2232 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2234 // Unordered compare so we pick up NaN
2235 static const double zero = 0;
2236 static const double byteMax = 255;
2237 static const double half = 0.5;
2238 jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
// <= 0 or NaN (unordered) -> clamp to 0.
2239 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2240 jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
// > 255 -> clamp to 255.
2241 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
// In range: add 0.5 then truncate, giving round-to-nearest behavior.
2243 jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2244 // FIXME: This should probably just use a floating point round!
2245 // https://bugs.webkit.org/show_bug.cgi?id=72054
2246 jit.addDouble(source, scratch);
2247 jit.truncateDoubleToInt32(scratch, result);
2248 MacroAssembler::Jump truncatedInt = jit.jump();
2250 tooSmall.link(&jit);
2251 jit.xorPtr(result, result);
2252 MacroAssembler::Jump zeroed = jit.jump();
2255 jit.move(JITCompiler::TrustedImm32(255), result);
2257 truncatedInt.link(&jit);
// Returns a jump taken when `indexGPR` is out of bounds for the typed array in
// `baseGPR`, or an unset Jump when the check can be statically elided.
2262 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
// PutByValAlias means a prior access already proved the index in bounds.
2264 if (node->op() == PutByValAlias)
2265 return JITCompiler::Jump();
// If the view and its length can be constant-folded, compare against the
// known length — and skip the check entirely for a provably-in-bounds
// constant index.
2266 if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2267 uint32_t length = view->length();
2268 Node* indexNode = m_jit.graph().child(node, 1).node();
2269 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2270 return JITCompiler::Jump();
2271 return m_jit.branch32(
2272 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
// General case: unsigned compare against the view's length field; AboveOrEqual
// also catches negative indices reinterpreted as large unsigned values.
2274 return m_jit.branch32(
2275 MacroAssembler::AboveOrEqual, indexGPR,
2276 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a speculation check (OSR exit on OutOfBounds) for a typed array
// access; a no-op when the bounds check was statically elided.
2279 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2281 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2284 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// Loads an element from an integer typed array (Int8/Uint8/Int16/Uint16/
// Int32/Uint32 and clamped variants). The only tricky case is Uint32: values
// >= 2^31 don't fit in int32, so the result may be produced as Int32 (with a
// speculation check), Int52, or double depending on prediction.
// NOTE(review): case labels/braces elided in this view.
2287 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2289 ASSERT(isInt(type));
2291 SpeculateCellOperand base(this, node->child1());
2292 SpeculateStrictInt32Operand property(this, node->child2());
2293 StorageOperand storage(this, node->child3());
2295 GPRReg baseReg = base.gpr();
2296 GPRReg propertyReg = property.gpr();
2297 GPRReg storageReg = storage.gpr();
2299 GPRTemporary result(this);
2300 GPRReg resultReg = result.gpr();
2302 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2304 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
// Dispatch on element width; signedness picks sign- vs zero-extending loads.
2305 switch (elementSize(type)) {
2308 m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2310 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2314 m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2316 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2319 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Everything except Uint32 always fits in an int32.
2324 if (elementSize(type) < 4 || isSigned(type)) {
2325 int32Result(resultReg, node);
2329 ASSERT(elementSize(type) == 4 && !isSigned(type));
// Uint32 speculated as Int32: deopt if the top bit is set (value >= 2^31).
2330 if (node->shouldSpeculateInt32()) {
2331 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2332 int32Result(resultReg, node);
// Uint32 as Int52: zero-extend so the full unsigned range is representable.
2337 if (node->shouldSpeculateMachineInt()) {
2338 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2339 strictInt52Result(resultReg, node);
// Fallback: produce a double. The int32->double conversion treats the value
// as signed, so add 2^32 to correct negative results back to the unsigned
// value.
2344 FPRTemporary fresult(this);
2345 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2346 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2347 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2348 positive.link(&m_jit);
2349 doubleResult(fresult.fpr(), node);
// Stores a value into an integer typed array. First materializes the value as
// an int32 in `valueGPR` (constant-folding, clamping for Uint8Clamped, and
// converting from Int32/Int52/double sources as needed), then performs the
// bounds-checked store. NOTE(review): case labels/braces elided in this view.
2352 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2354 ASSERT(isInt(type));
2356 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2357 GPRReg storageReg = storage.gpr();
2359 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2362 GPRReg valueGPR = InvalidGPRReg;
// Constant value: fold the clamp/conversion at compile time.
2364 if (valueUse->isConstant()) {
2365 JSValue jsValue = valueUse->asJSValue();
// A non-number constant store is an unconditional deopt.
2366 if (!jsValue.isNumber()) {
2367 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2371 double d = jsValue.asNumber();
2372 if (isClamped(type)) {
2373 ASSERT(elementSize(type) == 1);
2374 d = clampDoubleToByte(d);
2376 GPRTemporary scratch(this);
2377 GPRReg scratchReg = scratch.gpr();
2378 m_jit.move(Imm32(toInt32(d)), scratchReg);
2379 value.adopt(scratch);
2380 valueGPR = scratchReg;
2382 switch (valueUse.useKind()) {
// Int32 source: copy, then clamp in place for Uint8Clamped.
2384 SpeculateInt32Operand valueOp(this, valueUse);
2385 GPRTemporary scratch(this);
2386 GPRReg scratchReg = scratch.gpr();
2387 m_jit.move(valueOp.gpr(), scratchReg);
2388 if (isClamped(type)) {
2389 ASSERT(elementSize(type) == 1);
2390 compileClampIntegerToByte(m_jit, scratchReg);
2392 value.adopt(scratch);
2393 valueGPR = scratchReg;
// Int52 source (64-bit only): 64-bit clamp, same shape as the 32-bit version.
2399 SpeculateStrictInt52Operand valueOp(this, valueUse);
2400 GPRTemporary scratch(this);
2401 GPRReg scratchReg = scratch.gpr();
2402 m_jit.move(valueOp.gpr(), scratchReg);
2403 if (isClamped(type)) {
2404 ASSERT(elementSize(type) == 1);
2405 MacroAssembler::Jump inBounds = m_jit.branch64(
2406 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2407 MacroAssembler::Jump tooBig = m_jit.branch64(
2408 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2409 m_jit.move(TrustedImm32(0), scratchReg);
2410 MacroAssembler::Jump clamped = m_jit.jump();
2411 tooBig.link(&m_jit);
2412 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2413 clamped.link(&m_jit);
2414 inBounds.link(&m_jit);
2416 value.adopt(scratch);
2417 valueGPR = scratchReg;
2420 #endif // USE(JSVALUE64)
2422 case DoubleRepUse: {
// Double source, clamped: use the dedicated clamp helper.
2423 if (isClamped(type)) {
2424 ASSERT(elementSize(type) == 1);
2425 SpeculateDoubleOperand valueOp(this, valueUse);
2426 GPRTemporary result(this);
2427 FPRTemporary floatScratch(this);
2428 FPRReg fpr = valueOp.fpr();
2429 GPRReg gpr = result.gpr();
2430 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2431 value.adopt(result);
// Double source, unclamped: ToInt32 semantics. NaN maps to 0 inline; any
// truncation failure falls back to the toInt32 slow path call.
2434 SpeculateDoubleOperand valueOp(this, valueUse);
2435 GPRTemporary result(this);
2436 FPRReg fpr = valueOp.fpr();
2437 GPRReg gpr = result.gpr();
2438 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2439 m_jit.xorPtr(gpr, gpr);
2440 MacroAssembler::Jump fixed = m_jit.jump();
2441 notNaN.link(&m_jit);
2443 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2444 fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2446 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2449 value.adopt(result);
2456 RELEASE_ASSERT_NOT_REACHED();
// The store proper. The temporaries above were allocated to be distinct from
// the base/property/storage registers.
2461 ASSERT_UNUSED(valueGPR, valueGPR != property);
2462 ASSERT(valueGPR != base);
2463 ASSERT(valueGPR != storageReg);
2464 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
// In-bounds array modes deopt on OOB; otherwise the store is simply skipped
// by linking the jump past it (see the link at the end).
2465 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2466 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2467 outOfBounds = MacroAssembler::Jump();
2470 switch (elementSize(type)) {
2472 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2475 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2478 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2483 if (outOfBounds.isSet())
2484 outOfBounds.link(&m_jit);
// Loads an element from a Float32Array or Float64Array, always producing a
// double. NOTE(review): case labels/braces elided in this view.
2488 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2490 ASSERT(isFloat(type));
2492 SpeculateCellOperand base(this, node->child1());
2493 SpeculateStrictInt32Operand property(this, node->child2());
2494 StorageOperand storage(this, node->child3());
2496 GPRReg baseReg = base.gpr();
2497 GPRReg propertyReg = property.gpr();
2498 GPRReg storageReg = storage.gpr();
2500 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2502 FPRTemporary result(this);
2503 FPRReg resultReg = result.fpr();
2504 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2505 switch (elementSize(type)) {
// Float32: load then widen to double, since DFG doubles are the currency.
2507 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2508 m_jit.convertFloatToDouble(resultReg, resultReg);
2511 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2515 RELEASE_ASSERT_NOT_REACHED();
2518 doubleResult(resultReg, node);
// Stores a double into a Float32Array or Float64Array, narrowing to float for
// the 4-byte case. NOTE(review): case labels/braces elided in this view.
2521 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2523 ASSERT(isFloat(type));
2525 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2526 GPRReg storageReg = storage.gpr();
2528 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2529 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2531 SpeculateDoubleOperand valueOp(this, valueUse);
2532 FPRTemporary scratch(this);
2533 FPRReg valueFPR = valueOp.fpr();
2534 FPRReg scratchFPR = scratch.fpr();
2536 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
// Same OOB protocol as the integer store: deopt for in-bounds modes,
// otherwise jump over the store (linked at the end).
2538 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2539 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2540 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2541 outOfBounds = MacroAssembler::Jump();
2544 switch (elementSize(type)) {
// Float32: narrow into the scratch FPR so the source double is preserved.
2546 m_jit.moveDouble(valueFPR, scratchFPR);
2547 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2548 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2552 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2555 RELEASE_ASSERT_NOT_REACHED();
2557 if (outOfBounds.isSet())
2558 outOfBounds.link(&m_jit);
2562 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2564 // Check that prototype is an object.
2565 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfCellNotObject(prototypeReg));
2567 // Initialize scratchReg with the value being checked.
2568 m_jit.move(valueReg, scratchReg);
2570 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2571 MacroAssembler::Label loop(&m_jit);
2572 m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2573 m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2574 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2576 branchIsCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2578 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2581 // No match - result is false.
2583 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2585 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2587 MacroAssembler::Jump putResult = m_jit.jump();
2589 isInstance.link(&m_jit);
2591 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2593 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2596 putResult.link(&m_jit);
// Compiles InstanceOf. Two paths: an untyped path that first tests for cell
// (non-cells are never instances, so the result is false without walking),
// and a cell-speculated path that goes straight to the chain walk.
// NOTE(review): braces/blank lines elided in this view.
2599 void SpeculativeJIT::compileInstanceOf(Node* node)
2601 if (node->child1().useKind() == UntypedUse) {
2602 // It might not be a cell. Speculate less aggressively.
2603 // Or: it might only be used once (i.e. by us), so we get zero benefit
2604 // from speculating any more aggressively than we absolutely need to.
2606 JSValueOperand value(this, node->child1());
2607 SpeculateCellOperand prototype(this, node->child2());
2608 GPRTemporary scratch(this);
2609 GPRTemporary scratch2(this);
2611 GPRReg prototypeReg = prototype.gpr();
2612 GPRReg scratchReg = scratch.gpr();
2613 GPRReg scratch2Reg = scratch2.gpr();
2615 MacroAssembler::Jump isCell = branchIsCell(value.jsValueRegs());
2616 GPRReg valueReg = value.jsValueRegs().payloadGPR();
// Non-cell: answer is false; skip the chain walk entirely.
2617 moveFalseTo(scratchReg);
2619 MacroAssembler::Jump done = m_jit.jump();
2621 isCell.link(&m_jit);
2623 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2627 blessedBooleanResult(scratchReg, node);
// Cell-speculated path: both operands are known cells.
2631 SpeculateCellOperand value(this, node->child1());
2632 SpeculateCellOperand prototype(this, node->child2());
2634 GPRTemporary scratch(this);
2635 GPRTemporary scratch2(this);
2637 GPRReg valueReg = value.gpr();
2638 GPRReg prototypeReg = prototype.gpr();
2639 GPRReg scratchReg = scratch.gpr();
2640 GPRReg scratch2Reg = scratch2.gpr();
2642 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2644 blessedBooleanResult(scratchReg, node);
// Compiles ValueAdd/ArithAdd by binary use kind: Int32 (with constant-operand
// fast paths and optional overflow checks), Int52 (64-bit only), and double.
// NOTE(review): case labels/else lines elided in this view.
2647 void SpeculativeJIT::compileAdd(Node* node)
2649 switch (node->binaryUseKind()) {
2651 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
// Constant left operand: fold it into an immediate add.
2653 if (node->child1()->isInt32Constant()) {
2654 int32_t imm1 = node->child1()->asInt32();
2655 SpeculateInt32Operand op2(this, node->child2());
2656 GPRTemporary result(this);
2658 if (!shouldCheckOverflow(node->arithMode())) {
2659 m_jit.move(op2.gpr(), result.gpr());
2660 m_jit.add32(Imm32(imm1), result.gpr());
2662 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2664 int32Result(result.gpr(), node);
// Constant right operand: symmetric to the case above.
2668 if (node->child2()->isInt32Constant()) {
2669 SpeculateInt32Operand op1(this, node->child1());
2670 int32_t imm2 = node->child2()->asInt32();
2671 GPRTemporary result(this);
2673 if (!shouldCheckOverflow(node->arithMode())) {
2674 m_jit.move(op1.gpr(), result.gpr());
2675 m_jit.add32(Imm32(imm2), result.gpr());
2677 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2679 int32Result(result.gpr(), node);
// General Int32 case; result may reuse either operand's register.
2683 SpeculateInt32Operand op1(this, node->child1());
2684 SpeculateInt32Operand op2(this, node->child2());
2685 GPRTemporary result(this, Reuse, op1, op2);
2687 GPRReg gpr1 = op1.gpr();
2688 GPRReg gpr2 = op2.gpr();
2689 GPRReg gprResult = result.gpr();
2691 if (!shouldCheckOverflow(node->arithMode())) {
2692 if (gpr1 == gprResult)
2693 m_jit.add32(gpr2, gprResult);
2695 m_jit.move(gpr2, gprResult);
2696 m_jit.add32(gpr1, gprResult);
2699 MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
// When the result clobbered an operand, record a recovery so OSR exit can
// reconstruct the original operand value by undoing the add.
2701 if (gpr1 == gprResult)
2702 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2703 else if (gpr2 == gprResult)
2704 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2706 speculationCheck(Overflow, JSValueRegs(), 0, check);
2709 int32Result(gprResult, node);
// Int52 case (64-bit only).
2715 ASSERT(shouldCheckOverflow(node->arithMode()));
2716 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2718 // Will we need an overflow check? If we can prove that neither input can be
2719 // Int52 then the overflow check will not be necessary.
2720 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2721 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2722 SpeculateWhicheverInt52Operand op1(this, node->child1());
2723 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2724 GPRTemporary result(this, Reuse, op1);
2725 m_jit.move(op1.gpr(), result.gpr());
2726 m_jit.add64(op2.gpr(), result.gpr());
2727 int52Result(result.gpr(), node, op1.format());
// Otherwise do a checked 64-bit add and deopt on Int52 overflow.
2731 SpeculateInt52Operand op1(this, node->child1());
2732 SpeculateInt52Operand op2(this, node->child2());
2733 GPRTemporary result(this);
2734 m_jit.move(op1.gpr(), result.gpr());
2736 Int52Overflow, JSValueRegs(), 0,
2737 m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2738 int52Result(result.gpr(), node);
2741 #endif // USE(JSVALUE64)
2743 case DoubleRepUse: {
2744 SpeculateDoubleOperand op1(this, node->child1());
2745 SpeculateDoubleOperand op2(this, node->child2());
2746 FPRTemporary result(this, op1, op2);
2748 FPRReg reg1 = op1.fpr();
2749 FPRReg reg2 = op2.fpr();
2750 m_jit.addDouble(reg1, reg2, result.fpr());
2752 doubleResult(result.fpr(), node);
2757 RELEASE_ASSERT_NOT_REACHED();
// Inline-allocates a JSRopeString from 2 or 3 fiber strings, computing the
// combined flags (8-bit-ness) and length, with a slow-path call on allocation
// failure or length overflow. NOTE(review): some declaration/brace lines
// (e.g. the opGPRs array and numOpGPRs) elided in this view.
2762 void SpeculativeJIT::compileMakeRope(Node* node)
2764 ASSERT(node->child1().useKind() == KnownStringUse);
2765 ASSERT(node->child2().useKind() == KnownStringUse);
2766 ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2768 SpeculateCellOperand op1(this, node->child1());
2769 SpeculateCellOperand op2(this, node->child2());
2770 SpeculateCellOperand op3(this, node->child3());
2771 GPRTemporary result(this);
2772 GPRTemporary allocator(this);
2773 GPRTemporary scratch(this);
2777 opGPRs[0] = op1.gpr();
2778 opGPRs[1] = op2.gpr();
2779 if (node->child3()) {
2780 opGPRs[2] = op3.gpr();
2783 opGPRs[2] = InvalidGPRReg;
2786 GPRReg resultGPR = result.gpr();
2787 GPRReg allocatorGPR = allocator.gpr();
2788 GPRReg scratchGPR = scratch.gpr();
// Inline cell allocation; any failure jumps to slowPath, which calls the
// operationMakeRope* slow paths registered below.
2790 JITCompiler::JumpList slowPath;
2791 MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
2792 m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2793 emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
// Null value pointer marks the string as a rope; unused fiber slots are null.
2795 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2796 for (unsigned i = 0; i < numOpGPRs; ++i)
2797 m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2798 for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2799 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
// Accumulate flags (ANDed: rope is 8-bit only if all fibers are) in
// scratchGPR and total length in allocatorGPR, which is free after allocation.
2800 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2801 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2802 if (!ASSERT_DISABLED) {
2803 JITCompiler::Jump ok = m_jit.branch32(
2804 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2805 m_jit.abortWithReason(DFGNegativeStringLength);
2808 for (unsigned i = 1; i < numOpGPRs; ++i) {
2809 m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
// Total length overflowing int32 is an unrecoverable speculation failure.
2811 Uncountable, JSValueSource(), nullptr,
2813 JITCompiler::Overflow,
2814 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2816 m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2817 m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2818 if (!ASSERT_DISABLED) {
2819 JITCompiler::Jump ok = m_jit.branch32(
2820 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2821 m_jit.abortWithReason(DFGNegativeStringLength);
2824 m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2826 switch (numOpGPRs) {
2828 addSlowPathGenerator(slowPathCall(
2829 slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2832 addSlowPathGenerator(slowPathCall(
2833 slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2836 RELEASE_ASSERT_NOT_REACHED();
2840 cellResult(resultGPR, node);
// Compiles ArithSub by binary use kind: Int32 (with constant-operand fast
// paths), Int52 (64-bit only), and double. Mirrors compileAdd's structure.
// NOTE(review): case labels/else lines elided in this view.
2843 void SpeculativeJIT::compileArithSub(Node* node)
2845 switch (node->binaryUseKind()) {
2847 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
// Constant right operand: op1 - imm2.
2849 if (node->child2()->isNumberConstant()) {
2850 SpeculateInt32Operand op1(this, node->child1());
2851 int32_t imm2 = node->child2()->asInt32();
2852 GPRTemporary result(this);
2854 if (!shouldCheckOverflow(node->arithMode())) {
2855 m_jit.move(op1.gpr(), result.gpr());
2856 m_jit.sub32(Imm32(imm2), result.gpr());
2858 GPRTemporary scratch(this);
2859 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2862 int32Result(result.gpr(), node);
// Constant left operand: materialize imm1 then subtract op2 from it.
2866 if (node->child1()->isNumberConstant()) {
2867 int32_t imm1 = node->child1()->asInt32();
2868 SpeculateInt32Operand op2(this, node->child2());
2869 GPRTemporary result(this);
2871 m_jit.move(Imm32(imm1), result.gpr());
2872 if (!shouldCheckOverflow(node->arithMode()))
2873 m_jit.sub32(op2.gpr(), result.gpr());
2875 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2877 int32Result(result.gpr(), node);
// General Int32 case.
2881 SpeculateInt32Operand op1(this, node->child1());
2882 SpeculateInt32Operand op2(this, node->child2());
2883 GPRTemporary result(this);
2885 if (!shouldCheckOverflow(node->arithMode())) {
2886 m_jit.move(op1.gpr(), result.gpr());
2887 m_jit.sub32(op2.gpr(), result.gpr());
2889 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2891 int32Result(result.gpr(), node);
// Int52 case (64-bit only).
2897 ASSERT(shouldCheckOverflow(node->arithMode()));
2898 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2900 // Will we need an overflow check? If we can prove that neither input can be
2901 // Int52 then the overflow check will not be necessary.
2902 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2903 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2904 SpeculateWhicheverInt52Operand op1(this, node->child1());
2905 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2906 GPRTemporary result(this, Reuse, op1);
2907 m_jit.move(op1.gpr(), result.gpr());
2908 m_jit.sub64(op2.gpr(), result.gpr());
2909 int52Result(result.gpr(), node, op1.format());
// Otherwise do a checked 64-bit subtract and deopt on Int52 overflow.
2913 SpeculateInt52Operand op1(this, node->child1());
2914 SpeculateInt52Operand op2(this, node->child2());
2915 GPRTemporary result(this);
2916 m_jit.move(op1.gpr(), result.gpr());
2918 Int52Overflow, JSValueRegs(), 0,
2919 m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2920 int52Result(result.gpr(), node);
2923 #endif // USE(JSVALUE64)
2925 case DoubleRepUse: {
2926 SpeculateDoubleOperand op1(this, node->child1());
2927 SpeculateDoubleOperand op2(this, node->child2());
2928 FPRTemporary result(this, op1);
2930 FPRReg reg1 = op1.fpr();
2931 FPRReg reg2 = op2.fpr();
2932 m_jit.subDouble(reg1, reg2, result.fpr());
2934 doubleResult(result.fpr(), node);
2939 RELEASE_ASSERT_NOT_REACHED();
// Compiles ArithNegate for Int32, Int52 (64-bit only), and double use kinds,
// handling overflow (negating INT_MIN) and negative zero per the arith mode.
// NOTE(review): case labels/else lines elided in this view.
2944 void SpeculativeJIT::compileArithNegate(Node* node)
2946 switch (node->child1().useKind()) {
2948 SpeculateInt32Operand op1(this, node->child1());
2949 GPRTemporary result(this);
2951 m_jit.move(op1.gpr(), result.gpr());
2953 // Note: there is no notion of being not used as a number, but someone
2954 // caring about negative zero.
2956 if (!shouldCheckOverflow(node->arithMode()))
2957 m_jit.neg32(result.gpr());
// Overflow-only check: branchNeg32 catches negating INT_MIN.
2958 else if (!shouldCheckNegativeZero(node->arithMode()))
2959 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
// Overflow + negative zero: a value with no bits in 0x7fffffff is either 0
// (would negate to -0) or INT_MIN (would overflow) — deopt on both at once.
2961 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
2962 m_jit.neg32(result.gpr());
2965 int32Result(result.gpr(), node);
// Int52 case (64-bit only).
2971 ASSERT(shouldCheckOverflow(node->arithMode()));
// If the input provably fits below Int52 range, a plain neg64 cannot
// overflow; only the negative-zero check may be needed.
2973 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
2974 SpeculateWhicheverInt52Operand op1(this, node->child1());
2975 GPRTemporary result(this);
2976 GPRReg op1GPR = op1.gpr();
2977 GPRReg resultGPR = result.gpr();
2978 m_jit.move(op1GPR, resultGPR);
2979 m_jit.neg64(resultGPR);
2980 if (shouldCheckNegativeZero(node->arithMode())) {
2982 NegativeZero, JSValueRegs(), 0,
2983 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
2985 int52Result(resultGPR, node, op1.format());
// Otherwise a checked negate with an Int52Overflow deopt.
2989 SpeculateInt52Operand op1(this, node->child1());
2990 GPRTemporary result(this);
2991 GPRReg op1GPR = op1.gpr();
2992 GPRReg resultGPR = result.gpr();
2993 m_jit.move(op1GPR, resultGPR);
2995 Int52Overflow, JSValueRegs(), 0,
2996 m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
2997 if (shouldCheckNegativeZero(node->arithMode())) {
2999 NegativeZero, JSValueRegs(), 0,
3000 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3002 int52Result(resultGPR, node);
3005 #endif // USE(JSVALUE64)
// Double case: negation never needs checks.
3007 case DoubleRepUse: {
3008 SpeculateDoubleOperand op1(this, node->child1());
3009 FPRTemporary result(this);
3011 m_jit.negateDouble(op1.fpr(), result.fpr());
3013 doubleResult(result.fpr(), node);
3018 RELEASE_ASSERT_NOT_REACHED();
// Compiles ArithMul for Int32, Int52 (64-bit only), and double use kinds.
// Integer paths add negative-zero checks when required: 0 * negative (or
// negative * 0) must produce -0, which ints cannot represent, so we deopt.
// NOTE(review): case labels/else lines elided in this view.
3022 void SpeculativeJIT::compileArithMul(Node* node)
3024 switch (node->binaryUseKind()) {
3026 SpeculateInt32Operand op1(this, node->child1());
3027 SpeculateInt32Operand op2(this, node->child2());
3028 GPRTemporary result(this);
3030 GPRReg reg1 = op1.gpr();
3031 GPRReg reg2 = op2.gpr();
3033 // We can perform truncated multiplications if we get to this point, because if the
3034 // fixup phase could not prove that it would be safe, it would have turned us into
3035 // a double multiplication.
3036 if (!shouldCheckOverflow(node->arithMode())) {
3037 m_jit.move(reg1, result.gpr());
3038 m_jit.mul32(reg2, result.gpr());
3041 Overflow, JSValueRegs(), 0,
3042 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3045 // Check for negative zero, if the users of this node care about such things.
// A zero result with either operand negative means the true result is -0.
3046 if (shouldCheckNegativeZero(node->arithMode())) {
3047 MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3048 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3049 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3050 resultNonZero.link(&m_jit);
3053 int32Result(result.gpr(), node);
// Int52 case (64-bit only).
3059 ASSERT(shouldCheckOverflow(node->arithMode()));
3061 // This is super clever. We want to do an int52 multiplication and check the
3062 // int52 overflow bit. There is no direct hardware support for this, but we do
3063 // have the ability to do an int64 multiplication and check the int64 overflow
3064 // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3065 // registers, with the high 12 bits being sign-extended. We can do:
3069 // This will give us a left-shifted int52 (value is in high 52 bits, low 16
3070 // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3071 // multiplication overflows is identical to whether the 'a * b' 52-bit
3072 // multiplication overflows.
3074 // In our nomenclature, this is:
3076 // strictInt52(a) * int52(b) => int52
3078 // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
3081 // We don't care which of op1 or op2 serves as the left-shifted operand, so
3082 // we just do whatever is more convenient for op1 and have op2 do the
3083 // opposite. This ensures that we do at most one shift.
3085 SpeculateWhicheverInt52Operand op1(this, node->child1());
3086 SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3087 GPRTemporary result(this);
3089 GPRReg op1GPR = op1.gpr();
3090 GPRReg op2GPR = op2.gpr();
3091 GPRReg resultGPR = result.gpr();
3093 m_jit.move(op1GPR, resultGPR);
3095 Int52Overflow, JSValueRegs(), 0,
3096 m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
// Same negative-zero protocol as the Int32 path, on 64-bit registers.
3098 if (shouldCheckNegativeZero(node->arithMode())) {
3099 MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3100 MacroAssembler::NonZero, resultGPR);
3102 NegativeZero, JSValueRegs(), 0,
3103 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3105 NegativeZero, JSValueRegs(), 0,
3106 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3107 resultNonZero.link(&m_jit);
3110 int52Result(resultGPR, node);
3113 #endif // USE(JSVALUE64)
3115 case DoubleRepUse: {
3116 SpeculateDoubleOperand op1(this, node->child1());
3117 SpeculateDoubleOperand op2(this, node->child2());
3118 FPRTemporary result(this, op1, op2);
3120 FPRReg reg1 = op1.fpr();
3121 FPRReg reg2 = op2.fpr();
3123 m_jit.mulDouble(reg1, reg2, result.fpr());
3125 doubleResult(result.fpr(), node);
3130 RELEASE_ASSERT_NOT_REACHED();
3135 void SpeculativeJIT::compileArithDiv(Node* node)
// Emits speculative code for ArithDiv. The Int32Use path is architecture-
// specific (x86 idivl vs. ARM sdiv); the DoubleRepUse path is a plain double
// divide. In checked arith modes, any division whose true result is not
// representable as an int32 triggers a speculation check (OSR exit).
3137 switch (node->binaryUseKind()) {
3139 #if CPU(X86) || CPU(X86_64)
// x86 idivl implicitly reads edx:eax and writes quotient to eax and
// remainder to edx, so pin temporaries to those two registers up front.
3140 SpeculateInt32Operand op1(this, node->child1());
3141 SpeculateInt32Operand op2(this, node->child2());
3142 GPRTemporary eax(this, X86Registers::eax);
3143 GPRTemporary edx(this, X86Registers::edx);
3144 GPRReg op1GPR = op1.gpr();
3145 GPRReg op2GPR = op2.gpr();
// If the divisor happens to live in eax or edx it would be clobbered by
// the division itself; in that case reserve a scratch to relocate it into
// (the actual move happens just before the idivl, below).
3149 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3150 op2TempGPR = allocate();
3153 op2TempGPR = InvalidGPRReg;
3154 if (op1GPR == X86Registers::eax)
3155 temp = X86Registers::edx;
3157 temp = X86Registers::eax;
3160 ASSERT(temp != op1GPR);
3161 ASSERT(temp != op2GPR);
// temp = op2 + 1, then an unsigned "Above 1" test. This fails exactly for
// op2 == 0 (temp == 1) and op2 == -1 (temp == 0): the two denominators
// that need special handling (divide-by-zero, and -2^31 / -1 overflow).
3163 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3165 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3167 JITCompiler::JumpList done;
3168 if (shouldCheckOverflow(node->arithMode())) {
// Checked mode: OSR-exit on denominator == 0 or numerator == -2^31, the
// only inputs (given an unsafe denominator) that idivl cannot handle.
3169 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR))
3170 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3172 // This is the case where we convert the result to an int after we're done, and we
3173 // already know that the denominator is either -1 or 0. So, if the denominator is
3174 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3175 // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3176 // are happy to fall through to a normal division, since we're just dividing
3177 // something by negative 1.
3179 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
// Unchecked: x / 0 produces 0.
3180 m_jit.move(TrustedImm32(0), eax.gpr());
3181 done.append(m_jit.jump());
3183 notZero.link(&m_jit);
3184 JITCompiler::Jump notNeg2ToThe31 =
3185 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
// Unchecked: -2^31 / -1 truncates back to -2^31, i.e. the numerator itself.
3186 m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3187 done.append(m_jit.jump());
3189 notNeg2ToThe31.link(&m_jit);
3192 safeDenominator.link(&m_jit);
3194 // If the user cares about negative zero, then speculate that we're not about
3195 // to produce negative zero.
3196 if (shouldCheckNegativeZero(node->arithMode())) {
// 0 / negative yields -0 in JS; only the zero-numerator case can do that.
3197 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3198 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3199 numeratorNonZero.link(&m_jit);
// Now actually relocate the divisor out of eax/edx if we reserved a scratch.
3202 if (op2TempGPR != InvalidGPRReg) {
3203 m_jit.move(op2GPR, op2TempGPR);
3204 op2GPR = op2TempGPR;
// cdq sign-extends eax into edx:eax; idivl leaves the quotient in eax and
// the remainder in edx.
3207 m_jit.move(op1GPR, eax.gpr());
3208 m_jit.assembler().cdq();
3209 m_jit.assembler().idivl_r(op2GPR);
3211 if (op2TempGPR != InvalidGPRReg)
3214 // Check that there was no remainder. If there had been, then we'd be obligated to
3215 // produce a double result instead.
3216 if (shouldCheckOverflow(node->arithMode()))
3217 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3220 int32Result(eax.gpr(), node);
3221 #elif CPU(APPLE_ARMV7S) || CPU(ARM64)
// ARM has a real integer-divide instruction (sdiv), so no register pinning
// is needed; the remainder check is done by multiplying back.
3222 SpeculateInt32Operand op1(this, node->child1());
3223 SpeculateInt32Operand op2(this, node->child2());
3224 GPRReg op1GPR = op1.gpr();
3225 GPRReg op2GPR = op2.gpr();
3226 GPRTemporary quotient(this);
3227 GPRTemporary multiplyAnswer(this);
3229 // If the user cares about negative zero, then speculate that we're not about
3230 // to produce negative zero.
3231 if (shouldCheckNegativeZero(node->arithMode())) {
3232 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3233 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3234 numeratorNonZero.link(&m_jit);
3237 m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3239 // Check that there was no remainder. If there had been, then we'd be obligated to
3240 // produce a double result instead.
3241 if (shouldCheckOverflow(node->arithMode())) {
// sdiv truncates toward zero; multiply the quotient back by the divisor
// and compare against the numerator to detect a non-zero remainder.
3242 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3243 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3246 int32Result(quotient.gpr(), node);
3248 RELEASE_ASSERT_NOT_REACHED();
3253 case DoubleRepUse: {
// Double path: a straight hardware double divide, no checks needed.
3254 SpeculateDoubleOperand op1(this, node->child1());
3255 SpeculateDoubleOperand op2(this, node->child2());
3256 FPRTemporary result(this, op1);
3258 FPRReg reg1 = op1.fpr();
3259 FPRReg reg2 = op2.fpr();
3260 m_jit.divDouble(reg1, reg2, result.fpr());
3262 doubleResult(result.fpr(), node);
3267 RELEASE_ASSERT_NOT_REACHED();
3272 void SpeculativeJIT::compileArithMod(Node* node)
// Emits speculative code for ArithMod. Int32Use has three tiers: a shift/mask
// fast path for positive power-of-two constant divisors, an x86-only path for
// other non-zero/non-(-1) constant divisors, and a general per-architecture
// path. DoubleRepUse falls back to a call to fmod. Per JS semantics the
// remainder takes the sign of the dividend, so negative-zero checks key off
// the numerator's sign.
3274 switch (node->binaryUseKind()) {
3276 // In the fast path, the dividend value could be the final result
3277 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3278 SpeculateStrictInt32Operand op1(this, node->child1());
3280 if (node->child2()->isInt32Constant()) {
3281 int32_t divisor = node->child2()->asInt32();
3282 if (divisor > 1 && hasOneBitSet(divisor)) {
// Fast path: divisor is a known power of two > 1, so the remainder can be
// computed with shifts and a mask — no division instruction at all.
3283 unsigned logarithm = WTF::fastLog2(divisor);
3284 GPRReg dividendGPR = op1.gpr();
3285 GPRTemporary result(this);
3286 GPRReg resultGPR = result.gpr();
3288 // This is what LLVM generates. It's pretty crazy. Here's my
3289 // attempt at understanding it.
3291 // First, compute either divisor - 1, or 0, depending on whether
3292 // the dividend is negative:
3294 // If dividend < 0: resultGPR = divisor - 1
3295 // If dividend >= 0: resultGPR = 0
3296 m_jit.move(dividendGPR, resultGPR);
3297 m_jit.rshift32(TrustedImm32(31), resultGPR);
3298 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3300 // Add in the dividend, so that:
3302 // If dividend < 0: resultGPR = dividend + divisor - 1
3303 // If dividend >= 0: resultGPR = dividend
3304 m_jit.add32(dividendGPR, resultGPR);
3306 // Mask so as to only get the *high* bits. This rounds down
3307 // (towards negative infinity) resultGPR to the nearest multiple
3308 // of divisor, so that:
3310 // If dividend < 0: resultGPR = floor((dividend + divisor - 1) / divisor)
3311 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3313 // Note that this can be simplified to:
3315 // If dividend < 0: resultGPR = ceil(dividend / divisor)
3316 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3318 // Note that if the dividend is negative, resultGPR will also be negative.
3319 // Regardless of the sign of dividend, resultGPR will be rounded towards
3320 // zero, because of how things are conditionalized.
3321 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3323 // Subtract resultGPR from dividendGPR, which yields the remainder:
3325 // resultGPR = dividendGPR - resultGPR
3326 m_jit.neg32(resultGPR);
3327 m_jit.add32(dividendGPR, resultGPR);
3329 if (shouldCheckNegativeZero(node->arithMode())) {
3330 // Check that we're not about to create negative zero.
// A zero remainder with a negative dividend would be -0 in JS.
3331 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3332 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3333 numeratorPositive.link(&m_jit);
3336 int32Result(resultGPR, node);
3341 #if CPU(X86) || CPU(X86_64)
// x86-only constant-divisor path: any constant other than 0 and -1 is safe
// for idivl (no fault, no -2^31 / -1 overflow), so skip the runtime
// denominator checks and load the constant into a scratch.
3342 if (node->child2()->isInt32Constant()) {
3343 int32_t divisor = node->child2()->asInt32();
3344 if (divisor && divisor != -1) {
3345 GPRReg op1Gpr = op1.gpr();
3347 GPRTemporary eax(this, X86Registers::eax);
3348 GPRTemporary edx(this, X86Registers::edx);
3349 GPRTemporary scratch(this);
3350 GPRReg scratchGPR = scratch.gpr();
// Preserve the dividend across the division: idivl clobbers eax/edx, and
// the negative-zero check below still needs the dividend's sign.
3353 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3354 op1SaveGPR = allocate();
3355 ASSERT(op1Gpr != op1SaveGPR);
3356 m_jit.move(op1Gpr, op1SaveGPR);
3358 op1SaveGPR = op1Gpr;
3359 ASSERT(op1SaveGPR != X86Registers::eax);
3360 ASSERT(op1SaveGPR != X86Registers::edx);
// cdq sign-extends eax into edx:eax; idivl leaves the remainder in edx.
3362 m_jit.move(op1Gpr, eax.gpr());
3363 m_jit.move(TrustedImm32(divisor), scratchGPR);
3364 m_jit.assembler().cdq();
3365 m_jit.assembler().idivl_r(scratchGPR);
3366 if (shouldCheckNegativeZero(node->arithMode())) {
// Zero remainder with a negative dividend would be -0 in JS.
// NOTE(review): this negative-zero bailout uses the Overflow exit kind
// (not NegativeZero, unlike the fast path above) — confirm intentional.
3367 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3368 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3369 numeratorPositive.link(&m_jit);
3372 if (op1SaveGPR != op1Gpr)
3375 int32Result(edx.gpr(), node);
// General path: divisor is not a usable constant, so speculate it as int32
// and do a full runtime division with denominator checks.
3381 SpeculateInt32Operand op2(this, node->child2());
3382 #if CPU(X86) || CPU(X86_64)
// Same eax/edx pinning dance as compileArithDiv; the remainder lands in edx.
3383 GPRTemporary eax(this, X86Registers::eax);
3384 GPRTemporary edx(this, X86Registers::edx);
3385 GPRReg op1GPR = op1.gpr();
3386 GPRReg op2GPR = op2.gpr();
// Relocate the divisor if it aliases eax/edx (actual move happens below).
3392 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3393 op2TempGPR = allocate();
3396 op2TempGPR = InvalidGPRReg;
3397 if (op1GPR == X86Registers::eax)
3398 temp = X86Registers::edx;
3400 temp = X86Registers::eax;
// Preserve the dividend for the negative-zero check after idivl clobbers
// eax/edx.
3403 if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3404 op1SaveGPR = allocate();
3405 ASSERT(op1GPR != op1SaveGPR);
3406 m_jit.move(op1GPR, op1SaveGPR);
3408 op1SaveGPR = op1GPR;
3410 ASSERT(temp != op1GPR);
3411 ASSERT(temp != op2GPR);
3412 ASSERT(op1SaveGPR != X86Registers::eax);
3413 ASSERT(op1SaveGPR != X86Registers::edx);
// temp = op2 + 1; unsigned "Above 1" fails exactly for op2 == 0 or -1,
// the two denominators idivl cannot handle unconditionally.
3415 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3417 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3419 JITCompiler::JumpList done;
3421 // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3422 // separate case for that. But it probably doesn't matter so much.
3423 if (shouldCheckOverflow(node->arithMode())) {
// Checked mode: OSR-exit on denominator == 0 or numerator == -2^31.
3424 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3425 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3427 // This is the case where we convert the result to an int after we're done, and we
3428 // already know that the denominator is either -1 or 0. So, if the denominator is
3429 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3430 // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3431 // happy to fall through to a normal division, since we're just dividing something
3434 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
// Unchecked: x % 0 produces 0 (NaN in JS, but this path truncates to int).
3435 m_jit.move(TrustedImm32(0), edx.gpr());
3436 done.append(m_jit.jump());
3438 notZero.link(&m_jit);
3439 JITCompiler::Jump notNeg2ToThe31 =
3440 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
// Unchecked: -2^31 % -1 == 0.
3441 m_jit.move(TrustedImm32(0), edx.gpr());
3442 done.append(m_jit.jump());
3444 notNeg2ToThe31.link(&m_jit);
3447 safeDenominator.link(&m_jit);
// Now actually relocate the divisor out of eax/edx if needed.
3449 if (op2TempGPR != InvalidGPRReg) {
3450 m_jit.move(op2GPR, op2TempGPR);
3451 op2GPR = op2TempGPR;
// cdq sign-extends eax into edx:eax; idivl leaves the remainder in edx.
3454 m_jit.move(op1GPR, eax.gpr());
3455 m_jit.assembler().cdq();
3456 m_jit.assembler().idivl_r(op2GPR);
3458 if (op2TempGPR != InvalidGPRReg)
3461 // Check that we're not about to create negative zero.
3462 if (shouldCheckNegativeZero(node->arithMode())) {
// NOTE(review): Overflow exit kind used for a negative-zero bailout here
// too — matches line 3368 above; confirm intentional.
3463 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3464 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3465 numeratorPositive.link(&m_jit);
3468 if (op1SaveGPR != op1GPR)
3472 int32Result(edx.gpr(), node);
3474 #elif CPU(ARM64) || CPU(APPLE_ARMV7S)
// ARM path: sdiv for the quotient, then multiply back and subtract to get
// the remainder, since ARM has no remainder instruction.
3475 GPRTemporary temp(this);
3476 GPRTemporary quotientThenRemainder(this);
3477 GPRTemporary multiplyAnswer(this);
3478 GPRReg dividendGPR = op1.gpr();
3479 GPRReg divisorGPR = op2.gpr();
3480 GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3481 GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3483 JITCompiler::JumpList done;
3485 if (shouldCheckOverflow(node->arithMode()))
3486 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
// Unchecked divide-by-zero: the divisor is known zero on this path, so
// moving it into the result register yields the required 0.
3488 JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3489 m_jit.move(divisorGPR, quotientThenRemainderGPR);
3490 done.append(m_jit.jump());
3491 denominatorNotZero.link(&m_jit);
3494 m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3495 // FIXME: It seems like there are cases where we don't need this? What if we have
3496 // arithMode() == Arith::Unchecked?
3497 // https://bugs.webkit.org/show_bug.cgi?id=126444
3498 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3499 #if CPU(APPLE_ARMV7S)
// remainder = dividend - quotient * divisor (quotient register is reused
// to hold the remainder, hence the name).
3500 m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3502 m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3505 // If the user cares about negative zero, then speculate that we're not about
3506 // to produce negative zero.
3507 if (shouldCheckNegativeZero(node->arithMode())) {
3508 // Check that we're not about to create negative zero.
// NOTE(review): Overflow exit kind used for a negative-zero bailout,
// matching the x86 paths above — confirm intentional.
3509 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3510 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3511 numeratorPositive.link(&m_jit);
3516 int32Result(quotientThenRemainderGPR, node);
3517 #else // not architecture that can do integer division
3518 RELEASE_ASSERT_NOT_REACHED();
3523 case DoubleRepUse: {
// Double path: no inline fmod; call out to the runtime operation.
3524 SpeculateDoubleOperand op1(this, node->child1());
3525 SpeculateDoubleOperand op2(this, node->child2());
3527 FPRReg op1FPR = op1.fpr();
3528 FPRReg op2FPR = op2.fpr();
3532 FPRResult result(this);
3534 callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3536 doubleResult(result.fpr(), node);
3541 RELEASE_ASSERT_NOT_REACHED();
3546 // Returns true if the compare is fused with a subsequent branch.
// Dispatches a comparison node (CompareLess, CompareEq, etc.) to the
// appropriate typed compare emitter based on the speculated use kinds of its
// children. First tries to fuse the compare with an immediately following
// Branch (peephole); `operation` is the slow-path C function for the generic
// untyped fallback at the bottom.
3547 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3549 if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
// Typed fast paths, ordered roughly by expected frequency.
3552 if (node->isBinaryUseKind(Int32Use)) {
3553 compileInt32Compare(node, condition);
3558 if (node->isBinaryUseKind(Int52RepUse)) {
3559 compileInt52Compare(node, condition);
3562 #endif // USE(JSVALUE64)
3564 if (node->isBinaryUseKind(DoubleRepUse)) {
3565 compileDoubleCompare(node, doubleCondition);
// The remaining specializations only make sense for equality, not ordering.
3569 if (node->op() == CompareEq) {
3570 if (node->isBinaryUseKind(StringUse)) {
3571 compileStringEquality(node);
3575 if (node->isBinaryUseKind(BooleanUse)) {
3576 compileBooleanCompare(node, condition);
3580 if (node->isBinaryUseKind(StringIdentUse)) {
3581 compileStringIdentEquality(node);
3585 if (node->isBinaryUseKind(ObjectUse)) {
3586 compileObjectEquality(node);
// Mixed object/object-or-other: the helpers are asymmetric, so swap the
// children to put the known-object edge first.
3590 if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
3591 compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
3595 if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
3596 compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
// Generic fallback: untyped comparison via the slow-path operation.
3601 nonSpeculativeNonPeepholeCompare(node, condition, operation);
// Emits code for a StrictEqual (===) node, dispatching on the speculated use
// kinds of the children. For each typed case it first looks for a following
// Branch consuming this compare (detectPeepHoleBranch); if found, the compare
// and branch are compiled as one fused operation, both children are marked
// used, and m_indexInBlock/m_currentNode are advanced past the branch node so
// it is not compiled again. Returns whether the compare was fused with a
// subsequent branch.
3605 bool SpeculativeJIT::compileStrictEq(Node* node)
3607 if (node->isBinaryUseKind(BooleanUse)) {
3608 unsigned branchIndexInBlock = detectPeepHoleBranch();
3609 if (branchIndexInBlock != UINT_MAX) {
3610 Node* branchNode = m_block->at(branchIndexInBlock);
3611 compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
3612 use(node->child1());
3613 use(node->child2());
3614 m_indexInBlock = branchIndexInBlock;
3615 m_currentNode = branchNode;
3618 compileBooleanCompare(node, MacroAssembler::Equal);
3622 if (node->isBinaryUseKind(Int32Use)) {
3623 unsigned branchIndexInBlock = detectPeepHoleBranch();
3624 if (branchIndexInBlock != UINT_MAX) {
3625 Node* branchNode = m_block->at(branchIndexInBlock);
3626 compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
3627 use(node->child1());
3628 use(node->child2());
3629 m_indexInBlock = branchIndexInBlock;
3630 m_currentNode = branchNode;
3633 compileInt32Compare(node, MacroAssembler::Equal);
3638 if (node->isBinaryUseKind(Int52RepUse)) {
3639 unsigned branchIndexInBlock = detectPeepHoleBranch();
3640 if (branchIndexInBlock != UINT_MAX) {
3641 Node* branchNode = m_block->at(branchIndexInBlock);
3642 compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
3643 use(node->child1());
3644 use(node->child2());
3645 m_indexInBlock = branchIndexInBlock;
3646 m_currentNode = branchNode;
3649 compileInt52Compare(node, MacroAssembler::Equal);
3652 #endif // USE(JSVALUE64)
3654 if (node->isBinaryUseKind(DoubleRepUse)) {
3655 unsigned branchIndexInBlock = detectPeepHoleBranch();
3656 if (branchIndexInBlock != UINT_MAX) {
3657 Node* branchNode = m_block->at(branchIndexInBlock);
3658 compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
3659 use(node->child1());
3660 use(node->child2());
3661 m_indexInBlock = branchIndexInBlock;
3662 m_currentNode = branchNode;
3665 compileDoubleCompare(node, MacroAssembler::DoubleEqual);
// String cases have no peephole variant; compile the compare directly.
3669 if (node->isBinaryUseKind(StringUse)) {
3670 compileStringEquality(node);
3674 if (node->isBinaryUseKind(StringIdentUse)) {
3675 compileStringIdentEquality(node);
3679 if (node->isBinaryUseKind(ObjectUse)) {
3680 unsigned branchIndexInBlock = detectPeepHoleBranch();
3681 if (branchIndexInBlock != UINT_MAX) {
3682 Node* branchNode = m_block->at(branchIndexInBlock);
3683 compilePeepHoleObjectEquality(node, branchNode);
3684 use(node->child1());
3685 use(node->child2());
3686 m_indexInBlock = branchIndexInBlock;
3687 m_currentNode = branchNode;
3690 compileObjectEquality(node);
// Mixed-kind cases: the helpers are asymmetric, so the children are swapped
// where needed to put the more strongly typed edge first.
3694 if (node->isBinaryUseKind(MiscUse, UntypedUse)
3695 || node->isBinaryUseKind(UntypedUse, MiscUse)) {
3696 compileMiscStrictEq(node);
3700 if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
3701 compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
3705 if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
3706 compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
3710 if (node->isBinaryUseKind(StringUse, UntypedUse)) {
3711 compileStringToUntypedEquality(node, node->child1(), node->child2());
3715 if (node->isBinaryUseKind(UntypedUse, StringUse)) {
3716 compileStringToUntypedEquality(node, node->child2(), node->child1());
// Only the fully untyped case remains; use the generic strict-eq path.
3720 RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
3721 return nonSpeculativeStrictEq(node);
3724 void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
3726 SpeculateBooleanOperand op1(this, node->child1());
3727 SpeculateBooleanOperand op2(this, node->child2());
3728 GPRTemporary result(this);
3730 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
3732 unblessedBooleanResult(result.gpr(), node);
3735 void SpeculativeJIT::compileStringEquality(
3736 Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
3737 GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
3738 JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
3740 JITCompiler::JumpList trueCase;
3741 JITCompiler::JumpList falseCase;
3742 JITCompiler::JumpList slowCase;
3744 trueCase.append(fastTrue);
3745 falseCase.append(fastFalse);
3747 m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);
3749 falseCase.append(m_jit.branch32(
3750 MacroAssembler::NotEqual,
3751 MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
3754 trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
3756 m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
3757 m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);
3759 slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
3760 slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));
3762 slowCase.append(m_jit.branchTest32(
3763 MacroAssembler::Zero,
3764 MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
3765 TrustedImm32(StringImpl::flagIs8Bit())));
3766 slowCase.append(m_jit.branchTest32(
3767 MacroAssembler::Zero,
3768 MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
3769 TrustedImm32(StringImpl::flagIs8Bit())));
3771 m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
3772 m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);
3774 MacroAssembler::Label loop = m_jit.label();
3776 m_jit.sub32(TrustedImm32(1), lengthGPR);
3778 // This isn't going to generate the best code on x86. But that's OK, it's still better
3779 // than not inlining.
3780 m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
3781 m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
3782 falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));
3784 m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
3786 trueCase.link(&m_jit);
3787 moveTrueTo(leftTempGPR);
3789 JITCompiler::Jump done = m_jit.jump();
3791 falseCase.link(&m_jit);
3792 moveFalseTo(leftTempGPR);
3795 addSlowPathGenerator(