2 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGSaneStringGetByValSlowPathGenerator.h"
38 #include "DFGSlowPathGenerator.h"
39 #include "LinkBuffer.h"
40 #include "JSCInlines.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "WriteBarrierBuffer.h"
43 #include <wtf/MathExtras.h>
45 namespace JSC { namespace DFG {
47 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
51 , m_lastGeneratedNode(LastNodeType)
53 , m_generationInfo(m_jit.graph().frameRegisterCount())
54 , m_state(m_jit.graph())
55 , m_interpreter(m_jit.graph(), m_state)
56 , m_stream(&jit.jitCode()->variableEventStream)
57 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
58 , m_isCheckingArgumentTypes(false)
62 SpeculativeJIT::~SpeculativeJIT()
// Inline-allocates a JSArray together with its butterfly storage.
// Fast path: bump-allocate the storage, then the JSArray cell; slow cases
// fall through to operationNewArrayWithSize via a custom generator.
// NOTE(review): this listing has dropped lines (left-hand original line
// numbers are non-contiguous) — e.g. the slowCases.append(...) wrapper around
// emitAllocateBasicStorage and the #if USE(JSVALUE64)/#else guards around the
// hole-filling loops are missing. Compare with upstream before editing.
66 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
68 ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
70 GPRTemporary scratch(this);
71 GPRTemporary scratch2(this);
72 GPRReg scratchGPR = scratch.gpr();
73 GPRReg scratch2GPR = scratch2.gpr();
// Allocate at least BASE_VECTOR_LEN slots so small arrays can append
// without an immediate reallocation.
75 unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
77 JITCompiler::JumpList slowCases;
// Storage is allocated end-first; subtract back to get the butterfly base.
80 emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
81 m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
82 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
// Publish the array's length and capacity in the indexing header.
84 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
85 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
// For double arrays, unused tail slots must hold PNaN (the hole marker).
// The first loop is the 64-bit store form; the tag/payload pair below is
// presumably the JSVALUE32_64 form — the #if/#else separating them was lost.
87 if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
89 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
90 for (unsigned i = numElements; i < vectorLength; ++i)
91 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
93 EncodedValueDescriptor value;
94 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
95 for (unsigned i = numElements; i < vectorLength; ++i) {
96 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
97 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
102 // I want a slow path that also loads out the storage pointer, and that's
103 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
104 // of work for a very small piece of functionality. :-/
105 addSlowPathGenerator(adoptPtr(
106 new CallArrayAllocatorSlowPathGenerator(
107 slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
108 structure, numElements)));
// Inline-allocates an Arguments object for the current (non-inlined) frame,
// then initializes its fields: no activation, numArguments = argc - 1 (the
// 'this' slot is excluded), overrodeLength = false, strict-mode bit from the
// code origin, registers pointer = current call frame, no slow argument data,
// callee loaded from the frame.
// NOTE(review): dropped lines in this listing (body braces, blank separators);
// compare with upstream before editing.
111 void SpeculativeJIT::emitAllocateArguments(GPRReg resultGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
113 Structure* structure = m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)->argumentsStructure();
// Size = inline register array offset + argc * sizeof(JSValue); variable-size
// allocation because argc is only known at run time.
115 m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
116 m_jit.mul32(TrustedImm32(sizeof(JSValue)), scratchGPR1, scratchGPR1);
117 m_jit.add32(TrustedImm32(Arguments::offsetOfInlineRegisterArray()), scratchGPR1);
118 emitAllocateVariableSizedJSObject<Arguments>(resultGPR, structure, scratchGPR1, scratchGPR1, scratchGPR2, slowPath);
120 m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfActivation()));
// ArgumentCount includes 'this'; Arguments.length does not, hence the -1.
122 m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
123 m_jit.sub32(TrustedImm32(1), scratchGPR1);
124 m_jit.store32(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfNumArguments()));
126 m_jit.store32(TrustedImm32(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfOverrodeLength()));
127 if (m_jit.isStrictModeFor(m_currentNode->origin.semantic))
128 m_jit.store8(TrustedImm32(1), MacroAssembler::Address(resultGPR, Arguments::offsetOfIsStrictMode()));
130 m_jit.storePtr(GPRInfo::callFrameRegister, MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisters()));
131 m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfSlowArgumentData()));
133 m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratchGPR1);
134 m_jit.storePtr(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfCallee()));
138 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
142 ASSERT(m_isCheckingArgumentTypes || m_canExit);
143 m_jit.appendExitInfo(jumpToFail);
144 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
147 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
151 ASSERT(m_isCheckingArgumentTypes || m_canExit);
152 m_jit.appendExitInfo(jumpsToFail);
153 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
156 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
159 return OSRExitJumpPlaceholder();
160 ASSERT(m_isCheckingArgumentTypes || m_canExit);
161 unsigned index = m_jit.jitCode()->osrExit.size();
162 m_jit.appendExitInfo();
163 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
164 return OSRExitJumpPlaceholder(index);
167 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
169 ASSERT(m_isCheckingArgumentTypes || m_canExit);
170 return speculationCheck(kind, jsValueSource, nodeUse.node());
173 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
175 ASSERT(m_isCheckingArgumentTypes || m_canExit);
176 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
179 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
181 ASSERT(m_isCheckingArgumentTypes || m_canExit);
182 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
185 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
189 ASSERT(m_isCheckingArgumentTypes || m_canExit);
190 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
191 m_jit.appendExitInfo(jumpToFail);
192 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
195 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
197 ASSERT(m_isCheckingArgumentTypes || m_canExit);
198 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
201 void SpeculativeJIT::emitInvalidationPoint(Node* node)
206 OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
207 m_jit.jitCode()->appendOSRExit(OSRExit(
208 UncountableInvalidation, JSValueSource(),
209 m_jit.graph().methodOfGettingAValueProfileFor(node),
210 this, m_stream->size()));
211 info.m_replacementSource = m_jit.watchpointLabel();
212 ASSERT(info.m_replacementSource.isSet());
216 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
218 ASSERT(m_isCheckingArgumentTypes || m_canExit);
221 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
222 m_compileOkay = false;
223 if (verboseCompilationEnabled())
224 dataLog("Bailing compilation.\n");
227 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
229 ASSERT(m_isCheckingArgumentTypes || m_canExit);
230 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
233 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
235 ASSERT(needsTypeCheck(edge, typesPassedThrough));
236 m_interpreter.filter(edge, typesPassedThrough);
237 speculationCheck(BadType, source, edge.node(), jumpToFail);
240 RegisterSet SpeculativeJIT::usedRegisters()
244 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
245 GPRReg gpr = GPRInfo::toRegister(i);
246 if (m_gprs.isInUse(gpr))
249 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
250 FPRReg fpr = FPRInfo::toRegister(i);
251 if (m_fprs.isInUse(fpr))
255 result.merge(RegisterSet::specialRegisters());
260 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
262 m_slowPathGenerators.append(slowPathGenerator);
265 void SpeculativeJIT::runSlowPathGenerators()
267 for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
268 m_slowPathGenerators[i]->generate(this);
271 // On Windows we need to wrap fmod; on other platforms we can call it directly.
272 // On ARMv7 we assert that all function pointers have to low bit set (point to thumb code).
273 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
274 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
279 #define fmodAsDFGOperation fmod
282 void SpeculativeJIT::clearGenerationInfo()
284 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
285 m_generationInfo[i] = GenerationInfo();
286 m_gprs = RegisterBank<GPRInfo>();
287 m_fprs = RegisterBank<FPRInfo>();
// Computes how to silently spill (before a call) and refill (after it) the
// value living in GPR 'source' for virtual register 'spillMe', without
// perturbing the register-allocation state. The spill action depends on the
// current DataFormat; the fill action additionally exploits constants and the
// known spill format to avoid memory traffic.
// NOTE(review): this listing has dropped many lines — notably the
// #if USE(JSVALUE64) / #else / #endif pairs (the JSVALUE32_64 branch at
// "elif" below has lost its opening #if), several closing braces, and some
// else-branches. Do not edit without diffing against upstream.
290 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
292 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
293 Node* node = info.node();
294 DataFormat registerFormat = info.registerFormat();
295 ASSERT(registerFormat != DataFormatNone);
296 ASSERT(registerFormat != DataFormatDouble);
298 SilentSpillAction spillAction;
299 SilentFillAction fillAction;
// If the value is already spilled (or doesn't need to be), no store needed.
301 if (!info.needsSpill())
302 spillAction = DoNothingForSpill;
305 ASSERT(info.gpr() == source);
306 if (registerFormat == DataFormatInt32)
307 spillAction = Store32Payload;
308 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
309 spillAction = StorePtr;
310 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
311 spillAction = Store64;
313 ASSERT(registerFormat & DataFormatJS);
314 spillAction = Store64;
// 32-bit: a JS value occupies a tag/payload register pair, so pick the
// half that 'source' holds.
316 #elif USE(JSVALUE32_64)
317 if (registerFormat & DataFormatJS) {
318 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
319 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
321 ASSERT(info.gpr() == source);
322 spillAction = Store32Payload;
// --- Fill-action selection: prefer rematerializing constants over loads. ---
327 if (registerFormat == DataFormatInt32) {
328 ASSERT(info.gpr() == source);
329 ASSERT(isJSInt32(info.registerFormat()));
330 if (node->hasConstant()) {
331 ASSERT(node->isInt32Constant());
332 fillAction = SetInt32Constant;
334 fillAction = Load32Payload;
335 } else if (registerFormat == DataFormatBoolean) {
// On 64-bit, an unboxed boolean should never need a silent save here.
337 RELEASE_ASSERT_NOT_REACHED();
338 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
339 fillAction = DoNothingForFill;
341 #elif USE(JSVALUE32_64)
342 ASSERT(info.gpr() == source);
343 if (node->hasConstant()) {
344 ASSERT(node->isBooleanConstant());
345 fillAction = SetBooleanConstant;
347 fillAction = Load32Payload;
349 } else if (registerFormat == DataFormatCell) {
350 ASSERT(info.gpr() == source);
351 if (node->hasConstant()) {
352 DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
353 node->asCell(); // To get the assertion.
354 fillAction = SetCellConstant;
357 fillAction = LoadPtr;
359 fillAction = Load32Payload;
362 } else if (registerFormat == DataFormatStorage) {
363 ASSERT(info.gpr() == source);
364 fillAction = LoadPtr;
// Int52 refills must shift between the in-register (shifted-left) and
// strict (unshifted) spill representations as needed.
365 } else if (registerFormat == DataFormatInt52) {
366 if (node->hasConstant())
367 fillAction = SetInt52Constant;
368 else if (info.spillFormat() == DataFormatInt52)
370 else if (info.spillFormat() == DataFormatStrictInt52)
371 fillAction = Load64ShiftInt52Left;
372 else if (info.spillFormat() == DataFormatNone)
375 RELEASE_ASSERT_NOT_REACHED();
376 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
377 fillAction = Load64; // Make GCC happy.
380 } else if (registerFormat == DataFormatStrictInt52) {
381 if (node->hasConstant())
382 fillAction = SetStrictInt52Constant;
383 else if (info.spillFormat() == DataFormatInt52)
384 fillAction = Load64ShiftInt52Right;
385 else if (info.spillFormat() == DataFormatStrictInt52)
387 else if (info.spillFormat() == DataFormatNone)
390 RELEASE_ASSERT_NOT_REACHED();
391 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
392 fillAction = Load64; // Make GCC happy.
396 ASSERT(registerFormat & DataFormatJS);
398 ASSERT(info.gpr() == source);
399 if (node->hasConstant()) {
400 if (node->isCellConstant())
401 fillAction = SetTrustedJSConstant;
403 fillAction = SetJSConstant;
404 } else if (info.spillFormat() == DataFormatInt32) {
405 ASSERT(registerFormat == DataFormatJSInt32);
406 fillAction = Load32PayloadBoxInt;
// JSVALUE32_64: refill whichever half of the tag/payload pair we hold.
410 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
411 if (node->hasConstant())
412 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
413 else if (info.payloadGPR() == source)
414 fillAction = Load32Payload;
415 else { // Fill the Tag
// When the spill format pins the type, the tag is a known constant.
416 switch (info.spillFormat()) {
417 case DataFormatInt32:
418 ASSERT(registerFormat == DataFormatJSInt32);
419 fillAction = SetInt32Tag;
422 ASSERT(registerFormat == DataFormatJSCell);
423 fillAction = SetCellTag;
425 case DataFormatBoolean:
426 ASSERT(registerFormat == DataFormatJSBoolean);
427 fillAction = SetBooleanTag;
430 fillAction = Load32Tag;
437 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// FPR counterpart of silentSavePlanForGPR: the only register format handled
// is DataFormatDouble, so the spill is StoreDouble (when needed) and the fill
// is either rematerializing a double constant or LoadDouble.
// NOTE(review): dropped lines — the #if USE(JSVALUE64)/#elif pair around the
// two fill branches and several braces are missing; diff against upstream
// before editing.
440 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
442 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
443 Node* node = info.node();
444 ASSERT(info.registerFormat() == DataFormatDouble);
446 SilentSpillAction spillAction;
447 SilentFillAction fillAction;
449 if (!info.needsSpill())
450 spillAction = DoNothingForSpill;
452 ASSERT(!node->hasConstant());
453 ASSERT(info.spillFormat() == DataFormatNone);
454 ASSERT(info.fpr() == source);
455 spillAction = StoreDouble;
459 if (node->hasConstant()) {
460 node->asNumber(); // To get the assertion.
461 fillAction = SetDoubleConstant;
463 ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
464 fillAction = LoadDouble;
466 #elif USE(JSVALUE32_64)
467 ASSERT(info.registerFormat() == DataFormatDouble);
468 if (node->hasConstant()) {
469 node->asNumber(); // To get the assertion.
470 fillAction = SetDoubleConstant;
472 fillAction = LoadDouble;
475 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Executes the spill half of a SilentRegisterSavePlan: stores the planned
// register into the node's stack slot without touching allocator state.
// NOTE(review): the switch's case labels between the stores (Store32Tag,
// Store32Payload, StorePtr, Store64, StoreDouble) and the break statements
// were dropped from this listing; the stores below are the bodies of those
// cases. Diff against upstream before editing.
478 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
480 switch (plan.spillAction()) {
481 case DoNothingForSpill:
484 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
487 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
490 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
494 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
498 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
501 RELEASE_ASSERT_NOT_REACHED();
// Executes the fill half of a SilentRegisterSavePlan: restores the register
// after a call, either by rematerializing a constant (possibly via the
// 'canTrample' scratch GPR, 64-bit only) or by reloading from the stack slot.
// NOTE(review): many case labels, break statements and the
// #if USE(JSVALUE64)/#elif USE(JSVALUE32_64)/#endif guards were dropped from
// this listing (note the duplicate 'case SetDoubleConstant:' — the two copies
// belong to different #if halves). Diff against upstream before editing.
505 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
507 #if USE(JSVALUE32_64)
508 UNUSED_PARAM(canTrample);
510 switch (plan.fillAction()) {
511 case DoNothingForFill:
513 case SetInt32Constant:
514 m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
// Int52 constants are stored shifted left by int52ShiftAmount in registers.
517 case SetInt52Constant:
518 m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
520 case SetStrictInt52Constant:
521 m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
523 #endif // USE(JSVALUE64)
524 case SetBooleanConstant:
525 m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
527 case SetCellConstant:
528 m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
531 case SetTrustedJSConstant:
532 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
535 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
// 64-bit double constant: materialize the bit pattern in a GPR, then move it
// to the FPR — this is why the caller must provide 'canTrample'.
537 case SetDoubleConstant:
538 m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
539 m_jit.move64ToDouble(canTrample, plan.fpr());
541 case Load32PayloadBoxInt:
542 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
543 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
545 case Load32PayloadConvertToInt52:
546 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
547 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
548 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
550 case Load32PayloadSignExtend:
551 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
552 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
// JSVALUE32_64 constant fills: one half (tag or payload) at a time.
555 case SetJSConstantTag:
556 m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
558 case SetJSConstantPayload:
559 m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
562 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
565 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
568 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
// 32-bit double constant: loaded from the code block's constant pool.
570 case SetDoubleConstant:
571 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
575 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
578 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
581 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
585 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
587 case Load64ShiftInt52Right:
588 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
589 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
591 case Load64ShiftInt52Left:
592 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
593 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
597 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
600 RELEASE_ASSERT_NOT_REACHED();
// Given a register holding a cell's indexing type byte, returns a jump that
// is taken when the indexing type does NOT match the wanted 'shape' under the
// constraints of the array mode's array class (must-be-array, must-not-be-
// array, or may-be-either).
// NOTE(review): dropped lines — the Array::Array case between OriginalArray
// and NonArray, plus several braces/breaks, are missing from this listing.
604 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
606 switch (arrayMode.arrayClass()) {
// OriginalArray should have been proven by structure checks already.
607 case Array::OriginalArray: {
609 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
610 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
// Array case (presumably): must be an array AND have the wanted shape.
616 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
617 return m_jit.branch32(
618 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
// Non-array: same shape test but with the IsArray bit required clear.
620 case Array::NonArray:
621 case Array::OriginalNonArray:
622 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
623 return m_jit.branch32(
624 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
// Possibly-array: only the shape bits matter.
626 case Array::PossiblyArray:
627 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
628 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
631 RELEASE_ASSERT_NOT_REACHED();
632 return JITCompiler::Jump();
// JumpList variant: dispatches on the array mode's type. Simple shapes
// (Int32/Double/Contiguous) delegate to the single-shape helper; the
// ArrayStorage/SlowPutArrayStorage cases need a range check because SlowPut
// accepts two adjacent shapes.
// NOTE(review): dropped lines — case labels for Int32/Double, several
// result.append(...) wrappers around the branch32 calls, else-branches,
// closing braces and the final 'return result;' are missing from this
// listing. Diff against upstream before editing.
635 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
637 JITCompiler::JumpList result;
639 switch (arrayMode.type()) {
641 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
644 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
646 case Array::Contiguous:
647 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
649 case Array::ArrayStorage:
650 case Array::SlowPutArrayStorage: {
651 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
653 if (arrayMode.isJSArray()) {
// JSArray + SlowPut: require the IsArray bit, then accept the shape range
// [ArrayStorageShape, SlowPutArrayStorageShape] via an unsigned compare.
654 if (arrayMode.isSlowPut()) {
657 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
658 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
659 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
662 MacroAssembler::Above, tempGPR,
663 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
666 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
668 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
// Non-array (or possibly-array) path: same logic without the IsArray bit.
671 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
672 if (arrayMode.isSlowPut()) {
673 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
676 MacroAssembler::Above, tempGPR,
677 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
681 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Emits the run-time check that a CheckArray node demands: verifies the base
// cell's indexing type (or cell type, for Arguments/typed arrays) matches the
// node's array mode, OSR-exiting on mismatch. Skips the check entirely when
// the abstract state already proves it.
// NOTE(review): dropped lines — case labels (Array::String, Int32, Double),
// the speculationCheck(...) call head before 'BadIndexingType', 'return'
// statements, a 'default' arm and closing braces are missing from this
// listing. Diff against upstream before editing.
692 void SpeculativeJIT::checkArray(Node* node)
694 ASSERT(node->arrayMode().isSpecific());
695 ASSERT(!node->arrayMode().doesConversion());
697 SpeculateCellOperand base(this, node->child1());
698 GPRReg baseReg = base.gpr();
// Already proven by the abstract interpreter: no code needed.
700 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
701 noResult(m_currentNode);
705 const ClassInfo* expectedClassInfo = 0;
707 switch (node->arrayMode().type()) {
709 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
// Indexing-shape-based modes: test the indexing type byte.
713 case Array::Contiguous:
714 case Array::ArrayStorage:
715 case Array::SlowPutArrayStorage: {
716 GPRTemporary temp(this);
717 GPRReg tempGPR = temp.gpr();
718 m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
720 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
721 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
723 noResult(m_currentNode);
// Arguments / typed arrays: check the JSType instead of the indexing shape.
726 case Array::Arguments:
727 speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ArgumentsType);
729 noResult(m_currentNode);
732 speculateCellTypeWithoutTypeFiltering(
733 node->child1(), baseReg,
734 typeForTypedArrayType(node->arrayMode().typedArrayType()));
735 noResult(m_currentNode);
// Fallback: compare the structure's ClassInfo pointer.
739 RELEASE_ASSERT(expectedClassInfo);
741 GPRTemporary temp(this);
742 GPRTemporary temp2(this);
743 m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
745 BadType, JSValueSource::unboxedCell(baseReg), node,
747 MacroAssembler::NotEqual,
748 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
749 MacroAssembler::TrustedImmPtr(expectedClassInfo)));
751 noResult(m_currentNode);
// Emits an Arrayify/ArrayifyToStructure conversion: fast-path check that the
// base already has the wanted structure (ArrayifyToStructure) or indexing
// shape (Arrayify), with a slow path that performs the actual conversion.
// NOTE(review): dropped lines — the structure argument to
// branchWeakStructure, the else-branch with the load8 that feeds tempGPR,
// and several braces are missing from this listing.
754 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
756 ASSERT(node->arrayMode().doesConversion());
758 GPRTemporary temp(this);
// 'structure' is allocated lazily below because ArrayifyToStructure does not
// need the extra scratch register.
759 GPRTemporary structure;
760 GPRReg tempGPR = temp.gpr();
761 GPRReg structureGPR = InvalidGPRReg;
763 if (node->op() != ArrayifyToStructure) {
764 GPRTemporary realStructure(this);
765 structure.adopt(realStructure);
766 structureGPR = structure.gpr();
769 // We can skip all that comes next if we already have array storage.
770 MacroAssembler::JumpList slowPath;
772 if (node->op() == ArrayifyToStructure) {
773 slowPath.append(m_jit.branchWeakStructure(
774 JITCompiler::NotEqual,
775 JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
779 MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
781 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
784 addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
785 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
787 noResult(m_currentNode);
790 void SpeculativeJIT::arrayify(Node* node)
792 ASSERT(node->arrayMode().isSpecific());
794 SpeculateCellOperand base(this, node->child1());
796 if (!node->child2()) {
797 arrayify(node, base.gpr(), InvalidGPRReg);
801 SpeculateInt32Operand property(this, node->child2());
803 arrayify(node, base.gpr(), property.gpr());
// Materializes a storage (butterfly) pointer for 'edge' into a GPR: reloads
// from the spill slot if spilled as storage, returns the live register if
// already filled, and otherwise fills the value as a cell.
// NOTE(review): dropped lines — the 'return gpr;' statements after the fill
// and in the DataFormatStorage case, the default arm and closing braces are
// missing from this listing. Diff against upstream before editing.
806 GPRReg SpeculativeJIT::fillStorage(Edge edge)
808 VirtualRegister virtualRegister = edge->virtualRegister();
809 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
811 switch (info.registerFormat()) {
812 case DataFormatNone: {
813 if (info.spillFormat() == DataFormatStorage) {
814 GPRReg gpr = allocate();
815 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
816 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
817 info.fillStorage(*m_stream, gpr);
821 // Must be a cell; fill it as a cell and then return the pointer.
822 return fillSpeculateCell(edge);
825 case DataFormatStorage: {
826 GPRReg gpr = info.gpr();
832 return fillSpeculateCell(edge);
836 void SpeculativeJIT::useChildren(Node* node)
838 if (node->flags() & NodeHasVarArgs) {
839 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
840 if (!!m_jit.graph().m_varArgChildren[childIdx])
841 use(m_jit.graph().m_varArgChildren[childIdx]);
844 Edge child1 = node->child1();
846 ASSERT(!node->child2() && !node->child3());
851 Edge child2 = node->child2();
853 ASSERT(!node->child3());
858 Edge child3 = node->child3();
// Compiles the 'in' operator. When the property is a constant atomic string,
// emits a patchable inline cache (InRecord) with operationInOptimize as the
// slow path; otherwise falls back to a call to operationGenericIn.
// NOTE(review): dropped lines — use()/flushRegisters() calls, the
// base.use()/'return' at the end of the IC path, the callOperation head for
// the generic path and closing braces are missing from this listing. Diff
// against upstream before editing.
865 void SpeculativeJIT::compileIn(Node* node)
867 SpeculateCellOperand base(this, node->child2());
868 GPRReg baseGPR = base.gpr();
// Constant atomic-string property: build a structure-stub-backed inline
// cache so repeated 'in' checks on the same structure are fast.
870 if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
871 if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
872 StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
874 GPRTemporary result(this);
875 GPRReg resultGPR = result.gpr();
// The patchable jump initially always takes the slow path; the IC repatches
// it once it has observed a structure.
879 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
880 MacroAssembler::Label done = m_jit.label();
882 OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
883 jump.m_jump, this, operationInOptimize,
884 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
885 string->tryGetValueImpl());
887 stubInfo->codeOrigin = node->origin.semantic;
888 stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
889 stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
890 stubInfo->patch.usedRegisters = usedRegisters();
891 stubInfo->patch.spillMode = NeedToSpill;
893 m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
894 addSlowPathGenerator(slowPath.release());
898 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: flush and call the runtime 'in' implementation.
903 JSValueOperand key(this, node->child1());
904 JSValueRegs regs = key.jsValueRegs();
906 GPRFlushedCallResult result(this);
907 GPRReg resultGPR = result.gpr();
914 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
916 blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
919 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
921 unsigned branchIndexInBlock = detectPeepHoleBranch();
922 if (branchIndexInBlock != UINT_MAX) {
923 Node* branchNode = m_block->at(branchIndexInBlock);
925 ASSERT(node->adjustedRefCount() == 1);
927 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
929 m_indexInBlock = branchIndexInBlock;
930 m_currentNode = branchNode;
935 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
940 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
942 unsigned branchIndexInBlock = detectPeepHoleBranch();
943 if (branchIndexInBlock != UINT_MAX) {
944 Node* branchNode = m_block->at(branchIndexInBlock);
946 ASSERT(node->adjustedRefCount() == 1);
948 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
950 m_indexInBlock = branchIndexInBlock;
951 m_currentNode = branchNode;
956 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Debug helper: maps a DataFormat enum value to a printable name by indexing
// a table laid out to mirror the enum.
// NOTE(review): the string table's contents (original lines 965-981) were
// dropped from this listing — the initializer is empty here, which would not
// compile. Restore from upstream before editing.
961 static const char* dataFormatString(DataFormat format)
963 // These values correspond to the DataFormat enum.
964 const char* strings[] = {
982 return strings[format];
// Debug dump of the register-allocation state: banks of GPRs/FPRs and the
// per-virtual-register GenerationInfo (register/spill formats plus the
// concrete register, when assigned).
// NOTE(review): dropped lines — the m_gprs.dump()/m_fprs.dump() calls, the
// 'if (info.alive())'/else around the two dataLogF forms, part of the #if
// condition and closing braces are missing from this listing. Diff against
// upstream before editing.
985 void SpeculativeJIT::dump(const char* label)
988 dataLogF("<%s>\n", label);
990 dataLogF("  gprs:\n");
992 dataLogF("  fprs:\n");
994 dataLogF("  VirtualRegisters:\n");
995 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
996 GenerationInfo& info = m_generationInfo[i];
998 dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1000 dataLogF("    % 3d:[__][__]", i);
1001 if (info.registerFormat() == DataFormatDouble)
1002 dataLogF(":fpr%d\n", info.fpr());
1003 else if (info.registerFormat() != DataFormatNone
1004 #if USE(JSVALUE32_64)
1005 && !(info.registerFormat() & DataFormatJS)
1008 ASSERT(info.gpr() != InvalidGPRReg);
1009 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1014 dataLogF("</%s>\n", label);
1017 GPRTemporary::GPRTemporary()
1019 , m_gpr(InvalidGPRReg)
1023 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1025 , m_gpr(InvalidGPRReg)
1027 m_gpr = m_jit->allocate();
1030 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1032 , m_gpr(InvalidGPRReg)
1034 m_gpr = m_jit->allocate(specific);
1037 #if USE(JSVALUE32_64)
1038 GPRTemporary::GPRTemporary(
1039 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1041 , m_gpr(InvalidGPRReg)
1043 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1044 m_gpr = m_jit->reuse(op1.gpr(which));
1046 m_gpr = m_jit->allocate();
1048 #endif // USE(JSVALUE32_64)
// RAII pair-or-single temporary for a boxed JSValue: one GPR on 64-bit,
// a tag/payload GPR pair on 32-bit.
// NOTE(review): dropped lines — the allocating constructor's initializer
// list(s) and the #if USE(JSVALUE64)/#else/#endif guards separating the two
// regs() returns are missing from this listing. Diff against upstream before
// editing.
1050 JSValueRegsTemporary::JSValueRegsTemporary() { }
1052 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1062 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1064 JSValueRegs JSValueRegsTemporary::regs()
1067 return JSValueRegs(m_gpr.gpr());
1069 return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1073 void GPRTemporary::adopt(GPRTemporary& other)
1076 ASSERT(m_gpr == InvalidGPRReg);
1077 ASSERT(other.m_jit);
1078 ASSERT(other.m_gpr != InvalidGPRReg);
1079 m_jit = other.m_jit;
1080 m_gpr = other.m_gpr;
1082 other.m_gpr = InvalidGPRReg;
1085 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1087 , m_fpr(InvalidFPRReg)
1089 m_fpr = m_jit->fprAllocate();
1092 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1094 , m_fpr(InvalidFPRReg)
1096 if (m_jit->canReuse(op1.node()))
1097 m_fpr = m_jit->reuse(op1.fpr());
1099 m_fpr = m_jit->fprAllocate();
1102 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1104 , m_fpr(InvalidFPRReg)
1106 if (m_jit->canReuse(op1.node()))
1107 m_fpr = m_jit->reuse(op1.fpr());
1108 else if (m_jit->canReuse(op2.node()))
1109 m_fpr = m_jit->reuse(op2.fpr());
1111 m_fpr = m_jit->fprAllocate();
1114 #if USE(JSVALUE32_64)
1115 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1117 , m_fpr(InvalidFPRReg)
1119 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1120 m_fpr = m_jit->reuse(op1.fpr());
1122 m_fpr = m_jit->fprAllocate();
1126 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1128 BasicBlock* taken = branchNode->branchData()->taken.block;
1129 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1131 SpeculateDoubleOperand op1(this, node->child1());
1132 SpeculateDoubleOperand op2(this, node->child2());
1134 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1138 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1140 BasicBlock* taken = branchNode->branchData()->taken.block;
1141 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1143 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1145 if (taken == nextBlock()) {
1146 condition = MacroAssembler::NotEqual;
1147 BasicBlock* tmp = taken;
1152 SpeculateCellOperand op1(this, node->child1());
1153 SpeculateCellOperand op2(this, node->child2());
1155 GPRReg op1GPR = op1.gpr();
1156 GPRReg op2GPR = op2.gpr();
1158 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1159 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1161 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1162 m_jit.branchStructurePtr(
1163 MacroAssembler::Equal,
1164 MacroAssembler::Address(op1GPR, JSCell::structureIDOffset()),
1165 m_jit.vm()->stringStructure.get()));
1167 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1169 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1170 m_jit.branchStructurePtr(
1171 MacroAssembler::Equal,
1172 MacroAssembler::Address(op2GPR, JSCell::structureIDOffset()),
1173 m_jit.vm()->stringStructure.get()));
1176 GPRTemporary structure(this);
1177 GPRTemporary temp(this);
1178 GPRReg structureGPR = structure.gpr();
1180 m_jit.emitLoadStructure(op1GPR, structureGPR, temp.gpr());
1181 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1183 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1185 MacroAssembler::Equal,
1187 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1189 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1191 MacroAssembler::NonZero,
1192 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1193 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1195 m_jit.emitLoadStructure(op2GPR, structureGPR, temp.gpr());
1196 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1198 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1200 MacroAssembler::Equal,
1202 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1204 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1206 MacroAssembler::NonZero,
1207 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1208 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1211 branchPtr(condition, op1GPR, op2GPR, taken);
1215 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1217 BasicBlock* taken = branchNode->branchData()->taken.block;
1218 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1220 // The branch instruction will branch to the taken block.
1221 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1222 if (taken == nextBlock()) {
1223 condition = JITCompiler::invert(condition);
1224 BasicBlock* tmp = taken;
1229 if (node->child1()->isBooleanConstant()) {
1230 bool imm = node->child1()->asBoolean();
1231 SpeculateBooleanOperand op2(this, node->child2());
1232 branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1233 } else if (node->child2()->isBooleanConstant()) {
1234 SpeculateBooleanOperand op1(this, node->child1());
1235 bool imm = node->child2()->asBoolean();
1236 branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1238 SpeculateBooleanOperand op1(this, node->child1());
1239 SpeculateBooleanOperand op2(this, node->child2());
1240 branch32(condition, op1.gpr(), op2.gpr(), taken);
1246 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1248 BasicBlock* taken = branchNode->branchData()->taken.block;
1249 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1251 // The branch instruction will branch to the taken block.
1252 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1253 if (taken == nextBlock()) {
1254 condition = JITCompiler::invert(condition);
1255 BasicBlock* tmp = taken;
1260 if (node->child1()->isInt32Constant()) {
1261 int32_t imm = node->child1()->asInt32();
1262 SpeculateInt32Operand op2(this, node->child2());
1263 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1264 } else if (node->child2()->isInt32Constant()) {
1265 SpeculateInt32Operand op1(this, node->child1());
1266 int32_t imm = node->child2()->asInt32();
1267 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1269 SpeculateInt32Operand op1(this, node->child1());
1270 SpeculateInt32Operand op2(this, node->child2());
1271 branch32(condition, op1.gpr(), op2.gpr(), taken);
1277 // Returns true if the compare is fused with a subsequent branch.
1278 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1280 // Fused compare & branch.
1281 unsigned branchIndexInBlock = detectPeepHoleBranch();
1282 if (branchIndexInBlock != UINT_MAX) {
1283 Node* branchNode = m_block->at(branchIndexInBlock);
1285 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1286 // so can be no intervening nodes to also reference the compare.
1287 ASSERT(node->adjustedRefCount() == 1);
1289 if (node->isBinaryUseKind(Int32Use))
1290 compilePeepHoleInt32Branch(node, branchNode, condition);
1292 else if (node->isBinaryUseKind(Int52RepUse))
1293 compilePeepHoleInt52Branch(node, branchNode, condition);
1294 #endif // USE(JSVALUE64)
1295 else if (node->isBinaryUseKind(DoubleRepUse))
1296 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1297 else if (node->op() == CompareEq) {
1298 if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1299 // Use non-peephole comparison, for now.
1302 if (node->isBinaryUseKind(BooleanUse))
1303 compilePeepHoleBooleanBranch(node, branchNode, condition);
1304 else if (node->isBinaryUseKind(ObjectUse))
1305 compilePeepHoleObjectEquality(node, branchNode);
1306 else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1307 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1308 else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1309 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1311 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1315 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1319 use(node->child1());
1320 use(node->child2());
1321 m_indexInBlock = branchIndexInBlock;
1322 m_currentNode = branchNode;
1328 void SpeculativeJIT::noticeOSRBirth(Node* node)
1330 if (!node->hasVirtualRegister())
1333 VirtualRegister virtualRegister = node->virtualRegister();
1334 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1336 info.noticeOSRBirth(*m_stream, node, virtualRegister);
1339 void SpeculativeJIT::compileMovHint(Node* node)
1341 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1343 Node* child = node->child1().node();
1344 noticeOSRBirth(child);
1346 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1349 void SpeculativeJIT::bail(AbortReason reason)
1351 if (verboseCompilationEnabled())
1352 dataLog("Bailing compilation.\n");
1353 m_compileOkay = true;
1354 m_jit.abortWithReason(reason, m_lastGeneratedNode);
1355 clearGenerationInfo();
1358 void SpeculativeJIT::compileCurrentBlock()
// Generates machine code for m_block. NOTE(review): this extraction is
// missing interleaved lines (early returns, case labels, closing braces);
// comments below describe only what the visible lines establish.
1360 ASSERT(m_compileOkay);
1365 ASSERT(m_block->isReachable);
// Record the label marking this block's head so branches can be linked to it.
1367 m_jit.blockHeads()[m_block->index] = m_jit.label();
1369 if (!m_block->intersectionOfCFAHasVisited) {
1370 // Don't generate code for basic blocks that are unreachable according to CFA.
1371 // But to be sure that nobody has generated a jump to this block, drop in a
1373 m_jit.abortWithReason(DFGUnreachableBasicBlock);
// Start this block's variable-event stream from a clean slate.
1377 m_stream->appendAndLog(VariableEvent::reset());
// Debug-only sanity checks on the call frame at block entry.
1379 m_jit.jitAssertHasValidCallFrame();
1380 m_jit.jitAssertTagsInPlace();
1381 m_jit.jitAssertArgumentCountSane();
// Initialize the abstract interpreter's state for this block.
1384 m_state.beginBasicBlock(m_block);
// Record, for each live variable at the block head, where its value lives.
1386 for (size_t i = m_block->variablesAtHead.size(); i--;) {
1387 int operand = m_block->variablesAtHead.operandForIndex(i);
1388 Node* node = m_block->variablesAtHead[i];
1390 continue; // No need to record dead SetLocal's.
1392 VariableAccessData* variable = node->variableAccessData();
1394 if (!node->refCount())
1395 continue; // No need to record dead SetLocal's.
1396 format = dataFormatFor(variable->flushFormat());
1397 m_stream->appendAndLog(
1398 VariableEvent::setLocal(
1399 VirtualRegister(operand),
1400 variable->machineLocal(),
// Exit-target/profile origins start out cleared for the block.
1404 m_codeOriginForExitTarget = CodeOrigin();
1405 m_codeOriginForExitProfile = CodeOrigin();
// Main per-node code generation loop.
1407 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1408 m_currentNode = m_block->at(m_indexInBlock);
1410 // We may have hit a contradiction that the CFA was aware of but that the JIT
1411 // didn't cause directly.
1412 if (!m_state.isValid()) {
1413 bail(DFGBailedAtTopOfBlock);
1417 if (ASSERT_DISABLED)
1418 m_canExit = true; // Essentially disable the assertions.
1420 m_canExit = mayExit(m_jit.graph(), m_currentNode);
1422 m_interpreter.startExecuting();
1423 m_jit.setForNode(m_currentNode);
// Origins drive where an OSR exit for this node lands and profiles.
1424 m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1425 m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1426 m_lastGeneratedNode = m_currentNode->op();
// Dead nodes are not compiled, but some ops still need bookkeeping
// (minified-graph entries, MovHint/ZombieHint local recording).
1427 if (!m_currentNode->shouldGenerate()) {
1428 switch (m_currentNode->op()) {
1430 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1434 RELEASE_ASSERT_NOT_REACHED();
1438 compileMovHint(m_currentNode);
1442 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
1447 if (belongsInMinifiedGraph(m_currentNode->op()))
1448 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1453 if (verboseCompilationEnabled()) {
1455 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1456 (int)m_currentNode->index(),
1457 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
// Generate code for the live node.
1461 compile(m_currentNode);
1463 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1464 m_jit.clearRegisterAllocationOffsets();
1467 if (!m_compileOkay) {
1468 bail(DFGBailedAtEndOfNode);
1472 if (belongsInMinifiedGraph(m_currentNode->op())) {
1473 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1474 noticeOSRBirth(m_currentNode);
1478 // Make sure that the abstract state is rematerialized for the next node.
1479 m_interpreter.executeEffects(m_indexInBlock);
1482 // Perform the most basic verification that children have been used correctly.
1483 if (!ASSERT_DISABLED) {
1484 for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1485 GenerationInfo& info = m_generationInfo[index];
1486 RELEASE_ASSERT(!info.alive());
1491 // If we are making type predictions about our arguments then
1492 // we need to check that they are correct on function entry.
1493 void SpeculativeJIT::checkArgumentTypes()
1495 ASSERT(!m_currentNode);
1496 m_isCheckingArgumentTypes = true;
1497 m_codeOriginForExitTarget = CodeOrigin(0);
1498 m_codeOriginForExitProfile = CodeOrigin(0);
1500 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1501 Node* node = m_jit.graph().m_arguments[i];
1503 // The argument is dead. We don't do any checks for such arguments.
1507 ASSERT(node->op() == SetArgument);
1508 ASSERT(node->shouldGenerate());
1510 VariableAccessData* variableAccessData = node->variableAccessData();
1511 FlushFormat format = variableAccessData->flushFormat();
1513 if (format == FlushedJSValue)
1516 VirtualRegister virtualRegister = variableAccessData->local();
1518 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1522 case FlushedInt32: {
1523 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1526 case FlushedBoolean: {
1527 GPRTemporary temp(this);
1528 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1529 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1530 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1534 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1538 RELEASE_ASSERT_NOT_REACHED();
1543 case FlushedInt32: {
1544 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1547 case FlushedBoolean: {
1548 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1552 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1556 RELEASE_ASSERT_NOT_REACHED();
1561 m_isCheckingArgumentTypes = false;
1564 bool SpeculativeJIT::compile()
1566 checkArgumentTypes();
1568 ASSERT(!m_currentNode);
1569 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1570 m_jit.setForBlockIndex(blockIndex);
1571 m_block = m_jit.graph().block(blockIndex);
1572 compileCurrentBlock();
1578 void SpeculativeJIT::createOSREntries()
1580 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1581 BasicBlock* block = m_jit.graph().block(blockIndex);
1584 if (!block->isOSRTarget)
1587 // Currently we don't have OSR entry trampolines. We could add them
1589 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1593 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1595 unsigned osrEntryIndex = 0;
1596 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1597 BasicBlock* block = m_jit.graph().block(blockIndex);
1600 if (!block->isOSRTarget)
1602 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1604 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1607 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1609 Edge child3 = m_jit.graph().varArgChild(node, 2);
1610 Edge child4 = m_jit.graph().varArgChild(node, 3);
1612 ArrayMode arrayMode = node->arrayMode();
1614 GPRReg baseReg = base.gpr();
1615 GPRReg propertyReg = property.gpr();
1617 SpeculateDoubleOperand value(this, child3);
1619 FPRReg valueReg = value.fpr();
1622 JSValueRegs(), child3, SpecFullRealNumber,
1624 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1629 StorageOperand storage(this, child4);
1630 GPRReg storageReg = storage.gpr();
1632 if (node->op() == PutByValAlias) {
1633 // Store the value to the array.
1634 GPRReg propertyReg = property.gpr();
1635 FPRReg valueReg = value.fpr();
1636 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1638 noResult(m_currentNode);
1642 GPRTemporary temporary;
1643 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1645 MacroAssembler::Jump slowCase;
1647 if (arrayMode.isInBounds()) {
1649 OutOfBounds, JSValueRegs(), 0,
1650 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1652 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1654 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1656 if (!arrayMode.isOutOfBounds())
1657 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1659 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1660 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1662 inBounds.link(&m_jit);
1665 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1672 if (arrayMode.isOutOfBounds()) {
1673 addSlowPathGenerator(
1676 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1677 NoResult, baseReg, propertyReg, valueReg));
1680 noResult(m_currentNode, UseChildrenCalledExplicitly);
1683 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1685 SpeculateCellOperand string(this, node->child1());
1686 SpeculateStrictInt32Operand index(this, node->child2());
1687 StorageOperand storage(this, node->child3());
1689 GPRReg stringReg = string.gpr();
1690 GPRReg indexReg = index.gpr();
1691 GPRReg storageReg = storage.gpr();
1693 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1695 // unsigned comparison so we can filter out negative indices and indices that are too large
1696 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1698 GPRTemporary scratch(this);
1699 GPRReg scratchReg = scratch.gpr();
1701 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1703 // Load the character into scratchReg
1704 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1706 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1707 JITCompiler::Jump cont8Bit = m_jit.jump();
1709 is16Bit.link(&m_jit);
1711 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1713 cont8Bit.link(&m_jit);
1715 int32Result(scratchReg, m_currentNode);
1718 void SpeculativeJIT::compileGetByValOnString(Node* node)
1720 SpeculateCellOperand base(this, node->child1());
1721 SpeculateStrictInt32Operand property(this, node->child2());
1722 StorageOperand storage(this, node->child3());
1723 GPRReg baseReg = base.gpr();
1724 GPRReg propertyReg = property.gpr();
1725 GPRReg storageReg = storage.gpr();
1727 GPRTemporary scratch(this);
1728 GPRReg scratchReg = scratch.gpr();
1729 #if USE(JSVALUE32_64)
1730 GPRTemporary resultTag;
1731 GPRReg resultTagReg = InvalidGPRReg;
1732 if (node->arrayMode().isOutOfBounds()) {
1733 GPRTemporary realResultTag(this);
1734 resultTag.adopt(realResultTag);
1735 resultTagReg = resultTag.gpr();
1739 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1741 // unsigned comparison so we can filter out negative indices and indices that are too large
1742 JITCompiler::Jump outOfBounds = m_jit.branch32(
1743 MacroAssembler::AboveOrEqual, propertyReg,
1744 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1745 if (node->arrayMode().isInBounds())
1746 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1748 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1750 // Load the character into scratchReg
1751 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1753 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1754 JITCompiler::Jump cont8Bit = m_jit.jump();
1756 is16Bit.link(&m_jit);
1758 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1760 JITCompiler::Jump bigCharacter =
1761 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1763 // 8 bit string values don't need the isASCII check.
1764 cont8Bit.link(&m_jit);
1766 m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1767 m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1768 m_jit.loadPtr(scratchReg, scratchReg);
1770 addSlowPathGenerator(
1772 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1774 if (node->arrayMode().isOutOfBounds()) {
1775 #if USE(JSVALUE32_64)
1776 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1779 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1780 if (globalObject->stringPrototypeChainIsSane()) {
1782 addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1783 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
1785 addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1786 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1787 baseReg, propertyReg)));
1791 addSlowPathGenerator(
1793 outOfBounds, this, operationGetByValStringInt,
1794 scratchReg, baseReg, propertyReg));
1796 addSlowPathGenerator(
1798 outOfBounds, this, operationGetByValStringInt,
1799 resultTagReg, scratchReg, baseReg, propertyReg));
1804 jsValueResult(scratchReg, m_currentNode);
1806 jsValueResult(resultTagReg, scratchReg, m_currentNode);
1809 cellResult(scratchReg, m_currentNode);
1812 void SpeculativeJIT::compileFromCharCode(Node* node)
1814 SpeculateStrictInt32Operand property(this, node->child1());
1815 GPRReg propertyReg = property.gpr();
1816 GPRTemporary smallStrings(this);
1817 GPRTemporary scratch(this);
1818 GPRReg scratchReg = scratch.gpr();
1819 GPRReg smallStringsReg = smallStrings.gpr();
1821 JITCompiler::JumpList slowCases;
1822 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1823 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1824 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1826 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1827 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1828 cellResult(scratchReg, m_currentNode);
1831 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1833 VirtualRegister virtualRegister = node->virtualRegister();
1834 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1836 switch (info.registerFormat()) {
1837 case DataFormatStorage:
1838 RELEASE_ASSERT_NOT_REACHED();
1840 case DataFormatBoolean:
1841 case DataFormatCell:
1842 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1843 return GeneratedOperandTypeUnknown;
1845 case DataFormatNone:
1846 case DataFormatJSCell:
1848 case DataFormatJSBoolean:
1849 case DataFormatJSDouble:
1850 return GeneratedOperandJSValue;
1852 case DataFormatJSInt32:
1853 case DataFormatInt32:
1854 return GeneratedOperandInteger;
1857 RELEASE_ASSERT_NOT_REACHED();
1858 return GeneratedOperandTypeUnknown;
1862 void SpeculativeJIT::compileValueToInt32(Node* node)
// Converts a value to int32 per its use kind. NOTE(review): this extraction
// dropped the #if USE(JSVALUE64)/#else markers and several case labels, so
// the 64-bit and 32_64 paths below appear interleaved; comments describe
// only what the visible lines establish.
1864 switch (node->child1().useKind()) {
// Int52 path (64-bit only): truncate by zero-extending the low 32 bits.
1867 SpeculateStrictInt52Operand op1(this, node->child1());
1868 GPRTemporary result(this, Reuse, op1);
1869 GPRReg op1GPR = op1.gpr();
1870 GPRReg resultGPR = result.gpr();
1871 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1872 int32Result(resultGPR, node, DataFormatInt32);
1875 #endif // USE(JSVALUE64)
// Double path: try a hardware truncation, fall back to the toInt32 runtime
// routine when truncation fails.
1877 case DoubleRepUse: {
1878 GPRTemporary result(this);
1879 SpeculateDoubleOperand op1(this, node->child1());
1880 FPRReg fpr = op1.fpr();
1881 GPRReg gpr = result.gpr();
1882 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1884 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1886 int32Result(gpr, node);
// Generic path: dispatch on how the operand is currently materialized.
1892 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1893 case GeneratedOperandInteger: {
1894 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1895 GPRTemporary result(this, Reuse, op1);
1896 m_jit.move(op1.gpr(), result.gpr());
1897 int32Result(result.gpr(), node, op1.format());
// JSValue path, 64-bit flavor: distinguish int32 / double / non-cell.
1900 case GeneratedOperandJSValue: {
1901 GPRTemporary result(this);
1903 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1905 GPRReg gpr = op1.gpr();
1906 GPRReg resultGpr = result.gpr();
1907 FPRTemporary tempFpr(this);
1908 FPRReg fpr = tempFpr.fpr();
1910 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1911 JITCompiler::JumpList converted;
1913 if (node->child1().useKind() == NumberUse) {
1915 JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1917 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1919 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1922 JSValueRegs(gpr), node->child1(), ~SpecCell, branchIsCell(JSValueRegs(gpr)));
1924 // It's not a cell: so true turns into 1 and all else turns into 0.
1925 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1926 converted.append(m_jit.jump());
1928 isNumber.link(&m_jit);
1931 // First, if we get here we have a double encoded as a JSValue
1932 m_jit.move(gpr, resultGpr);
1933 unboxDouble(resultGpr, fpr);
// Call out to the runtime toInt32 with registers spilled around the call.
1935 silentSpillAllRegisters(resultGpr);
1936 callOperation(toInt32, resultGpr, fpr);
1937 silentFillAllRegisters(resultGpr);
1939 converted.append(m_jit.jump());
1941 isInteger.link(&m_jit);
1942 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1944 converted.link(&m_jit);
// JSValue path, 32_64 flavor: operate on separate tag/payload registers.
1946 Node* childNode = node->child1().node();
1947 VirtualRegister virtualRegister = childNode->virtualRegister();
1948 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1950 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1952 GPRReg payloadGPR = op1.payloadGPR();
1953 GPRReg resultGpr = result.gpr();
1955 JITCompiler::JumpList converted;
1957 if (info.registerFormat() == DataFormatJSInt32)
1958 m_jit.move(payloadGPR, resultGpr);
1960 GPRReg tagGPR = op1.tagGPR();
1961 FPRTemporary tempFpr(this);
1962 FPRReg fpr = tempFpr.fpr();
1963 FPRTemporary scratch(this);
1965 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
1967 if (node->child1().useKind() == NumberUse) {
1969 op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
1971 MacroAssembler::AboveOrEqual, tagGPR,
1972 TrustedImm32(JSValue::LowestTag)));
1974 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
1977 op1.jsValueRegs(), node->child1(), ~SpecCell,
1978 branchIsCell(op1.jsValueRegs()));
1981 // It's not a cell: so true turns into 1 and all else turns into 0.
1981 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
1982 m_jit.move(TrustedImm32(0), resultGpr);
1983 converted.append(m_jit.jump());
1985 isBoolean.link(&m_jit);
1986 m_jit.move(payloadGPR, resultGpr);
1987 converted.append(m_jit.jump());
1989 isNumber.link(&m_jit);
1992 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
1994 silentSpillAllRegisters(resultGpr);
1995 callOperation(toInt32, resultGpr, fpr);
1996 silentFillAllRegisters(resultGpr);
1998 converted.append(m_jit.jump());
2000 isInteger.link(&m_jit);
2001 m_jit.move(payloadGPR, resultGpr);
2003 converted.link(&m_jit);
2006 int32Result(resultGpr, node);
// Unknown generated type only occurs when compilation has already failed.
2009 case GeneratedOperandTypeUnknown:
2010 RELEASE_ASSERT(!m_compileOkay);
2013 RELEASE_ASSERT_NOT_REACHED();
2018 ASSERT(!m_compileOkay);
2023 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2025 if (doesOverflow(node->arithMode())) {
2026 // We know that this sometimes produces doubles. So produce a double every
2027 // time. This at least allows subsequent code to not have weird conditionals.
2029 SpeculateInt32Operand op1(this, node->child1());
2030 FPRTemporary result(this);
2032 GPRReg inputGPR = op1.gpr();
2033 FPRReg outputFPR = result.fpr();
2035 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2037 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2038 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2039 positive.link(&m_jit);
2041 doubleResult(outputFPR, node);
2045 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2047 SpeculateInt32Operand op1(this, node->child1());
2048 GPRTemporary result(this);
2050 m_jit.move(op1.gpr(), result.gpr());
2052 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2054 int32Result(result.gpr(), node, op1.format());
2057 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2059 SpeculateDoubleOperand op1(this, node->child1());
2060 FPRTemporary scratch(this);
2061 GPRTemporary result(this);
2063 FPRReg valueFPR = op1.fpr();
2064 FPRReg scratchFPR = scratch.fpr();
2065 GPRReg resultGPR = result.gpr();
2067 JITCompiler::JumpList failureCases;
2068 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2069 m_jit.branchConvertDoubleToInt32(
2070 valueFPR, resultGPR, failureCases, scratchFPR,
2071 shouldCheckNegativeZero(node->arithMode()));
2072 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2074 int32Result(resultGPR, node);
// Compiles DoubleRep: produces an unboxed double from the child, dispatching
// on the child's use kind. The visible paths handle a JSValue that is
// speculated to be a number (unboxing a boxed double or converting an int32)
// and, on 64-bit, a strict Int52 value converted via int64->double.
2077 void SpeculativeJIT::compileDoubleRep(Node* node)
2079 switch (node->child1().useKind()) {
2081 ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
// Fast path: abstract interpretation already proved the input is an int32,
// so a plain int-to-double conversion suffices — no type check needed.
2083 if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2084 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2085 FPRTemporary result(this);
2086 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2087 doubleResult(result.fpr(), node);
2091 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2092 FPRTemporary result(this);
2095 GPRTemporary temp(this);
2097 GPRReg op1GPR = op1.gpr();
2098 GPRReg tempGPR = temp.gpr();
2099 FPRReg resultFPR = result.fpr();
// JSVALUE64 encoding: values >= tagTypeNumberRegister are boxed int32s.
2101 JITCompiler::Jump isInteger = m_jit.branch64(
2102 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
// If the value could be a non-number, OSR-exit when the number-tag bits are
// absent (branchTest64 Zero against tagTypeNumberRegister => not a number).
2104 if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2106 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2107 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
// Double path: copy so unboxing does not clobber the operand register.
2110 m_jit.move(op1GPR, tempGPR);
2111 unboxDouble(tempGPR, resultFPR);
2112 JITCompiler::Jump done = m_jit.jump();
2114 isInteger.link(&m_jit);
2115 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2117 #else // USE(JSVALUE64) -> this is the 32_64 case
2118 FPRTemporary temp(this);
2120 GPRReg op1TagGPR = op1.tagGPR();
2121 GPRReg op1PayloadGPR = op1.payloadGPR();
2122 FPRReg tempFPR = temp.fpr();
2123 FPRReg resultFPR = result.fpr();
2125 JITCompiler::Jump isInteger = m_jit.branch32(
2126 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
// 32_64 encoding: tags >= LowestTag are non-double, non-int32 values; exit.
2128 if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2130 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2131 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2134 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2135 JITCompiler::Jump done = m_jit.jump();
2137 isInteger.link(&m_jit);
2138 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2140 #endif // USE(JSVALUE64)
2142 doubleResult(resultFPR, node);
// 64-bit only: strict Int52 input — a single int64-to-double conversion.
2148 SpeculateStrictInt52Operand value(this, node->child1());
2149 FPRTemporary result(this);
2151 GPRReg valueGPR = value.gpr();
2152 FPRReg resultFPR = result.fpr();
2154 m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2156 doubleResult(resultFPR, node);
2159 #endif // USE(JSVALUE64)
2162 RELEASE_ASSERT_NOT_REACHED();
// Compiles ValueRep: boxes an engine-internal representation (DoubleRep or,
// on 64-bit, strict Int52) back into a generic JSValue.
2167 void SpeculativeJIT::compileValueRep(Node* node)
2169 switch (node->child1().useKind()) {
2170 case DoubleRepUse: {
2171 SpeculateDoubleOperand value(this, node->child1());
2172 JSValueRegsTemporary result(this);
2174 FPRReg valueFPR = value.fpr();
2175 JSValueRegs resultRegs = result.regs();
2177 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2178 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2179 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2180 // local was purified.
// Canonicalize impure NaN bit patterns before boxing so the boxed value
// cannot collide with the tagged-value encoding.
2181 if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2182 m_jit.purifyNaN(valueFPR);
2184 boxDouble(valueFPR, resultRegs);
2186 jsValueResult(resultRegs, node);
// 64-bit only: box a strict (unshifted) Int52 into a JSValue.
2192 SpeculateStrictInt52Operand value(this, node->child1());
2193 GPRTemporary result(this);
2195 GPRReg valueGPR = value.gpr();
2196 GPRReg resultGPR = result.gpr();
2198 boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2200 jsValueResult(resultGPR, node);
2203 #endif // USE(JSVALUE64)
2206 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the [0, 255] byte range (Uint8Clamped semantics);
// body not visible in this excerpt.
2211 static double clampDoubleToByte(double d)
// Emits code that clamps the int32 in `result` to [0, 255] in place:
// values <= 0xff (unsigned compare, so negatives fail it) jump straight to
// the end; signed values > 0xff become 255; remaining (negative) values
// are zeroed.
2221 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
// Unsigned BelowOrEqual: a negative int32 reads as a huge unsigned value,
// so only genuine 0..255 inputs take this jump.
2223 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2224 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
// Negative input: clamp to 0 (xor is the canonical register-zeroing idiom).
2225 jit.xorPtr(result, result);
2226 MacroAssembler::Jump clamped = jit.jump();
2228 jit.move(JITCompiler::TrustedImm32(255), result);
2230 inBounds.link(&jit);
// Emits code that converts the double in `source` to a byte in `result`,
// clamped to [0, 255] with round-half behavior implemented as add-0.5 then
// truncate. NaN (picked up by the unordered compare) clamps to 0.
2233 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2235 // Unordered compare so we pick up NaN
2236 static const double zero = 0;
2237 static const double byteMax = 255;
2238 static const double half = 0.5;
2239 jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2240 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2241 jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2242 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
// In range: round by adding 0.5 and truncating toward zero.
2244 jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2245 // FIXME: This should probably just use a floating point round!
2246 // https://bugs.webkit.org/show_bug.cgi?id=72054
2247 jit.addDouble(source, scratch);
2248 jit.truncateDoubleToInt32(scratch, result);
2249 MacroAssembler::Jump truncatedInt = jit.jump();
// <= 0 or NaN: result is 0.
2251 tooSmall.link(&jit);
2252 jit.xorPtr(result, result);
2253 MacroAssembler::Jump zeroed = jit.jump();
// > 255: result is 255.
2256 jit.move(JITCompiler::TrustedImm32(255), result);
2258 truncatedInt.link(&jit);
// Returns a jump taken when `indexGPR` is out of bounds for the typed array
// in `baseGPR`, or an unset Jump when no check is needed: PutByValAlias
// implies the bounds were already checked, and a constant index against a
// foldable view with a known length can be proven in bounds at compile time.
2263 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2265 if (node->op() == PutByValAlias)
2266 return JITCompiler::Jump();
2267 if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2268 uint32_t length = view->length();
2269 Node* indexNode = m_jit.graph().child(node, 1).node();
// Constant index statically below the known length: no runtime check.
2270 if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2271 return JITCompiler::Jump();
// Unsigned AboveOrEqual doubles as a negative-index check.
2272 return m_jit.branch32(
2273 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
// General case: compare against the view's length loaded from memory.
2275 return m_jit.branch32(
2276 MacroAssembler::AboveOrEqual, indexGPR,
2277 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
// Emits a typed-array bounds check that OSR-exits (OutOfBounds) when the
// index is invalid; a no-op when jumpForTypedArrayOutOfBounds proves the
// access safe and returns an unset jump.
2280 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2282 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2285 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
// Compiles GetByVal on an integer-typed array. Loads the element with the
// width/signedness implied by `type`, then picks the result representation:
// Int32 when the value must fit, Int52 (64-bit) for uint32 that may exceed
// int32, or a double as the last resort for large uint32 values.
2288 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2290 ASSERT(isInt(type));
2292 SpeculateCellOperand base(this, node->child1());
2293 SpeculateStrictInt32Operand property(this, node->child2());
2294 StorageOperand storage(this, node->child3());
2296 GPRReg baseReg = base.gpr();
2297 GPRReg propertyReg = property.gpr();
2298 GPRReg storageReg = storage.gpr();
2300 GPRTemporary result(this);
2301 GPRReg resultReg = result.gpr();
2303 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2305 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
// Element load sized by the array type; signed variants sign-extend.
2306 switch (elementSize(type)) {
2309 m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2311 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2315 m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2317 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2320 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Sub-32-bit or signed 32-bit loads always fit in an int32.
2325 if (elementSize(type) < 4 || isSigned(type)) {
2326 int32Result(resultReg, node);
// Remaining case: uint32 element, which may not fit in int32.
2330 ASSERT(elementSize(type) == 4 && !isSigned(type));
2331 if (node->shouldSpeculateInt32()) {
// Speculate the high bit is clear; OSR-exit on values >= 2^31.
2332 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2333 int32Result(resultReg, node);
// 64-bit: any uint32 fits losslessly in Int52 after zero extension.
2338 if (node->shouldSpeculateMachineInt()) {
2339 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2340 strictInt52Result(resultReg, node);
// Fallback: produce a double; if the int32-interpreted value was negative,
// add 2^32 to recover the unsigned value.
2345 FPRTemporary fresult(this);
2346 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2347 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2348 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2349 positive.link(&m_jit);
2350 doubleResult(fresult.fpr(), node);
// Compiles PutByVal into an integer-typed array. Materializes the value to
// store into a scratch GPR — handling constants, Int32, Int52 (64-bit) and
// DoubleRep inputs, with Uint8Clamped clamping where required — then bounds
// checks and stores with the element width implied by `type`. Out-of-bounds
// stores either OSR-exit (in-bounds array modes) or are silently skipped.
2353 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2355 ASSERT(isInt(type));
2357 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2358 GPRReg storageReg = storage.gpr();
2360 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2363 GPRReg valueGPR = InvalidGPRReg;
// Constant value: fold the clamp/ToInt32 at compile time.
2365 if (valueUse->isConstant()) {
2366 JSValue jsValue = valueUse->asJSValue();
2367 if (!jsValue.isNumber()) {
// A non-number constant here means the speculation can never succeed.
2368 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2372 double d = jsValue.asNumber();
2373 if (isClamped(type)) {
2374 ASSERT(elementSize(type) == 1);
2375 d = clampDoubleToByte(d);
2377 GPRTemporary scratch(this);
2378 GPRReg scratchReg = scratch.gpr();
2379 m_jit.move(Imm32(toInt32(d)), scratchReg);
2380 value.adopt(scratch);
2381 valueGPR = scratchReg;
2383 switch (valueUse.useKind()) {
// Int32 value: copy to scratch so clamping can mutate it freely.
2385 SpeculateInt32Operand valueOp(this, valueUse);
2386 GPRTemporary scratch(this);
2387 GPRReg scratchReg = scratch.gpr();
2388 m_jit.move(valueOp.gpr(), scratchReg);
2389 if (isClamped(type)) {
2390 ASSERT(elementSize(type) == 1);
2391 compileClampIntegerToByte(m_jit, scratchReg);
2393 value.adopt(scratch);
2394 valueGPR = scratchReg;
// 64-bit only: strict Int52 value, clamped with 64-bit compares.
2400 SpeculateStrictInt52Operand valueOp(this, valueUse);
2401 GPRTemporary scratch(this);
2402 GPRReg scratchReg = scratch.gpr();
2403 m_jit.move(valueOp.gpr(), scratchReg);
2404 if (isClamped(type)) {
2405 ASSERT(elementSize(type) == 1);
// Unsigned BelowOrEqual: negatives read as huge values and fall through.
2406 MacroAssembler::Jump inBounds = m_jit.branch64(
2407 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2408 MacroAssembler::Jump tooBig = m_jit.branch64(
2409 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2410 m_jit.move(TrustedImm32(0), scratchReg);
2411 MacroAssembler::Jump clamped = m_jit.jump();
2412 tooBig.link(&m_jit);
2413 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2414 clamped.link(&m_jit);
2415 inBounds.link(&m_jit);
2417 value.adopt(scratch);
2418 valueGPR = scratchReg;
2421 #endif // USE(JSVALUE64)
2423 case DoubleRepUse: {
2424 if (isClamped(type)) {
2425 ASSERT(elementSize(type) == 1);
2426 SpeculateDoubleOperand valueOp(this, valueUse);
2427 GPRTemporary result(this);
2428 FPRTemporary floatScratch(this);
2429 FPRReg fpr = valueOp.fpr();
2430 GPRReg gpr = result.gpr();
2431 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2432 value.adopt(result);
// Non-clamped double: NaN becomes 0; otherwise truncate, with a slow-path
// call to toInt32 when the inline truncation fails (out-of-range doubles).
2435 SpeculateDoubleOperand valueOp(this, valueUse);
2436 GPRTemporary result(this);
2437 FPRReg fpr = valueOp.fpr();
2438 GPRReg gpr = result.gpr();
2439 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2440 m_jit.xorPtr(gpr, gpr);
2441 MacroAssembler::Jump fixed = m_jit.jump();
2442 notNaN.link(&m_jit);
2444 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2445 fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2447 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2450 value.adopt(result);
2457 RELEASE_ASSERT_NOT_REACHED();
// The value register must be distinct from base/property/storage because
// the store below reads all of them.
2462 ASSERT_UNUSED(valueGPR, valueGPR != property);
2463 ASSERT(valueGPR != base);
2464 ASSERT(valueGPR != storageReg);
2465 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
// In-bounds array modes turn an out-of-bounds store into an OSR exit;
// otherwise the jump survives and skips the store (silent no-op per spec).
2466 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2467 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2468 outOfBounds = MacroAssembler::Jump();
2471 switch (elementSize(type)) {
2473 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2476 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2479 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2484 if (outOfBounds.isSet())
2485 outOfBounds.link(&m_jit);
// Compiles GetByVal on a float-typed array: bounds check, then a 4-byte
// float load widened to double, or a direct 8-byte double load.
2489 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2491 ASSERT(isFloat(type));
2493 SpeculateCellOperand base(this, node->child1());
2494 SpeculateStrictInt32Operand property(this, node->child2());
2495 StorageOperand storage(this, node->child3());
2497 GPRReg baseReg = base.gpr();
2498 GPRReg propertyReg = property.gpr();
2499 GPRReg storageReg = storage.gpr();
2501 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2503 FPRTemporary result(this);
2504 FPRReg resultReg = result.fpr();
2505 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2506 switch (elementSize(type)) {
// Float32: load then widen, since DFG doubles are the only FP format.
2508 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2509 m_jit.convertFloatToDouble(resultReg, resultReg);
2512 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2516 RELEASE_ASSERT_NOT_REACHED();
2519 doubleResult(resultReg, node);
// Compiles PutByVal into a float-typed array: bounds check (OSR exit for
// in-bounds modes, silent skip otherwise), then store as float32 (narrowed
// via a scratch FPR) or as a raw double depending on element size.
2522 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2524 ASSERT(isFloat(type));
2526 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2527 GPRReg storageReg = storage.gpr();
2529 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2530 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2532 SpeculateDoubleOperand valueOp(this, valueUse);
2533 FPRTemporary scratch(this);
2534 FPRReg valueFPR = valueOp.fpr();
2535 FPRReg scratchFPR = scratch.fpr();
2537 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2539 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
// In-bounds array modes OSR-exit on a bad index; otherwise the live jump
// bypasses the store below (out-of-bounds typed-array puts are no-ops).
2540 if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2541 speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2542 outOfBounds = MacroAssembler::Jump();
2545 switch (elementSize(type)) {
// Float32: narrow into the scratch register so valueFPR stays intact.
2547 m_jit.moveDouble(valueFPR, scratchFPR);
2548 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2549 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2553 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2556 RELEASE_ASSERT_NOT_REACHED();
2558 if (outOfBounds.isSet())
2559 outOfBounds.link(&m_jit);
// Emits the prototype-chain walk for `instanceof` once the value is known to
// be a cell: checks the prototype is an object, then repeatedly loads the
// value's structure and its prototype, comparing against prototypeReg.
// Leaves the boxed boolean result (JSVALUE64) or 0/1 (32_64) in scratchReg.
2563 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2565 // Check that prototype is an object.
2566 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfCellNotObject(prototypeReg));
2568 // Initialize scratchReg with the value being checked.
2569 m_jit.move(valueReg, scratchReg);
2571 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2572 MacroAssembler::Label loop(&m_jit);
2573 m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2574 m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2575 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
// Continue while the loaded prototype is still a cell (chain not exhausted);
// the two forms correspond to the 64-bit and 32_64 value encodings.
2577 branchIsCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2579 m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2582 // No match - result is false.
2584 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2586 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2588 MacroAssembler::Jump putResult = m_jit.jump();
2590 isInstance.link(&m_jit);
2592 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2594 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2597 putResult.link(&m_jit);
// Compiles InstanceOf. The untyped path guards on the value being a cell
// (non-cells are immediately false) before the chain walk; the typed path
// speculates both operands are cells and goes straight to the walk.
2600 void SpeculativeJIT::compileInstanceOf(Node* node)
2602 if (node->child1().useKind() == UntypedUse) {
2603 // It might not be a cell. Speculate less aggressively.
2604 // Or: it might only be used once (i.e. by us), so we get zero benefit
2605 // from speculating any more aggressively than we absolutely need to.
2607 JSValueOperand value(this, node->child1());
2608 SpeculateCellOperand prototype(this, node->child2());
2609 GPRTemporary scratch(this);
2610 GPRTemporary scratch2(this);
2612 GPRReg prototypeReg = prototype.gpr();
2613 GPRReg scratchReg = scratch.gpr();
2614 GPRReg scratch2Reg = scratch2.gpr();
// Non-cell values can never be instances: short-circuit to false.
2616 MacroAssembler::Jump isCell = branchIsCell(value.jsValueRegs());
2617 GPRReg valueReg = value.jsValueRegs().payloadGPR();
2618 moveFalseTo(scratchReg);
2620 MacroAssembler::Jump done = m_jit.jump();
2622 isCell.link(&m_jit);
2624 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2628 blessedBooleanResult(scratchReg, node);
// Cell-speculated path: both children are known cells.
2632 SpeculateCellOperand value(this, node->child1());
2633 SpeculateCellOperand prototype(this, node->child2());
2635 GPRTemporary scratch(this);
2636 GPRTemporary scratch2(this);
2638 GPRReg valueReg = value.gpr();
2639 GPRReg prototypeReg = prototype.gpr();
2640 GPRReg scratchReg = scratch.gpr();
2641 GPRReg scratch2Reg = scratch2.gpr();
2643 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2645 blessedBooleanResult(scratchReg, node);
// Compiles ValueAdd/ArithAdd, dispatching on binaryUseKind: int32 (with
// constant-operand fast paths and optional overflow checks plus speculation
// recovery), Int52 on 64-bit (unchecked when neither input can be a full
// Int52, otherwise with an overflow exit), and plain double addition.
2648 void SpeculativeJIT::compileAdd(Node* node)
2650 switch (node->binaryUseKind()) {
// Int32 addition never needs a negative-zero check (0 + -0 is 0 in int32).
2652 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
// Fast path: left operand is a constant.
2654 if (node->child1()->isInt32Constant()) {
2655 int32_t imm1 = node->child1()->asInt32();
2656 SpeculateInt32Operand op2(this, node->child2());
2657 GPRTemporary result(this);
2659 if (!shouldCheckOverflow(node->arithMode())) {
2660 m_jit.move(op2.gpr(), result.gpr());
2661 m_jit.add32(Imm32(imm1), result.gpr());
2663 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2665 int32Result(result.gpr(), node);
// Fast path: right operand is a constant.
2669 if (node->child2()->isInt32Constant()) {
2670 SpeculateInt32Operand op1(this, node->child1());
2671 int32_t imm2 = node->child2()->asInt32();
2672 GPRTemporary result(this);
2674 if (!shouldCheckOverflow(node->arithMode())) {
2675 m_jit.move(op1.gpr(), result.gpr());
2676 m_jit.add32(Imm32(imm2), result.gpr());
2678 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2680 int32Result(result.gpr(), node);
// General register/register case.
2684 SpeculateInt32Operand op1(this, node->child1());
2685 SpeculateInt32Operand op2(this, node->child2());
2686 GPRTemporary result(this, Reuse, op1, op2);
2688 GPRReg gpr1 = op1.gpr();
2689 GPRReg gpr2 = op2.gpr();
2690 GPRReg gprResult = result.gpr();
2692 if (!shouldCheckOverflow(node->arithMode())) {
2693 if (gpr1 == gprResult)
2694 m_jit.add32(gpr2, gprResult);
2696 m_jit.move(gpr2, gprResult);
2697 m_jit.add32(gpr1, gprResult);
2700 MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
// If the result reused an operand register, record a recovery so OSR exit
// can reconstruct the clobbered operand by subtracting the other one.
2702 if (gpr1 == gprResult)
2703 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2704 else if (gpr2 == gprResult)
2705 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2707 speculationCheck(Overflow, JSValueRegs(), 0, check);
2710 int32Result(gprResult, node);
// Int52 path (64-bit only).
2716 ASSERT(shouldCheckOverflow(node->arithMode()));
2717 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2719 // Will we need an overflow check? If we can prove that neither input can be
2720 // Int52 then the overflow check will not be necessary.
2721 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2722 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2723 SpeculateWhicheverInt52Operand op1(this, node->child1());
2724 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2725 GPRTemporary result(this, Reuse, op1);
2726 m_jit.move(op1.gpr(), result.gpr());
2727 m_jit.add64(op2.gpr(), result.gpr());
2728 int52Result(result.gpr(), node, op1.format());
// Otherwise add with a 64-bit overflow check and Int52Overflow exit.
2732 SpeculateInt52Operand op1(this, node->child1());
2733 SpeculateInt52Operand op2(this, node->child2());
2734 GPRTemporary result(this);
2735 m_jit.move(op1.gpr(), result.gpr());
2737 Int52Overflow, JSValueRegs(), 0,
2738 m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2739 int52Result(result.gpr(), node);
2742 #endif // USE(JSVALUE64)
2744 case DoubleRepUse: {
2745 SpeculateDoubleOperand op1(this, node->child1());
2746 SpeculateDoubleOperand op2(this, node->child2());
2747 FPRTemporary result(this, op1, op2);
2749 FPRReg reg1 = op1.fpr();
2750 FPRReg reg2 = op2.fpr();
2751 m_jit.addDouble(reg1, reg2, result.fpr());
2753 doubleResult(result.fpr(), node);
2758 RELEASE_ASSERT_NOT_REACHED();
// Compiles MakeRope: inline-allocates a JSRopeString from two or three
// string fibers, filling in fibers, merged flags (8-bit-ness survives only
// if all fibers are 8-bit) and summed length, with overflow and negative-
// length checks. Allocation failure falls to an operationMakeRope* call.
2763 void SpeculativeJIT::compileMakeRope(Node* node)
2765 ASSERT(node->child1().useKind() == KnownStringUse);
2766 ASSERT(node->child2().useKind() == KnownStringUse);
2767 ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2769 SpeculateCellOperand op1(this, node->child1());
2770 SpeculateCellOperand op2(this, node->child2());
2771 SpeculateCellOperand op3(this, node->child3());
2772 GPRTemporary result(this);
2773 GPRTemporary allocator(this);
2774 GPRTemporary scratch(this);
2778 opGPRs[0] = op1.gpr();
2779 opGPRs[1] = op2.gpr();
2780 if (node->child3()) {
2781 opGPRs[2] = op3.gpr();
2784 opGPRs[2] = InvalidGPRReg;
2787 GPRReg resultGPR = result.gpr();
2788 GPRReg allocatorGPR = allocator.gpr();
2789 GPRReg scratchGPR = scratch.gpr();
// Inline bump allocation of the rope cell; failure paths collect in slowPath.
2791 JITCompiler::JumpList slowPath;
2792 MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
2793 m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2794 emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
// Null value pointer marks the string as a rope; unused fiber slots are
// cleared so the GC never scans stale pointers.
2796 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2797 for (unsigned i = 0; i < numOpGPRs; ++i)
2798 m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2799 for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2800 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
// scratchGPR accumulates ANDed flags; allocatorGPR (now free) accumulates
// the total length, starting from the first fiber.
2801 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2802 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2803 if (!ASSERT_DISABLED) {
2804 JITCompiler::Jump ok = m_jit.branch32(
2805 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2806 m_jit.abortWithReason(DFGNegativeStringLength);
// Fold in the remaining fibers: AND the flags, add the lengths with an
// overflow exit (string length must stay within int32).
2809 for (unsigned i = 1; i < numOpGPRs; ++i) {
2810 m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2812 Uncountable, JSValueSource(), nullptr,
2814 JITCompiler::Overflow,
2815 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2817 m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2818 m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2819 if (!ASSERT_DISABLED) {
2820 JITCompiler::Jump ok = m_jit.branch32(
2821 JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2822 m_jit.abortWithReason(DFGNegativeStringLength);
2825 m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
// Slow path: call the arity-matching runtime rope constructor.
2827 switch (numOpGPRs) {
2829 addSlowPathGenerator(slowPathCall(
2830 slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2833 addSlowPathGenerator(slowPathCall(
2834 slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2837 RELEASE_ASSERT_NOT_REACHED();
2841 cellResult(resultGPR, node);
// Compiles ArithSub, mirroring compileAdd's structure: int32 subtraction
// (constant-operand fast paths, optional overflow exits), Int52 on 64-bit
// (unchecked when inputs provably fit, else with overflow exit), and
// double subtraction.
2844 void SpeculativeJIT::compileArithSub(Node* node)
2846 switch (node->binaryUseKind()) {
// Int32 subtraction cannot produce -0, so no negative-zero check.
2848 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
// Fast path: op1 - constant.
2850 if (node->child2()->isNumberConstant()) {
2851 SpeculateInt32Operand op1(this, node->child1());
2852 int32_t imm2 = node->child2()->asInt32();
2853 GPRTemporary result(this);
2855 if (!shouldCheckOverflow(node->arithMode())) {
2856 m_jit.move(op1.gpr(), result.gpr());
2857 m_jit.sub32(Imm32(imm2), result.gpr());
2859 GPRTemporary scratch(this);
2860 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2863 int32Result(result.gpr(), node);
// Fast path: constant - op2 (constant materialized first, then subtracted).
2867 if (node->child1()->isNumberConstant()) {
2868 int32_t imm1 = node->child1()->asInt32();
2869 SpeculateInt32Operand op2(this, node->child2());
2870 GPRTemporary result(this);
2872 m_jit.move(Imm32(imm1), result.gpr());
2873 if (!shouldCheckOverflow(node->arithMode()))
2874 m_jit.sub32(op2.gpr(), result.gpr());
2876 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2878 int32Result(result.gpr(), node);
// General register/register case.
2882 SpeculateInt32Operand op1(this, node->child1());
2883 SpeculateInt32Operand op2(this, node->child2());
2884 GPRTemporary result(this);
2886 if (!shouldCheckOverflow(node->arithMode())) {
2887 m_jit.move(op1.gpr(), result.gpr());
2888 m_jit.sub32(op2.gpr(), result.gpr());
2890 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2892 int32Result(result.gpr(), node);
// Int52 path (64-bit only).
2898 ASSERT(shouldCheckOverflow(node->arithMode()));
2899 ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2901 // Will we need an overflow check? If we can prove that neither input can be
2902 // Int52 then the overflow check will not be necessary.
2903 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2904 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2905 SpeculateWhicheverInt52Operand op1(this, node->child1());
2906 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2907 GPRTemporary result(this, Reuse, op1);
2908 m_jit.move(op1.gpr(), result.gpr());
2909 m_jit.sub64(op2.gpr(), result.gpr());
2910 int52Result(result.gpr(), node, op1.format());
// Otherwise subtract with a 64-bit overflow check and Int52Overflow exit.
2914 SpeculateInt52Operand op1(this, node->child1());
2915 SpeculateInt52Operand op2(this, node->child2());
2916 GPRTemporary result(this);
2917 m_jit.move(op1.gpr(), result.gpr());
2919 Int52Overflow, JSValueRegs(), 0,
2920 m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2921 int52Result(result.gpr(), node);
2924 #endif // USE(JSVALUE64)
2926 case DoubleRepUse: {
2927 SpeculateDoubleOperand op1(this, node->child1());
2928 SpeculateDoubleOperand op2(this, node->child2());
2929 FPRTemporary result(this, op1);
2931 FPRReg reg1 = op1.fpr();
2932 FPRReg reg2 = op2.fpr();
2933 m_jit.subDouble(reg1, reg2, result.fpr());
2935 doubleResult(result.fpr(), node);
2940 RELEASE_ASSERT_NOT_REACHED();
// Compiles ArithNegate: int32 negation (with optional overflow and
// negative-zero exits), Int52 negation on 64-bit, and double negation.
2945 void SpeculativeJIT::compileArithNegate(Node* node)
2947 switch (node->child1().useKind()) {
2949 SpeculateInt32Operand op1(this, node->child1());
2950 GPRTemporary result(this);
2952 m_jit.move(op1.gpr(), result.gpr());
2954 // Note: there is no notion of being not used as a number, but someone
2955 // caring about negative zero.
2957 if (!shouldCheckOverflow(node->arithMode()))
2958 m_jit.neg32(result.gpr());
2959 else if (!shouldCheckNegativeZero(node->arithMode()))
2960 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
// With a negative-zero check: 0 would produce -0 and INT_MIN would
// overflow; masking with 0x7fffffff catches both (only 0 and INT_MIN have
// all low 31 bits clear), so a single exit covers the two bad inputs.
2962 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
2963 m_jit.neg32(result.gpr());
2966 int32Result(result.gpr(), node);
// Int52 path (64-bit only).
2972 ASSERT(shouldCheckOverflow(node->arithMode()));
// If the input provably fits below full Int52 range, plain neg64 cannot
// overflow; only the -0 check may remain.
2974 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
2975 SpeculateWhicheverInt52Operand op1(this, node->child1());
2976 GPRTemporary result(this);
2977 GPRReg op1GPR = op1.gpr();
2978 GPRReg resultGPR = result.gpr();
2979 m_jit.move(op1GPR, resultGPR);
2980 m_jit.neg64(resultGPR);
2981 if (shouldCheckNegativeZero(node->arithMode())) {
2983 NegativeZero, JSValueRegs(), 0,
2984 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
2986 int52Result(resultGPR, node, op1.format());
// Otherwise negate with a 64-bit overflow check plus optional -0 exit.
2990 SpeculateInt52Operand op1(this, node->child1());
2991 GPRTemporary result(this);
2992 GPRReg op1GPR = op1.gpr();
2993 GPRReg resultGPR = result.gpr();
2994 m_jit.move(op1GPR, resultGPR);
2996 Int52Overflow, JSValueRegs(), 0,
2997 m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
2998 if (shouldCheckNegativeZero(node->arithMode())) {
3000 NegativeZero, JSValueRegs(), 0,
3001 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3003 int52Result(resultGPR, node);
3006 #endif // USE(JSVALUE64)
3008 case DoubleRepUse: {
3009 SpeculateDoubleOperand op1(this, node->child1());
3010 FPRTemporary result(this);
3012 m_jit.negateDouble(op1.fpr(), result.fpr());
3014 doubleResult(result.fpr(), node);
3019 RELEASE_ASSERT_NOT_REACHED();
// Compiles ArithMul: int32 multiply (truncated or overflow-checked, with an
// optional negative-zero exit when the product is 0 and either operand is
// negative), Int52 multiply on 64-bit via the left-shifted-operand trick
// described inline, and double multiply.
3023 void SpeculativeJIT::compileArithMul(Node* node)
3025 switch (node->binaryUseKind()) {
3027 SpeculateInt32Operand op1(this, node->child1());
3028 SpeculateInt32Operand op2(this, node->child2());
3029 GPRTemporary result(this);
3031 GPRReg reg1 = op1.gpr();
3032 GPRReg reg2 = op2.gpr();
3034 // We can perform truncated multiplications if we get to this point, because if the
3035 // fixup phase could not prove that it would be safe, it would have turned us into
3036 // a double multiplication.
3037 if (!shouldCheckOverflow(node->arithMode())) {
3038 m_jit.move(reg1, result.gpr());
3039 m_jit.mul32(reg2, result.gpr());
3042 Overflow, JSValueRegs(), 0,
3043 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3046 // Check for negative zero, if the users of this node care about such things.
// A zero product with a negative operand means the true result is -0,
// which int32 cannot represent: exit.
3047 if (shouldCheckNegativeZero(node->arithMode())) {
3048 MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3049 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3050 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3051 resultNonZero.link(&m_jit);
3054 int32Result(result.gpr(), node);
// Int52 path (64-bit only).
3060 ASSERT(shouldCheckOverflow(node->arithMode()));
3062 // This is super clever. We want to do an int52 multiplication and check the
3063 // int52 overflow bit. There is no direct hardware support for this, but we do
3064 // have the ability to do an int64 multiplication and check the int64 overflow
3065 // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3066 // registers, with the high 12 bits being sign-extended. We can do:
3070 // This will give us a left-shifted int52 (value is in high 52 bits, low 16
3071 // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3072 // multiplication overflows is identical to whether the 'a * b' 52-bit
3073 // multiplication overflows.
3075 // In our nomenclature, this is:
3077 // strictInt52(a) * int52(b) => int52
3079 // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
3082 // We don't care which of op1 or op2 serves as the left-shifted operand, so
3083 // we just do whatever is more convenient for op1 and have op2 do the
3084 // opposite. This ensures that we do at most one shift.
3086 SpeculateWhicheverInt52Operand op1(this, node->child1());
3087 SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3088 GPRTemporary result(this);
3090 GPRReg op1GPR = op1.gpr();
3091 GPRReg op2GPR = op2.gpr();
3092 GPRReg resultGPR = result.gpr();
3094 m_jit.move(op1GPR, resultGPR);
3096 Int52Overflow, JSValueRegs(), 0,
3097 m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
// Same -0 rule as the int32 path, using 64-bit compares.
3099 if (shouldCheckNegativeZero(node->arithMode())) {
3100 MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3101 MacroAssembler::NonZero, resultGPR);
3103 NegativeZero, JSValueRegs(), 0,
3104 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3106 NegativeZero, JSValueRegs(), 0,
3107 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3108 resultNonZero.link(&m_jit);
3111 int52Result(resultGPR, node);
3114 #endif // USE(JSVALUE64)
3116 case DoubleRepUse: {
3117 SpeculateDoubleOperand op1(this, node->child1());
3118 SpeculateDoubleOperand op2(this, node->child2());
3119 FPRTemporary result(this, op1, op2);
3121 FPRReg reg1 = op1.fpr();
3122 FPRReg reg2 = op2.fpr();
3124 m_jit.mulDouble(reg1, reg2, result.fpr());
3126 doubleResult(result.fpr(), node);
3131 RELEASE_ASSERT_NOT_REACHED();
// Emits speculative code for an ArithDiv node.
// Int32Use operands take a per-architecture integer fast path (x86 idivl,
// ARM64/ARMv7s sdiv) with speculation checks for division by zero,
// INT32_MIN / -1, negative zero, and a non-zero remainder (which would
// require a double result). DoubleRepUse is a plain double divide.
// NOTE(review): several structural lines (case labels, braces, returns)
// are not visible in this excerpt of the file.
3136 void SpeculativeJIT::compileArithDiv(Node* node)
3138 switch (node->binaryUseKind()) {
3140 #if CPU(X86) || CPU(X86_64)
3141 SpeculateInt32Operand op1(this, node->child1());
3142 SpeculateInt32Operand op2(this, node->child2());
// idivl implicitly writes quotient to eax and remainder to edx, so both
// registers are pinned for the duration of the divide.
3143 GPRTemporary eax(this, X86Registers::eax);
3144 GPRTemporary edx(this, X86Registers::edx);
3145 GPRReg op1GPR = op1.gpr();
3146 GPRReg op2GPR = op2.gpr();
// If op2 landed in eax or edx it would be clobbered by the divide; in that
// case allocate a fresh register (op2TempGPR) and copy op2 into it later.
3150 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3151 op2TempGPR = allocate();
3154 op2TempGPR = InvalidGPRReg;
// Pick a scratch register guaranteed not to alias op1: whichever of
// eax/edx op1 does not occupy.
3155 if (op1GPR == X86Registers::eax)
3156 temp = X86Registers::edx;
3158 temp = X86Registers::eax;
3161 ASSERT(temp != op1GPR);
3162 ASSERT(temp != op2GPR);
// Unsigned trick: (op2 + 1) > 1 holds iff op2 is neither 0 nor -1. Those
// are the only denominators that need special handling before idivl, so
// everything else jumps straight to safeDenominator.
3164 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3166 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3168 JITCompiler::JumpList done;
3169 if (shouldCheckOverflow(node->arithMode())) {
// Checked mode: divide-by-zero and INT32_MIN / -1 (the only overflowing
// quotient) are speculation failures.
3170 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3171 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3173 // This is the case where we convert the result to an int after we're done, and we
3174 // already know that the denominator is either -1 or 0. So, if the denominator is
3175 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3176 // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3177 // are happy to fall through to a normal division, since we're just dividing
3178 // something by negative 1.
3180 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3181 m_jit.move(TrustedImm32(0), eax.gpr());
3182 done.append(m_jit.jump());
3184 notZero.link(&m_jit);
3185 JITCompiler::Jump notNeg2ToThe31 =
3186 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3187 m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3188 done.append(m_jit.jump());
3190 notNeg2ToThe31.link(&m_jit);
3193 safeDenominator.link(&m_jit);
3195 // If the user cares about negative zero, then speculate that we're not about
3196 // to produce negative zero.
3197 if (shouldCheckNegativeZero(node->arithMode())) {
// 0 / negative would produce -0 under double semantics, which an int32
// cannot represent; bail when the numerator is zero and the denominator
// is negative.
3198 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3199 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3200 numeratorNonZero.link(&m_jit);
3203 if (op2TempGPR != InvalidGPRReg) {
3204 m_jit.move(op2GPR, op2TempGPR);
3205 op2GPR = op2TempGPR;
// cdq sign-extends eax into edx:eax; idivl leaves the quotient in eax and
// the remainder in edx.
3208 m_jit.move(op1GPR, eax.gpr());
3209 m_jit.assembler().cdq();
3210 m_jit.assembler().idivl_r(op2GPR);
// Presumably releases op2TempGPR here — the conditional's body is not
// visible in this excerpt.
3212 if (op2TempGPR != InvalidGPRReg)
3215 // Check that there was no remainder. If there had been, then we'd be obligated to
3216 // produce a double result instead.
3217 if (shouldCheckOverflow(node->arithMode()))
3218 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3221 int32Result(eax.gpr(), node);
3222 #elif CPU(APPLE_ARMV7S) || CPU(ARM64)
3223 SpeculateInt32Operand op1(this, node->child1());
3224 SpeculateInt32Operand op2(this, node->child2());
3225 GPRReg op1GPR = op1.gpr();
3226 GPRReg op2GPR = op2.gpr();
3227 GPRTemporary quotient(this);
3228 GPRTemporary multiplyAnswer(this);
3230 // If the user cares about negative zero, then speculate that we're not about
3231 // to produce negative zero.
3232 if (shouldCheckNegativeZero(node->arithMode())) {
3233 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3234 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3235 numeratorNonZero.link(&m_jit);
// No pre-divide zero/overflow checks here: when overflow checking is on,
// the multiply-back verification below catches any case where the
// hardware quotient does not exactly reproduce the dividend.
3238 m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3240 // Check that there was no remainder. If there had been, then we'd be obligated to
3241 // produce a double result instead.
3242 if (shouldCheckOverflow(node->arithMode())) {
// Verify quotient * divisor == dividend (with overflow of the multiply
// itself also treated as failure).
3243 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3244 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3247 int32Result(quotient.gpr(), node);
// No integer-division instruction on this target.
3249 RELEASE_ASSERT_NOT_REACHED();
3254 case DoubleRepUse: {
3255 SpeculateDoubleOperand op1(this, node->child1());
3256 SpeculateDoubleOperand op2(this, node->child2());
3257 FPRTemporary result(this, op1);
3259 FPRReg reg1 = op1.fpr();
3260 FPRReg reg2 = op2.fpr();
// Double division needs no checks; IEEE semantics handle 0 and overflow.
3261 m_jit.divDouble(reg1, reg2, result.fpr());
3263 doubleResult(result.fpr(), node);
// Unexpected use kind for ArithDiv.
3268 RELEASE_ASSERT_NOT_REACHED();
// Emits speculative code for an ArithMod node.
// Int32 paths, in order of preference:
//   1. positive power-of-two constant divisor: branchless shift/mask trick;
//   2. (x86 only) any other constant divisor that is neither 0 nor -1:
//      idivl against the immediate materialized in a scratch register;
//   3. generic x86 idivl with slow paths for denominators 0 and -1;
//   4. generic ARM64/ARMv7s sdiv + multiply-back + subtract.
// DoubleRepUse falls through to a call to fmod.
// NOTE(review): several structural lines (case labels, braces, returns,
// else branches) are not visible in this excerpt of the file.
3273 void SpeculativeJIT::compileArithMod(Node* node)
3275 switch (node->binaryUseKind()) {
3277 // In the fast path, the dividend value could be the final result
3278 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3279 SpeculateStrictInt32Operand op1(this, node->child1());
3281 if (node->child2()->isInt32Constant()) {
3282 int32_t divisor = node->child2()->asInt32();
// Fast path for a positive power-of-two constant divisor: compute the
// remainder without any divide instruction.
3283 if (divisor > 1 && hasOneBitSet(divisor)) {
3284 unsigned logarithm = WTF::fastLog2(divisor);
3285 GPRReg dividendGPR = op1.gpr();
3286 GPRTemporary result(this);
3287 GPRReg resultGPR = result.gpr();
3289 // This is what LLVM generates. It's pretty crazy. Here's my
3290 // attempt at understanding it.
3292 // First, compute either divisor - 1, or 0, depending on whether
3293 // the dividend is negative:
3295 // If dividend < 0: resultGPR = divisor - 1
3296 // If dividend >= 0: resultGPR = 0
// Arithmetic shift by 31 smears the sign bit (0 or -1), then the
// unsigned shift keeps exactly `logarithm` low bits, i.e. divisor - 1.
3297 m_jit.move(dividendGPR, resultGPR);
3298 m_jit.rshift32(TrustedImm32(31), resultGPR);
3299 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3301 // Add in the dividend, so that:
3303 // If dividend < 0: resultGPR = dividend + divisor - 1
3304 // If dividend >= 0: resultGPR = dividend
3305 m_jit.add32(dividendGPR, resultGPR);
3307 // Mask so as to only get the *high* bits. This rounds down
3308 // (towards negative infinity) resultGPR to the nearest multiple
3309 // of divisor, so that:
3311 // If dividend < 0: resultGPR = floor((dividend + divisor - 1) / divisor)
3312 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3314 // Note that this can be simplified to:
3316 // If dividend < 0: resultGPR = ceil(dividend / divisor)
3317 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3319 // Note that if the dividend is negative, resultGPR will also be negative.
3320 // Regardless of the sign of dividend, resultGPR will be rounded towards
3321 // zero, because of how things are conditionalized.
3322 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3324 // Subtract resultGPR from dividendGPR, which yields the remainder:
3326 // resultGPR = dividendGPR - resultGPR
3327 m_jit.neg32(resultGPR);
3328 m_jit.add32(dividendGPR, resultGPR);
3330 if (shouldCheckNegativeZero(node->arithMode())) {
3331 // Check that we're not about to create negative zero.
// A zero remainder with a negative dividend would be -0 in JS semantics.
3332 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3333 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3334 numeratorPositive.link(&m_jit);
3337 int32Result(resultGPR, node);
3342 #if CPU(X86) || CPU(X86_64)
// x86 fast path for any other constant divisor. Divisors 0 and -1 are
// excluded because they are the cases idivl cannot handle directly.
3343 if (node->child2()->isInt32Constant()) {
3344 int32_t divisor = node->child2()->asInt32();
3345 if (divisor && divisor != -1) {
3346 GPRReg op1Gpr = op1.gpr();
// idivl clobbers eax/edx; pin both, plus a scratch for the immediate.
3348 GPRTemporary eax(this, X86Registers::eax);
3349 GPRTemporary edx(this, X86Registers::edx);
3350 GPRTemporary scratch(this);
3351 GPRReg scratchGPR = scratch.gpr();
// The original dividend is needed after the divide (for the negative-zero
// check); save it if it lives in a register the divide clobbers.
3354 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3355 op1SaveGPR = allocate();
3356 ASSERT(op1Gpr != op1SaveGPR);
3357 m_jit.move(op1Gpr, op1SaveGPR);
3359 op1SaveGPR = op1Gpr;
3360 ASSERT(op1SaveGPR != X86Registers::eax);
3361 ASSERT(op1SaveGPR != X86Registers::edx);
// cdq sign-extends eax into edx:eax; after idivl the remainder is in edx.
3363 m_jit.move(op1Gpr, eax.gpr());
3364 m_jit.move(TrustedImm32(divisor), scratchGPR);
3365 m_jit.assembler().cdq();
3366 m_jit.assembler().idivl_r(scratchGPR);
3367 if (shouldCheckNegativeZero(node->arithMode())) {
// NOTE(review): this negative-zero bailout uses the Overflow exit kind,
// while the equivalent checks in compileArithDiv use NegativeZero —
// looks inconsistent; confirm whether intentional.
3368 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3369 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3370 numeratorPositive.link(&m_jit);
// Presumably releases op1SaveGPR here — body not visible in this excerpt.
3373 if (op1SaveGPR != op1Gpr)
3376 int32Result(edx.gpr(), node);
// Generic (non-constant divisor) path.
3382 SpeculateInt32Operand op2(this, node->child2());
3383 #if CPU(X86) || CPU(X86_64)
3384 GPRTemporary eax(this, X86Registers::eax);
3385 GPRTemporary edx(this, X86Registers::edx);
3386 GPRReg op1GPR = op1.gpr();
3387 GPRReg op2GPR = op2.gpr();
// As in compileArithDiv: move op2 out of eax/edx if necessary, and pick a
// scratch (temp) that cannot alias op1.
3393 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3394 op2TempGPR = allocate();
3397 op2TempGPR = InvalidGPRReg;
3398 if (op1GPR == X86Registers::eax)
3399 temp = X86Registers::edx;
3401 temp = X86Registers::eax;
// Also preserve the original dividend for the post-divide negative-zero check.
3404 if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3405 op1SaveGPR = allocate();
3406 ASSERT(op1GPR != op1SaveGPR);
3407 m_jit.move(op1GPR, op1SaveGPR);
3409 op1SaveGPR = op1GPR;
3411 ASSERT(temp != op1GPR);
3412 ASSERT(temp != op2GPR);
3413 ASSERT(op1SaveGPR != X86Registers::eax);
3414 ASSERT(op1SaveGPR != X86Registers::edx);
// Unsigned trick: (op2 + 1) > 1 iff op2 is neither 0 nor -1, the only
// denominators idivl cannot take directly.
3416 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3418 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3420 JITCompiler::JumpList done;
3422 // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3423 // separate case for that. But it probably doesn't matter so much.
3424 if (shouldCheckOverflow(node->arithMode())) {
3425 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3426 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3428 // This is the case where we convert the result to an int after we're done, and we
3429 // already know that the denominator is either -1 or 0. So, if the denominator is
3430 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3431 // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3432 // happy to fall through to a normal division, since we're just dividing something
// x % 0 -> 0 here (unchecked mode); edx holds the eventual remainder result.
3435 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3436 m_jit.move(TrustedImm32(0), edx.gpr());
3437 done.append(m_jit.jump());
3439 notZero.link(&m_jit);
3440 JITCompiler::Jump notNeg2ToThe31 =
3441 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
// INT32_MIN % -1 -> 0; avoids the idivl #DE trap for this pair.
3442 m_jit.move(TrustedImm32(0), edx.gpr());
3443 done.append(m_jit.jump());
3445 notNeg2ToThe31.link(&m_jit);
3448 safeDenominator.link(&m_jit);
3450 if (op2TempGPR != InvalidGPRReg) {
3451 m_jit.move(op2GPR, op2TempGPR);
3452 op2GPR = op2TempGPR;
// cdq sign-extends eax into edx:eax; after idivl the remainder is in edx.
3455 m_jit.move(op1GPR, eax.gpr());
3456 m_jit.assembler().cdq();
3457 m_jit.assembler().idivl_r(op2GPR);
// Presumably releases op2TempGPR — body not visible in this excerpt.
3459 if (op2TempGPR != InvalidGPRReg)
3462 // Check that we're not about to create negative zero.
3463 if (shouldCheckNegativeZero(node->arithMode())) {
// NOTE(review): Overflow exit kind used for a negative-zero check here
// too — see the note on the constant-divisor path above.
3464 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3465 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3466 numeratorPositive.link(&m_jit);
3469 if (op1SaveGPR != op1GPR)
3473 int32Result(edx.gpr(), node);
3475 #elif CPU(ARM64) || CPU(APPLE_ARMV7S)
3476 GPRTemporary temp(this);
3477 GPRTemporary quotientThenRemainder(this);
3478 GPRTemporary multiplyAnswer(this);
3479 GPRReg dividendGPR = op1.gpr();
3480 GPRReg divisorGPR = op2.gpr();
3481 GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3482 GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3484 JITCompiler::JumpList done;
// Checked mode: zero divisor is a speculation failure. Otherwise (below,
// presumably the else branch — not fully visible) a zero divisor
// short-circuits to a zero result by returning the divisor itself.
3486 if (shouldCheckOverflow(node->arithMode()))
3487 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3489 JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3490 m_jit.move(divisorGPR, quotientThenRemainderGPR);
3491 done.append(m_jit.jump());
3492 denominatorNotZero.link(&m_jit);
// remainder = dividend - (dividend / divisor) * divisor, computed with
// sdiv, a multiply, and a subtract.
3495 m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3496 // FIXME: It seems like there are cases where we don't need this? What if we have
3497 // arithMode() == Arith::Unchecked?
3498 // https://bugs.webkit.org/show_bug.cgi?id=126444
3499 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3500 #if CPU(APPLE_ARMV7S)
3501 m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3503 m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3506 // If the user cares about negative zero, then speculate that we're not about
3507 // to produce negative zero.
3508 if (shouldCheckNegativeZero(node->arithMode())) {
3509 // Check that we're not about to create negative zero.
// NOTE(review): Overflow exit kind for a negative-zero check again here.
3510 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3511 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3512 numeratorPositive.link(&m_jit);
3517 int32Result(quotientThenRemainderGPR, node);
3518 #else // not architecture that can do integer division
3519 RELEASE_ASSERT_NOT_REACHED();
3524 case DoubleRepUse: {
3525 SpeculateDoubleOperand op1(this, node->child1());
3526 SpeculateDoubleOperand op2(this, node->child2());
3528 FPRReg op1FPR = op1.fpr();
3529 FPRReg op2FPR = op2.fpr();
// No inline fmod; call out to the runtime operation.
3533 FPRResult result(this);
3535 callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3537 doubleResult(result.fpr(), node);
// Unexpected use kind for ArithMod.
3542 RELEASE_ASSERT_NOT_REACHED();
3547 // Returns true if the compare is fused with a subsequent branch.
// Dispatches a comparison node to the most specialized implementation its
// operand use kinds allow. First tries to fuse with an immediately
// following Branch (peephole); failing that, each compileXxxCompare emits
// a standalone inline comparison. (The per-branch return statements are
// not visible in this excerpt.)
3548 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3550 if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3553 if (node->isBinaryUseKind(Int32Use)) {
3554 compileInt32Compare(node, condition);
// Int52 compares only exist on 64-bit value representations.
3559 if (node->isBinaryUseKind(Int52RepUse)) {
3560 compileInt52Compare(node, condition);
3563 #endif // USE(JSVALUE64)
3565 if (node->isBinaryUseKind(DoubleRepUse)) {
3566 compileDoubleCompare(node, doubleCondition);
// Equality admits additional specialized paths beyond relational compares.
3570 if (node->op() == CompareEq) {
3571 if (node->isBinaryUseKind(StringUse)) {
3572 compileStringEquality(node);
3576 if (node->isBinaryUseKind(BooleanUse)) {
3577 compileBooleanCompare(node, condition);
3581 if (node->isBinaryUseKind(StringIdentUse)) {
3582 compileStringIdentEquality(node);
3586 if (node->isBinaryUseKind(ObjectUse)) {
3587 compileObjectEquality(node);
// Mixed object / object-or-other: normalize so the pure-object child is
// passed first, in either operand order.
3591 if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
3592 compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
3596 if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
3597 compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
// Fallback: untyped comparison via the generic slow-path operation.
3602 nonSpeculativeNonPeepholeCompare(node, condition, operation);
// Compiles a StrictEqual node. For each specialized binary use kind, first
// attempts to fuse with an immediately following Branch — the peephole
// pattern: emit a fused compare-and-branch, use() both children, then
// fast-forward m_indexInBlock/m_currentNode past the consumed branch —
// otherwise emits a standalone compare. Presumably returns whether the
// compare was fused with a branch, mirroring compare() above; the return
// statements are not visible in this excerpt.
3606 bool SpeculativeJIT::compileStrictEq(Node* node)
3608 if (node->isBinaryUseKind(BooleanUse)) {
3609 unsigned branchIndexInBlock = detectPeepHoleBranch();
3610 if (branchIndexInBlock != UINT_MAX) {
3611 Node* branchNode = m_block->at(branchIndexInBlock);
3612 compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
3613 use(node->child1());
3614 use(node->child2());
3615 m_indexInBlock = branchIndexInBlock;
3616 m_currentNode = branchNode;
3619 compileBooleanCompare(node, MacroAssembler::Equal);
3623 if (node->isBinaryUseKind(Int32Use)) {
3624 unsigned branchIndexInBlock = detectPeepHoleBranch();
3625 if (branchIndexInBlock != UINT_MAX) {
3626 Node* branchNode = m_block->at(branchIndexInBlock);
3627 compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
3628 use(node->child1());
3629 use(node->child2());
3630 m_indexInBlock = branchIndexInBlock;
3631 m_currentNode = branchNode;
3634 compileInt32Compare(node, MacroAssembler::Equal);
// Int52 strict-equality only exists on 64-bit value representations.
3639 if (node->isBinaryUseKind(Int52RepUse)) {
3640 unsigned branchIndexInBlock = detectPeepHoleBranch();
3641 if (branchIndexInBlock != UINT_MAX) {
3642 Node* branchNode = m_block->at(branchIndexInBlock);
3643 compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
3644 use(node->child1());
3645 use(node->child2());
3646 m_indexInBlock = branchIndexInBlock;
3647 m_currentNode = branchNode;
3650 compileInt52Compare(node, MacroAssembler::Equal);
3653 #endif // USE(JSVALUE64)
3655 if (node->isBinaryUseKind(DoubleRepUse)) {
3656 unsigned branchIndexInBlock = detectPeepHoleBranch();
3657 if (branchIndexInBlock != UINT_MAX) {
3658 Node* branchNode = m_block->at(branchIndexInBlock);
3659 compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
3660 use(node->child1());
3661 use(node->child2());
3662 m_indexInBlock = branchIndexInBlock;
3663 m_currentNode = branchNode;
3666 compileDoubleCompare(node, MacroAssembler::DoubleEqual);
// String paths have no peephole variant; always emit standalone compares.
3670 if (node->isBinaryUseKind(StringUse)) {
3671 compileStringEquality(node);
3675 if (node->isBinaryUseKind(StringIdentUse)) {
3676 compileStringIdentEquality(node);
3680 if (node->isBinaryUseKind(ObjectUse)) {
3681 unsigned branchIndexInBlock = detectPeepHoleBranch();
3682 if (branchIndexInBlock != UINT_MAX) {
3683 Node* branchNode = m_block->at(branchIndexInBlock);
3684 compilePeepHoleObjectEquality(node, branchNode);
3685 use(node->child1());
3686 use(node->child2());
3687 m_indexInBlock = branchIndexInBlock;
3688 m_currentNode = branchNode;
3691 compileObjectEquality(node);
3695 if (node->isBinaryUseKind(MiscUse, UntypedUse)
3696 || node->isBinaryUseKind(UntypedUse, MiscUse)) {
3697 compileMiscStrictEq(node);
// Asymmetric use-kind pairs: normalize operand order so the specialized
// child is passed first.
3701 if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
3702 compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
3706 if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
3707 compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
3711 if (node->isBinaryUseKind(StringUse, UntypedUse)) {
3712 compileStringToUntypedEquality(node, node->child1(), node->child2());
3716 if (node->isBinaryUseKind(UntypedUse, StringUse)) {
3717 compileStringToUntypedEquality(node, node->child2(), node->child1());
// Only the fully untyped case remains; defer to the generic slow path.
3721 RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
3722 return nonSpeculativeStrictEq(node);
// Compares two speculated-boolean operands under the given relational
// condition, producing an unblessed (raw 0/1, untagged) boolean result.
3725 void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
3727 SpeculateBooleanOperand op1(this, node->child1());
3728 SpeculateBooleanOperand op2(this, node->child2());
3729 GPRTemporary result(this);
// Compare the boolean payloads directly as 32-bit values.
3731 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
3733 unblessedBooleanResult(result.gpr(), node);
3736 void SpeculativeJIT::compileStringEquality(
3737 Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
3738 GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
3739 JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
3741 JITCompiler::JumpList trueCase;
3742 JITCompiler::JumpList falseCase;
3743 JITCompiler::JumpList slowCase;
3745 trueCase.append(fastTrue);
3746 falseCase.append(fastFalse);
3748 m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);
3750 falseCase.append(m_jit.branch32(
3751 MacroAssembler::NotEqual,
3752 MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
3755 trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
3757 m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
3758 m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);
3760 slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
3761 slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));
3763 slowCase.append(m_jit.branchTest32(
3764 MacroAssembler::Zero,
3765 MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
3766 TrustedImm32(StringImpl::flagIs8Bit())));
3767 slowCase.append(m_jit.branchTest32(
3768 MacroAssembler::Zero,
3769 MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
3770 TrustedImm32(StringImpl::flagIs8Bit())));
3772 m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
3773 m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);
3775 MacroAssembler::Label loop = m_jit.label();
3777 m_jit.sub32(TrustedImm32(1), lengthGPR);
3779 // This isn't going to generate the best code on x86. But that's OK, it's still better
3780 // than not inlining.
3781 m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
3782 m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
3783 falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));
3785 m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
3787 trueCase.link(&m_jit);
3788 moveTrueTo(leftTempGPR);
3790 JITCompiler::Jump done = m_jit.jump();
3792 falseCase.link(&m_jit);
3793 moveFalseTo(leftTempGPR);