2 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGSaneStringGetByValSlowPathGenerator.h"
37 #include "DFGSlowPathGenerator.h"
38 #include "JSCJSValueInlines.h"
39 #include "LinkBuffer.h"
41 namespace JSC { namespace DFG {
// Constructor: binds this speculative code generator to its JITCompiler,
// sizes the per-virtual-register GenerationInfo table from the code block,
// and wires up the abstract-interpreter state plus the variable event stream
// and minified graph that OSR exit uses to reconstruct bytecode state.
// NOTE(review): some member initializers lie between the visible lines of
// this excerpt and are not shown.
43 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
48 , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
49 , m_arguments(jit.codeBlock()->numParameters())
50 , m_variables(jit.graph().m_localVars)
51 , m_lastSetOperand(std::numeric_limits<int>::max())
52 , m_state(m_jit.graph())
53 , m_interpreter(m_jit.graph(), m_state)
54 , m_stream(&jit.jitCode()->variableEventStream)
55 , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
56 , m_isCheckingArgumentTypes(false)
60 SpeculativeJIT::~SpeculativeJIT()
// Inline fast-path allocation of a JSArray of the given structure with
// numElements as its public length. Allocates the butterfly storage and the
// JSArray cell, writes the indexing header, and pre-fills any unused vector
// slots with QNaN when the array has double shape so holes read as valid
// doubles. All failure paths are collected in slowCases and handled by a
// call to operationNewArrayWithSize.
64 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
// Only non-typed-array "undecided / int32 / double / contiguous" shapes are
// supported by this fast path.
66 ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
68 GPRTemporary scratch(this);
69 GPRTemporary scratch2(this);
70 GPRReg scratchGPR = scratch.gpr();
71 GPRReg scratch2GPR = scratch2.gpr();
// Never allocate a vector smaller than BASE_VECTOR_LEN, even for tiny arrays.
73 unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
75 JITCompiler::JumpList slowCases;
// emitAllocateBasicStorage returns a pointer past the allocation; the subPtr
// below rewinds it so storageGPR points at the butterfly base.
78 emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
79 m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
80 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
// Initialize the indexing header: public length and vector length.
82 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
83 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
// Double-shaped arrays must have their unused tail filled with QNaN.
// The first loop is the 64-bit (whole-word) store path; the tag/payload
// loop below it is the 32-bit path (the #if/#else lines are elided here).
85 if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
87 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
88 for (unsigned i = numElements; i < vectorLength; ++i)
89 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
91 EncodedValueDescriptor value;
92 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
93 for (unsigned i = numElements; i < vectorLength; ++i) {
94 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
95 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
100 // I want a slow path that also loads out the storage pointer, and that's
101 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
102 // of work for a very small piece of functionality. :-/
103 addSlowPathGenerator(adoptPtr(
104 new CallArrayAllocatorSlowPathGenerator(
105 slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
106 structure, numElements)));
// Registers a "backward" OSR exit -- one that reconstructs state as of the
// start of the current node -- and routes jumpToFail to it.
109 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
113 ASSERT(m_isCheckingArgumentTypes || m_canExit);
114 m_jit.appendExitInfo(jumpToFail);
115 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Same as above, but attaches a whole JumpList of failure jumps to one exit.
118 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
122 ASSERT(m_isCheckingArgumentTypes || m_canExit);
123 m_jit.appendExitInfo(jumpsToFail);
124 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Direction-aware speculation check: emits a backward exit, then converts it
// to a forward exit (exiting after the current node) if the current
// speculation direction demands it.
127 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
131 backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
132 if (m_speculationDirection == ForwardSpeculation)
133 convertLastOSRExitToForward();
// Edge-based convenience overload: forwards to the Node* variant.
136 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
138 ASSERT(m_isCheckingArgumentTypes || m_canExit);
139 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
// Registers an OSR exit with no jump attached yet, returning a placeholder
// that records the exit's index so the failure jump(s) can be linked later.
142 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
// NOTE(review): this bare early return of an empty placeholder is presumably
// guarded by a bail-out condition (e.g. !m_compileOkay) on a line not visible
// in this excerpt -- otherwise the code below it would be unreachable.
145 return OSRExitJumpPlaceholder();
146 ASSERT(m_isCheckingArgumentTypes || m_canExit);
147 unsigned index = m_jit.jitCode()->osrExit.size();
148 m_jit.appendExitInfo();
149 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
150 return OSRExitJumpPlaceholder(index);
// Edge-based convenience overload of the placeholder-returning check.
153 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
155 ASSERT(m_isCheckingArgumentTypes || m_canExit);
156 return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
// JumpList variant of the direction-aware speculation check.
159 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
163 backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
164 if (m_speculationDirection == ForwardSpeculation)
165 convertLastOSRExitToForward();
// Edge-based convenience overload (JumpList).
168 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
170 ASSERT(m_isCheckingArgumentTypes || m_canExit);
171 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
// Backward exit that additionally records a SpeculationRecovery -- extra
// undo steps the exit thunk must perform (e.g. reversing a speculative add)
// before values can be reconstructed.
174 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
178 ASSERT(m_isCheckingArgumentTypes || m_canExit);
179 unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
180 m_jit.appendExitInfo(jumpToFail);
181 m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
// Edge-based convenience overload (with recovery).
184 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
186 ASSERT(m_isCheckingArgumentTypes || m_canExit);
187 backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
// Direction-aware speculation check with a SpeculationRecovery attached.
190 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
194 backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
195 if (m_speculationDirection == ForwardSpeculation)
196 convertLastOSRExitToForward();
// Edge-based convenience overload (with recovery).
199 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
201 speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
// Registers an OSR exit that is triggered not by an in-line branch but by a
// jump-replacement watchpoint: the exit fires when the watchpoint is later
// invalidated and the emitted label is patched into a jump. Returns the
// watchpoint so the caller can register it with whatever set it watches.
204 JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
208 ASSERT(m_isCheckingArgumentTypes || m_canExit);
// Empty JumpList: no branch jumps to this exit; entry is via patching.
209 m_jit.appendExitInfo(JITCompiler::JumpList());
210 OSRExit& exit = m_jit.jitCode()->osrExit[
211 m_jit.jitCode()->appendOSRExit(OSRExit(
213 m_jit.graph().methodOfGettingAValueProfileFor(node),
214 this, m_stream->size()))];
215 exit.m_watchpointIndex = m_jit.jitCode()->appendWatchpoint(
216 JumpReplacementWatchpoint(m_jit.watchpointLabel()));
217 if (m_speculationDirection == ForwardSpeculation)
218 convertLastOSRExitToForward();
219 return &m_jit.jitCode()->watchpoints[exit.m_watchpointIndex];
// Convenience overload with no value source / node attribution.
222 JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
224 return speculationWatchpoint(kind, JSValueSource(), 0);
// Retargets the most recently appended OSR exit to exit *after* the current
// node, using valueRecovery to materialize the current node's value on exit.
227 void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
229 m_jit.jitCode()->lastOSRExit().convertToForward(
230 m_block, m_currentNode, m_indexInBlock, valueRecovery);
// Explicitly-forward speculation check: backward exit immediately converted
// to a forward exit with the supplied value recovery.
233 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
235 ASSERT(m_isCheckingArgumentTypes || m_canExit);
236 backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
237 convertLastOSRExitToForward(valueRecovery);
// JumpList variant of the explicitly-forward speculation check.
240 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
242 ASSERT(m_isCheckingArgumentTypes || m_canExit);
243 backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
244 convertLastOSRExitToForward(valueRecovery);
// Emits an unconditional OSR exit (speculation has been proven to always
// fail here) and marks the remainder of this block as dead by clearing
// m_compileOkay.
247 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
249 ASSERT(m_isCheckingArgumentTypes || m_canExit);
250 #if DFG_ENABLE(DEBUG_VERBOSE)
251 dataLogF("SpeculativeJIT was terminated.\n");
// Unconditional jump into the exit.
255 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
256 m_compileOkay = false;
// Edge-based convenience overload.
259 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
261 ASSERT(m_isCheckingArgumentTypes || m_canExit);
262 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
// Type check: narrows the abstract value on 'edge' to typesPassedThrough in
// the in-flight abstract interpreter state, then emits a backward BadType
// exit taken via jumpToFail when the runtime value violates that type.
265 void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
267 ASSERT(needsTypeCheck(edge, typesPassedThrough));
268 m_interpreter.filter(edge, typesPassedThrough);
269 backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
// Direction-aware type check (converts to forward exit when required).
272 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
274 backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
275 if (m_speculationDirection == ForwardSpeculation)
276 convertLastOSRExitToForward();
// Explicitly-forward type check with a value recovery for the checked node.
279 void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
281 backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
282 convertLastOSRExitToForward(valueRecovery);
// Queues a slow-path generator; ownership transfers to m_slowPathGenerators.
// All queued generators are emitted later by runSlowPathGenerators().
285 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
287 m_slowPathGenerators.append(slowPathGenerator);
// Emits the code for every queued slow path, after the main (fast) path of
// the block has been generated.
290 void SpeculativeJIT::runSlowPathGenerators()
292 #if DFG_ENABLE(DEBUG_VERBOSE)
293 dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
295 for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
296 m_slowPathGenerators[i]->generate(this);
299 // On Windows we need to wrap fmod; on other platforms we can call it directly.
300 // On ARMv7 we assert that all function pointers have the low bit set (point to thumb code).
301 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
// Wrapper giving fmod the DFG_OPERATION calling convention/attributes.
302 static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
// On all other platforms the name simply aliases libm's fmod.
307 #define fmodAsDFGOperation fmod
// Resets all per-virtual-register value tracking and frees every GPR/FPR in
// the register banks (used when starting fresh, e.g. at a block boundary).
310 void SpeculativeJIT::clearGenerationInfo()
312 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
313 m_generationInfo[i] = GenerationInfo();
314 m_gprs = RegisterBank<GPRInfo>();
315 m_fprs = RegisterBank<FPRInfo>();
// Computes -- without emitting any code -- how to spill (if needed) and how
// to later refill the GPR currently holding the value of virtual register
// 'spillMe', so that the register can be "silently" saved and restored
// around a call without disturbing the recorded register allocation state.
// The spill half depends on the current register format; the fill half
// prefers rematerializing constants over reloading from the stack.
// Returns the plan; silentSpill()/silentFill() execute it.
// NOTE(review): the #if USE(JSVALUE64)/#else/#endif lines and several
// else/break lines between the cases are not visible in this excerpt.
318 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
320 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
321 Node* node = info.node();
322 DataFormat registerFormat = info.registerFormat();
323 ASSERT(registerFormat != DataFormatNone);
// Doubles live in FPRs and are handled by silentSavePlanForFPR.
324 ASSERT(registerFormat != DataFormatDouble);
326 SilentSpillAction spillAction;
327 SilentFillAction fillAction;
// --- Choose the spill action ---
329 if (!info.needsSpill())
330 spillAction = DoNothingForSpill;
333 ASSERT(info.gpr() == source);
334 if (registerFormat == DataFormatInt32)
335 spillAction = Store32Payload;
336 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
337 spillAction = StorePtr;
338 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
339 spillAction = Store64;
341 ASSERT(registerFormat & DataFormatJS);
342 spillAction = Store64;
// 32-bit: a JS value occupies two GPRs (tag + payload); spill whichever
// half 'source' holds.
344 #elif USE(JSVALUE32_64)
345 if (registerFormat & DataFormatJS) {
346 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
347 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
349 ASSERT(info.gpr() == source);
350 spillAction = Store32Payload;
// --- Choose the fill action ---
355 if (registerFormat == DataFormatInt32) {
356 ASSERT(info.gpr() == source);
357 ASSERT(isJSInt32(info.registerFormat()));
358 if (node->hasConstant()) {
359 ASSERT(isInt32Constant(node));
360 fillAction = SetInt32Constant;
362 fillAction = Load32Payload;
363 } else if (registerFormat == DataFormatBoolean) {
// 64-bit never keeps a bare unboxed boolean in a GPR.
365 RELEASE_ASSERT_NOT_REACHED();
366 fillAction = DoNothingForFill;
367 #elif USE(JSVALUE32_64)
368 ASSERT(info.gpr() == source);
369 if (node->hasConstant()) {
370 ASSERT(isBooleanConstant(node));
371 fillAction = SetBooleanConstant;
373 fillAction = Load32Payload;
375 } else if (registerFormat == DataFormatCell) {
376 ASSERT(info.gpr() == source);
377 if (node->hasConstant()) {
378 JSValue value = valueOfJSConstant(node);
379 ASSERT_UNUSED(value, value.isCell());
380 fillAction = SetCellConstant;
383 fillAction = LoadPtr;
385 fillAction = Load32Payload;
388 } else if (registerFormat == DataFormatStorage) {
389 ASSERT(info.gpr() == source);
390 fillAction = LoadPtr;
391 } else if (registerFormat == DataFormatInt52) {
// Int52 is kept left-shifted by int52ShiftAmount in the register; pick
// the reload that converts from whatever format the spill slot holds.
392 if (node->hasConstant())
393 fillAction = SetInt52Constant;
394 else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
395 fillAction = Load32PayloadConvertToInt52;
396 else if (info.spillFormat() == DataFormatInt52)
398 else if (info.spillFormat() == DataFormatStrictInt52)
399 fillAction = Load64ShiftInt52Left;
400 else if (info.spillFormat() == DataFormatNone)
403 // Should never happen. Anything that qualifies as an int32 will never
404 // be turned into a cell (immediate spec fail) or a double (to-double
405 // conversions involve a separate node).
406 RELEASE_ASSERT_NOT_REACHED();
407 fillAction = Load64; // Make GCC happy.
409 } else if (registerFormat == DataFormatStrictInt52) {
// StrictInt52 is the unshifted 52-bit integer form.
410 if (node->hasConstant())
411 fillAction = SetStrictInt52Constant;
412 else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
413 fillAction = Load32PayloadSignExtend;
414 else if (info.spillFormat() == DataFormatInt52)
415 fillAction = Load64ShiftInt52Right;
416 else if (info.spillFormat() == DataFormatStrictInt52)
418 else if (info.spillFormat() == DataFormatNone)
421 // Should never happen. Anything that qualifies as an int32 will never
422 // be turned into a cell (immediate spec fail) or a double (to-double
423 // conversions involve a separate node).
424 RELEASE_ASSERT_NOT_REACHED();
425 fillAction = Load64; // Make GCC happy.
428 ASSERT(registerFormat & DataFormatJS);
430 ASSERT(info.gpr() == source);
431 if (node->hasConstant()) {
// Cell constants can use a trusted (unblinded) immediate.
432 if (valueOfJSConstant(node).isCell())
433 fillAction = SetTrustedJSConstant;
434 fillAction = SetJSConstant;
435 } else if (info.spillFormat() == DataFormatInt32) {
436 ASSERT(registerFormat == DataFormatJSInt32);
437 fillAction = Load32PayloadBoxInt;
438 } else if (info.spillFormat() == DataFormatDouble) {
439 ASSERT(registerFormat == DataFormatJSDouble);
440 fillAction = LoadDoubleBoxDouble;
// 32-bit JS value: refill the tag or payload half separately.
444 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
445 if (node->hasConstant())
446 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
447 else if (info.payloadGPR() == source)
448 fillAction = Load32Payload;
449 else { // Fill the Tag
450 switch (info.spillFormat()) {
451 case DataFormatInt32:
452 ASSERT(registerFormat == DataFormatJSInt32);
453 fillAction = SetInt32Tag;
456 ASSERT(registerFormat == DataFormatJSCell);
457 fillAction = SetCellTag;
459 case DataFormatBoolean:
460 ASSERT(registerFormat == DataFormatJSBoolean);
461 fillAction = SetBooleanTag;
464 fillAction = Load32Tag;
471 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// FPR counterpart of silentSavePlanForGPR: plans the spill/fill of the FPR
// holding virtual register 'spillMe'. Constants are rematerialized; spilled
// boxed JS values are unboxed on reload (64-bit only).
474 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
476 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
477 Node* node = info.node();
478 ASSERT(info.registerFormat() == DataFormatDouble);
480 SilentSpillAction spillAction;
481 SilentFillAction fillAction;
// --- Spill: only needed if the value isn't already backed by the stack ---
483 if (!info.needsSpill())
484 spillAction = DoNothingForSpill;
486 ASSERT(!node->hasConstant());
487 ASSERT(info.spillFormat() == DataFormatNone);
488 ASSERT(info.fpr() == source);
489 spillAction = StoreDouble;
// --- Fill (64-bit path; the #if/#elif lines are elided here) ---
493 if (node->hasConstant()) {
494 ASSERT(isNumberConstant(node));
495 fillAction = SetDoubleConstant;
496 } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
497 // it was already spilled previously and not as a double, which means we need unboxing.
498 ASSERT(info.spillFormat() & DataFormatJS);
499 fillAction = LoadJSUnboxDouble;
501 fillAction = LoadDouble;
// --- Fill (32-bit path) ---
502 #elif USE(JSVALUE32_64)
503 ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
504 if (node->hasConstant()) {
505 ASSERT(isNumberConstant(node));
506 fillAction = SetDoubleConstant;
508 fillAction = LoadDouble;
511 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
// Emits the store described by a plan computed by silentSavePlanForGPR/FPR.
// Each case writes the planned register into the node's stack slot (tag half,
// payload half, full pointer, 64-bit word, or double as appropriate).
514 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
516 switch (plan.spillAction()) {
517 case DoNothingForSpill:
520 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
523 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
526 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
530 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
534 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
537 RELEASE_ASSERT_NOT_REACHED();
// Emits the reload/rematerialization described by a SilentRegisterSavePlan.
// Constant fills move immediates; load fills read the node's stack slot and
// convert between formats (boxing ints, unboxing doubles, shifting Int52).
// 'canTrample' is a scratch GPR used on 64-bit paths (unused on 32-bit).
// NOTE(review): the #if USE(JSVALUE64)/#else lines and per-case breaks are
// not visible in this excerpt.
541 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
543 #if USE(JSVALUE32_64)
544 UNUSED_PARAM(canTrample);
546 switch (plan.fillAction()) {
547 case DoNothingForFill:
549 case SetInt32Constant:
550 m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
// Int52 constants are kept pre-shifted left by int52ShiftAmount.
553 case SetInt52Constant:
554 m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
556 case SetStrictInt52Constant:
557 m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt()), plan.gpr());
559 #endif // USE(JSVALUE64)
560 case SetBooleanConstant:
561 m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
563 case SetCellConstant:
564 m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
567 case SetTrustedJSConstant:
568 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
571 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
// Doubles can't take an immediate directly: go via the scratch GPR.
573 case SetDoubleConstant:
574 m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
575 m_jit.move64ToDouble(canTrample, plan.fpr());
// Box a spilled int32 into a JS value (NaN-boxing: OR in the number tag).
577 case Load32PayloadBoxInt:
578 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
579 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
581 case Load32PayloadConvertToInt52:
582 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
583 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
584 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
586 case Load32PayloadSignExtend:
587 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
588 m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
// Box a spilled double bit-pattern (subtract the number-tag offset).
590 case LoadDoubleBoxDouble:
591 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
592 m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
594 case LoadJSUnboxDouble:
595 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
596 unboxDouble(canTrample, plan.fpr());
// 32-bit fills: tag and payload halves handled separately.
599 case SetJSConstantTag:
600 m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
602 case SetJSConstantPayload:
603 m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
606 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
609 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
612 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
614 case SetDoubleConstant:
615 m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
619 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
622 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
625 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
629 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
// Reload Int52 in its two storage flavors, converting to the other form.
631 case Load64ShiftInt52Right:
632 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
633 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
635 case Load64ShiftInt52Left:
636 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
637 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
641 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
644 RELEASE_ASSERT_NOT_REACHED();
// Given tempGPR holding an object's indexing type byte, returns a jump that
// is taken when the indexing type does NOT match the wanted shape. For array
// classes that require a real JSArray, the IsArray bit is checked as well.
648 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
650 switch (arrayMode.arrayClass()) {
651 case Array::OriginalArray: {
653 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
// Must be an array AND have the wanted shape.
658 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
659 return m_jit.branch32(
660 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
// Non-array class: only the shape bits matter.
663 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
664 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
// JumpList variant: builds the full set of failure jumps for an ArrayMode.
// Simple shapes delegate to the single-shape helper above; (SlowPut)
// ArrayStorage modes need a range check because two adjacent shapes
// (ArrayStorageShape, SlowPutArrayStorageShape) may both be acceptable.
668 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
670 JITCompiler::JumpList result;
672 switch (arrayMode.type()) {
674 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
677 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
679 case Array::Contiguous:
680 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
682 case Array::ArrayStorage:
683 case Array::SlowPutArrayStorage: {
684 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
686 if (arrayMode.isJSArray()) {
687 if (arrayMode.isSlowPut()) {
// Must be an array; then check shape is within
// [ArrayStorageShape, SlowPutArrayStorageShape] via the
// subtract-and-unsigned-compare trick.
690 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
691 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
692 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
695 MacroAssembler::Above, tempGPR,
696 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
// Plain ArrayStorage on a JSArray: exact match on IsArray | shape.
699 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
701 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
// Non-array object: same two checks without the IsArray requirement.
704 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
705 if (arrayMode.isSlowPut()) {
706 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
709 MacroAssembler::Above, tempGPR,
710 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
714 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
// Compiles the CheckArray node: verifies at runtime that the base object
// matches the node's ArrayMode, without performing any conversion (that is
// arrayify's job). Indexed shapes are checked via the structure's indexing
// type; String/Arguments/typed arrays are checked via the ClassInfo pointer.
// If abstract interpretation already proves the check, no code is emitted.
725 void SpeculativeJIT::checkArray(Node* node)
727 ASSERT(node->arrayMode().isSpecific());
728 ASSERT(!node->arrayMode().doesConversion());
730 SpeculateCellOperand base(this, node->child1());
731 GPRReg baseReg = base.gpr();
// Statically satisfied: the abstract value already proves the mode.
733 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
734 noResult(m_currentNode);
738 const ClassInfo* expectedClassInfo = 0;
740 switch (node->arrayMode().type()) {
742 expectedClassInfo = JSString::info();
// Indexed storage shapes: load structure -> indexing type, then branch to
// an OSR exit on any unwanted shape.
746 case Array::Contiguous:
747 case Array::ArrayStorage:
748 case Array::SlowPutArrayStorage: {
749 GPRTemporary temp(this);
750 GPRReg tempGPR = temp.gpr();
752 MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
753 m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
755 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
756 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
758 noResult(m_currentNode);
761 case Array::Arguments:
762 expectedClassInfo = Arguments::info();
765 expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType());
// ClassInfo-based check for the remaining modes.
769 RELEASE_ASSERT(expectedClassInfo);
771 GPRTemporary temp(this);
773 MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
775 BadType, JSValueSource::unboxedCell(baseReg), node,
777 MacroAssembler::NotEqual,
778 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
779 MacroAssembler::TrustedImmPtr(expectedClassInfo)));
781 noResult(m_currentNode);
// Compiles Arrayify/ArrayifyToStructure: fast path checks whether the base
// already has the wanted structure (ArrayifyToStructure) or an acceptable
// indexing shape (Arrayify); otherwise falls to ArrayifySlowPathGenerator,
// which performs the actual storage conversion. propertyReg may be
// InvalidGPRReg when no index is involved.
784 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
786 ASSERT(node->arrayMode().doesConversion());
788 GPRTemporary temp(this);
// 'structure' is adopted lazily below -- only the generic Arrayify path
// needs a second temporary to hold the loaded structure pointer.
789 GPRTemporary structure;
790 GPRReg tempGPR = temp.gpr();
791 GPRReg structureGPR = InvalidGPRReg;
793 if (node->op() != ArrayifyToStructure) {
794 GPRTemporary realStructure(this);
795 structure.adopt(realStructure);
796 structureGPR = structure.gpr();
799 // We can skip all that comes next if we already have array storage.
800 MacroAssembler::JumpList slowPath;
802 if (node->op() == ArrayifyToStructure) {
// Weak pointer compare against the target structure.
803 slowPath.append(m_jit.branchWeakPtr(
804 JITCompiler::NotEqual,
805 JITCompiler::Address(baseReg, JSCell::structureOffset()),
// Generic path: load structure, then its indexing type, and test shape.
809 MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
812 MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
814 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
817 addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
818 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
820 noResult(m_currentNode);
// Operand-binding wrapper: speculates the base cell (and optional int32
// index from child2) and dispatches to the register-level arrayify above.
823 void SpeculativeJIT::arrayify(Node* node)
825 ASSERT(node->arrayMode().isSpecific());
827 SpeculateCellOperand base(this, node->child1());
829 if (!node->child2()) {
830 arrayify(node, base.gpr(), InvalidGPRReg);
834 SpeculateInt32Operand property(this, node->child2());
836 arrayify(node, base.gpr(), property.gpr());
// Materializes the value on 'edge' into a GPR in storage-pointer form.
// Reloads spilled storage pointers from the stack; anything not already in
// DataFormatStorage is filled as a cell and the cell pointer is returned.
839 GPRReg SpeculativeJIT::fillStorage(Edge edge)
841 VirtualRegister virtualRegister = edge->virtualRegister();
842 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
844 switch (info.registerFormat()) {
845 case DataFormatNone: {
// Spilled as storage: reload the raw pointer from the stack slot.
846 if (info.spillFormat() == DataFormatStorage) {
847 GPRReg gpr = allocate();
848 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
849 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
850 info.fillStorage(*m_stream, gpr);
854 // Must be a cell; fill it as a cell and then return the pointer.
855 return fillSpeculateCell(edge);
// Already live in a register as storage: reuse it.
858 case DataFormatStorage: {
859 GPRReg gpr = info.gpr();
865 return fillSpeculateCell(edge);
// Drops one reference from each of the node's children (releasing their
// registers when the refcount hits zero). Handles both the var-args child
// representation and the fixed child1/child2/child3 slots.
869 void SpeculativeJIT::useChildren(Node* node)
871 if (node->flags() & NodeHasVarArgs) {
872 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
873 if (!!m_jit.graph().m_varArgChildren[childIdx])
874 use(m_jit.graph().m_varArgChildren[childIdx]);
// Fixed-arity children: a missing child implies no later children exist.
877 Edge child1 = node->child1();
879 ASSERT(!node->child2() && !node->child3());
884 Edge child2 = node->child2();
886 ASSERT(!node->child3());
891 Edge child3 = node->child3();
// Static write barrier emitter (register owner + value). In this
// configuration the barrier emits no GC marking code -- it only bumps a
// profiling counter when WRITE_BARRIER_PROFILING is enabled.
898 void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
902 UNUSED_PARAM(scratch1);
903 UNUSED_PARAM(scratch2);
904 UNUSED_PARAM(useKind);
905 ASSERT(owner != scratch1);
906 ASSERT(owner != scratch2);
907 ASSERT(scratch1 != scratch2);
909 #if ENABLE(WRITE_BARRIER_PROFILING)
910 JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
// Write barrier for storing a register value into a register-held owner.
// Skipped entirely when the value is statically known not to be a cell;
// otherwise only profiling counters are emitted in this configuration.
914 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
916 UNUSED_PARAM(ownerGPR);
917 UNUSED_PARAM(valueGPR);
918 UNUSED_PARAM(scratch1);
919 UNUSED_PARAM(scratch2);
920 UNUSED_PARAM(useKind);
922 if (isKnownNotCell(valueUse.node()))
925 #if ENABLE(WRITE_BARRIER_PROFILING)
926 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
// Write barrier for storing a compile-time-known cell. Skipped when the
// cell is already marked at compile time; otherwise profiling-only.
930 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
932 UNUSED_PARAM(ownerGPR);
934 UNUSED_PARAM(scratch1);
935 UNUSED_PARAM(scratch2);
936 UNUSED_PARAM(useKind);
938 if (Heap::isMarked(value))
941 #if ENABLE(WRITE_BARRIER_PROFILING)
942 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
// Write barrier for a compile-time-known owner cell receiving a register
// value. Skipped when the value is statically not a cell; profiling-only.
946 void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
949 UNUSED_PARAM(valueGPR);
950 UNUSED_PARAM(scratch);
951 UNUSED_PARAM(useKind);
953 if (isKnownNotCell(valueUse.node()))
956 #if ENABLE(WRITE_BARRIER_PROFILING)
957 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
// Compiles the In node ("key in object"). When the key is a constant,
// identifier-backed JSString, emits a patchable jump registered as an
// InRecord so the structure-check patching machinery (operationInOptimize)
// can later turn it into a fast inline check; otherwise falls back to the
// generic operationGenericIn call. Result is a boolean (boxed as
// DataFormatJSBoolean on 64-bit; booleanResult on 32-bit -- the #if lines
// are elided in this excerpt).
961 void SpeculativeJIT::compileIn(Node* node)
963 SpeculateCellOperand base(this, node->child2());
964 GPRReg baseGPR = base.gpr();
966 if (isConstant(node->child1().node())) {
968 jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node()));
// Only atomized (identifier) strings qualify for the patchable fast path.
969 if (string && string->tryGetValueImpl()
970 && string->tryGetValueImpl()->isIdentifier()) {
971 GPRTemporary result(this);
972 GPRReg resultGPR = result.gpr();
976 MacroAssembler::PatchableJump jump = m_jit.patchableJump();
978 OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
979 jump.m_jump, this, operationInOptimize,
980 JSValueRegs::payloadOnly(resultGPR), baseGPR,
981 string->tryGetValueImpl());
// Record the patch point so the IC can be repatched at runtime.
983 m_jit.addIn(InRecord(
984 node->codeOrigin, jump, slowPath.get(), safeCast<int8_t>(baseGPR),
985 safeCast<int8_t>(resultGPR), usedRegisters()));
986 addSlowPathGenerator(slowPath.release());
992 resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
994 booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Generic path: evaluate the key as a JSValue and call the runtime.
1000 JSValueOperand key(this, node->child1());
1001 JSValueRegs regs = key.jsValueRegs();
1003 GPRResult result(this);
1004 GPRReg resultGPR = result.gpr();
1011 operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1014 jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
1016 booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
// Non-speculative compare. If the compare's only use is the immediately
// following branch, fuse compare+branch (peephole) and advance the node cursor
// past the branch; otherwise emit a standalone compare. Return value indicates
// whether the branch was fused (visible lines omit the returns themselves).
1020 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
1022 unsigned branchIndexInBlock = detectPeepHoleBranch();
1023 if (branchIndexInBlock != UINT_MAX) {
1024 Node* branchNode = m_block->at(branchIndexInBlock);
// The branch must be the compare's sole user for fusion to be sound.
1026 ASSERT(node->adjustedRefCount() == 1);
1028 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
// Skip ahead so the fused branch isn't compiled a second time.
1030 m_indexInBlock = branchIndexInBlock;
1031 m_currentNode = branchNode;
1036 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
// Non-speculative strict equality (===), mirroring nonSpeculativeCompare:
// fuse with an immediately following branch when possible, else emit a
// standalone compare. `invert` flips the comparison sense.
1041 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1043 unsigned branchIndexInBlock = detectPeepHoleBranch();
1044 if (branchIndexInBlock != UINT_MAX) {
1045 Node* branchNode = m_block->at(branchIndexInBlock);
// Fusion requires the branch to be the compare's only user.
1047 ASSERT(node->adjustedRefCount() == 1);
1049 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
// Advance past the fused branch node.
1051 m_indexInBlock = branchIndexInBlock;
1052 m_currentNode = branchNode;
1057 nonSpeculativeNonPeepholeStrictEq(node, invert);
// Debug helper: maps a DataFormat enum value to its printable name.
// (The string table's contents are elided in this view; the array is indexed
// directly by the enum value, so its order must match the DataFormat enum.)
1063 static const char* dataFormatString(DataFormat format)
1065 // These values correspond to the DataFormat enum.
1066 const char* strings[] = {
1084 return strings[format];
// Debug dump of the register-allocation state: GPR/FPR files and the
// per-virtual-register GenerationInfo (format, and which gpr/fpr holds it).
// `label` brackets the output for readability.
1087 void SpeculativeJIT::dump(const char* label)
1090 dataLogF("<%s>\n", label);
1092 dataLogF("  gprs:\n");
1094 dataLogF("  fprs:\n");
1096 dataLogF("  VirtualRegisters:\n");
1097 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1098 GenerationInfo& info = m_generationInfo[i];
// Live registers print their register/spill formats; dead ones print [__][__].
1100 dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1102 dataLogF(" % 3d:[__][__]", i);
1103 if (info.registerFormat() == DataFormatDouble)
1104 dataLogF(":fpr%d\n", info.fpr());
1105 else if (info.registerFormat() != DataFormatNone
1106 #if USE(JSVALUE32_64)
// On 32-bit, JS values occupy a tag/payload GPR pair, so the single-gpr
// printout below is skipped for DataFormatJS formats.
1107 && !(info.registerFormat() & DataFormatJS)
1110 ASSERT(info.gpr() != InvalidGPRReg);
1111 dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1116 dataLogF("</%s>\n", label);
1121 #if DFG_ENABLE(CONSISTENCY_CHECK)
// Debug-only audit of allocator state. Checks three invariants:
//  1. no GPR/FPR is left locked between nodes;
//  2. each live GenerationInfo's claimed register actually names its
//     virtual register in the register file;
//  3. each named register in the GPR/FPR files points back at a
//     GenerationInfo that claims it.
// (Interior lines, including the failure handling, are elided in this view.)
1122 void SpeculativeJIT::checkConsistency()
1124 bool failed = false;
1126 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
1127 if (iter.isLocked()) {
1128 dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
1132 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
1133 if (iter.isLocked()) {
1134 dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
// Direction 1: GenerationInfo -> register file.
1139 for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1140 VirtualRegister virtualRegister = (VirtualRegister)i;
1141 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1144 switch (info.registerFormat()) {
1145 case DataFormatNone:
1148 case DataFormatJSInt32:
1149 case DataFormatJSDouble:
1150 case DataFormatJSCell:
1151 case DataFormatJSBoolean:
1152 #if USE(JSVALUE32_64)
// GPR-resident formats: the named gpr must map back to this virtual register.
1155 case DataFormatInt32:
1156 case DataFormatCell:
1157 case DataFormatBoolean:
1158 case DataFormatStorage: {
1159 GPRReg gpr = info.gpr();
1160 ASSERT(gpr != InvalidGPRReg);
1161 if (m_gprs.name(gpr) != virtualRegister) {
1162 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
// FPR-resident doubles: same check against the FPR file.
1167 case DataFormatDouble: {
1168 FPRReg fpr = info.fpr();
1169 ASSERT(fpr != InvalidFPRReg);
1170 if (m_fprs.name(fpr) != virtualRegister) {
1171 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
// These formats must never appear as a live register format.
1176 case DataFormatOSRMarker:
1177 case DataFormatDead:
1178 case DataFormatArguments:
1179 RELEASE_ASSERT_NOT_REACHED();
// Direction 2: GPR file -> GenerationInfo.
1184 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
1185 VirtualRegister virtualRegister = iter.name();
1186 if (virtualRegister == InvalidVirtualRegister)
1189 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1191 if (iter.regID() != info.gpr()) {
1192 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1196 if (!(info.registerFormat() & DataFormatJS)) {
1197 if (iter.regID() != info.gpr()) {
1198 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
// JS formats on 32-bit use a tag/payload pair; the register may be either half.
1202 if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
1203 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
// Direction 2 for the FPR file.
1210 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
1211 VirtualRegister virtualRegister = iter.name();
1212 if (virtualRegister == InvalidVirtualRegister)
1215 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1216 if (iter.regID() != info.fpr()) {
1217 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
// Default constructor: an empty temporary holding no register until adopt()
// is called.
1229 GPRTemporary::GPRTemporary()
1231 , m_gpr(InvalidGPRReg)
// Allocates a fresh GPR from the jit's register allocator for the lifetime of
// this temporary.
1235 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1237 , m_gpr(InvalidGPRReg)
1239 m_gpr = m_jit->allocate();
// Allocates a specific GPR (e.g. one required by a calling convention or
// instruction encoding) rather than letting the allocator choose.
1242 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1244 , m_gpr(InvalidGPRReg)
1246 m_gpr = m_jit->allocate(specific);
1249 #if USE(JSVALUE32_64)
// 32-bit only: try to reuse one half (tag or payload, selected by `which`) of
// a JSValueOperand whose node is no longer needed; otherwise allocate fresh.
1250 GPRTemporary::GPRTemporary(
1251 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1253 , m_gpr(InvalidGPRReg)
// A double-format operand has no reusable GPR half.
1255 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1256 m_gpr = m_jit->reuse(op1.gpr(which));
1258 m_gpr = m_jit->allocate();
1260 #endif // USE(JSVALUE32_64)
// Transfers ownership of another temporary's register into this (previously
// empty) temporary, leaving `other` empty.
1262 void GPRTemporary::adopt(GPRTemporary& other)
// This temporary must be empty and `other` must hold a valid register.
1265 ASSERT(m_gpr == InvalidGPRReg);
1266 ASSERT(other.m_jit);
1267 ASSERT(other.m_gpr != InvalidGPRReg);
1268 m_jit = other.m_jit;
1269 m_gpr = other.m_gpr;
1271 other.m_gpr = InvalidGPRReg;
// Allocates a fresh FPR for the lifetime of this temporary.
1274 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1276 , m_fpr(InvalidFPRReg)
1278 m_fpr = m_jit->fprAllocate();
// Reuses op1's FPR when its node has no further uses (avoiding an extra
// register); otherwise allocates a fresh FPR.
1281 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1283 , m_fpr(InvalidFPRReg)
1285 if (m_jit->canReuse(op1.node()))
1286 m_fpr = m_jit->reuse(op1.fpr());
1288 m_fpr = m_jit->fprAllocate();
// Two-operand variant: prefer reusing op1's FPR, then op2's, before falling
// back to a fresh allocation.
1291 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1293 , m_fpr(InvalidFPRReg)
1295 if (m_jit->canReuse(op1.node()))
1296 m_fpr = m_jit->reuse(op1.fpr());
1297 else if (m_jit->canReuse(op2.node()))
1298 m_fpr = m_jit->reuse(op2.fpr());
1300 m_fpr = m_jit->fprAllocate();
1303 #if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand holds an FPR only when in double format, so
// reuse is possible only in that case; otherwise allocate fresh.
1304 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1306 , m_fpr(InvalidFPRReg)
1308 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1309 m_fpr = m_jit->reuse(op1.fpr());
1311 m_fpr = m_jit->fprAllocate();
// Fused double-compare-and-branch: compares the two speculated-double children
// and branches to `taken` on `condition` (fall-through handling elided here).
1315 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1317 BasicBlock* taken = branchNode->takenBlock();
1318 BasicBlock* notTaken = branchNode->notTakenBlock();
1320 SpeculateDoubleOperand op1(this, node->child1());
1321 SpeculateDoubleOperand op2(this, node->child2());
1323 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
// Fused object-equality-and-branch. Speculates both children are objects
// (emitting BadType checks where the abstract state cannot prove it), then
// branches on pointer equality of the two cells. Two speculation strategies:
// the cheap watchpoint-based path when the MasqueradesAsUndefined watchpoint
// is valid, and an explicit structure/type-info check path otherwise.
// (Interior lines elided in this view.)
1327 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1329 BasicBlock* taken = branchNode->takenBlock();
1330 BasicBlock* notTaken = branchNode->notTakenBlock();
1332 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
// If `taken` is the fall-through block, invert the condition and swap the
// targets so the common case falls through without a jump.
1334 if (taken == nextBlock()) {
1335 condition = MacroAssembler::NotEqual;
1336 BasicBlock* tmp = taken;
1341 SpeculateCellOperand op1(this, node->child1());
1342 SpeculateCellOperand op2(this, node->child2());
1344 GPRReg op1GPR = op1.gpr();
1345 GPRReg op2GPR = op2.gpr();
// Cheap path: rely on the masquerades-as-undefined watchpoint and only rule
// out strings (a cell that is not a string is treated as an object here).
1347 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1348 speculationWatchpointForMasqueradesAsUndefined();
1350 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1352 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1354 MacroAssembler::Equal,
1355 MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
1356 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1358 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1360 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1362 MacroAssembler::Equal,
1363 MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
1364 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
// Expensive path: load each cell's structure, rule out strings, and also
// check the MasqueradesAsUndefined type-info flag explicitly.
1367 GPRTemporary structure(this);
1368 GPRReg structureGPR = structure.gpr();
1370 m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
1371 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1373 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1375 MacroAssembler::Equal,
1377 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1379 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1381 MacroAssembler::NonZero,
1382 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
1383 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1385 m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
1386 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1388 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1390 MacroAssembler::Equal,
1392 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1394 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1396 MacroAssembler::NonZero,
1397 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
1398 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Object identity: compare the two cell pointers directly.
1401 branchPtr(condition, op1GPR, op2GPR, taken);
// Fused boolean-compare-and-branch. Specializes when either side is a boolean
// constant (comparing against its encoded JSValue immediate); otherwise
// compares the two speculated-boolean GPRs.
1405 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1407 BasicBlock* taken = branchNode->takenBlock();
1408 BasicBlock* notTaken = branchNode->notTakenBlock();
1410 // The branch instruction will branch to the taken block.
1411 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1412 if (taken == nextBlock()) {
1413 condition = JITCompiler::invert(condition);
1414 BasicBlock* tmp = taken;
// Constant-on-left case: compare op2 against the encoded boolean immediate.
1419 if (isBooleanConstant(node->child1().node())) {
1420 bool imm = valueOfBooleanConstant(node->child1().node());
1421 SpeculateBooleanOperand op2(this, node->child2());
1422 branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
// Constant-on-right case, mirrored.
1423 } else if (isBooleanConstant(node->child2().node())) {
1424 SpeculateBooleanOperand op1(this, node->child1());
1425 bool imm = valueOfBooleanConstant(node->child2().node());
1426 branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
// General case: both operands in registers.
1428 SpeculateBooleanOperand op1(this, node->child1());
1429 SpeculateBooleanOperand op2(this, node->child2());
1430 branch32(condition, op1.gpr(), op2.gpr(), taken);
// Fused int32-compare-and-branch. Same shape as the boolean variant:
// immediate-vs-register when either side is an int32 constant, otherwise
// register-vs-register.
1436 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1438 BasicBlock* taken = branchNode->takenBlock();
1439 BasicBlock* notTaken = branchNode->notTakenBlock();
1441 // The branch instruction will branch to the taken block.
1442 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1443 if (taken == nextBlock()) {
1444 condition = JITCompiler::invert(condition);
1445 BasicBlock* tmp = taken;
1450 if (isInt32Constant(node->child1().node())) {
1451 int32_t imm = valueOfInt32Constant(node->child1().node());
1452 SpeculateInt32Operand op2(this, node->child2());
1453 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1454 } else if (isInt32Constant(node->child2().node())) {
1455 SpeculateInt32Operand op1(this, node->child1());
1456 int32_t imm = valueOfInt32Constant(node->child2().node());
1457 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1459 SpeculateInt32Operand op1(this, node->child1());
1460 SpeculateInt32Operand op2(this, node->child2());
1461 branch32(condition, op1.gpr(), op2.gpr(), taken);
1467 // Returns true if the compare is fused with a subsequent branch.
// Dispatcher for peephole compare+branch fusion: picks the specialized fused
// codegen based on the compare's use kinds (int32, int52, double, boolean,
// object, object-vs-other), falling back to the non-speculative fused branch.
// After emitting, marks the children used and advances the node cursor past
// the branch. (Interior lines elided in this view.)
1468 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
1470 // Fused compare & branch.
1471 unsigned branchIndexInBlock = detectPeepHoleBranch();
1472 if (branchIndexInBlock != UINT_MAX) {
1473 Node* branchNode = m_block->at(branchIndexInBlock);
1475 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1476 // so can be no intervening nodes to also reference the compare.
1477 ASSERT(node->adjustedRefCount() == 1);
1479 if (node->isBinaryUseKind(Int32Use))
1480 compilePeepHoleInt32Branch(node, branchNode, condition);
// Int52 path (the guarding #if line is elided; the #endif comment below
// indicates this case is JSVALUE64-only).
1482 else if (node->isBinaryUseKind(MachineIntUse))
1483 compilePeepHoleInt52Branch(node, branchNode, condition);
1484 #endif // USE(JSVALUE64)
1485 else if (node->isBinaryUseKind(NumberUse))
1486 compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
// CompareEq gets additional specialized cases by use kind.
1487 else if (node->op() == CompareEq) {
1488 if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1489 // Use non-peephole comparison, for now.
1492 if (node->isBinaryUseKind(BooleanUse))
1493 compilePeepHoleBooleanBranch(node, branchNode, condition);
1494 else if (node->isBinaryUseKind(ObjectUse))
1495 compilePeepHoleObjectEquality(node, branchNode);
// Mixed object / object-or-other operands: normalize so the pure-object
// edge is passed first.
1496 else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
1497 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1498 else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
1499 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1501 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1505 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
// The fused paths don't call use() themselves, so do it here, then skip the
// branch node we just folded in.
1509 use(node->child1());
1510 use(node->child2());
1511 m_indexInBlock = branchIndexInBlock;
1512 m_currentNode = branchNode;
// Records in the variable-event stream that `node`'s value has come into
// being, for OSR exit reconstruction. No-op for nodes without a virtual
// register.
1518 void SpeculativeJIT::noticeOSRBirth(Node* node)
1520 if (!node->hasVirtualRegister())
1523 VirtualRegister virtualRegister = node->virtualRegister();
1524 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1526 info.noticeOSRBirth(*m_stream, node, virtualRegister);
// Emits a MovHint: records that `node`'s child now logically occupies the
// given local, by appending a movHint variable event (used by OSR exit).
1529 void SpeculativeJIT::compileMovHint(Node* node)
1531 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1533 m_lastSetOperand = node->local();
1535 Node* child = node->child1().node();
1536 noticeOSRBirth(child);
// UInt32ToNumber is transparent for OSR purposes: its operand is also born.
1538 if (child->op() == UInt32ToNumber)
1539 noticeOSRBirth(child->child1().node());
1541 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
// MovHint followed by a speculation check on the hinted child.
1544 void SpeculativeJIT::compileMovHintAndCheck(Node* node)
1546 compileMovHint(node);
1547 speculate(node, node->child1());
// At the start of an inlined call frame, computes and records a ValueRecovery
// for each argument (including `this`) so the Arguments machinery can
// reconstruct them. Captured arguments are recovered from the stack directly;
// others are recovered according to their argument position's flush format.
// (Interior lines elided in this view.)
1551 void SpeculativeJIT::compileInlineStart(Node* node)
1553 InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
1554 int argumentCountIncludingThis = inlineCallFrame->arguments.size();
1555 unsigned argumentPositionStart = node->argumentPositionStart();
1556 CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
1557 for (int i = 0; i < argumentCountIncludingThis; ++i) {
1558 ValueRecovery recovery;
// Captured arguments always live in their stack slot.
1559 if (codeBlock->isCaptured(argumentToOperand(i)))
1560 recovery = ValueRecovery::alreadyInJSStack();
1562 ArgumentPosition& argumentPosition =
1563 m_jit.graph().m_argumentPositions[argumentPositionStart + i];
1564 ValueSource valueSource;
// Map the flush format to the corresponding in-stack value source.
1565 switch (argumentPosition.flushFormat()) {
1567 case FlushedJSValue:
1568 valueSource = ValueSource(ValueInJSStack);
1571 valueSource = ValueSource(DoubleInJSStack);
1574 valueSource = ValueSource(Int32InJSStack);
1577 valueSource = ValueSource(Int52InJSStack);
1580 valueSource = ValueSource(CellInJSStack);
1582 case FlushedBoolean:
1583 valueSource = ValueSource(BooleanInJSStack);
1586 recovery = computeValueRecoveryFor(valueSource);
1588 // The recovery should refer either to something that has already been
1589 // stored into the stack at the right place, or to a constant,
1590 // since the Arguments code isn't smart enough to handle anything else.
1591 // The exception is the this argument, which we don't really need to be
1593 #if DFG_ENABLE(DEBUG_VERBOSE)
1594 dataLogF("\nRecovery for argument %d: ", i);
1595 recovery.dump(WTF::dataFile());
1597 inlineCallFrame->arguments[i] = recovery;
// Abandons speculative compilation of the current block and resets the
// allocator bookkeeping. NOTE(review): m_compileOkay is set to true here —
// presumably so that compilation of subsequent blocks can proceed after the
// bail; confirm against the elided surrounding code.
1601 void SpeculativeJIT::bail()
1603 m_compileOkay = true;
1605 clearGenerationInfo();
// Generates machine code for m_block: binds the block's entry label, seeds the
// variable-event stream and abstract-interpreter state from the block head,
// then compiles each node in order, skipping CFA-unreachable blocks and nodes
// that shouldCompile() marked dead. Also feeds the minified graph used for
// OSR exit. (Interior lines elided in this view.)
1608 void SpeculativeJIT::compileCurrentBlock()
1610 ASSERT(m_compileOkay);
1615 ASSERT(m_block->isReachable);
1617 if (!m_block->cfaHasVisited) {
1618 // Don't generate code for basic blocks that are unreachable according to CFA.
1619 // But to be sure that nobody has generated a jump to this block, drop in a
// Record the block's entry label so branches/OSR can target it.
1625 m_jit.blockHeads()[m_block->index] = m_jit.label();
1626 #if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
1630 #if DFG_ENABLE(DEBUG_VERBOSE)
1631 dataLog("Setting up state for block ", *m_block, ": ");
1634 m_stream->appendAndLog(VariableEvent::reset());
1636 m_jit.jitAssertHasValidCallFrame();
// All arguments live in their stack slots at block entry.
1638 ASSERT(m_arguments.size() == m_block->variablesAtHead.numberOfArguments());
1639 for (size_t i = 0; i < m_arguments.size(); ++i) {
1640 ValueSource valueSource = ValueSource(ValueInJSStack);
1641 m_arguments[i] = valueSource;
1642 m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
1646 m_state.beginBasicBlock(m_block);
// Seed each local's value source from the variables-at-head info: dead or
// unreferenced locals are SourceIsDead; arguments aliases are special; live
// ones derive from their flush format.
1648 ASSERT(m_variables.size() == m_block->variablesAtHead.numberOfLocals());
1649 for (size_t i = 0; i < m_variables.size(); ++i) {
1650 Node* node = m_block->variablesAtHead.local(i);
1651 ValueSource valueSource;
1653 valueSource = ValueSource(SourceIsDead);
1654 else if (node->variableAccessData()->isArgumentsAlias())
1655 valueSource = ValueSource(ArgumentsSource);
1656 else if (!node->refCount())
1657 valueSource = ValueSource(SourceIsDead);
1659 valueSource = ValueSource::forFlushFormat(node->variableAccessData()->flushFormat());
1660 m_variables[i] = valueSource;
1661 // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
1662 m_stream->appendAndLog(VariableEvent::setLocal(localToOperand(i), valueSource.dataFormat()));
1665 m_lastSetOperand = std::numeric_limits<int>::max();
1666 m_codeOriginForExitTarget = CodeOrigin();
1667 m_codeOriginForExitProfile = CodeOrigin();
1669 #if DFG_ENABLE(DEBUG_VERBOSE)
// Main per-node compilation loop.
1673 for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1674 m_currentNode = m_block->at(m_indexInBlock);
1676 // We may have his a contradiction that the CFA was aware of but that the JIT
1677 // didn't cause directly.
1678 if (!m_state.isValid()) {
1683 m_canExit = m_currentNode->canExit();
1684 bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
1685 m_jit.setForNode(m_currentNode);
1686 m_codeOriginForExitTarget = m_currentNode->codeOriginForExitTarget;
1687 m_codeOriginForExitProfile = m_currentNode->codeOrigin;
// Dead nodes: no codegen, but some ops still need bookkeeping (weak refs,
// minified-graph entries, MovHint/SetLocal variable events).
1688 if (!m_currentNode->shouldGenerate()) {
1689 #if DFG_ENABLE(DEBUG_VERBOSE)
1690 dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x     ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
1692 switch (m_currentNode->op()) {
1694 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1697 case WeakJSConstant:
1698 m_jit.addWeakReference(m_currentNode->weakConstant());
1699 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1703 RELEASE_ASSERT_NOT_REACHED();
1707 compileMovHint(m_currentNode);
1711 m_lastSetOperand = m_currentNode->local();
1712 m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
1717 if (belongsInMinifiedGraph(m_currentNode->op()))
1718 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
// Live nodes: optional logging/debug aids, then actual codegen via compile().
1723 if (verboseCompilationEnabled()) {
1725 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1726 (int)m_currentNode->index(),
1727 m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
1728 #if DFG_ENABLE(DEBUG_VERBOSE)
1734 #if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
1737 #if DFG_ENABLE(XOR_DEBUG_AID)
// Debug aid: two xors cancel out, leaving regT0 unchanged but marking the
// instruction stream with the node index.
1738 m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
1739 m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
1743 m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;
1745 compile(m_currentNode);
1746 if (!m_compileOkay) {
1751 if (belongsInMinifiedGraph(m_currentNode->op())) {
1752 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1753 noticeOSRBirth(m_currentNode);
1756 #if DFG_ENABLE(DEBUG_VERBOSE)
1757 if (m_currentNode->hasResult()) {
1758 GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
1759 dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
1760 if (info.registerFormat() != DataFormatNone) {
1761 if (info.registerFormat() == DataFormatDouble)
1762 dataLogF(", %s", FPRInfo::debugName(info.fpr()));
1763 #if USE(JSVALUE32_64)
1764 else if (info.registerFormat() & DataFormatJS)
1765 dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
1768 dataLogF(", %s", GPRInfo::debugName(info.gpr()));
1776 #if DFG_ENABLE(DEBUG_VERBOSE)
1780 // Make sure that the abstract state is rematerialized for the next node.
1781 if (shouldExecuteEffects)
1782 m_interpreter.executeEffects(m_indexInBlock);
1784 if (m_currentNode->shouldGenerate())
1788 // Perform the most basic verification that children have been used correctly.
1789 #if !ASSERT_DISABLED
// After the block, every GenerationInfo must be dead (all uses consumed).
1790 for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1791 GenerationInfo& info = m_generationInfo[index];
1792 ASSERT(!info.alive());
1797 // If we are making type predictions about our arguments then
1798 // we need to check that they are correct on function entry.
// Emits entry-time type checks for each argument based on its predicted flush
// format. Visible code shows the JSVALUE64 checks (tag-register compares) and
// the JSVALUE32_64 checks (tag-word compares); the #if/#else lines selecting
// between them are elided in this view.
1799 void SpeculativeJIT::checkArgumentTypes()
1801 ASSERT(!m_currentNode);
1802 m_isCheckingArgumentTypes = true;
1803 m_speculationDirection = BackwardSpeculation;
// Exits taken here target bytecode index 0 (function entry).
1804 m_codeOriginForExitTarget = CodeOrigin(0);
1805 m_codeOriginForExitProfile = CodeOrigin(0);
// At entry, everything lives in its JS stack slot.
1807 for (size_t i = 0; i < m_arguments.size(); ++i)
1808 m_arguments[i] = ValueSource(ValueInJSStack);
1809 for (size_t i = 0; i < m_variables.size(); ++i)
1810 m_variables[i] = ValueSource(ValueInJSStack);
1812 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1813 Node* node = m_jit.graph().m_arguments[i];
1814 ASSERT(node->op() == SetArgument);
1815 if (!node->shouldGenerate()) {
1816 // The argument is dead. We don't do any checks for such arguments.
1820 VariableAccessData* variableAccessData = node->variableAccessData();
1821 FlushFormat format = variableAccessData->flushFormat();
// FlushedJSValue accepts anything — no check needed.
1823 if (format == FlushedJSValue)
1826 VirtualRegister virtualRegister = variableAccessData->local();
1828 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
// --- 64-bit checks: value must carry the expected tag bits. ---
1832 case FlushedInt32: {
1833 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1836 case FlushedBoolean: {
// XOR with ValueFalse maps true/false onto 0/1; anything else leaves high bits set.
1837 GPRTemporary temp(this);
1838 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1839 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1840 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
// Cell check: no tag-mask bits may be set.
1844 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1848 RELEASE_ASSERT_NOT_REACHED();
// --- 32-bit checks: compare the tag word against the expected tag. ---
1853 case FlushedInt32: {
1854 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1857 case FlushedBoolean: {
1858 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1862 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1866 RELEASE_ASSERT_NOT_REACHED();
1871 m_isCheckingArgumentTypes = false;
// Top-level driver: checks argument types once, then compiles every block in
// the graph in index order. (The return statement is elided in this view.)
1874 bool SpeculativeJIT::compile()
1876 checkArgumentTypes();
1878 ASSERT(!m_currentNode);
1879 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1880 m_jit.setForBlockIndex(blockIndex);
1881 m_block = m_jit.graph().block(blockIndex);
1882 compileCurrentBlock();
// Collects the entry labels of all OSR-target blocks into m_osrEntryHeads,
// in block-index order (matched up again in linkOSREntries).
1888 void SpeculativeJIT::createOSREntries()
1890 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1891 BasicBlock* block = m_jit.graph().block(blockIndex);
1894 if (!block->isOSRTarget)
1897 // Currently we don't have OSR entry trampolines. We could add them
1899 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
// After linking, walks the OSR-target blocks in the same order as
// createOSREntries and registers each one's final machine-code address.
1903 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1905 unsigned osrEntryIndex = 0;
1906 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1907 BasicBlock* block = m_jit.graph().block(blockIndex);
1910 if (!block->isOSRTarget)
1912 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
// Every collected head must have been consumed exactly once.
1914 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
// Maps a ValueSource to a ValueRecovery: stack-resident sources recover from
// the stack, constant nodes recover as constants, and anything else yields an
// empty (no-op) recovery.
1917 ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
1919 if (valueSource.isInJSStack())
1920 return valueSource.valueRecovery();
1922 ASSERT(valueSource.kind() == HaveNode);
1923 Node* node = valueSource.id().node(m_jit.graph());
1924 if (isConstant(node))
1925 return ValueRecovery::constant(valueOfJSConstant(node));
1927 return ValueRecovery();
// Stores a double into a double-typed array butterfly. Speculates the value
// is a real number (not NaN-impure), then either stores directly (PutByValAlias)
// or performs in-bounds / out-of-bounds handling per the node's ArrayMode,
// growing the public length on a hole store and calling out of line for true
// out-of-bounds stores. (Interior lines elided in this view.)
1930 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
// Var-args children: child3 = value, child4 = storage.
1932 Edge child3 = m_jit.graph().varArgChild(node, 2);
1933 Edge child4 = m_jit.graph().varArgChild(node, 3);
1935 ArrayMode arrayMode = node->arrayMode();
1937 GPRReg baseReg = base.gpr();
1938 GPRReg propertyReg = property.gpr();
1940 SpeculateDoubleOperand value(this, child3);
1942 FPRReg valueReg = value.fpr();
// Double arrays cannot hold impure NaNs: reject NaN via self-inequality.
1945 JSValueRegs(), child3, SpecFullRealNumber,
1947 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1952 StorageOperand storage(this, child4);
1953 GPRReg storageReg = storage.gpr();
// Alias mode: bounds were already checked by a prior access; store directly.
1955 if (node->op() == PutByValAlias) {
1956 // Store the value to the array.
1957 GPRReg propertyReg = property.gpr();
1958 FPRReg valueReg = value.fpr();
1959 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1961 noResult(m_currentNode);
1965 GPRTemporary temporary;
1966 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1968 MacroAssembler::Jump slowCase;
// In-bounds mode: index >= publicLength is a speculation failure.
1970 if (arrayMode.isInBounds()) {
1972 StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
1973 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
// Otherwise: fast path when index < publicLength; index >= vectorLength is
// the slow case; in between is a hole store that bumps publicLength.
1975 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1977 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1979 if (!arrayMode.isOutOfBounds())
1980 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Hole store within capacity: publicLength = index + 1.
1982 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1983 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1985 inBounds.link(&m_jit);
1988 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
// Out-of-bounds stores call the strict/sloppy runtime helper.
1995 if (arrayMode.isOutOfBounds()) {
1996 addSlowPathGenerator(
1999 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2000 NoResult, baseReg, propertyReg, valueReg));
2003 noResult(m_currentNode, UseChildrenCalledExplicitly);
// Loads the char code at a given index of a JSString: bounds-checks against
// the string length, then loads one byte (8-bit ropes) or one 16-bit unit
// depending on the StringImpl's is-8-bit flag. Result is an int32.
2006 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2008 SpeculateCellOperand string(this, node->child1());
2009 SpeculateStrictInt32Operand index(this, node->child2());
2010 StorageOperand storage(this, node->child3());
2012 GPRReg stringReg = string.gpr();
2013 GPRReg indexReg = index.gpr();
2014 GPRReg storageReg = storage.gpr();
2016 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2018 // unsigned comparison so we can filter out negative indices and indices that are too large
2019 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2021 GPRTemporary scratch(this);
2022 GPRReg scratchReg = scratch.gpr();
// scratchReg first holds the StringImpl pointer, then the loaded character.
2024 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2026 // Load the character into scratchReg
2027 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2029 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2030 JITCompiler::Jump cont8Bit = m_jit.jump();
2032 is16Bit.link(&m_jit);
2034 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2036 cont8Bit.link(&m_jit);
2038 int32Result(scratchReg, m_currentNode);
// Emits code for GetByVal on a JSString base: loads the character at `property`
// and materializes the corresponding single-character JSString. In-bounds mode
// speculates on the index; out-of-bounds mode falls back to a slow path.
// NOTE(review): several structural lines (braces, #else/#endif) are elided in
// this excerpt; comments describe only the visible statements.
2041 void SpeculativeJIT::compileGetByValOnString(Node* node)
2043 SpeculateCellOperand base(this, node->child1());
2044 SpeculateStrictInt32Operand property(this, node->child2());
2045 StorageOperand storage(this, node->child3());
2046 GPRReg baseReg = base.gpr();
2047 GPRReg propertyReg = property.gpr();
2048 GPRReg storageReg = storage.gpr();
2050 GPRTemporary scratch(this);
2051 GPRReg scratchReg = scratch.gpr();
2052 #if USE(JSVALUE32_64)
// On 32-bit, a separate tag register is only needed when the access can go
// out of bounds (the result may then be a full JSValue, not just a cell).
2053 GPRTemporary resultTag;
2054 GPRReg resultTagReg = InvalidGPRReg;
2055 if (node->arrayMode().isOutOfBounds()) {
2056 GPRTemporary realResultTag(this);
2057 resultTag.adopt(realResultTag);
2058 resultTagReg = resultTag.gpr();
// If the String.prototype chain is "sane" (no indexed accessors), install
// watchpoints so we can return undefined for OOB without a generic lookup.
2062 if (node->arrayMode().isOutOfBounds()) {
2063 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
2064 if (globalObject->stringPrototypeChainIsSane()) {
2066 speculationWatchpoint(),
2067 globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2069 speculationWatchpoint(),
2070 globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2074 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2076 // unsigned comparison so we can filter out negative indices and indices that are too large
2077 JITCompiler::Jump outOfBounds = m_jit.branch32(
2078 MacroAssembler::AboveOrEqual, propertyReg,
2079 MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2080 if (node->arrayMode().isInBounds())
2081 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
// scratchReg now holds the StringImpl* of the base string.
2083 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2085 // Load the character into scratchReg
2086 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2088 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2089 JITCompiler::Jump cont8Bit = m_jit.jump();
2091 is16Bit.link(&m_jit);
2093 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// Characters >= 0x100 are not in the single-character string cache; go slow.
2095 JITCompiler::Jump bigCharacter =
2096 m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2098 // 8 bit string values don't need the isASCII check.
2099 cont8Bit.link(&m_jit);
2102 // Don't have enough register, construct our own indexed address and load.
2103 m_jit.lshift32(MacroAssembler::TrustedImm32(2), scratchReg);
2104 m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2105 m_jit.loadPtr(scratchReg, scratchReg);
// Alternate path (presumably the other side of a register-pressure #if —
// TODO confirm): index into the small-strings table with a spare register.
2107 GPRTemporary smallStrings(this);
2108 GPRReg smallStringsReg = smallStrings.gpr();
2109 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2110 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
2113 addSlowPathGenerator(
2115 bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2117 if (node->arrayMode().isOutOfBounds()) {
2118 #if USE(JSVALUE32_64)
2119 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
// Sane prototype chain: OOB can return undefined directly via the dedicated
// slow path generator; otherwise fall back to the generic string get-by-val.
2122 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
2123 if (globalObject->stringPrototypeChainIsSane()) {
2125 addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
2126 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
2128 addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
2129 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2130 baseReg, propertyReg)));
2134 addSlowPathGenerator(
2136 outOfBounds, this, operationGetByValStringInt,
2137 scratchReg, baseReg, propertyReg));
2139 addSlowPathGenerator(
2141 outOfBounds, this, operationGetByValStringInt,
2142 resultTagReg, scratchReg, baseReg, propertyReg));
// OOB access may yield a non-cell (e.g. undefined), so report a JSValue;
// strictly in-bounds access always yields a cell (a JSString).
2147 jsValueResult(scratchReg, m_currentNode);
2149 jsValueResult(resultTagReg, scratchReg, m_currentNode);
2152 cellResult(scratchReg, m_currentNode);
// Emits code for String.fromCharCode(code): for codes <= 0xfe, fetch the
// interned single-character string from the VM's small-strings table; for
// larger codes or an uninitialized table slot, call the slow path.
2155 void SpeculativeJIT::compileFromCharCode(Node* node)
2157 SpeculateStrictInt32Operand property(this, node->child1());
2158 GPRReg propertyReg = property.gpr();
2159 GPRTemporary smallStrings(this);
2160 GPRTemporary scratch(this);
2161 GPRReg scratchReg = scratch.gpr();
2162 GPRReg smallStringsReg = smallStrings.gpr();
2164 JITCompiler::JumpList slowCases;
// Unsigned AboveOrEqual also rejects negative char codes.
2165 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2166 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2167 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
// A null table entry means the string has not been created yet — slow path.
2169 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2170 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2171 cellResult(scratchReg, m_currentNode);
// Inspects the current register data format of `node`'s value to decide which
// ToInt32 lowering to use: integer (already int32), double, or generic JSValue.
// Boolean/Cell formats cannot convert here, so speculation is terminated.
2174 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2176 #if DFG_ENABLE(DEBUG_VERBOSE)
2177 dataLogF("checkGeneratedTypeForToInt32@%d ", node->index());
2179 VirtualRegister virtualRegister = node->virtualRegister();
2180 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2182 switch (info.registerFormat()) {
2183 case DataFormatStorage:
// Storage pointers should never flow into a ToInt32.
2184 RELEASE_ASSERT_NOT_REACHED();
2186 case DataFormatBoolean:
2187 case DataFormatCell:
2188 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2189 return GeneratedOperandTypeUnknown;
2191 case DataFormatNone:
2192 case DataFormatJSCell:
2194 case DataFormatJSBoolean:
2195 return GeneratedOperandJSValue;
2197 case DataFormatJSInt32:
2198 case DataFormatInt32:
2199 return GeneratedOperandInteger;
2201 case DataFormatJSDouble:
2202 case DataFormatDouble:
2203 return GeneratedOperandDouble;
2206 RELEASE_ASSERT_NOT_REACHED();
2207 return GeneratedOperandTypeUnknown;
// Emits code for ValueToInt32, dispatching on the child's use kind and, for
// NumberUse/NotCellUse-style operands, on the actual generated data format.
// Converts ints (move), doubles (truncate with slow-path toInt32 call), and
// boxed JSValues (branchy unbox, with per-word-size paths for JSVALUE64 and
// JSVALUE32_64). NOTE(review): case labels, braces and #if/#else lines are
// elided in this excerpt; comments describe only the visible statements.
2211 void SpeculativeJIT::compileValueToInt32(Node* node)
2213 switch (node->child1().useKind()) {
// (Presumably Int32Use — label line elided.) Already an int32: just move.
2215 SpeculateInt32Operand op1(this, node->child1());
2216 GPRTemporary result(this, Reuse, op1);
2217 m_jit.move(op1.gpr(), result.gpr());
2218 int32Result(result.gpr(), node, op1.format());
2223 case MachineIntUse: {
// Int52 input: the low 32 bits are the int32 result.
2224 SpeculateStrictInt52Operand op1(this, node->child1());
2225 GPRTemporary result(this, Reuse, op1);
2226 GPRReg op1GPR = op1.gpr();
2227 GPRReg resultGPR = result.gpr();
2228 m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2229 int32Result(resultGPR, node, DataFormatInt32);
2232 #endif // USE(JSVALUE64)
2236 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2237 case GeneratedOperandInteger: {
2238 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2239 GPRTemporary result(this, Reuse, op1);
2240 m_jit.move(op1.gpr(), result.gpr());
2241 int32Result(result.gpr(), node, op1.format());
2244 case GeneratedOperandDouble: {
2245 GPRTemporary result(this);
2246 SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
2247 FPRReg fpr = op1.fpr();
2248 GPRReg gpr = result.gpr();
// Fast truncate; fall back to the C toInt32 helper if truncation fails.
2249 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2251 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
2253 int32Result(gpr, node);
2256 case GeneratedOperandJSValue: {
2257 GPRTemporary result(this);
// --- JSVALUE64 path: tag bits live in the same 64-bit word ---
2259 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2261 GPRReg gpr = op1.gpr();
2262 GPRReg resultGpr = result.gpr();
2263 FPRTemporary tempFpr(this);
2264 FPRReg fpr = tempFpr.fpr();
2266 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2267 JITCompiler::JumpList converted;
2269 if (node->child1().useKind() == NumberUse) {
// NumberUse: anything without number bits fails the speculation.
2271 JSValueRegs(gpr), node->child1(), SpecFullNumber,
2273 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2275 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
// Non-number, non-cell values are allowed (NotCell speculation).
2278 JSValueRegs(gpr), node->child1(), ~SpecCell,
2280 JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
2282 // It's not a cell: so true turns into 1 and all else turns into 0.
2283 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2284 converted.append(m_jit.jump());
2286 isNumber.link(&m_jit);
2289 // First, if we get here we have a double encoded as a JSValue
2290 m_jit.move(gpr, resultGpr);
2291 unboxDouble(resultGpr, fpr);
// Inline call to the toInt32 helper: spill, call, refill.
2293 silentSpillAllRegisters(resultGpr);
2294 callOperation(toInt32, resultGpr, fpr);
2295 silentFillAllRegisters(resultGpr);
2297 converted.append(m_jit.jump());
2299 isInteger.link(&m_jit);
2300 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2302 converted.link(&m_jit);
// --- JSVALUE32_64 path: separate tag and payload registers ---
2304 Node* childNode = node->child1().node();
2305 VirtualRegister virtualRegister = childNode->virtualRegister();
2306 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2308 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2310 GPRReg payloadGPR = op1.payloadGPR();
2311 GPRReg resultGpr = result.gpr();
2313 JITCompiler::JumpList converted;
// If the value is statically known to be a boxed int32, the payload is it.
2315 if (info.registerFormat() == DataFormatJSInt32)
2316 m_jit.move(payloadGPR, resultGpr);
2318 GPRReg tagGPR = op1.tagGPR();
2319 FPRTemporary tempFpr(this);
2320 FPRReg fpr = tempFpr.fpr();
2321 FPRTemporary scratch(this);
2323 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2325 if (node->child1().useKind() == NumberUse) {
2327 JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecFullNumber,
2329 MacroAssembler::AboveOrEqual, tagGPR,
2330 TrustedImm32(JSValue::LowestTag)));
2332 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2335 JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
2337 JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
2339 // It's not a cell: so true turns into 1 and all else turns into 0.
2340 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2341 m_jit.move(TrustedImm32(0), resultGpr);
2342 converted.append(m_jit.jump());
2344 isBoolean.link(&m_jit);
// Boolean payload is already 0 or 1.
2345 m_jit.move(payloadGPR, resultGpr);
2346 converted.append(m_jit.jump());
2348 isNumber.link(&m_jit);
2351 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2353 silentSpillAllRegisters(resultGpr);
2354 callOperation(toInt32, resultGpr, fpr);
2355 silentFillAllRegisters(resultGpr);
2357 converted.append(m_jit.jump());
2359 isInteger.link(&m_jit);
2360 m_jit.move(payloadGPR, resultGpr);
2362 converted.link(&m_jit);
2365 int32Result(resultGpr, node);
2368 case GeneratedOperandTypeUnknown:
// Only reachable when speculation already failed for this block.
2369 RELEASE_ASSERT(!m_compileOkay);
2372 RELEASE_ASSERT_NOT_REACHED();
// (Presumably BooleanUse — label line elided.) Mask to 0/1.
2377 SpeculateBooleanOperand op1(this, node->child1());
2378 GPRTemporary result(this, Reuse, op1);
2380 m_jit.move(op1.gpr(), result.gpr());
2381 m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
2383 int32Result(result.gpr(), node);
2388 ASSERT(!m_compileOkay);
// Emits code for UInt32ToNumber. If the result can't be speculated as int32,
// always produce a double (adding 2^32 when the sign bit is set, since the
// input is really unsigned). Otherwise speculate that the value is
// non-negative and keep it as an int32, using a roll-forward OSR exit.
2393 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2395 if (!nodeCanSpeculateInt32(node->arithNodeFlags())) {
2396 // We know that this sometimes produces doubles. So produce a double every
2397 // time. This at least allows subsequent code to not have weird conditionals.
2399 SpeculateInt32Operand op1(this, node->child1());
2400 FPRTemporary result(this);
2402 GPRReg inputGPR = op1.gpr();
2403 FPRReg outputFPR = result.fpr();
2405 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
// Signed convert treated the value as negative; add 2^32 to correct it.
2407 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2408 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2409 positive.link(&m_jit);
2411 doubleResult(outputFPR, node);
2415 SpeculateInt32Operand op1(this, node->child1());
2416 GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
2418 m_jit.move(op1.gpr(), result.gpr());
2420 // Test the operand is positive. This is a very special speculation check - we actually
2421 // use roll-forward speculation here, where if this fails, we jump to the baseline
2422 // instruction that follows us, rather than the one we're executing right now. We have
2423 // to do this because by this point, the original values necessary to compile whatever
2424 // operation the UInt32ToNumber originated from might be dead.
2425 forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
2427 int32Result(result.gpr(), node, op1.format());
// Emits code for DoubleAsInt32: converts a double to int32, exiting (with a
// roll-forward check recovering the original double from the FPR) if the
// conversion is lossy, or if the value is -0 and negative zero matters here.
2430 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2432 SpeculateDoubleOperand op1(this, node->child1());
2433 FPRTemporary scratch(this);
2434 GPRTemporary result(this);
2436 FPRReg valueFPR = op1.fpr();
2437 FPRReg scratchFPR = scratch.fpr();
2438 GPRReg resultGPR = result.gpr();
2440 JITCompiler::JumpList failureCases;
// Only check for -0 when the bytecode can observe the difference.
2441 bool negZeroCheck = !bytecodeCanIgnoreNegativeZero(node->arithNodeFlags());
2442 m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR, negZeroCheck);
2443 forwardSpeculationCheck(Overflow, JSValueRegs(), 0, failureCases, ValueRecovery::inFPR(valueFPR));
2445 int32Result(resultGPR, node);
// Emits code for Int32ToDouble. When the operand is proven int32, converts
// directly; otherwise takes a boxed JSValue, type-checks it as a number
// (forward or backward exit depending on NodeExitsForward), and unboxes to a
// double. Separate JSVALUE64 / JSVALUE32_64 variants are interleaved here
// because the #if/#else/#endif lines are elided in this excerpt.
2448 void SpeculativeJIT::compileInt32ToDouble(Node* node)
2450 ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
2452 if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2453 SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2454 FPRTemporary result(this);
2455 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2456 doubleResult(result.fpr(), node);
2460 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2461 FPRTemporary result(this);
// --- JSVALUE64 variant ---
2464 GPRTemporary temp(this);
2466 GPRReg op1GPR = op1.gpr();
2467 GPRReg tempGPR = temp.gpr();
2468 FPRReg resultFPR = result.fpr();
2470 JITCompiler::Jump isInteger = m_jit.branch64(
2471 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2473 if (needsTypeCheck(node->child1(), SpecFullNumber)) {
2474 if (node->flags() & NodeExitsForward) {
// Forward exit: recovery info lets OSR resume at the next instruction.
2476 JSValueRegs(op1GPR), node->child1(), SpecFullNumber,
2477 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister),
2478 ValueRecovery::inGPR(op1GPR, DataFormatJS));
2481 JSValueRegs(op1GPR), node->child1(), SpecFullNumber,
2482 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2486 m_jit.move(op1GPR, tempGPR);
2487 unboxDouble(tempGPR, resultFPR);
2488 JITCompiler::Jump done = m_jit.jump();
2490 isInteger.link(&m_jit);
2491 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
// --- JSVALUE32_64 variant ---
2494 FPRTemporary temp(this);
2496 GPRReg op1TagGPR = op1.tagGPR();
2497 GPRReg op1PayloadGPR = op1.payloadGPR();
2498 FPRReg tempFPR = temp.fpr();
2499 FPRReg resultFPR = result.fpr();
2501 JITCompiler::Jump isInteger = m_jit.branch32(
2502 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2504 if (needsTypeCheck(node->child1(), SpecFullNumber)) {
2505 if (node->flags() & NodeExitsForward) {
2507 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber,
2508 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)),
2509 ValueRecovery::inPair(op1TagGPR, op1PayloadGPR));
2512 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber,
2513 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2517 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2518 JITCompiler::Jump done = m_jit.jump();
2520 isInteger.link(&m_jit);
2521 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2525 doubleResult(resultFPR, node);
// Clamps a double to the byte range [0, 255] (body not visible in this
// excerpt); the compile-time counterpart of compileClampDoubleToByte below.
2528 static double clampDoubleToByte(double d)
// Emits code clamping the int32 in `result` to [0, 255] in place:
// values already in range pass through (unsigned BelowOrEqual also sends
// negatives past this branch), values > 255 become 255, negatives become 0.
2538 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2540 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
// Signed compare: > 255 means clamp high; otherwise the value was negative.
2541 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2542 jit.xorPtr(result, result);
2543 MacroAssembler::Jump clamped = jit.jump();
2545 jit.move(JITCompiler::TrustedImm32(255), result);
2547 inBounds.link(&jit);
// Emits code clamping the double in `source` to a byte in `result`:
// NaN and values <= 0 become 0, values > 255 become 255, otherwise the value
// is rounded half-up (add 0.5, truncate) to the nearest integer.
2550 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2552 // Unordered compare so we pick up NaN
2553 static const double zero = 0;
2554 static const double byteMax = 255;
2555 static const double half = 0.5;
2556 jit.loadDouble(&zero, scratch);
2557 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2558 jit.loadDouble(&byteMax, scratch);
2559 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2561 jit.loadDouble(&half, scratch);
2562 // FIXME: This should probably just use a floating point round!
2563 // https://bugs.webkit.org/show_bug.cgi?id=72054
2564 jit.addDouble(source, scratch);
2565 jit.truncateDoubleToInt32(scratch, result);
2566 MacroAssembler::Jump truncatedInt = jit.jump();
2568 tooSmall.link(&jit);
2569 jit.xorPtr(result, result);
2570 MacroAssembler::Jump zeroed = jit.jump();
2573 jit.move(JITCompiler::TrustedImm32(255), result);
2575 truncatedInt.link(&jit);
// Emits code for GetByVal on an integer typed array: bounds-check against the
// view's length, load with width/signedness from elementSize(type)/isSigned,
// then pick a result representation. Uint32 values that may exceed int32 max
// are promoted to Int52 or double as needed.
2580 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2582 ASSERT(isInt(type));
2584 SpeculateCellOperand base(this, node->child1());
2585 SpeculateStrictInt32Operand property(this, node->child2());
2586 StorageOperand storage(this, node->child3());
2588 GPRReg baseReg = base.gpr();
2589 GPRReg propertyReg = property.gpr();
2590 GPRReg storageReg = storage.gpr();
2592 GPRTemporary result(this);
2593 GPRReg resultReg = result.gpr();
2595 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
// Unsigned AboveOrEqual also rejects negative indices.
2598 Uncountable, JSValueRegs(), 0,
2600 MacroAssembler::AboveOrEqual, propertyReg,
2601 MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength()));
2602 switch (elementSize(type)) {
2605 m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2607 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2611 m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2613 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2616 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
// Anything narrower than 32 bits, or signed 32-bit, always fits in int32.
2621 if (elementSize(type) < 4 || isSigned(type)) {
2622 int32Result(resultReg, node);
2626 ASSERT(elementSize(type) == 4 && !isSigned(type));
// Uint32: speculate non-negative to stay int32 (roll-forward exit if not).
2627 if (node->shouldSpeculateInt32()) {
2628 forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
2629 int32Result(resultReg, node);
2634 if (node->shouldSpeculateMachineInt()) {
2635 m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2636 strictInt52Result(resultReg, node);
// Fallback: produce a double, adding 2^32 when the sign bit was set.
2641 FPRTemporary fresult(this);
2642 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2643 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2644 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2645 positive.link(&m_jit);
2646 doubleResult(fresult.fpr(), node);
// Emits code for PutByVal into an integer typed array. Materializes the value
// into a GPR (converting/clamping by use kind: constant, Int32, Int52, or
// double), then stores at the element width; PutByVal (non-alias form) skips
// the store when the index is out of bounds rather than exiting.
// NOTE(review): case labels, braces and #if/#else lines are elided in this
// excerpt; comments describe only the visible statements.
2649 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2651 ASSERT(isInt(type));
2653 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2654 GPRReg storageReg = storage.gpr();
2656 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2659 GPRReg valueGPR = InvalidGPRReg;
2661 if (valueUse->isConstant()) {
2662 JSValue jsValue = valueOfJSConstant(valueUse.node());
2663 if (!jsValue.isNumber()) {
// Non-numeric constant: this code path can never succeed; bail out.
2664 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2668 double d = jsValue.asNumber();
2669 if (isClamped(type)) {
2670 ASSERT(elementSize(type) == 1);
2671 d = clampDoubleToByte(d);
2673 GPRTemporary scratch(this);
2674 GPRReg scratchReg = scratch.gpr();
2675 m_jit.move(Imm32(toInt32(d)), scratchReg);
2676 value.adopt(scratch);
2677 valueGPR = scratchReg;
2679 switch (valueUse.useKind()) {
// (Presumably Int32Use — label line elided.)
2681 SpeculateInt32Operand valueOp(this, valueUse);
2682 GPRTemporary scratch(this);
2683 GPRReg scratchReg = scratch.gpr();
2684 m_jit.move(valueOp.gpr(), scratchReg);
2685 if (isClamped(type)) {
2686 ASSERT(elementSize(type) == 1);
2687 compileClampIntegerToByte(m_jit, scratchReg);
2689 value.adopt(scratch);
2690 valueGPR = scratchReg;
2695 case MachineIntUse: {
2696 SpeculateStrictInt52Operand valueOp(this, valueUse);
2697 GPRTemporary scratch(this);
2698 GPRReg scratchReg = scratch.gpr();
2699 m_jit.move(valueOp.gpr(), scratchReg);
// 64-bit clamp to [0, 255] for Uint8Clamped, mirroring
// compileClampIntegerToByte but over the full Int52 range.
2700 if (isClamped(type)) {
2701 ASSERT(elementSize(type) == 1);
2702 MacroAssembler::Jump inBounds = m_jit.branch64(
2703 MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2704 MacroAssembler::Jump tooBig = m_jit.branch64(
2705 MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2706 m_jit.move(TrustedImm32(0), scratchReg);
2707 MacroAssembler::Jump clamped = m_jit.jump();
2708 tooBig.link(&m_jit);
2709 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2710 clamped.link(&m_jit);
2711 inBounds.link(&m_jit);
2713 value.adopt(scratch);
2714 valueGPR = scratchReg;
2717 #endif // USE(JSVALUE64)
// (Presumably NumberUse — label line elided.) Double input.
2720 if (isClamped(type)) {
2721 ASSERT(elementSize(type) == 1);
2722 SpeculateDoubleOperand valueOp(this, valueUse);
2723 GPRTemporary result(this);
2724 FPRTemporary floatScratch(this);
2725 FPRReg fpr = valueOp.fpr();
2726 GPRReg gpr = result.gpr();
2727 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2728 value.adopt(result);
2731 SpeculateDoubleOperand valueOp(this, valueUse);
2732 GPRTemporary result(this);
2733 FPRReg fpr = valueOp.fpr();
2734 GPRReg gpr = result.gpr();
// NaN stores as 0 (DoubleEqual fails only for NaN since x == x otherwise).
2735 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2736 m_jit.xorPtr(gpr, gpr);
2737 MacroAssembler::Jump fixed = m_jit.jump();
2738 notNaN.link(&m_jit);
2740 MacroAssembler::Jump failed;
2742 failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2744 failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2746 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2749 value.adopt(result);
2756 RELEASE_ASSERT_NOT_REACHED();
// The value register must not alias the operands we still need.
2761 ASSERT_UNUSED(valueGPR, valueGPR != property);
2762 ASSERT(valueGPR != base);
2763 ASSERT(valueGPR != storageReg);
2764 MacroAssembler::Jump outOfBounds;
// PutByVal semantics: out-of-bounds stores are silently dropped.
2765 if (node->op() == PutByVal)
2766 outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, JSArrayBufferView::offsetOfLength()));
2768 switch (elementSize(type)) {
2770 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2773 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2776 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2781 if (node->op() == PutByVal)
2782 outOfBounds.link(&m_jit);
// Emits code for GetByVal on a float typed array: bounds-check, load (with
// float->double widening for Float32), normalize any NaN to the canonical
// quiet NaN so the bit pattern is a valid boxed JSValue, and return a double.
2786 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2788 ASSERT(isFloat(type));
2790 SpeculateCellOperand base(this, node->child1());
2791 SpeculateStrictInt32Operand property(this, node->child2());
2792 StorageOperand storage(this, node->child3());
2794 GPRReg baseReg = base.gpr();
2795 GPRReg propertyReg = property.gpr();
2796 GPRReg storageReg = storage.gpr();
2798 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2800 FPRTemporary result(this);
2801 FPRReg resultReg = result.fpr();
// Unsigned AboveOrEqual also rejects negative indices.
2803 Uncountable, JSValueRegs(), 0,
2805 MacroAssembler::AboveOrEqual, propertyReg,
2806 MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength()));
2807 switch (elementSize(type)) {
2809 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2810 m_jit.convertFloatToDouble(resultReg, resultReg);
2813 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2817 RELEASE_ASSERT_NOT_REACHED();
// DoubleEqual(x, x) fails only for NaN; replace it with the canonical QNaN.
2820 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
2821 static const double NaN = QNaN;
2822 m_jit.loadDouble(&NaN, resultReg);
2823 notNaN.link(&m_jit);
2825 doubleResult(resultReg, node);
// Emits code for PutByVal into a float typed array: narrows to float for
// Float32, stores at the element width; PutByVal (non-alias form) silently
// skips the store when the index is out of bounds.
2828 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2830 ASSERT(isFloat(type));
2832 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2833 GPRReg storageReg = storage.gpr();
2835 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2836 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2838 SpeculateDoubleOperand valueOp(this, valueUse);
2839 FPRTemporary scratch(this);
2840 FPRReg valueFPR = valueOp.fpr();
2841 FPRReg scratchFPR = scratch.fpr();
2843 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2845 MacroAssembler::Jump outOfBounds;
2846 if (node->op() == PutByVal) {
2847 outOfBounds = m_jit.branch32(
2848 MacroAssembler::AboveOrEqual, property,
2849 MacroAssembler::Address(base, JSArrayBufferView::offsetOfLength()));
2852 switch (elementSize(type)) {
// Narrow to single precision in a scratch FPR; the source stays intact.
2854 m_jit.moveDouble(valueFPR, scratchFPR);
2855 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2856 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2860 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2863 RELEASE_ASSERT_NOT_REACHED();
2865 if (node->op() == PutByVal)
2866 outOfBounds.link(&m_jit);
// Emits the prototype-chain walk for instanceof when the value is known to be
// a cell: checks the prototype is an object, then repeatedly loads the value's
// structure->prototype, comparing against prototypeReg until a match (true) or
// a non-cell terminator (false). The 64-bit and 32-bit variants are
// interleaved here because the #if/#else/#endif lines are elided.
2870 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
2872 // Check that prototype is an object.
2873 m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
2874 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
2876 // Initialize scratchReg with the value being checked.
2877 m_jit.move(valueReg, scratchReg);
2879 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2880 MacroAssembler::Label loop(&m_jit);
2881 m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
// 64-bit: prototype is a boxed JSValue; a non-cell (tag bits set) ends the walk.
2883 m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
2884 MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
2885 m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
// 32-bit: load the payload word; a null payload ends the walk.
2887 m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
2888 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2889 m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2892 // No match - result is false.
2894 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2896 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2898 MacroAssembler::Jump putResult = m_jit.jump();
2900 isInstance.link(&m_jit);
2902 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2904 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2907 putResult.link(&m_jit);
// Emits code for InstanceOf. UntypedUse: test for cell first — non-cells are
// instanceof-false without walking the chain. CellUse: go straight to the
// prototype-chain walk. Result is a boolean (boxed JSValue on 64-bit).
2910 void SpeculativeJIT::compileInstanceOf(Node* node)
2912 if (node->child1().useKind() == UntypedUse) {
2913 // It might not be a cell. Speculate less aggressively.
2914 // Or: it might only be used once (i.e. by us), so we get zero benefit
2915 // from speculating any more aggressively than we absolutely need to.
2917 JSValueOperand value(this, node->child1());
2918 SpeculateCellOperand prototype(this, node->child2());
2919 GPRTemporary scratch(this);
2921 GPRReg prototypeReg = prototype.gpr();
2922 GPRReg scratchReg = scratch.gpr();
// 64-bit: zero tag bits means cell; preload false for the non-cell case.
2925 GPRReg valueReg = value.gpr();
2926 MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
2927 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
// 32-bit: compare the tag word against CellTag instead.
2929 GPRReg valueTagReg = value.tagGPR();
2930 GPRReg valueReg = value.payloadGPR();
2931 MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
2932 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2935 MacroAssembler::Jump done = m_jit.jump();
2937 isCell.link(&m_jit);
2939 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2944 jsValueResult(scratchReg, node, DataFormatJSBoolean);
2946 booleanResult(scratchReg, node);
// CellUse path: both operands are known cells.
2951 SpeculateCellOperand value(this, node->child1());
2952 SpeculateCellOperand prototype(this, node->child2());
2954 GPRTemporary scratch(this);
2956 GPRReg valueReg = value.gpr();
2957 GPRReg prototypeReg = prototype.gpr();
2958 GPRReg scratchReg = scratch.gpr();
2960 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2963 jsValueResult(scratchReg, node, DataFormatJSBoolean);
2965 booleanResult(scratchReg, node);
// Emits code for ArithAdd/ValueAdd, dispatching on the binary use kind:
// int32 (with constant-immediate forms and overflow checks unless the
// bytecode can truncate), Int52 (overflow check only when an input could
// already be Int52), double, and the generic ValueAdd fallback.
// NOTE(review): case labels and braces are elided in this excerpt.
2969 void SpeculativeJIT::compileAdd(Node* node)
2971 switch (node->binaryUseKind()) {
// (Presumably Int32Use — label line elided.) Constant-left form.
2973 if (isNumberConstant(node->child1().node())) {
2974 int32_t imm1 = valueOfInt32Constant(node->child1().node());
2975 SpeculateInt32Operand op2(this, node->child2());
2976 GPRTemporary result(this);
2978 if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
// Truncating add: wraparound is fine, no overflow check needed.
2979 m_jit.move(op2.gpr(), result.gpr());
2980 m_jit.add32(Imm32(imm1), result.gpr());
2982 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2984 int32Result(result.gpr(), node);
// Constant-right form, mirror of the above.
2988 if (isNumberConstant(node->child2().node())) {
2989 SpeculateInt32Operand op1(this, node->child1());
2990 int32_t imm2 = valueOfInt32Constant(node->child2().node());
2991 GPRTemporary result(this);
2993 if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2994 m_jit.move(op1.gpr(), result.gpr());
2995 m_jit.add32(Imm32(imm2), result.gpr());
2997 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2999 int32Result(result.gpr(), node);
// General register-register form.
3003 SpeculateInt32Operand op1(this, node->child1());
3004 SpeculateInt32Operand op2(this, node->child2());
3005 GPRTemporary result(this, Reuse, op1, op2);
3007 GPRReg gpr1 = op1.gpr();
3008 GPRReg gpr2 = op2.gpr();
3009 GPRReg gprResult = result.gpr();
3011 if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3012 if (gpr1 == gprResult)
3013 m_jit.add32(gpr2, gprResult);
3015 m_jit.move(gpr2, gprResult);
3016 m_jit.add32(gpr1, gprResult);
3019 MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
// When the result aliases an input, record a SpeculativeAdd recovery so the
// OSR exit can undo the add and recover the original operand value.
3021 if (gpr1 == gprResult)
3022 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3023 else if (gpr2 == gprResult)
3024 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3026 speculationCheck(Overflow, JSValueRegs(), 0, check);
3029 int32Result(gprResult, node);
3034 case MachineIntUse: {
3035 // Will we need an overflow check? If we can prove that neither input can be
3036 // Int52 then the overflow check will not be necessary.
3037 if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3038 && !m_state.forNode(node->child2()).couldBeType(SpecInt52)
3039 SpeculateWhicheverInt52Operand op1(this, node->child1());
3040 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3041 GPRTemporary result(this, Reuse, op1);
3042 m_jit.move(op1.gpr(), result.gpr());
3043 m_jit.add64(op2.gpr(), result.gpr());
3044 int52Result(result.gpr(), node, op1.format());
3048 SpeculateInt52Operand op1(this, node->child1());
3049 SpeculateInt52Operand op2(this, node->child2());
3050 GPRTemporary result(this, Reuse, op1, op2);
3051 m_jit.move(op1.gpr(), result.gpr());
3053 Int52Overflow, JSValueRegs(), 0,
3054 m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3055 int52Result(result.gpr(), node);
3058 #endif // USE(JSVALUE64)
// (Presumably NumberUse — label line elided.) Plain double add.
3061 SpeculateDoubleOperand op1(this, node->child1());
3062 SpeculateDoubleOperand op2(this, node->child2());
3063 FPRTemporary result(this, op1, op2);
3065 FPRReg reg1 = op1.fpr();
3066 FPRReg reg2 = op2.fpr();
3067 m_jit.addDouble(reg1, reg2, result.fpr());
3069 doubleResult(result.fpr(), node);
// Generic fallback: only ValueAdd can reach here.
3074 RELEASE_ASSERT(node->op() == ValueAdd);
3075 compileValueAdd(node);
3080 RELEASE_ASSERT_NOT_REACHED();
// Inline fast path for MakeRope: allocates a JSRopeString cell for two or
// three rope fibers (children already speculated as strings by fixup) and
// fills in its fibers, flags and length. If inline allocation fails, a slow
// path calls operationMakeRope2/operationMakeRope3.
// NOTE(review): this chunk has elided lines (e.g. the declarations of
// numOpGPRs/opGPRs, 'else' lines, 'break's and several closing braces are
// not visible). Comments below describe only the visible code.
3085 void SpeculativeJIT::compileMakeRope(Node* node)
    // Fixup guarantees string children, so no type checks are emitted here.
3087     ASSERT(node->child1().useKind() == KnownStringUse);
3088     ASSERT(node->child2().useKind() == KnownStringUse);
3089     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3091     SpeculateCellOperand op1(this, node->child1());
3092     SpeculateCellOperand op2(this, node->child2());
3093     SpeculateCellOperand op3(this, node->child3());
3094     GPRTemporary result(this);
3095     GPRTemporary allocator(this);
3096     GPRTemporary scratch(this);
    // Gather the fiber registers; the third slot is InvalidGPRReg when the
    // node has only two children.
3100     opGPRs[0] = op1.gpr();
3101     opGPRs[1] = op2.gpr();
3102     if (node->child3()) {
3103         opGPRs[2] = op3.gpr();
3106         opGPRs[2] = InvalidGPRReg;
3109     GPRReg resultGPR = result.gpr();
3110     GPRReg allocatorGPR = allocator.gpr();
3111     GPRReg scratchGPR = scratch.gpr();
    // Inline-allocate the rope cell from the immortal-structure-destructor
    // allocator; on failure control jumps to 'slowPath'.
3113     JITCompiler::JumpList slowPath;
3114     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
3115     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3116     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
    // A null value pointer marks the string as a rope; store the fibers and
    // zero any unused fiber slots up to s_maxInternalRopeLength.
3118     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3119     for (unsigned i = 0; i < numOpGPRs; ++i)
3120         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3121     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3122         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    // Derive the rope's flags and length from its fibers. allocatorGPR is
    // reused as the running length sum; scratchGPR accumulates the AND of
    // the fiber flags (the rope is 8-bit only if every fiber is 8-bit).
3123     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3124     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3125     for (unsigned i = 1; i < numOpGPRs; ++i) {
3126         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
    // NOTE(review): no visible overflow check on the summed length —
    // confirm a guard exists in the elided lines or upstream.
3127         m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
3129     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3130     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3131     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
    // Slow path: call the out-of-line rope constructor matching the number
    // of fibers.
3133     switch (numOpGPRs) {
3135         addSlowPathGenerator(slowPathCall(
3136             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3139         addSlowPathGenerator(slowPathCall(
3140             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3143         RELEASE_ASSERT_NOT_REACHED();
3147     cellResult(resultGPR, node);
// Code generation for ArithSub. Dispatches on the node's binary use kind:
// an int32 path (with immediate-operand specializations and optional
// overflow speculation), an int52 path on 64-bit builds, and a double path.
// NOTE(review): this chunk has elided lines — case labels, 'else' lines,
// 'break's and closing braces are not all visible; comments describe only
// the visible statements.
3150 void SpeculativeJIT::compileArithSub(Node* node)
3152     switch (node->binaryUseKind()) {
    // Presumably the Int32Use case (label elided): specialize when the
    // right child is a constant so the subtrahend becomes an immediate.
3154         if (isNumberConstant(node->child2().node())) {
3155             SpeculateInt32Operand op1(this, node->child1());
3156             int32_t imm2 = valueOfInt32Constant(node->child2().node());
3157             GPRTemporary result(this);
    // If the bytecode truncates the result, a plain sub32 suffices; no
    // overflow check is needed.
3159             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3160                 m_jit.move(op1.gpr(), result.gpr());
3161                 m_jit.sub32(Imm32(imm2), result.gpr());
    // With constant blinding the blinded immediate needs an extra scratch
    // register for the checked subtract.
3163 #if ENABLE(JIT_CONSTANT_BLINDING)
3164                 GPRTemporary scratch(this);
3165                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3167                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3171             int32Result(result.gpr(), node);
    // Mirror specialization: left child constant, right child in a register.
3175         if (isNumberConstant(node->child1().node())) {
3176             int32_t imm1 = valueOfInt32Constant(node->child1().node());
3177             SpeculateInt32Operand op2(this, node->child2());
3178             GPRTemporary result(this);
3180             m_jit.move(Imm32(imm1), result.gpr());
3181             if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
3182                 m_jit.sub32(op2.gpr(), result.gpr());
3184                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3186             int32Result(result.gpr(), node);
    // General register-register int32 subtract.
3190         SpeculateInt32Operand op1(this, node->child1());
3191         SpeculateInt32Operand op2(this, node->child2());
3192         GPRTemporary result(this);
3194         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3195             m_jit.move(op1.gpr(), result.gpr());
3196             m_jit.sub32(op2.gpr(), result.gpr());
3198             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3200             int32Result(result.gpr(), node);
3205     case MachineIntUse: {
    // Int52 path (64-bit only). If abstract interpretation proves neither
    // input can actually be a full Int52, the 64-bit subtract of two values
    // with at most 52 significant bits cannot overflow, so skip the check.
3206         // Will we need an overflow check? If we can prove that neither input can be
3207         // Int52 then the overflow check will not be necessary.
3208         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3209             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3210             SpeculateWhicheverInt52Operand op1(this, node->child1());
3211             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3212             GPRTemporary result(this, Reuse, op1);
3213             m_jit.move(op1.gpr(), result.gpr());
3214             m_jit.sub64(op2.gpr(), result.gpr());
3215             int52Result(result.gpr(), node, op1.format());
    // Otherwise do a checked 64-bit subtract and speculate on Int52Overflow.
3219         SpeculateInt52Operand op1(this, node->child1());
3220         SpeculateInt52Operand op2(this, node->child2());
3221         GPRTemporary result(this, Reuse, op1, op2);
3222         m_jit.move(op1.gpr(), result.gpr());
3224             Int52Overflow, JSValueRegs(), 0,
3225             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3226         int52Result(result.gpr(), node);
3229 #endif // USE(JSVALUE64)
    // Double path (case label elided): plain floating-point subtract.
3232         SpeculateDoubleOperand op1(this, node->child1());
3233         SpeculateDoubleOperand op2(this, node->child2());
3234         FPRTemporary result(this, op1);
3236         FPRReg reg1 = op1.fpr();
3237         FPRReg reg2 = op2.fpr();
3238         m_jit.subDouble(reg1, reg2, result.fpr());
3240         doubleResult(result.fpr(), node);
3245         RELEASE_ASSERT_NOT_REACHED();
// Code generation for ArithNegate. Dispatches on the child's use kind:
// int32 (with optional overflow and negative-zero speculation), int52 on
// 64-bit builds, and double.
// NOTE(review): this chunk has elided lines — case labels, 'else' lines,
// 'break's and closing braces are not all visible; comments describe only
// the visible statements.
3250 void SpeculativeJIT::compileArithNegate(Node* node)
3252     switch (node->child1().useKind()) {
    // Presumably the Int32Use case (label elided).
3254         SpeculateInt32Operand op1(this, node->child1());
3255         GPRTemporary result(this);
3257         m_jit.move(op1.gpr(), result.gpr());
3259         // Note: there is no notion of being not used as a number, but someone
3260         // caring about negative zero.
    // Three levels of strictness: truncating bytecode needs no checks;
    // if negative zero is ignorable, only check for overflow (-INT_MIN);
    // otherwise the branchTest32 against 0x7fffffff rejects both 0
    // (negative-zero producer) and INT_MIN (overflow) before negating.
3262         if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
3263             m_jit.neg32(result.gpr());
3264         else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
3265             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3267             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3268             m_jit.neg32(result.gpr());
3271         int32Result(result.gpr(), node);
3276     case MachineIntUse: {
    // Int52 path (64-bit only): if the value provably isn't a full Int52,
    // the 64-bit negate cannot overflow, so only the negative-zero
    // speculation (when required) is emitted.
3277         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3278             SpeculateWhicheverInt52Operand op1(this, node->child1());
3279             GPRTemporary result(this);
3280             GPRReg op1GPR = op1.gpr();
3281             GPRReg resultGPR = result.gpr();
3282             m_jit.move(op1GPR, resultGPR);
3283             m_jit.neg64(resultGPR);
3284             if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3286                 NegativeZero, JSValueRegs(), 0,
3287                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3289             int52Result(resultGPR, node, op1.format());
    // Otherwise use a checked 64-bit negate plus, if needed, the
    // negative-zero check.
3293         SpeculateInt52Operand op1(this, node->child1());
3294         GPRTemporary result(this);
3295         GPRReg op1GPR = op1.gpr();
3296         GPRReg resultGPR = result.gpr();
3297         m_jit.move(op1GPR, resultGPR);
3299             Int52Overflow, JSValueRegs(), 0,
3300             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3301         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3303             NegativeZero, JSValueRegs(), 0,
3304             m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3306         int52Result(resultGPR, node);
3309 #endif // USE(JSVALUE64)
    // Double path (case label elided): plain floating-point negate.
3312         SpeculateDoubleOperand op1(this, node->child1());
3313         FPRTemporary result(this);
3315         m_jit.negateDouble(op1.fpr(), result.fpr());
3317         doubleResult(result.fpr(), node);
3322         RELEASE_ASSERT_NOT_REACHED();
3326 void SpeculativeJIT::compileArithIMul(Node* node)
3328 SpeculateInt32Operand op1(this, node->child1());
3329 SpeculateInt32Operand op2(this, node->child2());
3330 GPRTemporary result(this);
3332 GPRReg reg1 = op1.gpr();
3333 GPRReg reg2 = op2.gpr();
3335 m_jit.move(reg1, result.gpr());
3336 m_jit.mul32(reg2, result.gpr());
3337 int32Result(result.gpr(), node);
// Code generation for ArithMul. Dispatches on the node's binary use kind:
// int32 (truncated or overflow-checked, plus negative-zero speculation),
// int52 on 64-bit builds via a shifted 64-bit multiply, and double.
// NOTE(review): this chunk has elided lines — case labels, 'break's and
// closing braces are not all visible; comments describe only the visible
// statements.
3341 void SpeculativeJIT::compileArithMul(Node* node)
3343     switch (node->binaryUseKind()) {
    // Presumably the Int32Use case (label elided).
3345         SpeculateInt32Operand op1(this, node->child1());
3346         SpeculateInt32Operand op2(this, node->child2());
3347         GPRTemporary result(this);
3349         GPRReg reg1 = op1.gpr();
3350         GPRReg reg2 = op2.gpr();
3352         // We can perform truncated multiplications if we get to this point, because if the
3353         // fixup phase could not prove that it would be safe, it would have turned us into
3354         // a double multiplication.
3355         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3356             m_jit.move(reg1, result.gpr());
3357             m_jit.mul32(reg2, result.gpr());
    // Otherwise emit the overflow-checked multiply and speculate.
3360             Overflow, JSValueRegs(), 0,
3361             m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3364         // Check for negative zero, if the users of this node care about such things.
    // A zero product came from -0 only if one operand was negative; bail
    // in that case, otherwise +0 is the correct result.
3365         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3366             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3367             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3368             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3369             resultNonZero.link(&m_jit);
3372         int32Result(result.gpr(), node);
3377     case MachineIntUse: {
3378         // This is super clever. We want to do an int52 multiplication and check the
3379         // int52 overflow bit. There is no direct hardware support for this, but we do
3380         // have the ability to do an int64 multiplication and check the int64 overflow
3381         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3382         // registers, with the high 12 bits being sign-extended. We can do:
3386         // This will give us a left-shifted int52 (value is in high 52 bits, low 16
3387         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3388         // multiplication overflows is identical to whether the 'a * b' 52-bit
3389         // multiplication overflows.
3391         // In our nomenclature, this is:
3393         //     strictInt52(a) * int52(b) => int52
3395         // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
3398         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3399         // we just do whatever is more convenient for op1 and have op2 do the
3400         // opposite. This ensures that we do at most one shift.
3402         SpeculateWhicheverInt52Operand op1(this, node->child1());
3403         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3404         GPRTemporary result(this);
3406         GPRReg op1GPR = op1.gpr();
3407         GPRReg op2GPR = op2.gpr();
3408         GPRReg resultGPR = result.gpr();
3410         m_jit.move(op1GPR, resultGPR);
3412             Int52Overflow, JSValueRegs(), 0,
3413             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
    // Same negative-zero policy as the int32 path, on 64-bit registers.
3415         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3416             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3417                 MacroAssembler::NonZero, resultGPR);
3419                 NegativeZero, JSValueRegs(), 0,
3420                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3422                 NegativeZero, JSValueRegs(), 0,
3423                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3424             resultNonZero.link(&m_jit);
3427         int52Result(resultGPR, node);
3430 #endif // USE(JSVALUE64)
    // Double path (case label elided): plain floating-point multiply.
3433         SpeculateDoubleOperand op1(this, node->child1());
3434         SpeculateDoubleOperand op2(this, node->child2());
3435         FPRTemporary result(this, op1, op2);
3437         FPRReg reg1 = op1.fpr();
3438         FPRReg reg2 = op2.fpr();
3440         m_jit.mulDouble(reg1, reg2, result.fpr());
3442         doubleResult(result.fpr(), node);
3447         RELEASE_ASSERT_NOT_REACHED();
// Code generation for ArithDiv. The int32 path is architecture-specific:
// x86/x86_64 uses cdq + idivl (which pins eax/edx), ARMv7s uses sdiv with a
// multiply-back exactness check. The double path is a plain divide.
// NOTE(review): this chunk has elided lines — case labels, 'else' lines,
// some declarations (e.g. op2TempGPR/temp), 'break's and closing braces are
// not all visible; comments describe only the visible statements.
3452 void SpeculativeJIT::compileArithDiv(Node* node)
3454     switch (node->binaryUseKind()) {
3456 #if CPU(X86) || CPU(X86_64)
    // x86 idiv requires the dividend in eax (sign-extended into edx via
    // cdq) and clobbers both; claim them as temporaries up front.
3457         SpeculateInt32Operand op1(this, node->child1());
3458         SpeculateInt32Operand op2(this, node->child2());
3459         GPRTemporary eax(this, X86Registers::eax);
3460         GPRTemporary edx(this, X86Registers::edx);
3461         GPRReg op1GPR = op1.gpr();
3462         GPRReg op2GPR = op2.gpr();
    // If the divisor landed in eax/edx it must be relocated, since idiv
    // overwrites both registers.
3466         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3467             op2TempGPR = allocate();
3470             op2TempGPR = InvalidGPRReg;
3471             if (op1GPR == X86Registers::eax)
3472                 temp = X86Registers::edx;
3474                 temp = X86Registers::eax;
3477         ASSERT(temp != op1GPR);
3478         ASSERT(temp != op2GPR);
    // divisor+1 > 1 (unsigned) iff divisor is neither 0 nor -1 — the only
    // denominators that can trap or overflow idiv.
3480         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3482         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3484         JITCompiler::JumpList done;
    // If the result must be a number (not truncated), bail on divisor 0
    // and on INT_MIN / -1, both of which have no int32 quotient.
3485         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3486             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3487             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3489             // This is the case where we convert the result to an int after we're done, and we
3490             // already know that the denominator is either -1 or 0. So, if the denominator is
3491             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3492             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3493             // are happy to fall through to a normal division, since we're just dividing
3494             // something by negative 1.
3496             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3497             m_jit.move(TrustedImm32(0), eax.gpr());
3498             done.append(m_jit.jump());
3500             notZero.link(&m_jit);
3501             JITCompiler::Jump notNeg2ToThe31 =
3502                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3503             m_jit.move(op1GPR, eax.gpr());
3504             done.append(m_jit.jump());
3506             notNeg2ToThe31.link(&m_jit);
3509         safeDenominator.link(&m_jit);
3511         // If the user cares about negative zero, then speculate that we're not about
3512         // to produce negative zero.
    // 0 / negative => -0; bail only when numerator is zero and divisor
    // negative.
3513         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3514             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3515             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3516             numeratorNonZero.link(&m_jit);
3519         if (op2TempGPR != InvalidGPRReg) {
3520             m_jit.move(op2GPR, op2TempGPR);
3521             op2GPR = op2TempGPR;
    // cdq sign-extends eax into edx:eax; idivl leaves quotient in eax and
    // remainder in edx.
3524         m_jit.move(op1GPR, eax.gpr());
3525         m_jit.assembler().cdq();
3526         m_jit.assembler().idivl_r(op2GPR);
3528         if (op2TempGPR != InvalidGPRReg)
3531         // Check that there was no remainder. If there had been, then we'd be obligated to
3532         // produce a double result instead.
3533         if (bytecodeUsesAsNumber(node->arithNodeFlags()))
3534             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3537         int32Result(eax.gpr(), node);
3538 #elif CPU(APPLE_ARMV7S)
    // ARMv7s path: hardware sdiv truncates toward zero with no trap, so
    // only the negative-zero and exactness speculations are needed.
3539         SpeculateInt32Operand op1(this, node->child1());
3540         SpeculateInt32Operand op2(this, node->child2());
3541         GPRReg op1GPR = op1.gpr();
3542         GPRReg op2GPR = op2.gpr();
3543         GPRTemporary quotient(this);
3544         GPRTemporary multiplyAnswer(this);
3546         // If the user cares about negative zero, then speculate that we're not about
3547         // to produce negative zero.
3548         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3549             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3550             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3551             numeratorNonZero.link(&m_jit);
3554         m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
3556         // Check that there was no remainder. If there had been, then we'd be obligated to
3557         // produce a double result instead.
    // Exactness check: quotient * divisor must reproduce the dividend
    // without overflow, otherwise the division was not exact.
3558         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3559             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3560             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3563         int32Result(quotient.gpr(), node);
3565         RELEASE_ASSERT_NOT_REACHED();
    // Double path (case label elided): plain floating-point divide.
3571         SpeculateDoubleOperand op1(this, node->child1());
3572         SpeculateDoubleOperand op2(this, node->child2());
3573         FPRTemporary result(this, op1);
3575         FPRReg reg1 = op1.fpr();
3576         FPRReg reg2 = op2.fpr();
3577         m_jit.divDouble(reg1, reg2, result.fpr());
3579         doubleResult(result.fpr(), node);
3584         RELEASE_ASSERT_NOT_REACHED();
3589 void SpeculativeJIT::compileArithMod(Node* node)
3591 switch (node->binaryUseKind()) {
3593 // In the fast path, the dividend value could be the final result
3594 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3595 SpeculateStrictInt32Operand op1(this, node->child1());
3597 if (isInt32Constant(node->child2().node())) {
3598 int32_t divisor = valueOfInt32Constant(node->child2().node());
3599 if (divisor > 0 && hasOneBitSet(divisor)) {
3600 ASSERT(divisor != 1);
3601 unsigned logarithm = WTF::fastLog2(divisor);
3602 GPRReg dividendGPR = op1.gpr();
3603 GPRTemporary result(this);
3604 GPRReg resultGPR = result.gpr();
3606 // This is what LLVM generates. It's pretty crazy. Here's my
3607 // attempt at understanding it.
3609 // First, compute either divisor - 1, or 0, depending on whether
3610 // the dividend is negative:
3612 // If dividend < 0: resultGPR = divisor - 1
3613 // If dividend >= 0: resultGPR = 0
3614 m_jit.move(dividendGPR, resultGPR);
3615 m_jit.rshift32(TrustedImm32(31), resultGPR);
3616 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3618 // Add in the dividend, so that:
3620 // If dividend < 0: resultGPR = dividend + divisor - 1
3621 // If dividend >= 0: resultGPR = dividend
3622 m_jit.add32(dividendGPR, resultGPR);
3624 // Mask so as to only get the *high* bits. This rounds down
3625 // (towards negative infinity) resultGPR to the nearest multiple
3626 // of divisor, so that:
3628 // If dividend < 0: resultGPR = floor((dividend + divisor - 1) / divisor)
3629 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3631 // Note that this can be simplified to:
3633 // If dividend < 0: resultGPR = ceil(dividend / divisor)
3634 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3636 // Note that if the dividend is negative, resultGPR will also be negative.
3637 // Regardless of the sign of dividend, resultGPR will be rounded towards
3638 // zero, because of how things are conditionalized.
3639 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3641 // Subtract resultGPR from dividendGPR, which yields the remainder:
3643 // resultGPR = dividendGPR - resultGPR
3644 m_jit.neg32(resultGPR);
3645 m_jit.add32(dividendGPR, resultGPR);
3647 if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3648 // Check that we're not about to create negative zero.
3649 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3650 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3651 numeratorPositive.link(&m_jit);
3654 int32Result(resultGPR, node);
3659 #if CPU(X86) || CPU(X86_64)
3660 if (isInt32Constant(node->child2().node())) {
3661 int32_t divisor = valueOfInt32Constant(node->child2().node());
3662 if (divisor && divisor != -1) {
3663 GPRReg op1Gpr = op1.gpr();
3665 GPRTemporary eax(this, X86Registers::eax);
3666 GPRTemporary edx(this, X86Registers::edx);
3667 GPRTemporary scratch(this);
3668 GPRReg scratchGPR = scratch.gpr();
3671 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3672 op1SaveGPR = allocate();
3673 ASSERT(op1Gpr != op1SaveGPR);
3674 m_jit.move(op1Gpr, op1SaveGPR);
3676 op1SaveGPR = op1Gpr;
3677 ASSERT(op1SaveGPR != X86Registers::eax);
3678 ASSERT(op1SaveGPR != X86Registers::edx);
3680 m_jit.move(op1Gpr, eax.gpr());
3681 m_jit.move(TrustedImm32(divisor), scratchGPR);
3682 m_jit.assembler().cdq();
3683 m_jit.assembler().idivl_r(scratchGPR);
3684 if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3685 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3686 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3687 numeratorPositive.link(&m_jit);
3690 if (op1SaveGPR != op1Gpr)
3693 int32Result(edx.gpr(), node);
3699 SpeculateInt32Operand op2(this, node->child2());
3700 #if CPU(X86) || CPU(X86_64)
3701 GPRTemporary eax(this, X86Registers::eax);
3702 GPRTemporary edx(this, X86Registers::edx);
3703 GPRReg op1GPR = op1.gpr();
3704 GPRReg op2GPR = op2.gpr();
3710 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3711 op2TempGPR = allocate();
3714 op2TempGPR = InvalidGPRReg;
3715 if (op1GPR == X86Registers::eax)
3716 temp = X86Registers::edx;
3718 temp = X86Registers::eax;
3721 if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3722 op1SaveGPR = allocate();
3723 ASSERT(op1GPR != op1SaveGPR);
3724 m_jit.move(op1GPR, op1SaveGPR);
3726 op1SaveGPR = op1GPR;
3728 ASSERT(temp != op1GPR);
3729 ASSERT(temp != op2GPR);
3730 ASSERT(op1SaveGPR != X86Registers::eax);
3731 ASSERT(op1SaveGPR != X86Registers::edx);
3733 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3735 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3737 JITCompiler::JumpList done;
3739 // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3740 // separate case for that. But it probably doesn't matter so much.
3741 if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3742 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3743 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3745 // This is the case where we convert the result to an int after we're done, and we
3746 // already know that the denominator is either -1 or 0. So, if the denominator is
3747 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3748 // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3749 // happy to fall through to a normal division, since we're just dividing something
3752 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3753 m_jit.move(TrustedImm32(0), edx.gpr());
3754 done.append(m_jit.jump());
3756 notZero.link(&m_jit);
3757 JITCompiler::Jump notNeg2ToThe31 =
3758 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3759 m_jit.move(TrustedImm32(0), edx.gpr());
3760 done.append(m_jit.jump());
3762 notNeg2ToThe31.link(&m_jit);
3765 safeDenominator.link(&m_jit);
3767 if (op2TempGPR != InvalidGPRReg) {
3768 m_jit.move(op2GPR, op2TempGPR);
3769 op2GPR = op2TempGPR;
3772 m_jit.move(op1GPR, eax.gpr());
3773 m_jit.assembler().cdq();
3774 m_jit.assembler().idivl_r(op2GPR);
3776 if (op2TempGPR != InvalidGPRReg)
3779 // Check that we're not about to create negative zero.
3780 if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3781 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3782 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3783 numeratorPositive.link(&m_jit);
3786 if (op1SaveGPR != op1GPR)
3790 int32Result(edx.gpr(), node);
3792 #elif CPU(APPLE_ARMV7S)
3793 GPRTemporary temp(this);