/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "Arguments.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGBinarySwitch.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"

namespace JSC { namespace DFG {
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
    , m_arguments(jit.codeBlock()->numParameters())
    , m_variables(jit.graph().m_localVars)
    , m_lastSetOperand(std::numeric_limits<int>::max())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}
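// Emits the inline fast path for allocating a JSArray with the given structure: the
// butterfly storage is bump-allocated first, then the JSArray cell itself, with any
// failure branching to a shared slow path that calls operationNewArrayWithSize.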
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(adoptPtr(
        new CallArrayAllocatorSlowPathGenerator(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements)));
}
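// The speculation-check helpers below record OSR exits: each failing jump (or jump
// list) is appended to the exit info, and an OSRExit entry captures enough state to
// reconstruct bytecode execution at this point. The "backward" variants exit to the
// state before the current node; the plain speculationCheck() variants additionally
// convert the last exit to a forward exit when the current speculation direction is
// ForwardSpeculation.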
void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
}
JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return 0;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(JITCompiler::JumpList());
    OSRExit& exit = m_jit.jitCode()->osrExit[
        m_jit.jitCode()->appendOSRExit(OSRExit(
            kind, jsValueSource,
            m_jit.graph().methodOfGettingAValueProfileFor(node),
            this, m_stream->size()))];
    exit.m_watchpointIndex = m_jit.jitCode()->appendWatchpoint(
        JumpReplacementWatchpoint(m_jit.watchpointLabel()));
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
    return &m_jit.jitCode()->watchpoints[exit.m_watchpointIndex];
}

JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
{
    return speculationWatchpoint(kind, JSValueSource(), 0);
}
void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
{
    m_jit.jitCode()->lastOSRExit().convertToForward(
        m_block, m_currentNode, m_indexInBlock, valueRecovery);
}

void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    convertLastOSRExitToForward(valueRecovery);
}
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpeculativeJIT was terminated.\n");
#endif
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}
void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(slowPathGenerator);
}

void SpeculativeJIT::runSlowPathGenerators()
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
#endif
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}
// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif
void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}
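// A SilentRegisterSavePlan describes how to save a live register across a call and
// how to restore or rematerialize its value afterwards, without disturbing the
// register allocation state. A typical pairing (sketch) wraps a call like so:
//
//     SilentRegisterSavePlan plan = silentSavePlanForGPR(spillMe, gpr);
//     silentSpill(plan);
//     // ... emit the call ...
//     silentFill(plan, scratchGPR);
//
// Constants are rematerialized on fill rather than spilled, and values that already
// have an up-to-date stack slot are simply reloaded.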
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(isInt32Constant(node));
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
        fillAction = DoNothingForFill;
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(isBooleanConstant(node));
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            JSValue value = valueOfJSConstant(node);
            ASSERT_UNUSED(value, value.isCell());
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (valueOfJSConstant(node).isCell())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else if (info.spillFormat() == DataFormatDouble) {
            ASSERT(registerFormat == DataFormatJSDouble);
            fillAction = LoadDoubleBoxDouble;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
        // It was already spilled previously and not as a double, which means we need unboxing.
        ASSERT(info.spillFormat() & DataFormatJS);
        fillAction = LoadJSUnboxDouble;
    } else
        fillAction = LoadDouble;
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
        break;
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadDoubleBoxDouble:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadJSUnboxDouble:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
        unboxDouble(canTrample, plan.fpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
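// The two jumpSlowForUnwantedArrayMode() overloads below assume tempGPR already holds
// the structure's indexing-type byte; they mask the bits relevant to the ArrayMode
// and return the jump(s) taken on a mismatch.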
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    default:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
}
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::String:
        expectedClassInfo = JSString::info();
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
        m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::Arguments:
        expectedClassInfo = Arguments::info();
        break;
    default:
        expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType());
        break;
    }

    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    m_jit.loadPtr(
        MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}
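// Arrayify converts an object's storage to the indexing shape demanded by the array
// mode; the fast path merely checks that the structure (or indexing type) already
// matches and otherwise defers to the ArrayifySlowPathGenerator.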
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakPtr(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureOffset()),
            node->structure()));
    } else {
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);

        m_jit.load8(
            MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));

    noResult(m_currentNode);
}
void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateInt32Operand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}
void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}
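// Write barriers: in this configuration the barrier bodies compile to nothing; the
// overloads below only emit profiling counters under WRITE_BARRIER_PROFILING, after
// filtering out cases that can be proven not to need a barrier (non-cell values,
// already-marked cells).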
void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(jit);
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);
    ASSERT(owner != scratch1);
    ASSERT(owner != scratch2);
    ASSERT(scratch1 != scratch2);

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(ownerGPR);
    UNUSED_PARAM(valueGPR);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    if (isKnownNotCell(valueUse.node()))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(ownerGPR);
    UNUSED_PARAM(value);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    if (Heap::isMarked(value))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(valueGPR);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(useKind);

    if (isKnownNotCell(valueUse.node()))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}
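// 'in' with a constant identifier key is compiled as a patchable jump that can later
// be repatched with an inline structure check; everything else falls back to the
// generic operation call.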
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (isConstant(node->child1().node())) {
        JSString* string =
            jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node()));
        if (string && string->tryGetValueImpl()
            && string->tryGetValueImpl()->isIdentifier()) {
            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            MacroAssembler::PatchableJump jump = m_jit.patchableJump();

            OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), baseGPR,
                string->tryGetValueImpl());

            m_jit.addIn(InRecord(
                node->codeOrigin, jump, slowPath.get(), safeCast<int8_t>(baseGPR),
                safeCast<int8_t>(resultGPR), usedRegisters()));
            addSlowPathGenerator(slowPath.release());

            base.use();

#if USE(JSVALUE64)
            jsValueResult(
                resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
#else
            booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
#endif
            return;
        }
    }

    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
#if USE(JSVALUE64)
    jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
#else
    booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
#endif
}
bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}
#ifndef NDEBUG
static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}
void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
#endif
#if DFG_ENABLE(CONSISTENCY_CHECK)
void SpeculativeJIT::checkConsistency()
{
    bool failed = false;

    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }
    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }

    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        VirtualRegister virtualRegister = (VirtualRegister)i;
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        if (!info.alive())
            continue;
        switch (info.registerFormat()) {
        case DataFormatNone:
            break;
        case DataFormatJS:
        case DataFormatJSInt32:
        case DataFormatJSDouble:
        case DataFormatJSCell:
        case DataFormatJSBoolean:
#if USE(JSVALUE32_64)
            break;
#endif
        case DataFormatInt32:
        case DataFormatCell:
        case DataFormatBoolean:
        case DataFormatStorage: {
            GPRReg gpr = info.gpr();
            ASSERT(gpr != InvalidGPRReg);
            if (m_gprs.name(gpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
                failed = true;
            }
            break;
        }
        case DataFormatDouble: {
            FPRReg fpr = info.fpr();
            ASSERT(fpr != InvalidFPRReg);
            if (m_fprs.name(fpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
                failed = true;
            }
            break;
        }
        case DataFormatOSRMarker:
        case DataFormatDead:
        case DataFormatArguments:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;

        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
#if USE(JSVALUE64)
        if (iter.regID() != info.gpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
#else
        if (!(info.registerFormat() & DataFormatJS)) {
            if (iter.regID() != info.gpr()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        } else {
            if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        }
#endif
    }

    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;

        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        if (iter.regID() != info.fpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
    }

    if (failed) {
        dump();
        CRASH();
    }
}
#endif
GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}
FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif
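// Peephole branch fusion: when a compare node is immediately followed by the Branch
// that consumes it, the pair is compiled as a single conditional jump. Each helper
// below jumps to the taken block and falls through (or jumps) to notTaken, inverting
// the condition when the taken block is the fall-through successor.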
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}
void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        speculationWatchpointForMasqueradesAsUndefined();

        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
    } else {
        GPRTemporary structure(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}
void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isBooleanConstant(node->child1().node())) {
        bool imm = valueOfBooleanConstant(node->child1().node());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
    } else if (isBooleanConstant(node->child2().node())) {
        SpeculateBooleanOperand op1(this, node->child1());
        bool imm = valueOfBooleanConstant(node->child2().node());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}
void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isInt32Constant(node->child1().node())) {
        int32_t imm = valueOfInt32Constant(node->child1().node());
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (isInt32Constant(node->child2().node())) {
        SpeculateInt32Operand op1(this, node->child1());
        int32_t imm = valueOfInt32Constant(node->child2().node());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}
// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so there can be no intervening nodes to also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
        else if (node->isBinaryUseKind(NumberUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}
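// OSR bookkeeping: record in the variable event stream when a node's value is born
// in a form that OSR exit needs to know about.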
void SpeculativeJIT::noticeOSRBirth(Node* node)
{
    if (!node->hasVirtualRegister())
        return;

    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    info.noticeOSRBirth(*m_stream, node, virtualRegister);
}
void SpeculativeJIT::compileMovHint(Node* node)
{
    ASSERT(node->containsMovHint() && node->op() != ZombieHint);

    m_lastSetOperand = node->local();

    Node* child = node->child1().node();
    noticeOSRBirth(child);

    if (child->op() == UInt32ToNumber)
        noticeOSRBirth(child->child1().node());

    m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
}

void SpeculativeJIT::compileMovHintAndCheck(Node* node)
{
    compileMovHint(node);
    speculate(node, node->child1());
    noResult(node);
}
void SpeculativeJIT::compileInlineStart(Node* node)
{
    InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
    int argumentCountIncludingThis = inlineCallFrame->arguments.size();
    unsigned argumentPositionStart = node->argumentPositionStart();
    CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    for (int i = 0; i < argumentCountIncludingThis; ++i) {
        ValueRecovery recovery;
        if (codeBlock->isCaptured(argumentToOperand(i)))
            recovery = ValueRecovery::alreadyInJSStack();
        else {
            ArgumentPosition& argumentPosition =
                m_jit.graph().m_argumentPositions[argumentPositionStart + i];
            ValueSource valueSource;
            switch (argumentPosition.flushFormat()) {
            case DeadFlush:
            case FlushedJSValue:
                valueSource = ValueSource(ValueInJSStack);
                break;
            case FlushedDouble:
                valueSource = ValueSource(DoubleInJSStack);
                break;
            case FlushedInt32:
                valueSource = ValueSource(Int32InJSStack);
                break;
            case FlushedCell:
                valueSource = ValueSource(CellInJSStack);
                break;
            case FlushedBoolean:
                valueSource = ValueSource(BooleanInJSStack);
                break;
            }
            recovery = computeValueRecoveryFor(valueSource);
        }
        // The recovery should refer either to something that has already been
        // stored into the stack at the right place, or to a constant,
        // since the Arguments code isn't smart enough to handle anything else.
        // The exception is the this argument, which we don't really need to be
        // able to recover.
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\nRecovery for argument %d: ", i);
        recovery.dump(WTF::dataFile());
#endif
        inlineCallFrame->arguments[i] = recovery;
    }
}
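// bail() abandons code generation for the current block after a contradiction or
// failure: it resets m_compileOkay so later blocks can still be processed, plants a
// breakpoint, and drops all generation info.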
void SpeculativeJIT::bail()
{
    m_compileOkay = true;
    m_jit.breakpoint();
    clearGenerationInfo();
}
void SpeculativeJIT::compileCurrentBlock()
{
    ASSERT(m_compileOkay);

    if (!m_block)
        return;

    ASSERT(m_block->isReachable);

    if (!m_block->cfaHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
        m_jit.breakpoint();
        return;
    }

    m_jit.blockHeads()[m_block->index] = m_jit.label();
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Setting up state for block ", *m_block, ": ");
#endif

    m_stream->appendAndLog(VariableEvent::reset());

    m_jit.jitAssertHasValidCallFrame();

    ASSERT(m_arguments.size() == m_block->variablesAtHead.numberOfArguments());
    for (size_t i = 0; i < m_arguments.size(); ++i) {
        ValueSource valueSource = ValueSource(ValueInJSStack);
        m_arguments[i] = valueSource;
        m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
    }

    m_state.reset();
    m_state.beginBasicBlock(m_block);

    ASSERT(m_variables.size() == m_block->variablesAtHead.numberOfLocals());
    for (size_t i = 0; i < m_variables.size(); ++i) {
        Node* node = m_block->variablesAtHead.local(i);
        ValueSource valueSource;
        if (!node)
            valueSource = ValueSource(SourceIsDead);
        else if (node->variableAccessData()->isArgumentsAlias())
            valueSource = ValueSource(ArgumentsSource);
        else if (!node->refCount())
            valueSource = ValueSource(SourceIsDead);
        else
            valueSource = ValueSource::forFlushFormat(node->variableAccessData()->flushFormat());
        m_variables[i] = valueSource;
        // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
        m_stream->appendAndLog(VariableEvent::setLocal(localToOperand(i), valueSource.dataFormat()));
    }

    m_lastSetOperand = std::numeric_limits<int>::max();
    m_codeOriginForExitTarget = CodeOrigin();
    m_codeOriginForExitProfile = CodeOrigin();

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("\n");
#endif

    for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
        m_currentNode = m_block->at(m_indexInBlock);

        // We may have hit a contradiction that the CFA was aware of but that the JIT
        // didn't cause directly.
        if (!m_state.isValid()) {
            bail();
            return;
        }

        m_canExit = m_currentNode->canExit();
        bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
        m_jit.setForNode(m_currentNode);
        m_codeOriginForExitTarget = m_currentNode->codeOriginForExitTarget;
        m_codeOriginForExitProfile = m_currentNode->codeOrigin;
        if (!m_currentNode->shouldGenerate()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
            switch (m_currentNode->op()) {
            case JSConstant:
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;

            case WeakJSConstant:
                m_jit.addWeakReference(m_currentNode->weakConstant());
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;

            case SetLocal:
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case MovHint:
                compileMovHint(m_currentNode);
                break;

            case ZombieHint: {
                m_lastSetOperand = m_currentNode->local();
                m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
                break;
            }

            default:
                if (belongsInMinifiedGraph(m_currentNode->op()))
                    m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
            }
        } else {

            if (verboseCompilationEnabled()) {
                dataLogF(
                    "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                    (int)m_currentNode->index(),
                    m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("   ");
#else
                dataLog("\n");
#endif
            }
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
            m_jit.breakpoint();
#endif
#if DFG_ENABLE(XOR_DEBUG_AID)
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
#endif
            checkConsistency();

            m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;

            compile(m_currentNode);
            if (!m_compileOkay) {
                bail();
                return;
            }

            if (belongsInMinifiedGraph(m_currentNode->op())) {
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                noticeOSRBirth(m_currentNode);
            }

#if DFG_ENABLE(DEBUG_VERBOSE)
            if (m_currentNode->hasResult()) {
                GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
                dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
                if (info.registerFormat() != DataFormatNone) {
                    if (info.registerFormat() == DataFormatDouble)
                        dataLogF(", %s", FPRInfo::debugName(info.fpr()));
#if USE(JSVALUE32_64)
                    else if (info.registerFormat() & DataFormatJS)
                        dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
#endif
                    else
                        dataLogF(", %s", GPRInfo::debugName(info.gpr()));
                }
            }
#endif
        }

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\n");
#endif

        // Make sure that the abstract state is rematerialized for the next node.
        if (shouldExecuteEffects)
            m_interpreter.executeEffects(m_indexInBlock);

        if (m_currentNode->shouldGenerate())
            checkConsistency();
    }

    // Perform the most basic verification that children have been used correctly.
#if !ASSERT_DISABLED
    for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
        GenerationInfo& info = m_generationInfo[index];
        ASSERT(!info.alive());
    }
#endif
}
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry.
void SpeculativeJIT::checkArgumentTypes()
{
    ASSERT(!m_currentNode);
    m_isCheckingArgumentTypes = true;
    m_speculationDirection = BackwardSpeculation;
    m_codeOriginForExitTarget = CodeOrigin(0);
    m_codeOriginForExitProfile = CodeOrigin(0);

    for (size_t i = 0; i < m_arguments.size(); ++i)
        m_arguments[i] = ValueSource(ValueInJSStack);
    for (size_t i = 0; i < m_variables.size(); ++i)
        m_variables[i] = ValueSource(ValueInJSStack);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = m_jit.graph().m_arguments[i];
        ASSERT(node->op() == SetArgument);
        if (!node->shouldGenerate()) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }

        VariableAccessData* variableAccessData = node->variableAccessData();
        FlushFormat format = variableAccessData->flushFormat();

        if (format == FlushedJSValue)
            continue;

        VirtualRegister virtualRegister = variableAccessData->local();

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));

#if USE(JSVALUE64)
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
            break;
        }
        case FlushedBoolean: {
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#else
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
            break;
        }
        case FlushedBoolean: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }
    m_isCheckingArgumentTypes = false;
}
bool SpeculativeJIT::compile()
{
    checkArgumentTypes();

    ASSERT(!m_currentNode);
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        m_jit.setForBlockIndex(blockIndex);
        m_block = m_jit.graph().block(blockIndex);
        compileCurrentBlock();
    }
    linkBranches();
    return true;
}
void SpeculativeJIT::createOSREntries()
{
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        BasicBlock* block = m_jit.graph().block(blockIndex);
        if (!block)
            continue;
        if (!block->isOSRTarget)
            continue;

        // Currently we don't have OSR entry trampolines. We could add them
        // soon.
        m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
    }
}

void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
{
    unsigned osrEntryIndex = 0;
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        BasicBlock* block = m_jit.graph().block(blockIndex);
        if (!block)
            continue;
        if (!block->isOSRTarget)
            continue;
        m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
    }
    ASSERT(osrEntryIndex == m_osrEntryHeads.size());
}
ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
{
    if (valueSource.isInJSStack())
        return valueSource.valueRecovery();

    ASSERT(valueSource.kind() == HaveNode);
    Node* node = valueSource.id().node(m_jit.graph());
    if (isConstant(node))
        return ValueRecovery::constant(valueOfJSConstant(node));

    return ValueRecovery();
}
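// PutByVal on a double array: the stored value is type-checked to be a real number
// (the branch below fails on NaN), then written with bounds handling appropriate to
// the array mode, taking a slow path call for out-of-bounds stores.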
void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
{
    Edge child3 = m_jit.graph().varArgChild(node, 2);
    Edge child4 = m_jit.graph().varArgChild(node, 3);
    
    ArrayMode arrayMode = node->arrayMode();
    
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    
    SpeculateDoubleOperand value(this, child3);
    
    FPRReg valueReg = value.fpr();
    
    DFG_TYPE_CHECK(
        JSValueRegs(), child3, SpecRealNumber,
        m_jit.branchDouble(
            MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
    
    if (!m_compileOkay)
        return;
    
    StorageOperand storage(this, child4);
    GPRReg storageReg = storage.gpr();
    
    if (node->op() == PutByValAlias) {
        // Store the value to the array.
        GPRReg propertyReg = property.gpr();
        FPRReg valueReg = value.fpr();
        m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
        
        noResult(m_currentNode);
        return;
    }
    
    GPRTemporary temporary;
    GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
    
    MacroAssembler::Jump slowCase;
    
    if (arrayMode.isInBounds()) {
        speculationCheck(
            StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
            m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
    } else {
        MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
        
        slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
        
        if (!arrayMode.isOutOfBounds())
            speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
        
        m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
        m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
        
        inBounds.link(&m_jit);
    }
    
    m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
    
    base.use();
    property.use();
    value.use();
    storage.use();
    
    if (arrayMode.isOutOfBounds()) {
        addSlowPathGenerator(
            slowPathCall(
                slowCase, this,
                m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
                NoResult, baseReg, propertyReg, valueReg));
    }
    
    noResult(m_currentNode, UseChildrenCalledExplicitly);
}
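
// String.charCodeAt fast path: the index is checked against the string length
// with a single unsigned comparison, then the character is loaded from either
// the 8-bit or the 16-bit representation of the underlying StringImpl.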
void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());
    
    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();
    
    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
    
    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
    
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    
    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
    
    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
    
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();
    
    is16Bit.link(&m_jit);
    
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
    
    cont8Bit.link(&m_jit);
    
    int32Result(scratchReg, m_currentNode);
}
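
// String get-by-val. In-bounds accesses load the character and return one of
// the VM's cached single-character strings. For out-of-bounds accesses, when
// the String and Object prototype chains are known to be sane (watched for
// transitions below) the slow path can return undefined directly; otherwise
// it falls back to the fully generic lookup.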
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();
    
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE32_64)
    GPRTemporary resultTag;
    GPRReg resultTagReg = InvalidGPRReg;
    if (node->arrayMode().isOutOfBounds()) {
        GPRTemporary realResultTag(this);
        resultTag.adopt(realResultTag);
        resultTagReg = resultTag.gpr();
    }
#endif

    if (node->arrayMode().isOutOfBounds()) {
        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
        if (globalObject->stringPrototypeChainIsSane()) {
            m_jit.addLazily(
                speculationWatchpoint(),
                globalObject->stringPrototype()->structure()->transitionWatchpointSet());
            m_jit.addLazily(
                speculationWatchpoint(),
                globalObject->objectPrototype()->structure()->transitionWatchpointSet());
        }
    }

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
    
    // unsigned comparison so we can filter out negative indices and indices that are too large
    JITCompiler::Jump outOfBounds = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg,
        MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
    
    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
    
    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
    
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();
    
    is16Bit.link(&m_jit);
    
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
    
    JITCompiler::Jump bigCharacter =
        m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
    
    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);
    
#if CPU(X86)
    // Don't have enough register, construct our own indexed address and load.
    m_jit.lshift32(MacroAssembler::TrustedImm32(2), scratchReg);
    m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
    m_jit.loadPtr(scratchReg, scratchReg);
#else
    GPRTemporary smallStrings(this);
    GPRReg smallStringsReg = smallStrings.gpr();
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
#endif
    
    addSlowPathGenerator(
        slowPathCall(
            bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
    
    if (node->arrayMode().isOutOfBounds()) {
#if USE(JSVALUE32_64)
        m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif
        
        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
        if (globalObject->stringPrototypeChainIsSane()) {
#if USE(JSVALUE64)
            addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
                outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
#else
            addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
                outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
                baseReg, propertyReg)));
#endif
        } else {
#if USE(JSVALUE64)
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    scratchReg, baseReg, propertyReg));
#else
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    resultTagReg, scratchReg, baseReg, propertyReg));
#endif
        }
        
#if USE(JSVALUE64)
        jsValueResult(scratchReg, m_currentNode);
#else
        jsValueResult(resultTagReg, scratchReg, m_currentNode);
#endif
    } else
        cellResult(scratchReg, m_currentNode);
}
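
// String.fromCharCode fast path: character codes below 0xff are satisfied from
// the VM's single-character string cache; anything larger, or a hole in the
// cache, defers to operationStringFromCharCode.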
void SpeculativeJIT::compileFromCharCode(Node* node)
{
    SpeculateStrictInt32Operand property(this, node->child1());
    GPRReg propertyReg = property.gpr();
    GPRTemporary smallStrings(this);
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    GPRReg smallStringsReg = smallStrings.gpr();
    
    JITCompiler::JumpList slowCases;
    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
    
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
    cellResult(scratchReg, m_currentNode);
}

GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("checkGeneratedTypeForToInt32@%d   ", node->index());
#endif
    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
    
    switch (info.registerFormat()) {
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();
        
    case DataFormatBoolean:
    case DataFormatCell:
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return GeneratedOperandTypeUnknown;
        
    case DataFormatNone:
    case DataFormatJSCell:
    case DataFormatJS:
    case DataFormatJSBoolean:
        return GeneratedOperandJSValue;
        
    case DataFormatJSInt32:
    case DataFormatInt32:
        return GeneratedOperandInteger;
        
    case DataFormatJSDouble:
    case DataFormatDouble:
        return GeneratedOperandDouble;
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return GeneratedOperandTypeUnknown;
    }
}
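
// ValueToInt32 follows the ECMAScript ToInt32 conversion for the cases the
// speculation admits: int32s pass through, doubles truncate (calling out to
// toInt32 when the fast truncation fails), booleans become 0 or 1, and
// anything else has already been excluded by a type check.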
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        m_jit.move(op1.gpr(), result.gpr());
        int32Result(result.gpr(), node, op1.format());
        return;
    }
    
    case NumberUse:
    case NotCellUse: {
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            int32Result(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandDouble: {
            GPRTemporary result(this);
            SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
            FPRReg fpr = op1.fpr();
            GPRReg gpr = result.gpr();
            JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
            
            addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
            
            int32Result(gpr, node);
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
            
            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();
            
            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
            JITCompiler::JumpList converted;
            
            if (node->child1().useKind() == NumberUse) {
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecNumber,
                    m_jit.branchTest64(
                        MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
            } else {
                JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
                
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCell,
                    m_jit.branchTest64(
                        JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
                
                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
                converted.append(m_jit.jump());
                
                isNumber.link(&m_jit);
            }
            
            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);
            
            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);
            
            converted.append(m_jit.jump());
            
            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);
            
            converted.link(&m_jit);
#else
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
            
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
            
            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();
            
            JITCompiler::JumpList converted;
            
            if (info.registerFormat() == DataFormatJSInt32)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);
                
                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
                
                if (node->child1().useKind() == NumberUse) {
                    DFG_TYPE_CHECK(
                        JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
                    
                    DFG_TYPE_CHECK(
                        JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
                        m_jit.branch32(
                            JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
                    
                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());
                    
                    isBoolean.link(&m_jit);
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());
                    
                    isNumber.link(&m_jit);
                }
                
                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
                
                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);
                
                converted.append(m_jit.jump());
                
                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);
                
                converted.link(&m_jit);
            }
#endif
            int32Result(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
    
    case BooleanUse: {
        SpeculateBooleanOperand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        
        m_jit.move(op1.gpr(), result.gpr());
        m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
        
        int32Result(result.gpr(), node);
        return;
    }
    
    default:
        ASSERT(!m_compileOkay);
        return;
    }
}
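
// UInt32ToNumber re-interprets an int32 that is really an unsigned value. If
// the result cannot be proven to fit in an int32, we produce a double instead:
// a negative bit pattern is fixed up by adding 2^32. For example, 0x80000000
// read as an int32 is -2147483648; adding 2^32 yields 2147483648.0, the
// correct uint32 value.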
void SpeculativeJIT::compileUInt32ToNumber(Node* node)
{
    if (!nodeCanSpeculateInt32(node->arithNodeFlags())) {
        // We know that this sometimes produces doubles. So produce a double every
        // time. This at least allows subsequent code to not have weird conditionals.
        
        SpeculateInt32Operand op1(this, node->child1());
        FPRTemporary result(this);
        
        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();
        
        m_jit.convertInt32ToDouble(inputGPR, outputFPR);
        
        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);
        
        doubleResult(outputFPR, node);
        return;
    }
    
    SpeculateInt32Operand op1(this, node->child1());
    GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
    
    m_jit.move(op1.gpr(), result.gpr());
    
    // Test the operand is positive. This is a very special speculation check - we actually
    // use roll-forward speculation here, where if this fails, we jump to the baseline
    // instruction that follows us, rather than the one we're executing right now. We have
    // to do this because by this point, the original values necessary to compile whatever
    // operation the UInt32ToNumber originated from might be dead.
    forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
    
    int32Result(result.gpr(), node, op1.format());
}

void SpeculativeJIT::compileDoubleAsInt32(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);
    
    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();
    
    JITCompiler::JumpList failureCases;
    bool negZeroCheck = !bytecodeCanIgnoreNegativeZero(node->arithNodeFlags());
    m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR, negZeroCheck);
    forwardSpeculationCheck(Overflow, JSValueRegs(), 0, failureCases, ValueRecovery::inFPR(valueFPR));
    
    int32Result(resultGPR, node);
}

void SpeculativeJIT::compileInt32ToDouble(Node* node)
{
    ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
    
    if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
        SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);
        m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
        doubleResult(result.fpr(), node);
        return;
    }
    
    JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
    FPRTemporary result(this);
    
#if USE(JSVALUE64)
    GPRTemporary temp(this);
    
    GPRReg op1GPR = op1.gpr();
    GPRReg tempGPR = temp.gpr();
    FPRReg resultFPR = result.fpr();
    
    JITCompiler::Jump isInteger = m_jit.branch64(
        MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
    
    if (needsTypeCheck(node->child1(), SpecNumber)) {
        if (node->flags() & NodeExitsForward) {
            forwardTypeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister),
                ValueRecovery::inGPR(op1GPR, DataFormatJS));
        } else {
            backwardTypeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
        }
    }
    
    m_jit.move(op1GPR, tempGPR);
    unboxDouble(tempGPR, resultFPR);
    JITCompiler::Jump done = m_jit.jump();
    
    isInteger.link(&m_jit);
    m_jit.convertInt32ToDouble(op1GPR, resultFPR);
    done.link(&m_jit);
#else
    FPRTemporary temp(this);
    
    GPRReg op1TagGPR = op1.tagGPR();
    GPRReg op1PayloadGPR = op1.payloadGPR();
    FPRReg tempFPR = temp.fpr();
    FPRReg resultFPR = result.fpr();
    
    JITCompiler::Jump isInteger = m_jit.branch32(
        MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
    
    if (needsTypeCheck(node->child1(), SpecNumber)) {
        if (node->flags() & NodeExitsForward) {
            forwardTypeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)),
                ValueRecovery::inPair(op1TagGPR, op1PayloadGPR));
        } else {
            backwardTypeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }
    }
    
    unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
    JITCompiler::Jump done = m_jit.jump();
    
    isInteger.link(&m_jit);
    m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
    done.link(&m_jit);
#endif
    
    doubleResult(resultFPR, node);
}
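
// Clamp a double to the Uint8ClampedArray range [0, 255], rounding by adding
// 0.5 before the caller truncates. For example -3.2 clamps to 0, 126.6 rounds
// to 127, 300 clamps to 255, and NaN fails the (d > 0) test and becomes 0.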
static double clampDoubleToByte(double d)
{
    d += 0.5;
    if (!(d > 0))
        d = 0;
    else if (d > 255)
        d = 255;
    return d;
}
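
// Integer clamp to [0, 255] without explicit sign tests: the unsigned
// BelowOrEqual 0xff comparison passes only for 0..255 (a negative value looks
// like a huge unsigned number), and the signed GreaterThan 0xff comparison
// then separates the too-big case (becomes 255) from the negative case
// (becomes 0).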
static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
{
    MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
    MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
    jit.xorPtr(result, result);
    MacroAssembler::Jump clamped = jit.jump();
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);
    clamped.link(&jit);
    inBounds.link(&jit);
}

static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static const double zero = 0;
    static const double byteMax = 255;
    static const double half = 0.5;
    jit.loadDouble(&zero, scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(&byteMax, scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
    
    jit.loadDouble(&half, scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);
    MacroAssembler::Jump truncatedInt = jit.jump();
    
    tooSmall.link(&jit);
    jit.xorPtr(result, result);
    MacroAssembler::Jump zeroed = jit.jump();
    
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);
    
    truncatedInt.link(&jit);
    zeroed.link(&jit);
}
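
// Integer typed-array loads dispatch on element size and signedness. The only
// interesting case is a uint32 element whose top bit may be set: if the node
// is not known to produce an int32 we convert to double and add 2^32 for
// negative-looking bit patterns, as in compileUInt32ToNumber above.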
void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));
    
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();
    
    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();
    
    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
    
    speculationCheck(
        Uncountable, JSValueRegs(), 0,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg,
            MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength())));
    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (isSigned(type))
            m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    if (elementSize(type) < 4 || isSigned(type)) {
        int32Result(resultReg, node);
        return;
    }
    
    ASSERT(elementSize(type) == 4 && !isSigned(type));
    if (node->shouldSpeculateInt32()) {
        forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
        int32Result(resultReg, node);
        return;
    }
    
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}

void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));
    
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();
    
    Edge valueUse = m_jit.graph().varArgChild(node, 2);
    
    GPRTemporary value;
    GPRReg valueGPR = InvalidGPRReg;
    
    if (valueUse->isConstant()) {
        JSValue jsValue = valueOfJSConstant(valueUse.node());
        if (!jsValue.isNumber()) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            noResult(node);
            return;
        }
        double d = jsValue.asNumber();
        if (isClamped(type)) {
            ASSERT(elementSize(type) == 1);
            d = clampDoubleToByte(d);
        }
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateInt32Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                compileClampIntegerToByte(m_jit, scratchReg);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
        
        case NumberUse: {
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
                valueGPR = gpr;
            } else {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::Jump fixed = m_jit.jump();
                notNaN.link(&m_jit);
                
                MacroAssembler::Jump failed;
                if (isSigned(type))
                    failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
                else
                    failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
                
                addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
                
                fixed.link(&m_jit);
                value.adopt(result);
                valueGPR = gpr;
            }
            break;
        }
        
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds;
    if (node->op() == PutByVal)
        outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, JSArrayBufferView::offsetOfLength()));
    
    switch (elementSize(type)) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    if (node->op() == PutByVal)
        outOfBounds.link(&m_jit);
    noResult(node);
}
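
// Float typed-array loads widen to double. Any NaN read out of the array is
// replaced with the canonical quiet NaN, since an arbitrary NaN bit pattern
// could otherwise be mistaken for a tagged JSValue.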
void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));
    
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();
    
    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
    
    FPRTemporary result(this);
    FPRReg resultReg = result.fpr();
    speculationCheck(
        Uncountable, JSValueRegs(), 0,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg,
            MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength())));
    switch (elementSize(type)) {
    case 4:
        m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        m_jit.convertFloatToDouble(resultReg, resultReg);
        break;
    case 8:
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
    static const double NaN = QNaN;
    m_jit.loadDouble(&NaN, resultReg);
    notNaN.link(&m_jit);
    
    doubleResult(resultReg, node);
}

void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));
    
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();
    
    Edge baseUse = m_jit.graph().varArgChild(node, 0);
    Edge valueUse = m_jit.graph().varArgChild(node, 2);
    
    SpeculateDoubleOperand valueOp(this, valueUse);
    FPRTemporary scratch(this);
    FPRReg valueFPR = valueOp.fpr();
    FPRReg scratchFPR = scratch.fpr();
    
    ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
    
    MacroAssembler::Jump outOfBounds;
    if (node->op() == PutByVal) {
        outOfBounds = m_jit.branch32(
            MacroAssembler::AboveOrEqual, property,
            MacroAssembler::Address(base, JSArrayBufferView::offsetOfLength()));
    }
    
    switch (elementSize(type)) {
    case 4: {
        m_jit.moveDouble(valueFPR, scratchFPR);
        m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
        m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    }
    case 8:
        m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    if (node->op() == PutByVal)
        outOfBounds.link(&m_jit);
    noResult(node);
}
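
// instanceof fast path for a cell value and an object prototype: check that
// the prototype is an object, then walk the value's prototype chain, comparing
// each prototype against the right-hand side until we match (true) or reach a
// non-cell (false).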
void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
{
    // Check that prototype is an object.
    m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
    speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
    
    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);
    
    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
#if USE(JSVALUE64)
    m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
    m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
#else
    m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
    m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
    
    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();
    
    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif
    
    putResult.link(&m_jit);
}

void SpeculativeJIT::compileInstanceOf(Node* node)
{
    if (node->child1().useKind() == UntypedUse) {
        // It might not be a cell. Speculate less aggressively.
        // Or: it might only be used once (i.e. by us), so we get zero benefit
        // from speculating any more aggressively than we absolutely need to.
        
        JSValueOperand value(this, node->child1());
        SpeculateCellOperand prototype(this, node->child2());
        GPRTemporary scratch(this);
        
        GPRReg prototypeReg = prototype.gpr();
        GPRReg scratchReg = scratch.gpr();
        
#if USE(JSVALUE64)
        GPRReg valueReg = value.gpr();
        MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
        m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
        GPRReg valueTagReg = value.tagGPR();
        GPRReg valueReg = value.payloadGPR();
        MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
        m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
        
        MacroAssembler::Jump done = m_jit.jump();
        
        isCell.link(&m_jit);
        
        compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
        
        done.link(&m_jit);
        
#if USE(JSVALUE64)
        jsValueResult(scratchReg, node, DataFormatJSBoolean);
#else
        booleanResult(scratchReg, node);
#endif
        return;
    }
    
    SpeculateCellOperand value(this, node->child1());
    SpeculateCellOperand prototype(this, node->child2());
    
    GPRTemporary scratch(this);
    
    GPRReg valueReg = value.gpr();
    GPRReg prototypeReg = prototype.gpr();
    GPRReg scratchReg = scratch.gpr();
    
    compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
    
#if USE(JSVALUE64)
    jsValueResult(scratchReg, node, DataFormatJSBoolean);
#else
    booleanResult(scratchReg, node);
#endif
}

void SpeculativeJIT::compileAdd(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        if (isNumberConstant(node->child1().node())) {
            int32_t imm1 = valueOfInt32Constant(node->child1().node());
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);
            
            if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
                m_jit.move(op2.gpr(), result.gpr());
                m_jit.add32(Imm32(imm1), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
            
            int32Result(result.gpr(), node);
            return;
        }
        
        if (isNumberConstant(node->child2().node())) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = valueOfInt32Constant(node->child2().node());
            GPRTemporary result(this);
            
            if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.add32(Imm32(imm2), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
            
            int32Result(result.gpr(), node);
            return;
        }
        
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);
        
        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();
        
        if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
            if (gpr1 == gprResult)
                m_jit.add32(gpr2, gprResult);
            else {
                m_jit.move(gpr2, gprResult);
                m_jit.add32(gpr1, gprResult);
            }
        } else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
            
            if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), 0, check);
        }
        
        int32Result(gprResult, node);
        return;
    }
    
    case NumberUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.addDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
    
    case UntypedUse: {
        RELEASE_ASSERT(node->op() == ValueAdd);
        compileValueAdd(node);
        return;
    }
    
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
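
// MakeRope allocates a JSRopeString inline and stores its two or three fibers.
// The rope's flags and length are derived from the fibers: 8-bitness is the
// AND of the fibers' flags (masked to JSString::Is8Bit) and the length is the
// sum of their lengths. If inline allocation fails we call out to
// operationMakeRope2/operationMakeRope3.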
void SpeculativeJIT::compileMakeRope(Node* node)
{
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
    
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    SpeculateCellOperand op3(this, node->child3());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);
    
    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();
    
    JITCompiler::JumpList slowPath;
    MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
    m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
    emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
    
    m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
    for (unsigned i = 0; i < numOpGPRs; ++i)
        m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
        m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
        m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
    }
    m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
    m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
    m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
    
    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    cellResult(resultGPR, node);
}

void SpeculativeJIT::compileArithSub(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        if (isNumberConstant(node->child2().node())) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = valueOfInt32Constant(node->child2().node());
            GPRTemporary result(this);
            
            if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.sub32(Imm32(imm2), result.gpr());
            } else {
#if ENABLE(JIT_CONSTANT_BLINDING)
                GPRTemporary scratch(this);
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
#else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
#endif
            }
            
            int32Result(result.gpr(), node);
            return;
        }
        
        if (isNumberConstant(node->child1().node())) {
            int32_t imm1 = valueOfInt32Constant(node->child1().node());
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);
            
            m_jit.move(Imm32(imm1), result.gpr());
            if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
                m_jit.sub32(op2.gpr(), result.gpr());
            else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
            
            int32Result(result.gpr(), node);
            return;
        }
        
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);
        
        if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub32(op2.gpr(), result.gpr());
        } else
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
        
        int32Result(result.gpr(), node);
        return;
    }
    
    case NumberUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.subDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
    
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}

void SpeculativeJIT::compileArithNegate(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this);
        
        m_jit.move(op1.gpr(), result.gpr());
        
        if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
            m_jit.neg32(result.gpr());
        else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
        else {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
            m_jit.neg32(result.gpr());
        }
        
        int32Result(result.gpr(), node);
        return;
    }
    
    case NumberUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);
        
        m_jit.negateDouble(op1.fpr(), result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
    
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}

void SpeculativeJIT::compileArithIMul(Node* node)
{
    SpeculateInt32Operand op1(this, node->child1());
    SpeculateInt32Operand op2(this, node->child2());
    GPRTemporary result(this);
    
    GPRReg reg1 = op1.gpr();
    GPRReg reg2 = op2.gpr();
    
    m_jit.move(reg1, result.gpr());
    m_jit.mul32(reg2, result.gpr());
    int32Result(result.gpr(), node);
}

void SpeculativeJIT::compileArithMul(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);
        
        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();
        
        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
            m_jit.move(reg1, result.gpr());
            m_jit.mul32(reg2, result.gpr());
        } else {
            speculationCheck(
                Overflow, JSValueRegs(), 0,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }
        
        // Check for negative zero, if the users of this node care about such things.
        if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }
        
        int32Result(result.gpr(), node);
        return;
    }
    
    case NumberUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        
        m_jit.mulDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
    
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}

void SpeculativeJIT::compileArithDiv(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
#if CPU(X86) || CPU(X86_64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        
        GPRReg op2TempGPR;
        GPRReg temp;
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }
        
        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);
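
        // The denominator is "safe" unless it is 0 or -1: x86 idiv faults on a
        // zero divisor and on INT_MIN / -1. Computing op2 + 1 maps 0 to 1 and
        // -1 to 0, so the single unsigned comparison (temp above 1) below
        // filters out both special cases at once; every other denominator
        // falls through to the plain division.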
        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
        
        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
        
        JITCompiler::JumpList done;
        if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
            // are happy to fall through to a normal division, since we're just dividing
            // something by negative 1.
            
            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), eax.gpr());
            done.append(m_jit.jump());
            
            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.move(op1GPR, eax.gpr());
            done.append(m_jit.jump());
            
            notNeg2ToThe31.link(&m_jit);
        }
        
        safeDenominator.link(&m_jit);
        
        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }
        
        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }
        
        m_jit.move(op1GPR, eax.gpr());
        m_jit.assembler().cdq();
        m_jit.assembler().idivl_r(op2GPR);
        
        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);
        
        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (bytecodeUsesAsNumber(node->arithNodeFlags()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
        else
            done.link(&m_jit);
        
        int32Result(eax.gpr(), node);
#elif CPU(APPLE_ARMV7S)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRTemporary quotient(this);
        GPRTemporary multiplyAnswer(this);
        
        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }
        
        m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
        
        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
        }
        
        int32Result(quotient.gpr(), node);
#else
        RELEASE_ASSERT_NOT_REACHED();