/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "Arguments.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
    , m_blockHeads(jit.graph().m_blocks.size())
    , m_arguments(jit.codeBlock()->numParameters())
    , m_variables(jit.graph().m_localVars)
    , m_lastSetOperand(std::numeric_limits<int>::max())
    , m_state(m_jit.graph())
    , m_stream(&jit.codeBlock()->variableEventStream())
    , m_minifiedGraph(&jit.codeBlock()->minifiedDFG())
    , m_isCheckingArgumentTypes(false)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

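// Emits the inline fast path for allocating a JSArray with the given structure and
// element count: the butterfly storage is bump-allocated, the JSArray object is
// allocated and wired to that storage, and (for double arrays) the unused tail of the
// vector is pre-filled with QNaN. Allocation failures branch to the slow path
// generator appended at the end.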
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(adoptPtr(
        new CallArrayAllocatorSlowPathGenerator(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements)));
}

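// The speculationCheck() family records an OSR exit that is taken if the given jump
// (or jump list) fires at run time. The "backward" variants always exit to the
// current code origin and re-execute the current bytecode; the plain variants
// additionally convert the last exit to a forward exit when the current node
// speculates in the forward direction.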
void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpsToFail);
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.codeBlock()->numberOfOSRExits();
    m_jit.appendExitInfo();
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.codeBlock()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), m_jit.codeBlock()->numberOfSpeculationRecoveries()));
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
}

JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return 0;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(JITCompiler::JumpList());
    OSRExit& exit = m_jit.codeBlock()->osrExit(
        m_jit.codeBlock()->appendOSRExit(OSRExit(
            kind, jsValueSource,
            m_jit.graph().methodOfGettingAValueProfileFor(node),
            this, m_stream->size())));
    exit.m_watchpointIndex = m_jit.codeBlock()->appendWatchpoint(
        JumpReplacementWatchpoint(m_jit.watchpointLabel()));
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
    return &m_jit.codeBlock()->watchpoint(exit.m_watchpointIndex);
}

JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
{
    return speculationWatchpoint(kind, JSValueSource(), 0);
}

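// Converts the most recently emitted OSR exit from backward to forward form: the exit
// is retargeted at the first node with a later code origin and, if a value recovery is
// supplied, a recovery override is recorded for the SetLocal's operand so that the
// not-yet-stored value can be reconstituted on exit.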
void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
{
    if (!valueRecovery) {
        // Check that either the current node is a SetLocal, or the preceding node was a
        // SetLocal with the same code origin.
        if (!m_currentNode->containsMovHint()) {
            Node* setLocal = m_jit.graph().m_blocks[m_block]->at(m_indexInBlock - 1);
            ASSERT_UNUSED(setLocal, setLocal->containsMovHint());
            ASSERT_UNUSED(setLocal, setLocal->codeOrigin == m_currentNode->codeOrigin);
        }

        // Find the next node.
        unsigned indexInBlock = m_indexInBlock + 1;
        Node* node = 0;
        for (;;) {
            if (indexInBlock == m_jit.graph().m_blocks[m_block]->size()) {
                // This is an inline return. Give up and do a backwards speculation. This is safe
                // because an inline return has its own bytecode index and it's always safe to
                // reexecute that bytecode.
                ASSERT(node->op() == Jump);
                return;
            }
            node = m_jit.graph().m_blocks[m_block]->at(indexInBlock);
            if (node->codeOrigin != m_currentNode->codeOrigin)
                break;
            indexInBlock++;
        }

        ASSERT(node->codeOrigin != m_currentNode->codeOrigin);
        OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
        exit.m_codeOrigin = node->codeOrigin;
        return;
    }

    unsigned setLocalIndexInBlock = m_indexInBlock + 1;

    Node* setLocal = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock);
    bool hadInt32ToDouble = false;

    if (setLocal->op() == ForwardInt32ToDouble) {
        setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);
        hadInt32ToDouble = true;
    }
    if (setLocal->op() == Flush || setLocal->op() == Phantom)
        setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);

    if (hadInt32ToDouble)
        ASSERT(setLocal->child1()->child1() == m_currentNode);
    else
        ASSERT(setLocal->child1() == m_currentNode);
    ASSERT(setLocal->containsMovHint());
    ASSERT(setLocal->codeOrigin == m_currentNode->codeOrigin);

    Node* nextNode = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1);
    if (nextNode->op() == Jump && nextNode->codeOrigin == m_currentNode->codeOrigin) {
        // We're at an inlined return. Use a backward speculation instead.
        return;
    }
    ASSERT(nextNode->codeOrigin != m_currentNode->codeOrigin);

    OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
    exit.m_codeOrigin = nextNode->codeOrigin;

    exit.m_lastSetOperand = setLocal->local();
    exit.m_valueRecoveryOverride = adoptRef(
        new ValueRecoveryOverride(setLocal->local(), valueRecovery));
}

void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    convertLastOSRExitToForward(valueRecovery);
}

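// Called when speculative compilation of the current node cannot continue: emits an
// unconditional OSR exit and marks the remainder of the block as not compilable.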
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpeculativeJIT was terminated.\n");
#endif
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_state.forNode(edge).filter(typesPassedThrough);
    backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(slowPathGenerator);
}

void SpeculativeJIT::runSlowPathGenerators()
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
#endif
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}

// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

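// Silent spill/fill plans describe how to save a live register around a call without
// perturbing the recorded generation info: the spill action stores the value to the
// stack (if it is not already spilled), and the fill action reloads or recomputes it
// afterwards.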
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = m_generationInfo[spillMe];
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInteger)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInteger) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInteger(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(isInt32Constant(node));
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
        fillAction = DoNothingForFill;
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(isBooleanConstant(node));
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            JSValue value = valueOfJSConstant(node);
            ASSERT_UNUSED(value, value.isCell());
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (valueOfJSConstant(node).isCell())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInteger) {
            ASSERT(registerFormat == DataFormatJSInteger);
            fillAction = Load32PayloadBoxInt;
        } else if (info.spillFormat() == DataFormatDouble) {
            ASSERT(registerFormat == DataFormatJSDouble);
            fillAction = LoadDoubleBoxDouble;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInteger:
                ASSERT(registerFormat == DataFormatJSInteger);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = m_generationInfo[spillMe];
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
        // It was already spilled previously and not as a double, which means we need unboxing.
        ASSERT(info.spillFormat() & DataFormatJS);
        fillAction = LoadJSUnboxDouble;
    } else
        fillAction = LoadDouble;
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

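// Executes the spill half of a plan computed above; silentFill() below executes the
// matching fill half, using canTrample as a scratch register where reboxing is needed.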
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
        break;
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadDoubleBoxDouble:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadJSUnboxDouble:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
        unboxDouble(canTrample, plan.fpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(ArrayMode arrayMode)
{
    switch (arrayMode.type()) {
    case Array::Int8Array:
        return &m_jit.vm()->int8ArrayDescriptor();
    case Array::Int16Array:
        return &m_jit.vm()->int16ArrayDescriptor();
    case Array::Int32Array:
        return &m_jit.vm()->int32ArrayDescriptor();
    case Array::Uint8Array:
        return &m_jit.vm()->uint8ArrayDescriptor();
    case Array::Uint8ClampedArray:
        return &m_jit.vm()->uint8ClampedArrayDescriptor();
    case Array::Uint16Array:
        return &m_jit.vm()->uint16ArrayDescriptor();
    case Array::Uint32Array:
        return &m_jit.vm()->uint32ArrayDescriptor();
    case Array::Float32Array:
        return &m_jit.vm()->float32ArrayDescriptor();
    case Array::Float64Array:
        return &m_jit.vm()->float64ArrayDescriptor();
    default:
        return 0;
    }
}

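// Emits the branch (or branches) taken when the indexing type in tempGPR does not
// match what the array mode expects; callers treat the returned jumps as slow cases.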
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    default:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}

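// CheckArray: verifies that the base object matches the array mode speculated by the
// graph, either by checking its indexing shape or by comparing its ClassInfo.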
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    const TypedArrayDescriptor* result = typedArrayDescriptor(node->arrayMode());

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::String:
        expectedClassInfo = &JSString::s_info;
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
        m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::Arguments:
        expectedClassInfo = &Arguments::s_info;
        break;
    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array:
        expectedClassInfo = result->m_classInfo;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    GPRTemporary temp(this);
    m_jit.loadPtr(
        MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
    speculationCheck(
        Uncountable, JSValueRegs(), 0,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakPtr(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureOffset()),
            node->structure()));
    } else {
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);

        m_jit.load8(
            MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateIntegerOperand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

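// Fills the storage pointer for an edge into a GPR, reloading it from the stack if it
// was spilled; anything that is not already in storage format is filled as a cell.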
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(jit);
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);
    ASSERT(owner != scratch1);
    ASSERT(owner != scratch2);
    ASSERT(scratch1 != scratch2);

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(ownerGPR);
    UNUSED_PARAM(valueGPR);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    if (isKnownNotCell(valueUse.node()))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(ownerGPR);
    UNUSED_PARAM(value);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    if (Heap::isMarked(value))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(valueGPR);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(useKind);

    if (isKnownNotCell(valueUse.node()))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

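// The helpers below try to fuse a compare with an immediately following Branch (a
// "peephole" branch); when that succeeds they advance the current index past the
// branch node and return true so the caller skips it.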
bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

#ifndef NDEBUG
static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
#endif

#if DFG_ENABLE(CONSISTENCY_CHECK)
void SpeculativeJIT::checkConsistency()
{
    bool failed = false;

    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }
    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }

    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        VirtualRegister virtualRegister = (VirtualRegister)i;
        GenerationInfo& info = m_generationInfo[virtualRegister];
        if (!info.alive())
            continue;
        switch (info.registerFormat()) {
        case DataFormatNone:
            break;
        case DataFormatJS:
        case DataFormatJSInteger:
        case DataFormatJSDouble:
        case DataFormatJSCell:
        case DataFormatJSBoolean:
#if USE(JSVALUE32_64)
            break;
#endif
        case DataFormatInteger:
        case DataFormatCell:
        case DataFormatBoolean:
        case DataFormatStorage: {
            GPRReg gpr = info.gpr();
            ASSERT(gpr != InvalidGPRReg);
            if (m_gprs.name(gpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
                failed = true;
            }
            break;
        }
        case DataFormatDouble: {
            FPRReg fpr = info.fpr();
            ASSERT(fpr != InvalidFPRReg);
            if (m_fprs.name(fpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
                failed = true;
            }
            break;
        }
        case DataFormatOSRMarker:
        case DataFormatDead:
        case DataFormatArguments:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;

        GenerationInfo& info = m_generationInfo[virtualRegister];
#if USE(JSVALUE64)
        if (iter.regID() != info.gpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
#else
        if (!(info.registerFormat() & DataFormatJS)) {
            if (iter.regID() != info.gpr()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        } else {
            if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        }
#endif
    }

    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;

        GenerationInfo& info = m_generationInfo[virtualRegister];
        if (iter.regID() != info.fpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
    }

    if (failed) {
        dump();
        CRASH();
    }
}
#endif

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else if (m_jit->canReuse(op2.node()))
        m_gpr = m_jit->reuse(op2.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else if (m_jit->canReuse(op2.node()))
        m_gpr = m_jit->reuse(op2.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

#if USE(JSVALUE64)
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}
#elif USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR());
    else
        m_gpr = m_jit->allocate();
}
#endif

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
        m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
    } else {
        GPRTemporary structure(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isBooleanConstant(node->child1().node())) {
        bool imm = valueOfBooleanConstant(node->child1().node());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
    } else if (isBooleanConstant(node->child2().node())) {
        SpeculateBooleanOperand op1(this, node->child1());
        bool imm = valueOfBooleanConstant(node->child2().node());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleIntegerBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isInt32Constant(node->child1().node())) {
        int32_t imm = valueOfInt32Constant(node->child1().node());
        SpeculateIntegerOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (isInt32Constant(node->child2().node())) {
        SpeculateIntegerOperand op1(this, node->child1());
        int32_t imm = valueOfInt32Constant(node->child2().node());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateIntegerOperand op1(this, node->child1());
        SpeculateIntegerOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so there can be no intervening nodes to also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleIntegerBranch(node, branchNode, condition);
        else if (node->isBinaryUseKind(NumberUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}

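// OSR bookkeeping: noticeOSRBirth() records that a node's value has become available
// to OSR, and compileMovHint() logs the correspondence between a node and the local
// variable it will be stored into.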
void SpeculativeJIT::noticeOSRBirth(Node* node)
{
    if (!node->hasVirtualRegister())
        return;

    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    info.noticeOSRBirth(*m_stream, node, virtualRegister);
}

void SpeculativeJIT::compileMovHint(Node* node)
{
    ASSERT(node->containsMovHint() && node->op() != ZombieHint);

    m_lastSetOperand = node->local();

    Node* child = node->child1().node();
    noticeOSRBirth(child);

    if (child->op() == UInt32ToNumber)
        noticeOSRBirth(child->child1().node());

    m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
}

void SpeculativeJIT::compileMovHintAndCheck(Node* node)
{
    compileMovHint(node);
    speculate(node, node->child1());
    noResult(node);
}

void SpeculativeJIT::compileInlineStart(Node* node)
{
    InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
    int argumentCountIncludingThis = inlineCallFrame->arguments.size();
    unsigned argumentPositionStart = node->argumentPositionStart();
    CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    for (int i = 0; i < argumentCountIncludingThis; ++i) {
        ValueRecovery recovery;
        if (codeBlock->isCaptured(argumentToOperand(i)))
            recovery = ValueRecovery::alreadyInJSStack();
        else {
            ArgumentPosition& argumentPosition =
                m_jit.graph().m_argumentPositions[argumentPositionStart + i];
            ValueSource valueSource;
            if (!argumentPosition.shouldUnboxIfPossible())
                valueSource = ValueSource(ValueInJSStack);
            else if (argumentPosition.shouldUseDoubleFormat())
                valueSource = ValueSource(DoubleInJSStack);
            else if (isInt32Speculation(argumentPosition.prediction()))
                valueSource = ValueSource(Int32InJSStack);
            else if (isCellSpeculation(argumentPosition.prediction()))
                valueSource = ValueSource(CellInJSStack);
            else if (isBooleanSpeculation(argumentPosition.prediction()))
                valueSource = ValueSource(BooleanInJSStack);
            else
                valueSource = ValueSource(ValueInJSStack);
            recovery = computeValueRecoveryFor(valueSource);
        }
        // The recovery should refer either to something that has already been
        // stored into the stack at the right place, or to a constant,
        // since the Arguments code isn't smart enough to handle anything else.
        // The exception is the this argument, which we don't really need to be
        // able to recover.
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\nRecovery for argument %d: ", i);
        recovery.dump(WTF::dataFile());
#endif
        inlineCallFrame->arguments[i] = recovery;
    }
}

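// Generates code for one basic block: resets the recorded value sources for arguments
// and locals, then compiles each node in turn, skipping nodes that need not generate
// code and bailing out of the block if any node fails to compile speculatively.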
void SpeculativeJIT::compile(BasicBlock& block)
{
    ASSERT(m_compileOkay);

    if (!block.isReachable)
        return;

    if (!block.cfaHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
#if !ASSERT_DISABLED
        m_jit.breakpoint();
#endif
        return;
    }

    m_blockHeads[m_block] = m_jit.label();
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Setting up state for block #%u: ", m_block);
#endif

    m_stream->appendAndLog(VariableEvent::reset());

    m_jit.jitAssertHasValidCallFrame();

    ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
    for (size_t i = 0; i < m_arguments.size(); ++i) {
        ValueSource valueSource = ValueSource(ValueInJSStack);
        m_arguments[i] = valueSource;
        m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
    }

    m_state.reset();
    m_state.beginBasicBlock(&block);

    ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
    for (size_t i = 0; i < m_variables.size(); ++i) {
        Node* node = block.variablesAtHead.local(i);
        ValueSource valueSource;
        if (!node)
            valueSource = ValueSource(SourceIsDead);
        else if (node->variableAccessData()->isArgumentsAlias())
            valueSource = ValueSource(ArgumentsSource);
        else if (!node->refCount())
            valueSource = ValueSource(SourceIsDead);
        else if (!node->variableAccessData()->shouldUnboxIfPossible())
            valueSource = ValueSource(ValueInJSStack);
        else if (node->variableAccessData()->shouldUseDoubleFormat())
            valueSource = ValueSource(DoubleInJSStack);
        else
            valueSource = ValueSource::forSpeculation(node->variableAccessData()->argumentAwarePrediction());
        m_variables[i] = valueSource;
        // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
        m_stream->appendAndLog(VariableEvent::setLocal(i, valueSource.dataFormat()));
    }

    m_lastSetOperand = std::numeric_limits<int>::max();
    m_codeOriginForOSR = CodeOrigin();

    if (DFG_ENABLE_EDGE_CODE_VERIFICATION) {
        JITCompiler::Jump verificationSucceeded =
            m_jit.branch32(JITCompiler::Equal, GPRInfo::regT0, TrustedImm32(m_block));
        m_jit.breakpoint();
        verificationSucceeded.link(&m_jit);
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("\n");
#endif

    for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) {
        m_currentNode = block[m_indexInBlock];
#if !ASSERT_DISABLED
        m_canExit = m_currentNode->canExit();
#endif
        bool shouldExecuteEffects = m_state.startExecuting(m_currentNode);
        m_jit.setForNode(m_currentNode);
        m_codeOriginForOSR = m_currentNode->codeOrigin;
        if (!m_currentNode->shouldGenerate()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
            switch (m_currentNode->op()) {
            case JSConstant:
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;

            case WeakJSConstant:
                m_jit.addWeakReference(m_currentNode->weakConstant());
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;

            case SetLocal:
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case MovHint:
                compileMovHint(m_currentNode);
                break;

            case ZombieHint: {
                m_lastSetOperand = m_currentNode->local();
                m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
                break;
            }

            default:
                if (belongsInMinifiedGraph(m_currentNode->op()))
                    m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
            }
        } else {

            if (verboseCompilationEnabled()) {
                dataLogF(
                    "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                    (int)m_currentNode->index(),
                    m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("   ");
#else
                dataLog("\n");
#endif
            }
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
            m_jit.breakpoint();
#endif
#if DFG_ENABLE(XOR_DEBUG_AID)
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
#endif
            checkConsistency();

            m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;

            compile(m_currentNode);
            if (!m_compileOkay) {
                m_compileOkay = true;
                clearGenerationInfo();
                return;
            }

            if (belongsInMinifiedGraph(m_currentNode->op())) {
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                noticeOSRBirth(m_currentNode);
            }

#if DFG_ENABLE(DEBUG_VERBOSE)
            if (m_currentNode->hasResult()) {
                GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
                dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
                if (info.registerFormat() != DataFormatNone) {
                    if (info.registerFormat() == DataFormatDouble)
                        dataLogF(", %s", FPRInfo::debugName(info.fpr()));
#if USE(JSVALUE32_64)
                    else if (info.registerFormat() & DataFormatJS)
                        dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
#endif
                    else
                        dataLogF(", %s", GPRInfo::debugName(info.gpr()));
                }
            }
#endif
        }

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\n");
#endif

        // Make sure that the abstract state is rematerialized for the next node.
        if (shouldExecuteEffects)
            m_state.executeEffects(m_indexInBlock);

        if (m_currentNode->shouldGenerate())
            checkConsistency();
    }

    // Perform the most basic verification that children have been used correctly.
#if !ASSERT_DISABLED
    for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
        GenerationInfo& info = m_generationInfo[index];
        ASSERT(!info.alive());
    }
#endif
}

1848 // If we are making type predictions about our arguments then
1849 // we need to check that they are correct on function entry.
1850 void SpeculativeJIT::checkArgumentTypes()
1852 ASSERT(!m_currentNode);
1853 m_isCheckingArgumentTypes = true;
1854 m_speculationDirection = BackwardSpeculation;
1855 m_codeOriginForOSR = CodeOrigin(0);
1857 for (size_t i = 0; i < m_arguments.size(); ++i)
1858 m_arguments[i] = ValueSource(ValueInJSStack);
1859 for (size_t i = 0; i < m_variables.size(); ++i)
1860 m_variables[i] = ValueSource(ValueInJSStack);
1862 for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1863 Node* node = m_jit.graph().m_arguments[i];
1864 ASSERT(node->op() == SetArgument);
1865 if (!node->shouldGenerate()) {
1866 // The argument is dead. We don't do any checks for such arguments.
1870 VariableAccessData* variableAccessData = node->variableAccessData();
1871 if (!variableAccessData->isProfitableToUnbox())
1874 VirtualRegister virtualRegister = variableAccessData->local();
1875 SpeculatedType predictedType = variableAccessData->prediction();
1877 JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
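// Note: the checks below are keyed to the JSValue encoding. On 64-bit, int32s
// are boxed with the tagTypeNumber bits set (so anything unsigned-below
// tagTypeNumberRegister is not an int32) and cells have no tag bits set; on
// 32-bit, we can simply compare the tag word against Int32Tag / BooleanTag /
// CellTag.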
1880 if (isInt32Speculation(predictedType))
1881 speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1882 else if (isBooleanSpeculation(predictedType)) {
1883 GPRTemporary temp(this);
1884 m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
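// XORing with ValueFalse maps false -> 0 and true -> 1; any other bit pattern
// leaves bits outside the low bit set, which the ~1 mask test below catches.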
1885 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1886 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1887 } else if (isCellSpeculation(predictedType))
1888 speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1890 if (isInt32Speculation(predictedType))
1891 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1892 else if (isBooleanSpeculation(predictedType))
1893 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1894 else if (isCellSpeculation(predictedType))
1895 speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1898 m_isCheckingArgumentTypes = false;
1901 bool SpeculativeJIT::compile()
1903 checkArgumentTypes();
1905 if (DFG_ENABLE_EDGE_CODE_VERIFICATION)
1906 m_jit.move(TrustedImm32(0), GPRInfo::regT0);
1908 ASSERT(!m_currentNode);
1909 for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) {
1910 m_jit.setForBlock(m_block);
1911 BasicBlock* block = m_jit.graph().m_blocks[m_block].get();
1919 void SpeculativeJIT::createOSREntries()
1921 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
1922 BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
1925 if (!block->isOSRTarget)
1928 // Currently we only need to create OSR entry trampolines when using edge code
1929 // verification. But in the future, we'll need this for other things as well (like
1930 // when we have global reg alloc).
1931 // If we don't need OSR entry trampolines, the block head itself serves as the entry point.
1932 if (!DFG_ENABLE_EDGE_CODE_VERIFICATION) {
1933 m_osrEntryHeads.append(m_blockHeads[blockIndex]);
1937 m_osrEntryHeads.append(m_jit.label());
1938 m_jit.move(TrustedImm32(blockIndex), GPRInfo::regT0);
1939 m_jit.jump().linkTo(m_blockHeads[blockIndex], &m_jit);
1943 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1945 unsigned osrEntryIndex = 0;
1946 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
1947 BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
1950 if (!block->isOSRTarget)
1952 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1954 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1957 ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
1959 if (valueSource.isInJSStack())
1960 return valueSource.valueRecovery();
1962 ASSERT(valueSource.kind() == HaveNode);
1963 Node* node = valueSource.id().node(m_jit.graph());
1964 if (isConstant(node))
1965 return ValueRecovery::constant(valueOfJSConstant(node));
1967 return ValueRecovery();
1970 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1972 Edge child3 = m_jit.graph().varArgChild(node, 2);
1973 Edge child4 = m_jit.graph().varArgChild(node, 3);
1975 ArrayMode arrayMode = node->arrayMode();
1977 GPRReg baseReg = base.gpr();
1978 GPRReg propertyReg = property.gpr();
1980 SpeculateDoubleOperand value(this, child3);
1982 FPRReg valueReg = value.fpr();
1985 JSValueRegs(), child3, SpecRealNumber,
1987 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1992 StorageOperand storage(this, child4);
1993 GPRReg storageReg = storage.gpr();
1995 if (node->op() == PutByValAlias) {
1996 // Store the value to the array.
1997 GPRReg propertyReg = property.gpr();
1998 FPRReg valueReg = value.fpr();
1999 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2001 noResult(m_currentNode);
2005 GPRTemporary temporary;
2006 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2008 MacroAssembler::Jump slowCase;
2010 if (arrayMode.isInBounds()) {
2012 StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
2013 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2015 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2017 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2019 if (!arrayMode.isOutOfBounds())
2020 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
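// The store is past the public length but still within the allocated vector,
// so we just bump the public length to cover the new index and fall through to
// the store.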
2022 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2023 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2025 inBounds.link(&m_jit);
2028 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2035 if (arrayMode.isOutOfBounds()) {
2036 addSlowPathGenerator(
2039 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2040 NoResult, baseReg, propertyReg, valueReg));
2043 noResult(m_currentNode, UseChildrenCalledExplicitly);
2046 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2048 SpeculateCellOperand string(this, node->child1());
2049 SpeculateStrictInt32Operand index(this, node->child2());
2050 StorageOperand storage(this, node->child3());
2052 GPRReg stringReg = string.gpr();
2053 GPRReg indexReg = index.gpr();
2054 GPRReg storageReg = storage.gpr();
2056 ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2058 // unsigned comparison so we can filter out negative indices and indices that are too large
2059 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2061 GPRTemporary scratch(this);
2062 GPRReg scratchReg = scratch.gpr();
2064 m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2066 // Load the character into scratchReg
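// StringImpl keeps either 8-bit (Latin-1) or 16-bit (UTF-16) character storage;
// the flag test below selects the correctly-sized load.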
2067 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2069 m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2070 JITCompiler::Jump cont8Bit = m_jit.jump();
2072 is16Bit.link(&m_jit);
2074 m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2076 cont8Bit.link(&m_jit);
2078 integerResult(scratchReg, m_currentNode);
2081 void SpeculativeJIT::compileGetByValOnString(Node* node)
2083 SpeculateCellOperand base(this, node->child1());
2084 SpeculateStrictInt32Operand property(this, node->child2());
2085 StorageOperand storage(this, node->child3());
2086 GPRReg baseReg = base.gpr();
2087 GPRReg propertyReg = property.gpr();
2088 GPRReg storageReg = storage.gpr();
2090 ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2092 // unsigned comparison so we can filter out negative indices and indices that are too large
2093 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSString::offsetOfLength())));
2095 GPRTemporary scratch(this);
2096 GPRReg scratchReg = scratch.gpr();
2098 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2100 // Load the character into scratchReg
2101 JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2103 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2104 JITCompiler::Jump cont8Bit = m_jit.jump();
2106 is16Bit.link(&m_jit);
2108 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2110 // We only support characters below 0x100 (Latin-1) here; anything wider triggers a speculation failure.
2111 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100)));
2113 // 8-bit string values are always below 0x100, so they don't need the check above.
2114 cont8Bit.link(&m_jit);
2116 GPRTemporary smallStrings(this);
2117 GPRReg smallStringsReg = smallStrings.gpr();
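// The VM keeps a table of single-character JSStrings, indexed by character. A
// null entry means that string hasn't been materialized yet, in which case we
// OSR-exit via the speculation check below.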
2118 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2119 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
2120 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2121 cellResult(scratchReg, m_currentNode);
2124 void SpeculativeJIT::compileFromCharCode(Node* node)
2126 SpeculateStrictInt32Operand property(this, node->child1());
2127 GPRReg propertyReg = property.gpr();
2128 GPRTemporary smallStrings(this);
2129 GPRTemporary scratch(this);
2130 GPRReg scratchReg = scratch.gpr();
2131 GPRReg smallStringsReg = smallStrings.gpr();
2133 JITCompiler::JumpList slowCases;
2134 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2135 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2136 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2138 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2139 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2140 cellResult(scratchReg, m_currentNode);
2143 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2145 #if DFG_ENABLE(DEBUG_VERBOSE)
2146 dataLogF("checkGeneratedTypeForToInt32@%d ", node->index());
2148 VirtualRegister virtualRegister = node->virtualRegister();
2149 GenerationInfo& info = m_generationInfo[virtualRegister];
2151 switch (info.registerFormat()) {
2152 case DataFormatStorage:
2153 RELEASE_ASSERT_NOT_REACHED();
2155 case DataFormatBoolean:
2156 case DataFormatCell:
2157 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2158 return GeneratedOperandTypeUnknown;
2160 case DataFormatNone:
2161 case DataFormatJSCell:
2163 case DataFormatJSBoolean:
2164 return GeneratedOperandJSValue;
2166 case DataFormatJSInteger:
2167 case DataFormatInteger:
2168 return GeneratedOperandInteger;
2170 case DataFormatJSDouble:
2171 case DataFormatDouble:
2172 return GeneratedOperandDouble;
2175 RELEASE_ASSERT_NOT_REACHED();
2176 return GeneratedOperandTypeUnknown;
2180 void SpeculativeJIT::compileValueToInt32(Node* node)
2182 switch (node->child1().useKind()) {
2184 SpeculateIntegerOperand op1(this, node->child1());
2185 GPRTemporary result(this, op1);
2186 m_jit.move(op1.gpr(), result.gpr());
2187 integerResult(result.gpr(), node, op1.format());
2193 switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2194 case GeneratedOperandInteger: {
2195 SpeculateIntegerOperand op1(this, node->child1(), ManualOperandSpeculation);
2196 GPRTemporary result(this, op1);
2197 m_jit.move(op1.gpr(), result.gpr());
2198 integerResult(result.gpr(), node, op1.format());
2201 case GeneratedOperandDouble: {
2202 GPRTemporary result(this);
2203 SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
2204 FPRReg fpr = op1.fpr();
2205 GPRReg gpr = result.gpr();
2206 JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2208 addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
2210 integerResult(gpr, node);
2213 case GeneratedOperandJSValue: {
2214 GPRTemporary result(this);
2216 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2218 GPRReg gpr = op1.gpr();
2219 GPRReg resultGpr = result.gpr();
2220 FPRTemporary tempFpr(this);
2221 FPRReg fpr = tempFpr.fpr();
2223 JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2224 JITCompiler::JumpList converted;
2226 if (node->child1().useKind() == NumberUse) {
2228 JSValueRegs(gpr), node->child1(), SpecNumber,
2230 MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2232 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2235 JSValueRegs(gpr), node->child1(), ~SpecCell,
2237 JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
2239 // It's not a cell: so true turns into 1 and all else turns into 0.
2240 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2241 converted.append(m_jit.jump());
2243 isNumber.link(&m_jit);
2246 // First: if we get here, we have a double encoded as a JSValue.
2247 m_jit.move(gpr, resultGpr);
2248 unboxDouble(resultGpr, fpr);
2250 silentSpillAllRegisters(resultGpr);
2251 callOperation(toInt32, resultGpr, fpr);
2252 silentFillAllRegisters(resultGpr);
2254 converted.append(m_jit.jump());
2256 isInteger.link(&m_jit);
2257 m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2259 converted.link(&m_jit);
2261 Node* childNode = node->child1().node();
2262 VirtualRegister virtualRegister = childNode->virtualRegister();
2263 GenerationInfo& info = m_generationInfo[virtualRegister];
2265 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2267 GPRReg payloadGPR = op1.payloadGPR();
2268 GPRReg resultGpr = result.gpr();
2270 JITCompiler::JumpList converted;
2272 if (info.registerFormat() == DataFormatJSInteger)
2273 m_jit.move(payloadGPR, resultGpr);
2275 GPRReg tagGPR = op1.tagGPR();
2276 FPRTemporary tempFpr(this);
2277 FPRReg fpr = tempFpr.fpr();
2278 FPRTemporary scratch(this);
2280 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2282 if (node->child1().useKind() == NumberUse) {
2284 JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecNumber,
2286 MacroAssembler::AboveOrEqual, tagGPR,
2287 TrustedImm32(JSValue::LowestTag)));
2289 JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2292 JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
2294 JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
2296 // It's not a cell: so true turns into 1 and all else turns into 0.
2297 JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2298 m_jit.move(TrustedImm32(0), resultGpr);
2299 converted.append(m_jit.jump());
2301 isBoolean.link(&m_jit);
2302 m_jit.move(payloadGPR, resultGpr);
2303 converted.append(m_jit.jump());
2305 isNumber.link(&m_jit);
2308 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2310 silentSpillAllRegisters(resultGpr);
2311 callOperation(toInt32, resultGpr, fpr);
2312 silentFillAllRegisters(resultGpr);
2314 converted.append(m_jit.jump());
2316 isInteger.link(&m_jit);
2317 m_jit.move(payloadGPR, resultGpr);
2319 converted.link(&m_jit);
2322 integerResult(resultGpr, node);
2325 case GeneratedOperandTypeUnknown:
2326 RELEASE_ASSERT(!m_compileOkay);
2329 RELEASE_ASSERT_NOT_REACHED();
2334 SpeculateBooleanOperand op1(this, node->child1());
2335 GPRTemporary result(this, op1);
2337 m_jit.move(op1.gpr(), result.gpr());
2338 m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
2340 integerResult(result.gpr(), node);
2345 ASSERT(!m_compileOkay);
2350 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2352 if (!nodeCanSpeculateInteger(node->arithNodeFlags())) {
2353 // We know that this sometimes produces doubles. So produce a double every
2354 // time. This at least allows subsequent code to not have weird conditionals.
2356 IntegerOperand op1(this, node->child1());
2357 FPRTemporary result(this);
2359 GPRReg inputGPR = op1.gpr();
2360 FPRReg outputFPR = result.fpr();
2362 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2364 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2365 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2366 positive.link(&m_jit);
2368 doubleResult(outputFPR, node);
2372 IntegerOperand op1(this, node->child1());
2373 GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
2375 m_jit.move(op1.gpr(), result.gpr());
2377 // Test that the operand is positive. This is a very special speculation check - we actually
2378 // use roll-forward speculation here, where if this fails, we jump to the baseline
2379 // instruction that follows us, rather than the one we're executing right now. We have
2380 // to do this because by this point, the original values necessary to compile whatever
2381 // operation the UInt32ToNumber originated from might be dead.
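// For example, an input of 0x80000000 is 2147483648 as a uint32 but reads as a
// negative int32 here; ValueRecovery::uint32InGPR tells the OSR exit machinery
// to rebox the register contents as an unsigned value.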
2382 forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
2384 integerResult(result.gpr(), node, op1.format());
2387 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2389 SpeculateDoubleOperand op1(this, node->child1());
2390 FPRTemporary scratch(this);
2391 GPRTemporary result(this);
2393 FPRReg valueFPR = op1.fpr();
2394 FPRReg scratchFPR = scratch.fpr();
2395 GPRReg resultGPR = result.gpr();
2397 JITCompiler::JumpList failureCases;
2398 bool negZeroCheck = !nodeCanIgnoreNegativeZero(node->arithNodeFlags());
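// The conversion branches to failureCases for fractional values, out-of-range
// values, and (when negZeroCheck is set) -0; the recovery rematerializes the
// original double from valueFPR.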
2399 m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR, negZeroCheck);
2400 forwardSpeculationCheck(Overflow, JSValueRegs(), 0, failureCases, ValueRecovery::inFPR(valueFPR));
2402 integerResult(resultGPR, node);
2405 void SpeculativeJIT::compileInt32ToDouble(Node* node)
2407 ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
2409 if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2410 SpeculateIntegerOperand op1(this, node->child1(), ManualOperandSpeculation);
2411 FPRTemporary result(this);
2412 m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2413 doubleResult(result.fpr(), node);
2417 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2418 FPRTemporary result(this);
2421 GPRTemporary temp(this);
2423 GPRReg op1GPR = op1.gpr();
2424 GPRReg tempGPR = temp.gpr();
2425 FPRReg resultFPR = result.fpr();
2427 JITCompiler::Jump isInteger = m_jit.branch64(
2428 MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2430 if (needsTypeCheck(node->child1(), SpecNumber)) {
2431 if (node->op() == ForwardInt32ToDouble) {
2433 JSValueRegs(op1GPR), node->child1(), SpecNumber,
2434 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister),
2435 ValueRecovery::inGPR(op1GPR, DataFormatJS));
2438 JSValueRegs(op1GPR), node->child1(), SpecNumber,
2439 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2443 m_jit.move(op1GPR, tempGPR);
2444 unboxDouble(tempGPR, resultFPR);
2445 JITCompiler::Jump done = m_jit.jump();
2447 isInteger.link(&m_jit);
2448 m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2451 FPRTemporary temp(this);
2453 GPRReg op1TagGPR = op1.tagGPR();
2454 GPRReg op1PayloadGPR = op1.payloadGPR();
2455 FPRReg tempFPR = temp.fpr();
2456 FPRReg resultFPR = result.fpr();
2458 JITCompiler::Jump isInteger = m_jit.branch32(
2459 MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2461 if (needsTypeCheck(node->child1(), SpecNumber)) {
2462 if (node->op() == ForwardInt32ToDouble) {
2464 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
2465 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)),
2466 ValueRecovery::inPair(op1TagGPR, op1PayloadGPR));
2469 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
2470 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2474 unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2475 JITCompiler::Jump done = m_jit.jump();
2477 isInteger.link(&m_jit);
2478 m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2482 doubleResult(resultFPR, node);
2485 static double clampDoubleToByte(double d)
2495 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2497 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2498 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2499 jit.xorPtr(result, result);
2500 MacroAssembler::Jump clamped = jit.jump();
2502 jit.move(JITCompiler::TrustedImm32(255), result);
2504 inBounds.link(&jit);
2507 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2509 // Unordered compare so we pick up NaN
2510 static const double zero = 0;
2511 static const double byteMax = 255;
2512 static const double half = 0.5;
2513 jit.loadDouble(&zero, scratch);
2514 MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2515 jit.loadDouble(&byteMax, scratch);
2516 MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2518 jit.loadDouble(&half, scratch);
2519 // FIXME: This should probably just use a floating point round!
2520 // https://bugs.webkit.org/show_bug.cgi?id=72054
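// Note: adding 0.5 and truncating implements round-half-up, e.g. 1.5 -> 2.0 -> 2
// and 254.6 -> 255.1 -> 255; this differs from IEEE round-to-nearest-even on
// exact halves, hence the FIXME above.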
2521 jit.addDouble(source, scratch);
2522 jit.truncateDoubleToInt32(scratch, result);
2523 MacroAssembler::Jump truncatedInt = jit.jump();
2525 tooSmall.link(&jit);
2526 jit.xorPtr(result, result);
2527 MacroAssembler::Jump zeroed = jit.jump();
2530 jit.move(JITCompiler::TrustedImm32(255), result);
2532 truncatedInt.link(&jit);
2537 void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor& descriptor, Node* node, size_t elementSize, TypedArraySignedness signedness)
2539 SpeculateCellOperand base(this, node->child1());
2540 SpeculateStrictInt32Operand property(this, node->child2());
2541 StorageOperand storage(this, node->child3());
2543 GPRReg baseReg = base.gpr();
2544 GPRReg propertyReg = property.gpr();
2545 GPRReg storageReg = storage.gpr();
2547 GPRTemporary result(this);
2548 GPRReg resultReg = result.gpr();
2550 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2553 Uncountable, JSValueRegs(), 0,
2555 MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset)));
2556 switch (elementSize) {
2558 if (signedness == SignedTypedArray)
2559 m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2561 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2564 if (signedness == SignedTypedArray)
2565 m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2567 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2570 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2575 if (elementSize < 4 || signedness == SignedTypedArray) {
2576 integerResult(resultReg, node);
2580 ASSERT(elementSize == 4 && signedness == UnsignedTypedArray);
2581 if (node->shouldSpeculateInteger()) {
2582 forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
2583 integerResult(resultReg, node);
2587 FPRTemporary fresult(this);
2588 m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2589 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2590 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2591 positive.link(&m_jit);
2592 doubleResult(fresult.fpr(), node);
2595 void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node* node, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
2597 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2598 GPRReg storageReg = storage.gpr();
2600 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2605 if (valueUse->isConstant()) {
2606 JSValue jsValue = valueOfJSConstant(valueUse.node());
2607 if (!jsValue.isNumber()) {
2608 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2612 double d = jsValue.asNumber();
2613 if (rounding == ClampRounding) {
2614 ASSERT(elementSize == 1);
2615 d = clampDoubleToByte(d);
2617 GPRTemporary scratch(this);
2618 GPRReg scratchReg = scratch.gpr();
2619 m_jit.move(Imm32(toInt32(d)), scratchReg);
2620 value.adopt(scratch);
2621 valueGPR = scratchReg;
2623 switch (valueUse.useKind()) {
2625 SpeculateIntegerOperand valueOp(this, valueUse);
2626 GPRTemporary scratch(this);
2627 GPRReg scratchReg = scratch.gpr();
2628 m_jit.move(valueOp.gpr(), scratchReg);
2629 if (rounding == ClampRounding) {
2630 ASSERT(elementSize == 1);
2631 compileClampIntegerToByte(m_jit, scratchReg);
2633 value.adopt(scratch);
2634 valueGPR = scratchReg;
2639 if (rounding == ClampRounding) {
2640 ASSERT(elementSize == 1);
2641 SpeculateDoubleOperand valueOp(this, valueUse);
2642 GPRTemporary result(this);
2643 FPRTemporary floatScratch(this);
2644 FPRReg fpr = valueOp.fpr();
2645 GPRReg gpr = result.gpr();
2646 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2647 value.adopt(result);
2650 SpeculateDoubleOperand valueOp(this, valueUse);
2651 GPRTemporary result(this);
2652 FPRReg fpr = valueOp.fpr();
2653 GPRReg gpr = result.gpr();
2654 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2655 m_jit.xorPtr(gpr, gpr);
2656 MacroAssembler::Jump fixed = m_jit.jump();
2657 notNaN.link(&m_jit);
2659 MacroAssembler::Jump failed;
2660 if (signedness == SignedTypedArray)
2661 failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2663 failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2665 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2668 value.adopt(result);
2675 RELEASE_ASSERT_NOT_REACHED();
2680 ASSERT_UNUSED(valueGPR, valueGPR != property);
2681 ASSERT(valueGPR != base);
2682 ASSERT(valueGPR != storageReg);
2683 MacroAssembler::Jump outOfBounds;
2684 if (node->op() == PutByVal)
2685 outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
2687 switch (elementSize) {
2689 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2692 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2695 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2700 if (node->op() == PutByVal)
2701 outOfBounds.link(&m_jit);
2705 void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor& descriptor, Node* node, size_t elementSize)
2707 SpeculateCellOperand base(this, node->child1());
2708 SpeculateStrictInt32Operand property(this, node->child2());
2709 StorageOperand storage(this, node->child3());
2711 GPRReg baseReg = base.gpr();
2712 GPRReg propertyReg = property.gpr();
2713 GPRReg storageReg = storage.gpr();
2715 ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2717 FPRTemporary result(this);
2718 FPRReg resultReg = result.fpr();
2720 Uncountable, JSValueRegs(), 0,
2722 MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset)));
2723 switch (elementSize) {
2725 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2726 m_jit.convertFloatToDouble(resultReg, resultReg);
2729 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2733 RELEASE_ASSERT_NOT_REACHED();
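// A NaN read out of the typed array may carry an arbitrary payload; normalize
// it to the canonical quiet NaN below so that, once boxed, it cannot be
// mistaken for a tagged JSValue bit pattern.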
2736 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
2737 static const double NaN = QNaN;
2738 m_jit.loadDouble(&NaN, resultReg);
2739 notNaN.link(&m_jit);
2741 doubleResult(resultReg, node);
2744 void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node* node, size_t elementSize)
2746 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2747 GPRReg storageReg = storage.gpr();
2749 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2750 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2752 SpeculateDoubleOperand valueOp(this, valueUse);
2753 FPRTemporary scratch(this);
2754 FPRReg valueFPR = valueOp.fpr();
2755 FPRReg scratchFPR = scratch.fpr();
2757 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2759 MacroAssembler::Jump outOfBounds;
2760 if (node->op() == PutByVal)
2761 outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
2763 switch (elementSize) {
2765 m_jit.moveDouble(valueFPR, scratchFPR);
2766 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2767 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2771 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2774 RELEASE_ASSERT_NOT_REACHED();
2776 if (node->op() == PutByVal)
2777 outOfBounds.link(&m_jit);
2781 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
2783 // Check that prototype is an object.
2784 m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
2785 speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
2787 // Initialize scratchReg with the value being checked.
2788 m_jit.move(valueReg, scratchReg);
2790 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
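// The loop terminates either when prototypeReg is found in the chain (a match)
// or when the loaded prototype is no longer a cell, i.e. we reached null at the
// end of the chain (no match).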
2791 MacroAssembler::Label loop(&m_jit);
2792 m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
2794 m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
2795 MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
2796 m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
2798 m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
2799 MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2800 m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2803 // No match - result is false.
2805 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2807 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2809 MacroAssembler::Jump putResult = m_jit.jump();
2811 isInstance.link(&m_jit);
2813 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2815 m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2818 putResult.link(&m_jit);
2821 void SpeculativeJIT::compileInstanceOf(Node* node)
2823 if (node->child1().useKind() == UntypedUse) {
2824 // It might not be a cell. Speculate less aggressively.
2825 // Or: it might only be used once (i.e. by us), so we get zero benefit
2826 // from speculating any more aggressively than we absolutely need to.
2828 JSValueOperand value(this, node->child1());
2829 SpeculateCellOperand prototype(this, node->child2());
2830 GPRTemporary scratch(this);
2832 GPRReg prototypeReg = prototype.gpr();
2833 GPRReg scratchReg = scratch.gpr();
2836 GPRReg valueReg = value.gpr();
2837 MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
2838 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2840 GPRReg valueTagReg = value.tagGPR();
2841 GPRReg valueReg = value.payloadGPR();
2842 MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
2843 m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2846 MacroAssembler::Jump done = m_jit.jump();
2848 isCell.link(&m_jit);
2850 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2855 jsValueResult(scratchReg, node, DataFormatJSBoolean);
2857 booleanResult(scratchReg, node);
2862 SpeculateCellOperand value(this, node->child1());
2863 SpeculateCellOperand prototype(this, node->child2());
2865 GPRTemporary scratch(this);
2867 GPRReg valueReg = value.gpr();
2868 GPRReg prototypeReg = prototype.gpr();
2869 GPRReg scratchReg = scratch.gpr();
2871 compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2874 jsValueResult(scratchReg, node, DataFormatJSBoolean);
2876 booleanResult(scratchReg, node);
2880 #if CPU(X86) || CPU(X86_64)
2881 void SpeculativeJIT::compileSoftModulo(Node* node)
2883 // In the fast path, the dividend value could be the final result
2884 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
2885 SpeculateStrictInt32Operand op1(this, node->child1());
2886 if (isInt32Constant(node->child2().node())) {
2887 int32_t divisor = valueOfInt32Constant(node->child2().node());
2889 GPRReg op1Gpr = op1.gpr();
2891 GPRTemporary eax(this, X86Registers::eax);
2892 GPRTemporary edx(this, X86Registers::edx);
2893 GPRTemporary scratch(this);
2894 GPRReg scratchGPR = scratch.gpr();
2897 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
2898 op1SaveGPR = allocate();
2899 ASSERT(op1Gpr != op1SaveGPR);
2900 m_jit.move(op1Gpr, op1SaveGPR);
2902 op1SaveGPR = op1Gpr;
2903 ASSERT(op1SaveGPR != X86Registers::eax);
2904 ASSERT(op1SaveGPR != X86Registers::edx);
2906 m_jit.move(op1Gpr, eax.gpr());
2907 m_jit.move(TrustedImm32(divisor), scratchGPR);
2909 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, eax.gpr(), TrustedImm32(-2147483647-1)));
2910 m_jit.assembler().cdq();
2911 m_jit.assembler().idivl_r(scratchGPR);
2912 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
2913 // Check that we're not about to create negative zero.
2914 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
2915 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
2916 numeratorPositive.link(&m_jit);
2918 if (op1SaveGPR != op1Gpr)
2921 integerResult(edx.gpr(), node);
2926 SpeculateIntegerOperand op2(this, node->child2());
2928 GPRTemporary eax(this, X86Registers::eax);
2929 GPRTemporary edx(this, X86Registers::edx);
2930 GPRReg op1GPR = op1.gpr();
2931 GPRReg op2GPR = op2.gpr();
2937 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
2938 op2TempGPR = allocate();
2941 op2TempGPR = InvalidGPRReg;
2942 if (op1GPR == X86Registers::eax)
2943 temp = X86Registers::edx;
2945 temp = X86Registers::eax;
2948 if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
2949 op1SaveGPR = allocate();
2950 ASSERT(op1GPR != op1SaveGPR);
2951 m_jit.move(op1GPR, op1SaveGPR);
2953 op1SaveGPR = op1GPR;
2955 ASSERT(temp != op1GPR);
2956 ASSERT(temp != op2GPR);
2957 ASSERT(op1SaveGPR != X86Registers::eax);
2958 ASSERT(op1SaveGPR != X86Registers::edx);
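// Unsigned-wraparound trick: temp = op2 + 1, so op2 == 0 gives temp == 1 and
// op2 == -1 gives temp == 0. Branching on temp (unsigned) > 1 therefore filters
// out exactly the two denominators that idiv can fault or overflow on.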
2960 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
2962 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
2964 JITCompiler::Jump done;
2965 // FIXME: if the node is not used as a number then we can do this more easily.
2966 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
2967 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
2969 safeDenominator.link(&m_jit);
2971 if (op2TempGPR != InvalidGPRReg) {
2972 m_jit.move(op2GPR, op2TempGPR);
2973 op2GPR = op2TempGPR;
2976 m_jit.move(op1GPR, eax.gpr());
2977 m_jit.assembler().cdq();
2978 m_jit.assembler().idivl_r(op2GPR);
2980 if (op2TempGPR != InvalidGPRReg)
2983 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
2984 // Check that we're not about to create negative zero.
2985 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
2986 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
2987 numeratorPositive.link(&m_jit);
2990 if (op1SaveGPR != op1GPR)
2993 integerResult(edx.gpr(), node);
2995 #elif CPU(ARM_THUMB2)
2996 void SpeculativeJIT::compileSoftModulo(Node* node)
2998 // In the fast path, the dividend value could be the final result
2999 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3000 SpeculateStrictInt32Operand op1(this, node->child1());
3001 if (isInt32Constant(node->child2().node())) {
3002 int32_t divisor = valueOfInt32Constant(node->child2().node());
3003 if (divisor > 0 && hasOneBitSet(divisor)) { // If power of 2 then just mask
3004 GPRReg dividendGPR = op1.gpr();
3005 GPRTemporary result(this);
3006 GPRReg resultGPR = result.gpr();
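// abs/mask/negate sequence: take |dividend|, mask with (divisor - 1), then
// negate the result again if the dividend was negative. This matches JS
// truncated-division remainder semantics; e.g. -7 % 4 == -3.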
3008 m_jit.assembler().cmp(dividendGPR, ARMThumbImmediate::makeEncodedImm(0));
3009 m_jit.assembler().it(ARMv7Assembler::ConditionLT, false);
3010 m_jit.assembler().neg(resultGPR, dividendGPR);
3011 m_jit.assembler().mov(resultGPR, dividendGPR);
3012 m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
3013 m_jit.assembler().it(ARMv7Assembler::ConditionLT);
3014 m_jit.assembler().neg(resultGPR, resultGPR);
3016 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3017 // Check that we're not about to create negative zero.
3018 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3019 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3020 numeratorPositive.link(&m_jit);
3022 integerResult(resultGPR, node);
3027 SpeculateIntegerOperand op2(this, node->child2());
3029 GPRReg dividendGPR = op1.gpr();
3030 GPRReg divisorGPR = op2.gpr();
3032 GPRResult result(this);
3033 GPRReg resultGPR = result.gpr();
3035 if (MacroAssembler::supportsIntegerDiv()) {
3036 GPRTemporary multiplyAnswer(this);
3037 GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3038 m_jit.assembler().sdiv(resultGPR, dividendGPR, divisorGPR);
3039 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, resultGPR, divisorGPR, multiplyAnswerGPR));
3040 m_jit.assembler().sub(resultGPR, dividendGPR, multiplyAnswerGPR);
3043 callOperation(operationModOnInts, resultGPR, dividendGPR, divisorGPR);
3046 // If the user cares about negative zero, then speculate that we're not about
3047 // to produce negative zero.
3048 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3049 // Check that we're not about to create negative zero.
3050 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3051 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3052 numeratorPositive.link(&m_jit);
3055 integerResult(resultGPR, node);
3057 #else // CPU type without integer division
3058 void SpeculativeJIT::compileSoftModulo(Node* node)
3060 SpeculateStrictInt32Operand op1(this, node->child1());
3061 SpeculateIntegerOperand op2(this, node->child2());
3063 // Do this the *safest* way possible: call out to a C function that will do the modulo,
3064 // and then attempt to convert back.
3065 GPRReg dividendGPR = op1.gpr();
3066 GPRReg divisorGPR = op2.gpr();
3068 FPRResult result(this);
3071 callOperation(operationFModOnInts, result.fpr(), dividendGPR, divisorGPR);
3073 FPRTemporary scratch(this);
3074 GPRTemporary intResult(this);
3075 JITCompiler::JumpList failureCases;
3076 m_jit.branchConvertDoubleToInt32(result.fpr(), intResult.gpr(), failureCases, scratch.fpr(), false);
3077 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
3078 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3079 // Check that we're not about to create negative zero.
3080 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3081 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, intResult.gpr()));
3082 numeratorPositive.link(&m_jit);
3085 integerResult(intResult.gpr(), node);
3089 void SpeculativeJIT::compileAdd(Node* node)
3091 switch (node->binaryUseKind()) {
3093 if (isNumberConstant(node->child1().node())) {
3094 int32_t imm1 = valueOfInt32Constant(node->child1().node());
3095 SpeculateIntegerOperand op2(this, node->child2());
3096 GPRTemporary result(this);
3098 if (nodeCanTruncateInteger(node->arithNodeFlags())) {
3099 m_jit.move(op2.gpr(), result.gpr());
3100 m_jit.add32(Imm32(imm1), result.gpr());
3102 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
3104 integerResult(result.gpr(), node);
3108 if (isNumberConstant(node->child2().node())) {
3109 SpeculateIntegerOperand op1(this, node->child1());
3110 int32_t imm2 = valueOfInt32Constant(node->child2().node());
3111 GPRTemporary result(this);
3113 if (nodeCanTruncateInteger(node->arithNodeFlags())) {
3114 m_jit.move(op1.gpr(), result.gpr());
3115 m_jit.add32(Imm32(imm2), result.gpr());
3117 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3119 integerResult(result.gpr(), node);
3123 SpeculateIntegerOperand op1(this, node->child1());
3124 SpeculateIntegerOperand op2(this, node->child2());
3125 GPRTemporary result(this, op1, op2);
3127 GPRReg gpr1 = op1.gpr();
3128 GPRReg gpr2 = op2.gpr();
3129 GPRReg gprResult = result.gpr();
3131 if (nodeCanTruncateInteger(node->arithNodeFlags())) {
3132 if (gpr1 == gprResult)
3133 m_jit.add32(gpr2, gprResult);
3135 m_jit.move(gpr2, gprResult);
3136 m_jit.add32(gpr1, gprResult);
3139 MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3141 if (gpr1 == gprResult)
3142 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3143 else if (gpr2 == gprResult)
3144 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3146 speculationCheck(Overflow, JSValueRegs(), 0, check);
3149 integerResult(gprResult, node);
3154 SpeculateDoubleOperand op1(this, node->child1());
3155 SpeculateDoubleOperand op2(this, node->child2());
3156 FPRTemporary result(this, op1, op2);
3158 FPRReg reg1 = op1.fpr();
3159 FPRReg reg2 = op2.fpr();
3160 m_jit.addDouble(reg1, reg2, result.fpr());
3162 doubleResult(result.fpr(), node);
3167 RELEASE_ASSERT(node->op() == ValueAdd);
3168 compileValueAdd(node);
3173 RELEASE_ASSERT_NOT_REACHED();
3178 void SpeculativeJIT::compileMakeRope(Node* node)
3180 ASSERT(node->child1().useKind() == KnownStringUse);
3181 ASSERT(node->child2().useKind() == KnownStringUse);
3182 ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3184 SpeculateCellOperand op1(this, node->child1());
3185 SpeculateCellOperand op2(this, node->child2());
3186 SpeculateCellOperand op3(this, node->child3());
3187 GPRTemporary result(this);
3188 GPRTemporary allocator(this);
3189 GPRTemporary scratch(this);
3193 opGPRs[0] = op1.gpr();
3194 opGPRs[1] = op2.gpr();
3195 if (node->child3()) {
3196 opGPRs[2] = op3.gpr();
3199 opGPRs[2] = InvalidGPRReg;
3202 GPRReg resultGPR = result.gpr();
3203 GPRReg allocatorGPR = allocator.gpr();
3204 GPRReg scratchGPR = scratch.gpr();
3206 JITCompiler::JumpList slowPath;
3207 MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
3208 m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3209 emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3211 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3212 for (unsigned i = 0; i < numOpGPRs; ++i)
3213 m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3214 for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3215 m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
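// Derive the rope's flags and length from its fibers: AND the flags so the rope
// is marked 8-bit only if every fiber is 8-bit, and ADD the lengths for the
// total. scratchGPR and allocatorGPR are reused as accumulators here since
// their previous values are dead.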
3216 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3217 m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3218 for (unsigned i = 1; i < numOpGPRs; ++i) {
3219 m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3220 m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
3222 m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3223 m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3224 m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3226 switch (numOpGPRs) {
3228 addSlowPathGenerator(slowPathCall(
3229 slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3232 addSlowPathGenerator(slowPathCall(
3233 slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3236 RELEASE_ASSERT_NOT_REACHED();
3240 cellResult(resultGPR, node);
3243 void SpeculativeJIT::compileArithSub(Node* node)
3245 switch (node->binaryUseKind()) {
3247 if (isNumberConstant(node->child2().node())) {
3248 SpeculateIntegerOperand op1(this, node->child1());
3249 int32_t imm2 = valueOfInt32Constant(node->child2().node());
3250 GPRTemporary result(this);
3252 if (nodeCanTruncateInteger(node->arithNodeFlags())) {
3253 m_jit.move(op1.gpr(), result.gpr());
3254 m_jit.sub32(Imm32(imm2), result.gpr());
3256 #if ENABLE(JIT_CONSTANT_BLINDING)
3257 GPRTemporary scratch(this);
3258 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3260 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3264 integerResult(result.gpr(), node);
3268 if (isNumberConstant(node->child1().node())) {
3269 int32_t imm1 = valueOfInt32Constant(node->child1().node());
3270 SpeculateIntegerOperand op2(this, node->child2());
3271 GPRTemporary result(this);
3273 m_jit.move(Imm32(imm1), result.gpr());
3274 if (nodeCanTruncateInteger(node->arithNodeFlags()))
3275 m_jit.sub32(op2.gpr(), result.gpr());
3277 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3279 integerResult(result.gpr(), node);
3283 SpeculateIntegerOperand op1(this, node->child1());
3284 SpeculateIntegerOperand op2(this, node->child2());
3285 GPRTemporary result(this);
3287 if (nodeCanTruncateInteger(node->arithNodeFlags())) {
3288 m_jit.move(op1.gpr(), result.gpr());
3289 m_jit.sub32(op2.gpr(), result.gpr());
3291 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3293 integerResult(result.gpr(), node);
3298 SpeculateDoubleOperand op1(this, node->child1());
3299 SpeculateDoubleOperand op2(this, node->child2());
3300 FPRTemporary result(this, op1);
3302 FPRReg reg1 = op1.fpr();
3303 FPRReg reg2 = op2.fpr();
3304 m_jit.subDouble(reg1, reg2, result.fpr());
3306 doubleResult(result.fpr(), node);
3311 RELEASE_ASSERT_NOT_REACHED();
3316 void SpeculativeJIT::compileArithNegate(Node* node)
3318 switch (node->child1().useKind()) {
3320 SpeculateIntegerOperand op1(this, node->child1());
3321 GPRTemporary result(this);
3323 m_jit.move(op1.gpr(), result.gpr());
3325 if (nodeCanTruncateInteger(node->arithNodeFlags()))
3326 m_jit.neg32(result.gpr());
3328 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3329 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags()))
3330 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr()));
3333 integerResult(result.gpr(), node);
3338 SpeculateDoubleOperand op1(this, node->child1());
3339 FPRTemporary result(this);
3341 m_jit.negateDouble(op1.fpr(), result.fpr());
3343 doubleResult(result.fpr(), node);
3348 RELEASE_ASSERT_NOT_REACHED();
3352 void SpeculativeJIT::compileArithIMul(Node* node)
3354 SpeculateIntegerOperand op1(this, node->child1());
3355 SpeculateIntegerOperand op2(this, node->child2());
3356 GPRTemporary result(this);
3358 GPRReg reg1 = op1.gpr();
3359 GPRReg reg2 = op2.gpr();
3361 m_jit.move(reg1, result.gpr());
3362 m_jit.mul32(reg2, result.gpr());
3363 integerResult(result.gpr(), node);
3367 void SpeculativeJIT::compileArithMul(Node* node)
3369 switch (node->binaryUseKind()) {
3371 SpeculateIntegerOperand op1(this, node->child1());
3372 SpeculateIntegerOperand op2(this, node->child2());
3373 GPRTemporary result(this);
3375 GPRReg reg1 = op1.gpr();
3376 GPRReg reg2 = op2.gpr();
3378 // We can perform truncated multiplications if we get to this point, because if the
3379 // fixup phase could not prove that it would be safe, it would have turned us into
3380 // a double multiplication.
3381 if (nodeCanTruncateInteger(node->arithNodeFlags())) {
3382 m_jit.move(reg1, result.gpr());
3383 m_jit.mul32(reg2, result.gpr());
3386 Overflow, JSValueRegs(), 0,
3387 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3390 // Check for negative zero, if the users of this node care about such things.
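// A zero result with either operand negative means we computed 0 * -n or
// -n * 0, which is -0 in double arithmetic; an integer register cannot
// represent -0, so we must exit. (Observable via e.g. 1 / (0 * -1) === -Infinity.)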
3391 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3392 MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3393 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3394 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3395 resultNonZero.link(&m_jit);
3398 integerResult(result.gpr(), node);
3403 SpeculateDoubleOperand op1(this, node->child1());
3404 SpeculateDoubleOperand op2(this, node->child2());
3405 FPRTemporary result(this, op1, op2);
3407 FPRReg reg1 = op1.fpr();
3408 FPRReg reg2 = op2.fpr();
3410 m_jit.mulDouble(reg1, reg2, result.fpr());
3412 doubleResult(result.fpr(), node);
3417 RELEASE_ASSERT_NOT_REACHED();
3422 #if CPU(X86) || CPU(X86_64)
3423 void SpeculativeJIT::compileIntegerArithDivForX86(Node* node)
3425 SpeculateIntegerOperand op1(this, node->child1());
3426 SpeculateIntegerOperand op2(this, node->child2());
3427 GPRTemporary eax(this, X86Registers::eax);
3428 GPRTemporary edx(this, X86Registers::edx);
3429 GPRReg op1GPR = op1.gpr();
3430 GPRReg op2GPR = op2.gpr();
3434 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3435 op2TempGPR = allocate();
3438 op2TempGPR = InvalidGPRReg;
3439 if (op1GPR == X86Registers::eax)
3440 temp = X86Registers::edx;
3442 temp = X86Registers::eax;
3445 ASSERT(temp != op1GPR);
3446 ASSERT(temp != op2GPR);
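// Same unsigned-wraparound trick as in compileSoftModulo: op2 + 1 is
// (unsigned) <= 1 exactly when op2 is 0 or -1, the two denominators that need
// special handling before idiv.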
3448 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3450 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3452 JITCompiler::Jump done;
3453 if (nodeUsedAsNumber(node->arithNodeFlags())) {
3454 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3455 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3457 JITCompiler::Jump zero = m_jit.branchTest32(JITCompiler::Zero, op2GPR);
3458 JITCompiler::Jump isNeg2ToThe31 = m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1));
3460 m_jit.move(TrustedImm32(0), eax.gpr());
3461 isNeg2ToThe31.link(&m_jit);
3462 done = m_jit.jump();
3465 safeDenominator.link(&m_jit);
3467 // If the user cares about negative zero, then speculate that we're not about
3468 // to produce negative zero.
3469 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3470 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3471 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3472 numeratorNonZero.link(&m_jit);
3475 if (op2TempGPR != InvalidGPRReg) {
3476 m_jit.move(op2GPR, op2TempGPR);
3477 op2GPR = op2TempGPR;
3480 m_jit.move(op1GPR, eax.gpr());
3481 m_jit.assembler().cdq();
3482 m_jit.assembler().idivl_r(op2GPR);
3484 if (op2TempGPR != InvalidGPRReg)
3487 // Check that there was no remainder. If there had been, then we'd be obligated to
3488 // produce a double result instead.
3489 if (nodeUsedAsNumber(node->arithNodeFlags()))
3490 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3494 integerResult(eax.gpr(), node);
3496 #elif ENABLE(ARM_INTEGER_DIV)
3497 void SpeculativeJIT::compileIntegerArithDivForARM(Node* node)
3499 SpeculateIntegerOperand op1(this, node->child1());
3500 SpeculateIntegerOperand op2(this, node->child2());
3501 GPRReg op1GPR = op1.gpr();
3502 GPRReg op2GPR = op2.gpr();
3503 GPRTemporary quotient(this);
3504 GPRTemporary multiplyAnswer(this);
3506 // If the user cares about negative zero, then speculate that we're not about
3507 // to produce negative zero.
3508 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3509 MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3510 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3511 numeratorNonZero.link(&m_jit);
3514 m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
3516 // Check that there was no remainder. If there had been, then we'd be obligated to
3517 // produce a double result instead.
3518 if (nodeUsedAsNumber(node->arithNodeFlags())) {
3519 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3520 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3523 integerResult(quotient.gpr(), node);
3527 void SpeculativeJIT::compileArithMod(Node* node)
3529 switch (node->binaryUseKind()) {
3531 compileSoftModulo(node);
3536 SpeculateDoubleOperand op1(this, node->child1());
3537 SpeculateDoubleOperand op2(this, node->child2());
3539 FPRReg op1FPR = op1.fpr();
3540 FPRReg op2FPR = op2.fpr();
3544 FPRResult result(this);
3546 callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3548 doubleResult(result.fpr(), node);
3553 RELEASE_ASSERT_NOT_REACHED();
3558 // Returns true if the compare is fused with a subsequent branch.
3559 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
3561 if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3564 if (node->isBinaryUseKind(Int32Use)) {
3565 compileIntegerCompare(node, condition);
3569 if (node->isBinaryUseKind(NumberUse)) {
3570 compileDoubleCompare(node, doubleCondition);
    if (node->op() == CompareEq) {
        if (node->isBinaryUseKind(StringUse)) {
            compileStringEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(BooleanUse)) {
            compileBooleanCompare(node, condition);
            return false;
        }

        if (node->isBinaryUseKind(ObjectUse)) {
            compileObjectEquality(node);
            return false;
        }

        if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse) {
            compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
            return false;
        }

        if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse) {
            compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
            return false;
        }
    }

    nonSpeculativeNonPeepholeCompare(node, condition, operation);
    return false;
}
bool SpeculativeJIT::compileStrictEqForConstant(Node* node, Edge value, JSValue constant)
{
    JSValueOperand op1(this, value);

    // FIXME: This code is wrong for the case that the constant is null or undefined,
    // and the value is an object that MasqueradesAsUndefined.
    // https://bugs.webkit.org/show_bug.cgi?id=109487
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
        BlockIndex taken = branchNode->takenBlockIndex();
        BlockIndex notTaken = branchNode->notTakenBlockIndex();
        MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

        // The branch instruction will branch to the taken block.
        // If taken is next, switch taken with notTaken and invert the branch condition so we can fall through.
        if (taken == nextBlock()) {
            condition = MacroAssembler::NotEqual;
            BlockIndex tmp = taken;
            taken = notTaken;
            notTaken = tmp;
        }

#if USE(JSVALUE64)
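        // On 64-bit, every JSValue is a single 64-bit word, so strict equality
        // against a constant collapses to one compare of the encoded bits.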
        branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken);
#else
        GPRReg payloadGPR = op1.payloadGPR();
        GPRReg tagGPR = op1.tagGPR();
        if (condition == MacroAssembler::Equal) {
            // Drop down if not equal, go elsewhere if equal.
            MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
            branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
            notEqual.link(&m_jit);
        } else {
            // Drop down if equal, go elsewhere if not equal.
            branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken);
            branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
        }
#endif

        jump(notTaken);

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    GPRTemporary result(this);

#if USE(JSVALUE64)
    GPRReg op1GPR = op1.gpr();
    GPRReg resultGPR = result.gpr();
    m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR);
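    // In the 64-bit value encoding, true and false differ only in the low bit
    // (ValueFalse is 0x06, ValueTrue is 0x07), so the equal case below just
    // ORs 1 into the encoded false.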
    MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant)));
    m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
    notEqual.link(&m_jit);
    jsValueResult(resultGPR, node, DataFormatJSBoolean);
#else
    GPRReg op1PayloadGPR = op1.payloadGPR();
    GPRReg op1TagGPR = op1.tagGPR();
    GPRReg resultGPR = result.gpr();
    m_jit.move(TrustedImm32(0), resultGPR);
    MacroAssembler::JumpList notEqual;
    notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag())));
    notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload())));
    m_jit.move(TrustedImm32(1), resultGPR);
    notEqual.link(&m_jit);
    booleanResult(resultGPR, node);
#endif

    return false;
}
bool SpeculativeJIT::compileStrictEq(Node* node)
{
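    // Every speculated case below follows the same pattern: try to fuse the
    // compare with a subsequent branch (returning true), and otherwise
    // materialize a boolean result (returning false).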
    switch (node->binaryUseKind()) {
    case BooleanUse: {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
            compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileBooleanCompare(node, MacroAssembler::Equal);
        return false;
    }
    case Int32Use: {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
            compilePeepHoleIntegerBranch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileIntegerCompare(node, MacroAssembler::Equal);
        return false;
    }
    case NumberUse: {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
            compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileDoubleCompare(node, MacroAssembler::DoubleEqual);
        return false;
    }
    case StringUse: {
        compileStringEquality(node);
        return false;
    }
    case ObjectUse: {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
            compilePeepHoleObjectEquality(node, branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectEquality(node);
        return false;
    }
    case UntypedUse: {
        return nonSpeculativeStrictEq(node);
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
}
void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
{
    SpeculateBooleanOperand op1(this, node->child1());
    SpeculateBooleanOperand op2(this, node->child2());
    GPRTemporary result(this);

    m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
    // If we add a DataFormatBool, we should use it here.
#if USE(JSVALUE32_64)
    booleanResult(result.gpr(), node);
#else
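    // compare32 left 0 or 1 in the result register; OR-ing in ValueFalse turns
    // that into the encoded JS false or true.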
    m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
    jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
#endif
}
void SpeculativeJIT::compileStringEquality(Node* node)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, left);
    GPRTemporary rightTemp2(this, right);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    JITCompiler::JumpList trueCase;
    JITCompiler::JumpList falseCase;
    JITCompiler::JumpList slowCase;
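    // The inline fast path below handles identical pointers, mismatched
    // lengths, empty strings, and byte-wise comparison of flat 8-bit strings;
    // everything else is punted to the slow path.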
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(leftGPR), node->child1(), SpecString, m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(leftGPR, JSCell::structureOffset()),
            MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    trueCase.append(m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR));
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(rightGPR), node->child2(), SpecString, m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(rightGPR, JSCell::structureOffset()),
            MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));

    m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);

    falseCase.append(m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
        lengthGPR));

    trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
    m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);
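    // An unresolved rope has a null value pointer and no contiguous character
    // data; resolving it can allocate, so ropes go to the slow path.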
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));
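    // Only flat 8-bit (Latin-1) string data is compared inline; the checks
    // below send 16-bit strings to the slow path as well.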
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);

    MacroAssembler::Label loop = m_jit.label();
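    // Walk the strings backwards, from the last byte down to the first;
    // lengthGPR doubles as the loop index.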
    m_jit.sub32(TrustedImm32(1), lengthGPR);

    // This isn't going to generate the best code on x86. But that's OK, it's still better
    // than not inlining.
    m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
    m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);