/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "Arguments.h"
#include "ArrayPrototype.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "JSCJSValueInlines.h"
#include "ObjectPrototype.h"
namespace JSC { namespace DFG {

GPRReg SpeculativeJIT::fillJSValue(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            if (isInt32Constant(edge.node())) {
                info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
                JSValue jsValue = jsNumber(valueOfInt32Constant(edge.node()));
                m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
            } else if (isNumberConstant(edge.node())) {
                info.fillJSValue(*m_stream, gpr, DataFormatJSDouble);
                JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(edge.node()));
                m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
            } else {
                ASSERT(isJSConstant(edge.node()));
                JSValue jsValue = valueOfJSConstant(edge.node());
                m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJS);
            }

            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
        } else {
            DataFormat spillFormat = info.spillFormat();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            if (spillFormat == DataFormatInt32) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
                spillFormat = DataFormatJSInt32;
            } else {
                m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
                if (spillFormat == DataFormatDouble) {
                    // Need to box the double, since we want a JSValue.
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
                    spillFormat = DataFormatJSDouble;
                } else
                    RELEASE_ASSERT(spillFormat & DataFormatJS);
            }
            info.fillJSValue(*m_stream, gpr, spillFormat);
        }
        return gpr;
    }

    case DataFormatInt32: {
        GPRReg gpr = info.gpr();
        // If the register has already been locked we need to take a copy.
        // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
        if (m_gprs.isLocked(gpr)) {
            GPRReg result = allocate();
            m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr, result);
            return result;
        }
        m_gprs.lock(gpr);
        m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        return gpr;
    }

    case DataFormatDouble: {
        FPRReg fpr = info.fpr();
        GPRReg gpr = boxDouble(fpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJSDouble);
        m_fprs.release(fpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderJS);

        return gpr;
    }

    case DataFormatCell:
        // No retag required on JSVALUE64!
    case DataFormatJS:
    case DataFormatJSInt32:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatBoolean:
    case DataFormatStorage:
        // this type currently never occurs
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

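// Boxes the result of a uint32-producing operation as a JS number without speculating:
// non-negative values are re-tagged as int32 immediates, while values with the sign bit
// set are converted to double, have 2^32 added, and are boxed.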
void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node* node)
{
    SpeculateInt32Operand op1(this, node->child1());
    FPRTemporary boxer(this);
    GPRTemporary result(this, Reuse, op1);

    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0));

    m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr());
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), boxer.fpr());

    boxDouble(boxer.fpr(), result.gpr());

    JITCompiler::Jump done = m_jit.jump();

    positive.link(&m_jit);

    m_jit.or64(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr());

    done.link(&m_jit);

    jsValueResult(result.gpr(), m_currentNode);
}

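// Emits a patchable inline cache for get_by_id: a structure check, a convertible
// butterfly load, and a compact-offset load, with a slow path call to
// operationGetByIdOptimize that the repatching machinery can later rewire.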
void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
    JITCompiler::DataLabelPtr structureToCompare;
    JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));

    JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
        m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
    JITCompiler::DataLabelCompact loadWithPatch = m_jit.load64WithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);

    JITCompiler::Label doneLabel = m_jit.label();

    OwnPtr<SlowPathGenerator> slowPath;
    if (!slowPathTarget.isSet()) {
        slowPath = slowPathCall(
            structureCheck.m_jump, this, operationGetByIdOptimize, resultGPR, baseGPR,
            identifierUID(identifierNumber), spillMode);
    } else {
        JITCompiler::JumpList slowCases;
        slowCases.append(structureCheck.m_jump);
        slowCases.append(slowPathTarget);
        slowPath = slowPathCall(
            slowCases, this, operationGetByIdOptimize, resultGPR, baseGPR,
            identifierUID(identifierNumber), spillMode);
    }
    m_jit.addPropertyAccess(
        PropertyAccessRecord(
            codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, loadWithPatch,
            slowPath.get(), doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR),
            usedRegisters(),
            spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
    addSlowPathGenerator(slowPath.release());
}

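// Emits the matching inline cache for put_by_id. The slow-path operation is chosen
// according to strict mode and whether this is a direct put; the scratch register is
// excluded from the recorded in-use set because the generated stub may clobber it.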
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
    JITCompiler::DataLabelPtr structureToCompare;
    JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));

    writeBarrier(baseGPR, valueGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);

    JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
        m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
    JITCompiler::DataLabel32 storeWithPatch = m_jit.store64WithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));

    JITCompiler::Label doneLabel = m_jit.label();

    V_DFGOperation_EJCI optimizedCall;
    if (m_jit.strictModeFor(m_currentNode->codeOrigin)) {
        if (putKind == Direct)
            optimizedCall = operationPutByIdDirectStrictOptimize;
        else
            optimizedCall = operationPutByIdStrictOptimize;
    } else {
        if (putKind == Direct)
            optimizedCall = operationPutByIdDirectNonStrictOptimize;
        else
            optimizedCall = operationPutByIdNonStrictOptimize;
    }
    OwnPtr<SlowPathGenerator> slowPath;
    if (!slowPathTarget.isSet()) {
        slowPath = slowPathCall(
            structureCheck.m_jump, this, optimizedCall, NoResult, valueGPR, baseGPR,
            identifierUID(identifierNumber));
    } else {
        JITCompiler::JumpList slowCases;
        slowCases.append(structureCheck.m_jump);
        slowCases.append(slowPathTarget);
        slowPath = slowPathCall(
            slowCases, this, optimizedCall, NoResult, valueGPR, baseGPR,
            identifierUID(identifierNumber));
    }
    RegisterSet currentlyUsedRegisters = usedRegisters();
    currentlyUsedRegisters.clear(scratchGPR);
    ASSERT(currentlyUsedRegisters.get(baseGPR));
    ASSERT(currentlyUsedRegisters.get(valueGPR));
    m_jit.addPropertyAccess(
        PropertyAccessRecord(
            codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
            JITCompiler::DataLabelCompact(storeWithPatch.label()), slowPath.get(), doneLabel,
            safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), currentlyUsedRegisters));
    addSlowPathGenerator(slowPath.release());
}

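// Compares a value against null/undefined without speculation and produces a boxed
// boolean. Cells compare false unless they masquerade as undefined for the current
// global object; non-cells are masked and compared against the null/undefined pattern.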
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
{
    JSValueOperand arg(this, operand);
    GPRReg argGPR = arg.gpr();

    GPRTemporary result(this, Reuse, arg);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell;

    JITCompiler::Jump notMasqueradesAsUndefined;
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);

        speculationWatchpointForMasqueradesAsUndefined();

        m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR);
        notMasqueradesAsUndefined = m_jit.jump();
    } else {
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);

        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);

        m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
        JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined));

        m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR);
        notMasqueradesAsUndefined = m_jit.jump();

        isMasqueradesAsUndefined.link(&m_jit);
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(operand->codeOrigin)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR);
    }

    if (!isKnownCell(operand.node())) {
        JITCompiler::Jump done = m_jit.jump();

        notCell.link(&m_jit);

        m_jit.move(argGPR, resultGPR);
        m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
        m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);

        done.link(&m_jit);
    }

    notMasqueradesAsUndefined.link(&m_jit);

    m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}

void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    if (taken == nextBlock()) {
        invert = !invert;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg(this, operand);
    GPRReg argGPR = arg.gpr();

    GPRTemporary result(this, Reuse, arg);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell;

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);

        speculationWatchpointForMasqueradesAsUndefined();

        jump(invert ? taken : notTaken, ForceJump);
    } else {
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);

        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);

        m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
        branchTest8(JITCompiler::Zero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), invert ? taken : notTaken);

        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(operand->codeOrigin)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
    }

    if (!isKnownCell(operand.node())) {
        jump(notTaken, ForceJump);

        notCell.link(&m_jit);

        m_jit.move(argGPR, resultGPR);
        m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
        branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken);
    }

    jump(notTaken);
}

bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        RELEASE_ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranchNull(operand, branchNode, invert);

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompareNull(operand, invert);

    return false;
}

void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        cond = JITCompiler::invert(cond);
        callResultCondition = JITCompiler::Zero;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);

        branchTest32(callResultCondition, resultGPR, taken);
    } else {
        GPRTemporary result(this, Reuse, arg2);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));

        branch32(cond, arg1GPR, arg2GPR, taken);

        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            jump(notTaken, ForceJump);

            slowPath.link(&m_jit);

            silentSpillAllRegisters(resultGPR);
            callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
            silentFillAllRegisters(resultGPR);

            branchTest32(callResultCondition, resultGPR, taken);
        }
    }

    jump(notTaken);

    m_indexInBlock = m_block->size() - 1;
    m_currentNode = branchNode;
}

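// Slow path used by the generic compare helpers below: it calls the comparison
// operation, then masks the returned int down to a single bit and boxes it as a
// JS boolean before the result is consumed.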
template<typename JumpType>
class CompareAndBoxBooleanSlowPathGenerator
    : public CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg> {
public:
    CompareAndBoxBooleanSlowPathGenerator(
        JumpType from, SpeculativeJIT* jit,
        S_DFGOperation_EJJ function, GPRReg result, GPRReg arg1, GPRReg arg2)
        : CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg>(
            from, jit, function, NeedToSpill, result)
        , m_arg1(arg1)
        , m_arg2(arg2)
    {
    }

protected:
    virtual void generateInternal(SpeculativeJIT* jit)
    {
        this->setUp(jit);
        this->recordCall(jit->callOperation(this->m_function, this->m_result, m_arg1, m_arg2));
        jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
        jit->m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), this->m_result);
        this->tearDown(jit);
    }

private:
    GPRReg m_arg1;
    GPRReg m_arg2;
};

void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);

        m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
        jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
    } else {
        GPRTemporary result(this, Reuse, arg2);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));

        m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
        m_jit.or32(TrustedImm32(ValueFalse), resultGPR);

        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            addSlowPathGenerator(adoptPtr(
                new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>(
                    slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR)));
        }

        jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
    }
}

void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        invert = !invert;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    arg1.use();
    arg2.use();

    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);

        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);

        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
    } else {
        m_jit.or64(arg1GPR, arg2GPR, resultGPR);

        JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);

        JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
        JITCompiler::Jump leftDouble = m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
        leftOK.link(&m_jit);
        JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
        JITCompiler::Jump rightDouble = m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
        rightOK.link(&m_jit);

        branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
        jump(notTaken, ForceJump);

        twoCellsCase.link(&m_jit);
        branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);

        leftDouble.link(&m_jit);
        rightDouble.link(&m_jit);

        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);

        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    arg1.use();
    arg2.use();

    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        // FIXME: this should flush registers instead of silent spill/fill.
        JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR);

        m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);

        JITCompiler::Jump done = m_jit.jump();

        notEqualCase.link(&m_jit);

        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);

        m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR);
        m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);

        done.link(&m_jit);
    } else {
        m_jit.or64(arg1GPR, arg2GPR, resultGPR);

        JITCompiler::JumpList slowPathCases;

        JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);

        JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
        slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
        leftOK.link(&m_jit);
        JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
        slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
        rightOK.link(&m_jit);

        m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
        m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);

        JITCompiler::Jump done = m_jit.jump();

        twoCellsCase.link(&m_jit);
        slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR));

        m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);

        addSlowPathGenerator(
            adoptPtr(
                new CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>(
                    slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR,
                    arg2GPR)));

        done.link(&m_jit);
    }

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}

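// Emits a JS call or construct: arguments are stored into the outgoing call frame,
// the callee is compared against a patchable pointer for the fast (linked) path, and
// a separate near call handles the unlinked slow path; both calls are registered via
// addJSCall so the call link info can later repatch them.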
void SpeculativeJIT::emitCall(Node* node)
{
    if (node->op() != Call)
        RELEASE_ASSERT(node->op() == Construct);

    // For constructors, the this argument is not passed but we have to make space
    // for it.
    int dummyThisArgument = node->op() == Call ? 0 : 1;

    CallLinkInfo::CallType callType = node->op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;

    Edge calleeEdge = m_jit.graph().m_varArgChildren[node->firstChild()];
    JSValueOperand callee(this, calleeEdge);
    GPRReg calleeGPR = callee.gpr();
    use(calleeEdge);

    // The call instruction's first child is the function; the subsequent children are the
    // arguments.
    int numPassedArgs = node->numChildren() - 1;

    m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount));
    m_jit.store64(GPRInfo::callFrameRegister, callFrameSlot(JSStack::CallerFrame));
    m_jit.store64(calleeGPR, callFrameSlot(JSStack::Callee));

    for (int i = 0; i < numPassedArgs; i++) {
        Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
        JSValueOperand arg(this, argEdge);
        GPRReg argGPR = arg.gpr();
        use(argEdge);

        m_jit.store64(argGPR, argumentSlot(i + dummyThisArgument));
    }

    flushRegisters();

    GPRResult result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::DataLabelPtr targetToCheck;
    JITCompiler::JumpList slowPath;

    CallBeginToken token;
    m_jit.beginCall(node->codeOrigin, token);

    m_jit.addPtr(TrustedImm32(-(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register))), GPRInfo::callFrameRegister);

    slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0)));

    m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR);
    m_jit.store64(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));

    CodeOrigin codeOrigin = m_currentNode->codeOrigin;
    JITCompiler::Call fastCall = m_jit.nearCall();
    m_jit.notifyCall(fastCall, codeOrigin, token);

    JITCompiler::Jump done = m_jit.jump();

    slowPath.link(&m_jit);

    m_jit.move(calleeGPR, GPRInfo::nonArgGPR0);
    m_jit.prepareForExceptionCheck();
    JITCompiler::Call slowCall = m_jit.nearCall();
    m_jit.notifyCall(slowCall, codeOrigin, token);

    done.link(&m_jit);

    m_jit.move(GPRInfo::returnValueGPR, resultGPR);

    jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);

    m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, calleeGPR, m_currentNode->codeOrigin);
}

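// Fills an edge as an int32, speculating that it is one. In a strict fill the value
// tag is stripped so the result is a raw int32; in a non-strict fill a tagged
// DataFormatJSInt32 may be returned instead, as reported through returnFormat.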
template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecInt@%d ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));
    m_interpreter.filter(value, SpecInt32);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if ((edge->hasConstant() && !isInt32Constant(edge.node())) || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            returnFormat = DataFormatInt32;
            return allocate();
        }

        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            ASSERT(isInt32Constant(edge.node()));
            m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
            info.fillInt32(*m_stream, gpr);
            returnFormat = DataFormatInt32;
            return gpr;
        }

        DataFormat spillFormat = info.spillFormat();

        RELEASE_ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);

        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);

        if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) {
            // If we know this was spilled as an integer we can fill without checking.
            if (strict) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillInt32(*m_stream, gpr);
                returnFormat = DataFormatInt32;
                return gpr;
            }
            if (spillFormat == DataFormatInt32) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
            } else
                m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
            returnFormat = DataFormatJSInt32;
            return gpr;
        }
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        // Fill as JSValue, and fall through.
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        m_gprs.unlock(gpr);
    }

    case DataFormatJS: {
        // Check the value is an integer.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecInt32)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branch64(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        // If !strict we're done, return.
        if (!strict) {
            returnFormat = DataFormatJSInt32;
            return gpr;
        }
        // else fall through & handle as DataFormatJSInt32.
        m_gprs.unlock(gpr);
    }

    case DataFormatJSInt32: {
        // In a strict fill we need to strip off the value tag.
        if (strict) {
            GPRReg gpr = info.gpr();
            GPRReg result;
            // If the register has already been locked we need to take a copy.
            // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
            if (m_gprs.isLocked(gpr))
                result = allocate();
            else {
                m_gprs.lock(gpr);
                info.fillInt32(*m_stream, gpr);
                result = gpr;
            }
            m_jit.zeroExtend32ToPtr(gpr, result);
            returnFormat = DataFormatInt32;
            return result;
        }

        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatJSInt32;
        return gpr;
    }

    case DataFormatInt32: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatInt32;
        return gpr;
    }

    case DataFormatDouble:
    case DataFormatJSDouble: {
        if (edge->hasConstant() && isInt32Constant(edge.node())) {
            GPRReg gpr = allocate();
            ASSERT(isInt32Constant(edge.node()));
            m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
            returnFormat = DataFormatInt32;
            return gpr;
        }
    }
    case DataFormatCell:
    case DataFormatBoolean:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        returnFormat = DataFormatInt32;
        return allocate();
    }

    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
{
    return fillSpeculateInt32Internal<false>(edge, returnFormat);
}

GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
{
    DataFormat mustBeDataFormatInt32;
    GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
    RELEASE_ASSERT(mustBeDataFormatInt32 == DataFormatInt32);
    return result;
}

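// Fills an edge as an unboxed double, speculating that it is a number. Integer and
// boxed-double sources are converted or unboxed in place; anything provably not a
// number terminates speculative execution.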
FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecDouble@%d ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecNumber));
    m_interpreter.filter(value, SpecNumber);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    if (info.registerFormat() == DataFormatNone) {
        if (edge->hasConstant()) {
            GPRReg gpr = allocate();

            if (isInt32Constant(edge.node())) {
                FPRReg fpr = fprAllocate();
                m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(static_cast<double>(valueOfInt32Constant(edge.node())))), gpr);
                m_jit.move64ToDouble(gpr, fpr);
                m_gprs.unlock(gpr);

                // Don't fill double here since that will lead to confusion: the
                // register allocator will now think that this is a double while
                // everyone else thinks it's an integer.
                return fpr;
            }
            if (isNumberConstant(edge.node())) {
                FPRReg fpr = fprAllocate();
                m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(edge.node()))), gpr);
                m_jit.move64ToDouble(gpr, fpr);
                m_gprs.unlock(gpr);

                m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
                info.fillDouble(*m_stream, fpr);
                return fpr;
            }

            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return fprAllocate();
        }

        DataFormat spillFormat = info.spillFormat();
        switch (spillFormat) {
        case DataFormatDouble: {
            FPRReg fpr = fprAllocate();
            m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
            m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
            info.fillDouble(*m_stream, fpr);
            return fpr;
        }

        case DataFormatInt32: {
            GPRReg gpr = allocate();

            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillInt32(*m_stream, gpr);
            m_gprs.unlock(gpr);
            break;
        }

        default: {
            GPRReg gpr = allocate();

            RELEASE_ASSERT(spillFormat & DataFormatJS);
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillJSValue(*m_stream, gpr, spillFormat);
            m_gprs.unlock(gpr);
            break;
        }
        }
    }

    switch (info.registerFormat()) {
    case DataFormatNone: // Should have filled, above.
    case DataFormatBoolean: // This type never occurs.
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    case DataFormatCell:
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return fprAllocate();

    case DataFormatJSCell:
    case DataFormatJS:
    case DataFormatJSBoolean: {
        GPRReg jsValueGpr = info.gpr();
        m_gprs.lock(jsValueGpr);
        FPRReg fpr = fprAllocate();
        GPRReg tempGpr = allocate();

        JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);

        if (type & ~SpecNumber)
            speculationCheck(BadType, JSValueRegs(jsValueGpr), edge, m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));

        // First, if we get here we have a double encoded as a JSValue
        m_jit.move(jsValueGpr, tempGpr);
        unboxDouble(tempGpr, fpr);
        JITCompiler::Jump hasUnboxedDouble = m_jit.jump();

        // Finally, handle integers.
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(jsValueGpr, fpr);
        hasUnboxedDouble.link(&m_jit);

        m_gprs.release(jsValueGpr);
        m_gprs.unlock(jsValueGpr);
        m_gprs.unlock(tempGpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
        info.fillDouble(*m_stream, fpr);

        return fpr;
    }

    case DataFormatJSInt32:
    case DataFormatInt32: {
        FPRReg fpr = fprAllocate();
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        m_jit.convertInt32ToDouble(gpr, fpr);
        m_gprs.unlock(gpr);
        return fpr;
    }

    case DataFormatJSDouble: {
        GPRReg gpr = info.gpr();
        FPRReg fpr = fprAllocate();
        if (m_gprs.isLocked(gpr)) {
            // Make sure we don't trample gpr if it is in use.
            GPRReg temp = allocate();
            m_jit.move(gpr, temp);
            unboxDouble(temp, fpr);
            m_gprs.unlock(temp);
        } else
            unboxDouble(gpr, fpr);

        m_gprs.release(gpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);

        info.fillDouble(*m_stream, fpr);
        return fpr;
    }

    case DataFormatDouble: {
        FPRReg fpr = info.fpr();
        m_fprs.lock(fpr);
        return fpr;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidFPRReg;
    }
}

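// Fills an edge as a cell pointer, speculating that it is one. Untyped JSValues get a
// tag-mask check; values already known to be the wrong type terminate speculative
// execution.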
GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecCell@%d ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
    m_interpreter.filter(value, SpecCell);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return allocate();
        }

        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = valueOfJSConstant(edge.node());
            if (jsValue.isCell()) {
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
                m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
                return gpr;
            }
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return gpr;
        }
        RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJS);
        if (type & ~SpecCell)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
        info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatCell:
    case DataFormatJSCell: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
#if DFG_ENABLE(JIT_ASSERT)
        MacroAssembler::Jump checkCell = m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
        m_jit.breakpoint();
        checkCell.link(&m_jit);
#endif
        return gpr;
    }

    case DataFormatJS: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecCell)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
        info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatDouble:
    case DataFormatJSBoolean:
    case DataFormatBoolean: {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecBool@%d ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    m_interpreter.filter(value, SpecBoolean);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return allocate();
        }

        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = valueOfJSConstant(edge.node());
            if (jsValue.isBoolean()) {
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
                m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
                return gpr;
            }
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return gpr;
        }
        RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJS);
        if (type & ~SpecBoolean) {
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
        }
        info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
        return gpr;
    }

    case DataFormatBoolean:
    case DataFormatJSBoolean: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatJS: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecBoolean) {
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
        }
        info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
        return gpr;
    }

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatDouble:
    case DataFormatJSCell:
    case DataFormatCell: {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp)
{
    JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);

    JITCompiler::Jump notNumber = m_jit.branchTest64(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);

    m_jit.move(value, tmp);
    unboxDouble(tmp, result);

    JITCompiler::Jump done = m_jit.jump();

    isInteger.link(&m_jit);

    m_jit.convertInt32ToDouble(value, result);

    done.link(&m_jit);

    return notNumber;
}

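// Compiles Object == Object equality: both operands are speculated to be non-string
// objects (MasqueradesAsUndefined handled either via watchpoint or explicit structure
// checks), after which pointer equality decides the boxed boolean result.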
void SpeculativeJIT::compileObjectEquality(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    GPRTemporary result(this, Reuse, op1);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        GPRTemporary structure(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
    m_jit.move(TrustedImm32(ValueTrue), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();
    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueFalse), resultGPR);
    done.link(&m_jit);

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}

void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
{
    SpeculateCellOperand op1(this, leftChild);
    JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
    GPRTemporary result(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();
    GPRTemporary structure;
    GPRReg structureGPR = InvalidGPRReg;

    bool masqueradesAsUndefinedWatchpointValid =
        masqueradesAsUndefinedWatchpointIsStillValid();

    if (!masqueradesAsUndefinedWatchpointValid) {
        // The masquerades as undefined case will use the structure register, so allocate it here.
        // Do this at the top of the function to avoid branching around a register allocation.
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    if (masqueradesAsUndefinedWatchpointValid) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // It seems that most of the time when programs do a == b where b may be either null/undefined
    // or an object, b is usually an object. Balance the branches to make that case fast.
    MacroAssembler::Jump rightNotCell =
        m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);

    // We know that within this branch, rightChild must be a cell.
    if (masqueradesAsUndefinedWatchpointValid) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because both left and right are pointers to objects that have no special equality
    // protocols.
    MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
    MacroAssembler::Jump trueCase = m_jit.jump();

    rightNotCell.link(&m_jit);

    // We know that within this branch, rightChild must not be a cell. Check if that is enough to
    // prove that it is either null or undefined.
    if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
        m_jit.move(op2GPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);

        typeCheck(
            JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther,
            m_jit.branch64(
                MacroAssembler::NotEqual, resultGPR,
                MacroAssembler::TrustedImm64(ValueNull)));
    }

    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueFalse), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();
    trueCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueTrue), resultGPR);
    done.link(&m_jit);

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}

void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    SpeculateCellOperand op1(this, leftChild);
    JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
    GPRTemporary result(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();
    GPRTemporary structure;
    GPRReg structureGPR = InvalidGPRReg;

    bool masqueradesAsUndefinedWatchpointValid =
        masqueradesAsUndefinedWatchpointIsStillValid();

    if (!masqueradesAsUndefinedWatchpointValid) {
        // The masquerades as undefined case will use the structure register, so allocate it here.
        // Do this at the top of the function to avoid branching around a register allocation.
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    if (masqueradesAsUndefinedWatchpointValid) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // It seems that most of the time when programs do a == b where b may be either null/undefined
    // or an object, b is usually an object. Balance the branches to make that case fast.
    MacroAssembler::Jump rightNotCell =
        m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);

    // We know that within this branch, rightChild must be a cell.
    if (masqueradesAsUndefinedWatchpointValid) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because both left and right are pointers to objects that have no special equality
    // protocols.
    branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);

    // We know that within this branch, rightChild must not be a cell. Check if that is enough to
    // prove that it is either null or undefined.
    if (!needsTypeCheck(rightChild, SpecCell | SpecOther))
        rightNotCell.link(&m_jit);
    else {
        jump(notTaken, ForceJump);

        rightNotCell.link(&m_jit);
        m_jit.move(op2GPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);

        typeCheck(
            JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther, m_jit.branch64(
                MacroAssembler::NotEqual, resultGPR,
                MacroAssembler::TrustedImm64(ValueNull)));
    }

    jump(notTaken);
}

void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
{
    SpeculateInt32Operand op1(this, node->child1());
    SpeculateInt32Operand op2(this, node->child2());
    GPRTemporary result(this, Reuse, op1, op2);

    m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());

    // If we add a DataFormatBool, we should use it here.
    m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
    jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
}

void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
{
    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());
    GPRTemporary result(this);

    m_jit.move(TrustedImm32(ValueTrue), result.gpr());
    MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
    m_jit.xor64(TrustedImm32(true), result.gpr());
    trueCase.link(&m_jit);

    jsValueResult(result.gpr(), node, DataFormatJSBoolean);
}

void SpeculativeJIT::compileValueAdd(Node* node)
{
    JSValueOperand op1(this, node->child1());
    JSValueOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    flushRegisters();

    GPRResult result(this);
    if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node()))
        callOperation(operationValueAddNotNumber, result.gpr(), op1GPR, op2GPR);
    else
        callOperation(operationValueAdd, result.gpr(), op1GPR, op2GPR);

    jsValueResult(result.gpr(), node);
}

void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
{
    JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
    GPRTemporary result(this);
    GPRReg valueGPR = value.gpr();
    GPRReg resultGPR = result.gpr();
    GPRTemporary structure;
    GPRReg structureGPR = InvalidGPRReg;

    bool masqueradesAsUndefinedWatchpointValid =
        masqueradesAsUndefinedWatchpointIsStillValid();

    if (!masqueradesAsUndefinedWatchpointValid) {
        // The masquerades as undefined case will use the structure register, so allocate it here.
        // Do this at the top of the function to avoid branching around a register allocation.
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
    if (masqueradesAsUndefinedWatchpointValid) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), structureGPR);

        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));

        MacroAssembler::Jump isNotMasqueradesAsUndefined =
            m_jit.branchTest8(
                MacroAssembler::Zero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined));

        speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
            m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));

        isNotMasqueradesAsUndefined.link(&m_jit);
    }
    m_jit.move(TrustedImm32(ValueFalse), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();

    notCell.link(&m_jit);

    if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
        m_jit.move(valueGPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
        typeCheck(
            JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
                MacroAssembler::NotEqual,
                resultGPR,
                MacroAssembler::TrustedImm64(ValueNull)));
    }
    m_jit.move(TrustedImm32(ValueTrue), resultGPR);

    done.link(&m_jit);

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}

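// Compiles LogicalNot for each use kind: object-or-other, int32, number, boolean and
// untyped values each get a specialized inversion sequence producing a boxed boolean.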
void SpeculativeJIT::compileLogicalNot(Node* node)
{
    switch (node->child1().useKind()) {
    case ObjectOrOtherUse: {
        compileObjectOrOtherLogicalNot(node->child1());
        return;
    }

    case Int32Use: {
        SpeculateInt32Operand value(this, node->child1());
        GPRTemporary result(this, Reuse, value);
        m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr());
        m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case NumberUse: {
        SpeculateDoubleOperand value(this, node->child1());
        FPRTemporary scratch(this);
        GPRTemporary result(this);
        m_jit.move(TrustedImm32(ValueFalse), result.gpr());
        MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
        m_jit.xor32(TrustedImm32(true), result.gpr());
        nonZero.link(&m_jit);
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case BooleanUse: {
        if (!needsTypeCheck(node->child1(), SpecBoolean)) {
            SpeculateBooleanOperand value(this, node->child1());
            GPRTemporary result(this, Reuse, value);

            m_jit.move(value.gpr(), result.gpr());
            m_jit.xor64(TrustedImm32(true), result.gpr());

            jsValueResult(result.gpr(), node, DataFormatJSBoolean);
            return;
        }

        JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
        GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).

        m_jit.move(value.gpr(), result.gpr());
        m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
        typeCheck(
            JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
                JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
        m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());

        // If we add a DataFormatBool, we should use it here.
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case UntypedUse: {
        JSValueOperand arg1(this, node->child1());
        GPRTemporary result(this);

        GPRReg arg1GPR = arg1.gpr();
        GPRReg resultGPR = result.gpr();

        arg1.use();

        m_jit.move(arg1GPR, resultGPR);
        m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
        JITCompiler::Jump slowCase = m_jit.branchTest64(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));

        addSlowPathGenerator(
            slowPathCall(slowCase, this, dfgConvertJSValueToBoolean, resultGPR, arg1GPR));

        m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
        jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}

1694 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
1696 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1697 GPRTemporary scratch(this);
1698 GPRReg valueGPR = value.gpr();
1699 GPRReg scratchGPR = scratch.gpr();
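// Under the 64-bit value encoding a JSValue is a cell exactly when none of the tag-mask
// bits are set, so a non-zero test against tagMaskRegister means "not a cell".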
1701 MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
1702 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1703 speculationWatchpointForMasqueradesAsUndefined();
1706 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
1707 MacroAssembler::Equal,
1708 MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
1709 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1711 m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), scratchGPR);
1714 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
1715 MacroAssembler::Equal,
1717 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1719 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::Zero, MacroAssembler::Address(scratchGPR, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
1721 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
1723 MacroAssembler::Equal,
1724 MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
1725 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
1727 isNotMasqueradesAsUndefined.link(&m_jit);
1729 jump(taken, ForceJump);
1731 notCell.link(&m_jit);
1733 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
1734 m_jit.move(valueGPR, scratchGPR);
1735 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
1737 JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
1738 MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
1742 noResult(m_currentNode);
1745 void SpeculativeJIT::emitBranch(Node* node)
1747 BasicBlock* taken = node->takenBlock();
1748 BasicBlock* notTaken = node->notTakenBlock();
1750 switch (node->child1().useKind()) {
1751 case ObjectOrOtherUse: {
1752 emitObjectOrOtherBranch(node->child1(), taken, notTaken);
1758 if (node->child1().useKind() == Int32Use) {
1759 bool invert = false;
1761 if (taken == nextBlock()) {
1763 BasicBlock* tmp = taken;
1768 SpeculateInt32Operand value(this, node->child1());
1769 branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
1771 SpeculateDoubleOperand value(this, node->child1());
1772 FPRTemporary scratch(this);
1773 branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
1784 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
1785 GPRReg valueGPR = value.gpr();
1787 if (node->child1().useKind() == BooleanUse) {
1788 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
1789 MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
1791 if (taken == nextBlock()) {
1792 condition = MacroAssembler::Zero;
1793 BasicBlock* tmp = taken;
1798 branchTest32(condition, valueGPR, TrustedImm32(true), taken);
1801 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
1802 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
1804 typeCheck(JSValueRegs(valueGPR), node->child1(), SpecBoolean, m_jit.jump());
1808 GPRTemporary result(this);
1809 GPRReg resultGPR = result.gpr();
1811 if (node->child1()->prediction() & SpecInt32) {
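// Encoded int32s compare AboveOrEqual to TagTypeNumber. Zero has already been dispatched
// to notTaken, so any remaining int32 is truthy.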
1812 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken);
1813 branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
1816 if (node->child1()->prediction() & SpecBoolean) {
1817 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
1818 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
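// Anything the quick integer/boolean checks above did not classify is converted by the
// runtime; silently spill and refill so the call does not disturb allocated registers.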
1823 silentSpillAllRegisters(resultGPR);
1824 callOperation(dfgConvertJSValueToBoolean, resultGPR, valueGPR);
1825 silentFillAllRegisters(resultGPR);
1827 branchTest32(MacroAssembler::NonZero, resultGPR, taken);
1831 noResult(node, UseChildrenCalledExplicitly);
1836 RELEASE_ASSERT_NOT_REACHED();
1840 void SpeculativeJIT::compile(Node* node)
1842 NodeType op = node->op();
1844 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1845 m_jit.clearRegisterAllocationOffsets();
1850 initConstantInfo(node);
1853 case PhantomArguments:
1854 initConstantInfo(node);
1857 case WeakJSConstant:
1858 m_jit.addWeakReference(node->weakConstant());
1859 initConstantInfo(node);
1863 // CSE should always eliminate this.
1864 RELEASE_ASSERT_NOT_REACHED();
1869 SpeculatedType prediction = node->variableAccessData()->prediction();
1870 AbstractValue& value = m_state.variables().operand(node->local());
1872 // If we have no prediction for this local, then don't attempt to compile.
1873 if (prediction == SpecNone) {
1874 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
1878 // If the CFA is tracking this variable and it found that the variable
1879 // cannot have been assigned, then don't attempt to proceed.
1880 if (value.isClear()) {
1881 // FIXME: We should trap instead.
1882 // https://bugs.webkit.org/show_bug.cgi?id=110383
1883 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
1887 switch (node->variableAccessData()->flushFormat()) {
1888 case FlushedDouble: {
1889 FPRTemporary result(this);
1890 m_jit.loadDouble(JITCompiler::addressFor(node->local()), result.fpr());
1891 VirtualRegister virtualRegister = node->virtualRegister();
1892 m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
1893 generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr());
1897 case FlushedInt32: {
1898 GPRTemporary result(this);
1899 m_jit.load32(JITCompiler::payloadFor(node->local()), result.gpr());
1901 // Like int32Result, but don't useChildren - our children are phi nodes,
1902 // and don't represent values within this dataflow with virtual registers.
1903 VirtualRegister virtualRegister = node->virtualRegister();
1904 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
1905 generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr());
1910 GPRTemporary result(this);
1911 m_jit.load64(JITCompiler::addressFor(node->local()), result.gpr());
1913 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1914 // and don't represent values within this dataflow with virtual registers.
1915 VirtualRegister virtualRegister = node->virtualRegister();
1916 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
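// Refine the register format using what the CFA proved about this local, so downstream
// uses can avoid redundant cell or boolean checks.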
1919 if (isCellSpeculation(value.m_type))
1920 format = DataFormatJSCell;
1921 else if (isBooleanSpeculation(value.m_type))
1922 format = DataFormatJSBoolean;
1924 format = DataFormatJS;
1926 generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), result.gpr(), format);
1932 case GetLocalUnlinked: {
1933 GPRTemporary result(this);
1935 m_jit.load64(JITCompiler::addressFor(node->unlinkedLocal()), result.gpr());
1937 jsValueResult(result.gpr(), node);
1941 case MovHintAndCheck: {
1942 compileMovHintAndCheck(node);
1947 compileInlineStart(node);
1953 RELEASE_ASSERT_NOT_REACHED();
1958 // SetLocal doubles as a hint as to where a node will be stored and
1959 // as a speculation point. So before we speculate make sure that we
1960 // know where the child of this node needs to go in the virtual register file.
1962 compileMovHint(node);
1964 switch (node->variableAccessData()->flushFormat()) {
1965 case FlushedDouble: {
1966 SpeculateDoubleOperand value(this, node->child1());
1967 m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->local()));
1969 // Indicate that it's no longer necessary to retrieve the value of
1970 // this bytecode variable from registers or other locations in the stack,
1971 // but that it is stored as a double.
1972 recordSetLocal(node->local(), ValueSource(DoubleInJSStack));
1976 case FlushedInt32: {
1977 SpeculateInt32Operand value(this, node->child1());
1978 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->local()));
1980 recordSetLocal(node->local(), ValueSource(Int32InJSStack));
1985 SpeculateCellOperand cell(this, node->child1());
1986 GPRReg cellGPR = cell.gpr();
1987 m_jit.store64(cellGPR, JITCompiler::addressFor(node->local()));
1989 recordSetLocal(node->local(), ValueSource(CellInJSStack));
1993 case FlushedBoolean: {
1994 SpeculateBooleanOperand boolean(this, node->child1());
1995 m_jit.store64(boolean.gpr(), JITCompiler::addressFor(node->local()));
1997 recordSetLocal(node->local(), ValueSource(BooleanInJSStack));
2001 case FlushedJSValue: {
2002 JSValueOperand value(this, node->child1());
2003 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->local()));
2006 recordSetLocal(node->local(), ValueSource(ValueInJSStack));
2008 // If we're storing an arguments object that has been optimized away,
2009 // our variable event stream for OSR exit now reflects the optimized
2010 // value (JSValue()). On the slow path, we want an arguments object
2011 // instead. We add an additional move hint to show OSR exit that it
2012 // needs to reconstruct the arguments object.
2013 if (node->child1()->op() == PhantomArguments)
2014 compileMovHint(node);
2019 RELEASE_ASSERT_NOT_REACHED();
2027 // This is a no-op; it just marks the fact that the argument is being used.
2028 // But it may be profitable to use this as a hook to run speculation checks
2029 // on arguments, thereby allowing us to trivially eliminate such checks if
2030 // the argument is not used.
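// For the bit ops below, fold a compile-time int32 constant operand into the immediate
// form of the operation; otherwise use the register-register form.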
2036 if (isInt32Constant(node->child1().node())) {
2037 SpeculateInt32Operand op2(this, node->child2());
2038 GPRTemporary result(this, Reuse, op2);
2040 bitOp(op, valueOfInt32Constant(node->child1().node()), op2.gpr(), result.gpr());
2042 int32Result(result.gpr(), node);
2043 } else if (isInt32Constant(node->child2().node())) {
2044 SpeculateInt32Operand op1(this, node->child1());
2045 GPRTemporary result(this, Reuse, op1);
2047 bitOp(op, valueOfInt32Constant(node->child2().node()), op1.gpr(), result.gpr());
2049 int32Result(result.gpr(), node);
2051 SpeculateInt32Operand op1(this, node->child1());
2052 SpeculateInt32Operand op2(this, node->child2());
2053 GPRTemporary result(this, Reuse, op1, op2);
2055 GPRReg reg1 = op1.gpr();
2056 GPRReg reg2 = op2.gpr();
2057 bitOp(op, reg1, reg2, result.gpr());
2059 int32Result(result.gpr(), node);
2066 if (isInt32Constant(node->child2().node())) {
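// A constant shift amount is masked to five bits below, matching JavaScript's modulo-32
// shift semantics.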
2067 SpeculateInt32Operand op1(this, node->child1());
2068 GPRTemporary result(this, Reuse, op1);
2070 shiftOp(op, op1.gpr(), valueOfInt32Constant(node->child2().node()) & 0x1f, result.gpr());
2072 int32Result(result.gpr(), node);
2074 // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
2075 SpeculateInt32Operand op1(this, node->child1());
2076 SpeculateInt32Operand op2(this, node->child2());
2077 GPRTemporary result(this, Reuse, op1);
2079 GPRReg reg1 = op1.gpr();
2080 GPRReg reg2 = op2.gpr();
2081 shiftOp(op, reg1, reg2, result.gpr());
2083 int32Result(result.gpr(), node);
2087 case UInt32ToNumber: {
2088 compileUInt32ToNumber(node);
2092 case DoubleAsInt32: {
2093 compileDoubleAsInt32(node);
2097 case ValueToInt32: {
2098 compileValueToInt32(node);
2102 case Int32ToDouble: {
2103 compileInt32ToDouble(node);
2113 compileMakeRope(node);
2117 compileArithSub(node);
2121 compileArithNegate(node);
2125 compileArithMul(node);
2129 compileArithIMul(node);
2133 compileArithDiv(node);
2138 compileArithMod(node);
2143 switch (node->child1().useKind()) {
2145 SpeculateStrictInt32Operand op1(this, node->child1());
2146 GPRTemporary result(this);
2147 GPRTemporary scratch(this);
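// Branchless abs: scratch = x >> 31 (all ones when negative), result = (x + scratch) ^ scratch.
// abs(INT_MIN) overflows int32, so speculate on that single value below.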
2149 m_jit.move(op1.gpr(), result.gpr());
2150 m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
2151 m_jit.add32(scratch.gpr(), result.gpr());
2152 m_jit.xor32(scratch.gpr(), result.gpr());
2153 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2154 int32Result(result.gpr(), node);
2159 SpeculateDoubleOperand op1(this, node->child1());
2160 FPRTemporary result(this);
2162 m_jit.absDouble(op1.fpr(), result.fpr());
2163 doubleResult(result.fpr(), node);
2168 RELEASE_ASSERT_NOT_REACHED();
2176 switch (node->binaryUseKind()) {
2178 SpeculateStrictInt32Operand op1(this, node->child1());
2179 SpeculateStrictInt32Operand op2(this, node->child2());
2180 GPRTemporary result(this, Reuse, op1);
2182 MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
2183 m_jit.move(op2.gpr(), result.gpr());
2184 if (op1.gpr() != result.gpr()) {
2185 MacroAssembler::Jump done = m_jit.jump();
2186 op1Less.link(&m_jit);
2187 m_jit.move(op1.gpr(), result.gpr());
2190 op1Less.link(&m_jit);
2192 int32Result(result.gpr(), node);
2197 SpeculateDoubleOperand op1(this, node->child1());
2198 SpeculateDoubleOperand op2(this, node->child2());
2199 FPRTemporary result(this, op1);
2201 FPRReg op1FPR = op1.fpr();
2202 FPRReg op2FPR = op2.fpr();
2203 FPRReg resultFPR = result.fpr();
2205 MacroAssembler::JumpList done;
2207 MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR);
2209 // op2 is either the result we want (the lesser for ArithMin, the greater for ArithMax) or one of them is NaN
2210 MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR);
2212 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2213 // op1 + op2 and putting it into result.
2214 m_jit.addDouble(op1FPR, op2FPR, resultFPR);
2215 done.append(m_jit.jump());
2217 op2Less.link(&m_jit);
2218 m_jit.moveDouble(op2FPR, resultFPR);
2220 if (op1FPR != resultFPR) {
2221 done.append(m_jit.jump());
2223 op1Less.link(&m_jit);
2224 m_jit.moveDouble(op1FPR, resultFPR);
2226 op1Less.link(&m_jit);
2230 doubleResult(resultFPR, node);
2235 RELEASE_ASSERT_NOT_REACHED();
2242 SpeculateDoubleOperand op1(this, node->child1());
2243 FPRTemporary result(this, op1);
2245 m_jit.sqrtDouble(op1.fpr(), result.fpr());
2247 doubleResult(result.fpr(), node);
2252 compileLogicalNot(node);
2256 if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
2261 if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
2265 case CompareGreater:
2266 if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
2270 case CompareGreaterEq:
2271 if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
2275 case CompareEqConstant:
2276 ASSERT(isNullConstant(node->child2().node()));
2277 if (nonSpeculativeCompareNull(node, node->child1()))
2282 if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
2286 case CompareStrictEqConstant:
2287 if (compileStrictEqForConstant(node, node->child1(), valueOfJSConstant(node->child2().node())))
2291 case CompareStrictEq:
2292 if (compileStrictEq(node))
2296 case StringCharCodeAt: {
2297 compileGetCharCodeAt(node);
2301 case StringCharAt: {
2302 // Relies on the StringCharAt node having the same basic layout as GetByVal.
2303 compileGetByValOnString(node);
2307 case StringFromCharCode: {
2308 compileFromCharCode(node);
2318 case ArrayifyToStructure: {
2324 switch (node->arrayMode().type()) {
2325 case Array::SelectUsingPredictions:
2326 case Array::ForceExit:
2327 RELEASE_ASSERT_NOT_REACHED();
2328 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2330 case Array::Generic: {
2331 JSValueOperand base(this, node->child1());
2332 JSValueOperand property(this, node->child2());
2333 GPRReg baseGPR = base.gpr();
2334 GPRReg propertyGPR = property.gpr();
2337 GPRResult result(this);
2338 callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR);
2340 jsValueResult(result.gpr(), node);
2344 case Array::Contiguous: {
2345 if (node->arrayMode().isInBounds()) {
2346 SpeculateStrictInt32Operand property(this, node->child2());
2347 StorageOperand storage(this, node->child3());
2349 GPRReg propertyReg = property.gpr();
2350 GPRReg storageReg = storage.gpr();
2355 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
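// A hole in Int32/Contiguous storage is the empty JSValue, which encodes as zero, so a
// zero load below triggers a LoadFromHole speculation failure.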
2357 GPRTemporary result(this);
2358 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
2359 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2360 jsValueResult(result.gpr(), node, node->arrayMode().type() == Array::Int32 ? DataFormatJSInt32 : DataFormatJS);
2364 SpeculateCellOperand base(this, node->child1());
2365 SpeculateStrictInt32Operand property(this, node->child2());
2366 StorageOperand storage(this, node->child3());
2368 GPRReg baseReg = base.gpr();
2369 GPRReg propertyReg = property.gpr();
2370 GPRReg storageReg = storage.gpr();
2375 GPRTemporary result(this);
2376 GPRReg resultReg = result.gpr();
2378 MacroAssembler::JumpList slowCases;
2380 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2382 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2383 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2385 addSlowPathGenerator(
2387 slowCases, this, operationGetByValArrayInt,
2388 result.gpr(), baseReg, propertyReg));
2390 jsValueResult(resultReg, node);
2394 case Array::Double: {
2395 if (node->arrayMode().isInBounds()) {
2396 if (node->arrayMode().isSaneChain()) {
2397 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
2398 ASSERT(globalObject->arrayPrototypeChainIsSane());
2400 speculationWatchpoint(),
2401 globalObject->arrayPrototype()->structure()->transitionWatchpointSet());
2403 speculationWatchpoint(),
2404 globalObject->objectPrototype()->structure()->transitionWatchpointSet());
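// With a sane prototype chain a hole read is allowed to simply produce NaN, so the
// LoadFromHole check below is skipped; the lazily-added watchpoints above invalidate this
// code if either prototype's structure transitions.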
2407 SpeculateStrictInt32Operand property(this, node->child2());
2408 StorageOperand storage(this, node->child3());
2410 GPRReg propertyReg = property.gpr();
2411 GPRReg storageReg = storage.gpr();
2416 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2418 FPRTemporary result(this);
2419 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr());
2420 if (!node->arrayMode().isSaneChain())
2421 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr()));
2422 doubleResult(result.fpr(), node);
2426 SpeculateCellOperand base(this, node->child1());
2427 SpeculateStrictInt32Operand property(this, node->child2());
2428 StorageOperand storage(this, node->child3());
2430 GPRReg baseReg = base.gpr();
2431 GPRReg propertyReg = property.gpr();
2432 GPRReg storageReg = storage.gpr();
2437 GPRTemporary result(this);
2438 FPRTemporary temp(this);
2439 GPRReg resultReg = result.gpr();
2440 FPRReg tempReg = temp.fpr();
2442 MacroAssembler::JumpList slowCases;
2444 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2446 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
2447 slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg));
2448 boxDouble(tempReg, resultReg);
2450 addSlowPathGenerator(
2452 slowCases, this, operationGetByValArrayInt,
2453 result.gpr(), baseReg, propertyReg));
2455 jsValueResult(resultReg, node);
2459 case Array::ArrayStorage:
2460 case Array::SlowPutArrayStorage: {
2461 if (node->arrayMode().isInBounds()) {
2462 SpeculateStrictInt32Operand property(this, node->child2());
2463 StorageOperand storage(this, node->child3());
2465 GPRReg propertyReg = property.gpr();
2466 GPRReg storageReg = storage.gpr();
2471 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2473 GPRTemporary result(this);
2474 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
2475 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2477 jsValueResult(result.gpr(), node);
2481 SpeculateCellOperand base(this, node->child1());
2482 SpeculateStrictInt32Operand property(this, node->child2());
2483 StorageOperand storage(this, node->child3());
2485 GPRReg baseReg = base.gpr();
2486 GPRReg propertyReg = property.gpr();
2487 GPRReg storageReg = storage.gpr();
2492 GPRTemporary result(this);
2493 GPRReg resultReg = result.gpr();
2495 MacroAssembler::JumpList slowCases;
2497 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2499 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
2500 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2502 addSlowPathGenerator(
2504 slowCases, this, operationGetByValArrayInt,
2505 result.gpr(), baseReg, propertyReg));
2507 jsValueResult(resultReg, node);
2511 compileGetByValOnString(node);
2513 case Array::Arguments:
2514 compileGetByValOnArguments(node);
2517 TypedArrayType type = node->arrayMode().typedArrayType();
2519 compileGetByValOnIntTypedArray(node, type);
2521 compileGetByValOnFloatTypedArray(node, type);
2527 case PutByValAlias: {
2528 Edge child1 = m_jit.graph().varArgChild(node, 0);
2529 Edge child2 = m_jit.graph().varArgChild(node, 1);
2530 Edge child3 = m_jit.graph().varArgChild(node, 2);
2531 Edge child4 = m_jit.graph().varArgChild(node, 3);
2533 ArrayMode arrayMode = node->arrayMode().modeForPut();
2534 bool alreadyHandled = false;
2536 switch (arrayMode.type()) {
2537 case Array::SelectUsingPredictions:
2538 case Array::ForceExit:
2539 RELEASE_ASSERT_NOT_REACHED();
2540 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2541 alreadyHandled = true;
2543 case Array::Generic: {
2544 RELEASE_ASSERT(node->op() == PutByVal);
2546 JSValueOperand arg1(this, child1);
2547 JSValueOperand arg2(this, child2);
2548 JSValueOperand arg3(this, child3);
2549 GPRReg arg1GPR = arg1.gpr();
2550 GPRReg arg2GPR = arg2.gpr();
2551 GPRReg arg3GPR = arg3.gpr();
2554 callOperation(m_jit.strictModeFor(node->codeOrigin) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
2557 alreadyHandled = true;
2567 // FIXME: the base may not be necessary for some array access modes. But we have to
2568 // keep it alive to this point, so it's likely to be in a register anyway. Likely
2569 // no harm in locking it here.
2570 SpeculateCellOperand base(this, child1);
2571 SpeculateStrictInt32Operand property(this, child2);
2573 GPRReg baseReg = base.gpr();
2574 GPRReg propertyReg = property.gpr();
2576 switch (arrayMode.type()) {
2578 case Array::Contiguous: {
2579 JSValueOperand value(this, child3, ManualOperandSpeculation);
2581 GPRReg valueReg = value.gpr();
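// Values stored into an Int32 array must themselves be int32s: anything encoded below
// TagTypeNumber is not an int32, so OSR exit in that case.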
2586 if (arrayMode.type() == Array::Int32) {
2588 JSValueRegs(valueReg), child3, SpecInt32,
2590 MacroAssembler::Below, valueReg, GPRInfo::tagTypeNumberRegister));
2593 if (arrayMode.type() == Array::Contiguous && Heap::isWriteBarrierEnabled()) {
2594 GPRTemporary scratch(this);
2595 writeBarrier(baseReg, value.gpr(), child3, WriteBarrierForPropertyAccess, scratch.gpr());
2598 StorageOperand storage(this, child4);
2599 GPRReg storageReg = storage.gpr();
2601 if (node->op() == PutByValAlias) {
2602 // Store the value to the array.
2603 GPRReg propertyReg = property.gpr();
2604 GPRReg valueReg = value.gpr();
2605 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2611 GPRTemporary temporary;
2612 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2614 MacroAssembler::Jump slowCase;
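// In-bounds mode exits on any index at or past publicLength. Otherwise a store past
// publicLength but still within vectorLength grows publicLength in place, and anything
// past vectorLength either exits or takes the slow path, depending on whether
// out-of-bounds stores are allowed.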
2616 if (arrayMode.isInBounds()) {
2618 StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
2619 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2621 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2623 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2625 if (!arrayMode.isOutOfBounds())
2626 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
2628 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2629 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2631 inBounds.link(&m_jit);
2634 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2641 if (arrayMode.isOutOfBounds()) {
2642 addSlowPathGenerator(
2645 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
2646 NoResult, baseReg, propertyReg, valueReg));
2649 noResult(node, UseChildrenCalledExplicitly);
2653 case Array::Double: {
2654 compileDoublePutByVal(node, base, property);
2658 case Array::ArrayStorage:
2659 case Array::SlowPutArrayStorage: {
2660 JSValueOperand value(this, child3);
2662 GPRReg valueReg = value.gpr();
2667 if (Heap::isWriteBarrierEnabled()) {
2668 GPRTemporary scratch(this);
2669 writeBarrier(baseReg, value.gpr(), child3, WriteBarrierForPropertyAccess, scratch.gpr());
2672 StorageOperand storage(this, child4);
2673 GPRReg storageReg = storage.gpr();
2675 if (node->op() == PutByValAlias) {
2676 // Store the value to the array.
2677 GPRReg propertyReg = property.gpr();
2678 GPRReg valueReg = value.gpr();
2679 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
2685 GPRTemporary temporary;
2686 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2688 MacroAssembler::JumpList slowCases;
2690 MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
2691 if (!arrayMode.isOutOfBounds())
2692 speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
2694 slowCases.append(beyondArrayBounds);
2696 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2697 if (arrayMode.isInBounds()) {
2699 StoreToHole, JSValueRegs(), 0,
2700 m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
2702 MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
2703 if (arrayMode.isSlowPut()) {
2704 // This is sort of strange. If we wanted to optimize this code path, we would invert
2705 // the above branch. But it's simply not worth it since this only happens if we're
2706 // already having a bad time.
2707 slowCases.append(m_jit.jump());
2709 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
2711 // If we're writing to a hole we might be growing the array, so the length may need updating.
2712 MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
2713 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2714 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
2716 lengthDoesNotNeedUpdate.link(&m_jit);
2718 notHoleValue.link(&m_jit);
2721 // Store the value to the array.
2722 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
2729 if (!slowCases.empty()) {
2730 addSlowPathGenerator(
2733 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
2734 NoResult, baseReg, propertyReg, valueReg));
2737 noResult(node, UseChildrenCalledExplicitly);
2741 case Array::Arguments: {
2742 JSValueOperand value(this, child3);
2743 GPRTemporary scratch(this);
2744 GPRTemporary scratch2(this);
2746 GPRReg valueReg = value.gpr();
2747 GPRReg scratchReg = scratch.gpr();
2748 GPRReg scratch2Reg = scratch2.gpr();
2753 // Two really lame checks.
2755 Uncountable, JSValueSource(), 0,
2757 MacroAssembler::AboveOrEqual, propertyReg,
2758 MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_numArguments))));
2760 Uncountable, JSValueSource(), 0,
2761 m_jit.branchTestPtr(
2762 MacroAssembler::NonZero,
2763 MacroAssembler::Address(
2764 baseReg, OBJECT_OFFSETOF(Arguments, m_slowArguments))));
2766 m_jit.move(propertyReg, scratch2Reg);
2767 m_jit.signExtend32ToPtr(scratch2Reg, scratch2Reg);
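// Index into the Arguments object's backing registers; the extra sizeof(Register) in the
// store offset below skips the 'this' slot so index 0 addresses the first argument.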
2769 MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_registers)),
2774 MacroAssembler::BaseIndex(
2775 scratchReg, scratch2Reg, MacroAssembler::TimesEight,
2776 CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register)));
2783 TypedArrayType type = arrayMode.typedArrayType();
2785 compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type);
2787 compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type);
2794 if (compileRegExpExec(node))
2796 if (!node->adjustedRefCount()) {
2797 SpeculateCellOperand base(this, node->child1());
2798 SpeculateCellOperand argument(this, node->child2());
2799 GPRReg baseGPR = base.gpr();
2800 GPRReg argumentGPR = argument.gpr();
2803 GPRResult result(this);
2804 callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
2806 // Must use jsValueResult because otherwise we screw up register
2807 // allocation, which thinks that this node has a result.
2808 jsValueResult(result.gpr(), node);
2812 SpeculateCellOperand base(this, node->child1());
2813 SpeculateCellOperand argument(this, node->child2());
2814 GPRReg baseGPR = base.gpr();
2815 GPRReg argumentGPR = argument.gpr();
2818 GPRResult result(this);
2819 callOperation(operationRegExpExec, result.gpr(), baseGPR, argumentGPR);
2821 jsValueResult(result.gpr(), node);
2826 SpeculateCellOperand base(this, node->child1());
2827 SpeculateCellOperand argument(this, node->child2());
2828 GPRReg baseGPR = base.gpr();
2829 GPRReg argumentGPR = argument.gpr();
2832 GPRResult result(this);
2833 callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
2835 // If we add a DataFormatBool, we should use it here.
2836 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
2837 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
2842 ASSERT(node->arrayMode().isJSArray());
2844 SpeculateCellOperand base(this, node->child1());
2845 GPRTemporary storageLength(this);
2847 GPRReg baseGPR = base.gpr();
2848 GPRReg storageLengthGPR = storageLength.gpr();
2850 StorageOperand storage(this, node->child3());
2851 GPRReg storageGPR = storage.gpr();
2853 switch (node->arrayMode().type()) {
2855 case Array::Contiguous: {
2856 JSValueOperand value(this, node->child2(), ManualOperandSpeculation);
2857 GPRReg valueGPR = value.gpr();
2859 if (node->arrayMode().type() == Array::Int32) {
2861 JSValueRegs(valueGPR), node->child2(), SpecInt32,
2863 MacroAssembler::Below, valueGPR, GPRInfo::tagTypeNumberRegister));
2866 if (node->arrayMode().type() != Array::Int32 && Heap::isWriteBarrierEnabled()) {
2867 GPRTemporary scratch(this);
2868 writeBarrier(baseGPR, valueGPR, node->child2(), WriteBarrierForPropertyAccess, scratch.gpr(), storageLengthGPR);
2871 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
2872 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
2873 m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
2874 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2875 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
2876 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
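// Box the incremented length as an int32 JSValue; push's result is the array's new length.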
2878 addSlowPathGenerator(
2880 slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
2881 valueGPR, baseGPR));
2883 jsValueResult(storageLengthGPR, node);
2887 case Array::Double: {
2888 SpeculateDoubleOperand value(this, node->child2());
2889 FPRReg valueFPR = value.fpr();
2892 JSValueRegs(), node->child2(), SpecRealNumber,
2893 m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR));
2895 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
2896 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
2897 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
2898 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2899 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
2900 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
2902 addSlowPathGenerator(
2904 slowPath, this, operationArrayPushDouble, NoResult, storageLengthGPR,
2905 valueFPR, baseGPR));
2907 jsValueResult(storageLengthGPR, node);
2911 case Array::ArrayStorage: {
2912 JSValueOperand value(this, node->child2());
2913 GPRReg valueGPR = value.gpr();
2915 if (Heap::isWriteBarrierEnabled()) {
2916 GPRTemporary scratch(this);
2917 writeBarrier(baseGPR, valueGPR, node->child2(), WriteBarrierForPropertyAccess, scratch.gpr(), storageLengthGPR);
2920 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
2922 // Refuse to handle bizarre lengths.
2923 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
2925 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
2927 m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
2929 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2930 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
2931 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
2932 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
2934 addSlowPathGenerator(
2936 slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
2937 valueGPR, baseGPR));
2939 jsValueResult(storageLengthGPR, node);
2951 ASSERT(node->arrayMode().isJSArray());
2953 SpeculateCellOperand base(this, node->child1());
2954 StorageOperand storage(this, node->child2());
2955 GPRTemporary value(this);
2956 GPRTemporary storageLength(this);
2957 FPRTemporary temp(this); // This is kind of lame, since we don't always need it. I'm relying on the fact that we don't have FPR pressure, especially in code that uses pop().
2959 GPRReg baseGPR = base.gpr();
2960 GPRReg storageGPR = storage.gpr();
2961 GPRReg valueGPR = value.gpr();
2962 GPRReg storageLengthGPR = storageLength.gpr();
2963 FPRReg tempFPR = temp.fpr();
2965 switch (node->arrayMode().type()) {
2968 case Array::Contiguous: {
2970 MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
2971 MacroAssembler::Jump undefinedCase =
2972 m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
2973 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
2975 storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
2976 MacroAssembler::Jump slowCase;
2977 if (node->arrayMode().type() == Array::Double) {
2979 MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
2981 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
2982 // length and the new length.
2984 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
2985 slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR);
2986 boxDouble(tempFPR, valueGPR);
2989 MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
2991 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
2992 // length and the new length.
2994 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
2995 slowCase = m_jit.branchTest64(MacroAssembler::Zero, valueGPR);
2998 addSlowPathGenerator(
3000 undefinedCase, this,
3001 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
3002 addSlowPathGenerator(
3004 slowCase, this, operationArrayPopAndRecoverLength, valueGPR, baseGPR));
3006 // We can't know for sure that the result is an int because of the slow paths. :-/
3007 jsValueResult(valueGPR, node);
3011 case Array::ArrayStorage: {
3012 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
3014 JITCompiler::Jump undefinedCase =
3015 m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
3017 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
3019 JITCompiler::JumpList slowCases;
3020 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
3022 m_jit.load64(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
3023 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, valueGPR));
3025 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
3027 m_jit.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3028 m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
3030 addSlowPathGenerator(
3032 undefinedCase, this,
3033 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
3035 addSlowPathGenerator(
3037 slowCases, this, operationArrayPop, valueGPR, baseGPR));
3039 jsValueResult(valueGPR, node);
3051 jump(node->takenBlock());
3065 ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1);
3066 ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
3067 ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
3069 #if DFG_ENABLE(SUCCESS_STATS)
3070 static SamplingCounter counter("SpeculativeJIT");
3071 m_jit.emitCount(counter);
3074 // Return the result in returnValueGPR.
3075 JSValueOperand op1(this, node->child1());
3076 m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);
3078 // Grab the return address.
3079 m_jit.emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, GPRInfo::regT1);
3080 // Restore our caller's "r".
3081 m_jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, GPRInfo::callFrameRegister);
3083 m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
3091 case ThrowReferenceError: {
3092 // We expect that throw statements are rare and are intended to exit the code block
3093 // anyway, so we just OSR back to the old JIT for now.
3094 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
3099 RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3100 JSValueOperand op1(this, node->child1());
3101 GPRTemporary result(this, Reuse, op1);
3103 GPRReg op1GPR = op1.gpr();
3104 GPRReg resultGPR = result.gpr();
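// If the abstract interpreter proves the operand is already a number or boolean,
// ToPrimitive is the identity. Otherwise non-cells and string cells pass through
// unchanged and any other cell goes to the operationToPrimitive slow path.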
3108 if (!(m_state.forNode(node->child1()).m_type & ~(SpecNumber | SpecBoolean)))
3109 m_jit.move(op1GPR, resultGPR);
3111 MacroAssembler::Jump alreadyPrimitive = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
3112 MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()));
3114 alreadyPrimitive.link(&m_jit);
3115 m_jit.move(op1GPR, resultGPR);
3117 addSlowPathGenerator(
3118 slowPathCall(notPrimitive, this, operationToPrimitive, resultGPR, op1GPR));
3121 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
3126 if (node->child1().useKind() == UntypedUse) {
3127 JSValueOperand op1(this, node->child1());
3128 GPRReg op1GPR = op1.gpr();
3130 GPRResult result(this);
3131 GPRReg resultGPR = result.gpr();
3135 JITCompiler::Jump done;
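// When strings are predicted, identity-map string cells on the fast path and leave every
// other value to the operationToString call below.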
3136 if (node->child1()->prediction() & SpecString) {
3137 JITCompiler::Jump slowPath1 = m_jit.branchTest64(
3138 JITCompiler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
3139 JITCompiler::Jump slowPath2 = m_jit.branchPtr(
3140 JITCompiler::NotEqual,
3141 JITCompiler::Address(op1GPR, JSCell::structureOffset()),
3142 TrustedImmPtr(m_jit.vm()->stringStructure.get()));
3143 m_jit.move(op1GPR, resultGPR);
3144 done = m_jit.jump();
3145 slowPath1.link(&m_jit);
3146 slowPath2.link(&m_jit);
3148 callOperation(operationToString, resultGPR, op1GPR);
3151 cellResult(resultGPR, node);
3155 compileToStringOnCell(node);
3159 case NewStringObject: {
3160 compileNewStringObject(node);