/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "Arguments.h"
#include "ArrayPrototype.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "JSCJSValueInlines.h"
#include "ObjectPrototype.h"

namespace JSC { namespace DFG {

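// Boxes the Int52 value in sourceGPR as a JSValue in targetGPR. A DataFormatInt52
// value is first shifted down to its strict (unshifted) form; the temporary GPR and
// FPR are scratch registers for the underlying boxing helper, which may have to box
// the value as a double when it does not fit in an int32.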
void SpeculativeJIT::boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat format)
{
    GPRReg tempGPR;
    if (sourceGPR == targetGPR)
        tempGPR = allocate();
    else
        tempGPR = targetGPR;
    
    FPRReg fpr = fprAllocate();
    
    if (format == DataFormatInt52)
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);
    else
        ASSERT(format == DataFormatStrictInt52);
    
    m_jit.boxInt52(sourceGPR, targetGPR, tempGPR, fpr);
    
    if (tempGPR != targetGPR)
        unlock(tempGPR);
    
    unlock(fpr);
}

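// Fills the value for the given edge into a GPR as a boxed JSValue, loading or
// converting from whatever representation it currently has (constant, spilled,
// int32, double, Int52), and returns the register holding it.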
GPRReg SpeculativeJIT::fillJSValue(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
    
    switch (info.registerFormat()) {
    case DataFormatNone: {
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            if (isInt32Constant(edge.node())) {
                info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
                JSValue jsValue = jsNumber(valueOfInt32Constant(edge.node()));
                m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
            } else if (isNumberConstant(edge.node())) {
                info.fillJSValue(*m_stream, gpr, DataFormatJSDouble);
                JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(edge.node()));
                m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
            } else {
                ASSERT(isJSConstant(edge.node()));
                JSValue jsValue = valueOfJSConstant(edge.node());
                m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJS);
            }

            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
        } else {
            DataFormat spillFormat = info.spillFormat();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            switch (spillFormat) {
            case DataFormatInt32: {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
                spillFormat = DataFormatJSInt32;
                break;
            }
                
            case DataFormatInt52:
            case DataFormatStrictInt52: {
                m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
                boxInt52(gpr, gpr, spillFormat);
                return gpr;
            }
                
            default:
                m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
                if (spillFormat == DataFormatDouble) {
                    // Need to box the double, since we want a JSValue.
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
                    spillFormat = DataFormatJSDouble;
                } else
                    RELEASE_ASSERT(spillFormat & DataFormatJS);
                break;
            }
            info.fillJSValue(*m_stream, gpr, spillFormat);
        }
        return gpr;
    }

    case DataFormatInt32: {
        GPRReg gpr = info.gpr();
        // If the register has already been locked we need to take a copy.
        // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
        if (m_gprs.isLocked(gpr)) {
            GPRReg result = allocate();
            m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr, result);
            return result;
        }
        m_gprs.lock(gpr);
        m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        return gpr;
    }

    case DataFormatDouble: {
        FPRReg fpr = info.fpr();
        GPRReg gpr = boxDouble(fpr);
        
        info.fillJSValue(*m_stream, gpr, DataFormatJSDouble);
        m_fprs.release(fpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderJS);
        
        return gpr;
    }
        
    case DataFormatInt52:
    case DataFormatStrictInt52: {
        GPRReg gpr = info.gpr();
        GPRReg resultGPR = allocate();
        boxInt52(gpr, resultGPR, info.registerFormat());
        return resultGPR;
    }

    case DataFormatCell:
        // No retag required on JSVALUE64!
    case DataFormatJS:
    case DataFormatJSInt32:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }
        
    case DataFormatBoolean:
    case DataFormatStorage:
        // this type currently never occurs
        RELEASE_ASSERT_NOT_REACHED();
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

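// Converts an int32 operand interpreted as an unsigned 32-bit value into a boxed
// number: non-negative values are tagged as int32 in place, negative values are
// re-expressed as a double by adding 2^32 before boxing.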
void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node* node)
{
    SpeculateInt32Operand op1(this, node->child1());
    FPRTemporary boxer(this);
    GPRTemporary result(this, Reuse, op1);
    
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0));
    
    m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr());
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), boxer.fpr());
    
    boxDouble(boxer.fpr(), result.gpr());
    
    JITCompiler::Jump done = m_jit.jump();
    
    positive.link(&m_jit);
    
    m_jit.or64(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr());
    
    done.link(&m_jit);
    
    jsValueResult(result.gpr(), m_currentNode);
}

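// Emits an inline-cached property load: a patchable structure check, a convertible
// load of the butterfly, and a compact-offset load, with a slow path that calls
// operationGetByIdOptimize and records the access for later patching.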
void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
    JITCompiler::DataLabelPtr structureToCompare;
    JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
    
    JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
        m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
    JITCompiler::DataLabelCompact loadWithPatch = m_jit.load64WithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
    
    JITCompiler::Label doneLabel = m_jit.label();
    
    OwnPtr<SlowPathGenerator> slowPath;
    if (!slowPathTarget.isSet()) {
        slowPath = slowPathCall(
            structureCheck.m_jump, this, operationGetByIdOptimize, resultGPR, baseGPR,
            identifierUID(identifierNumber), spillMode);
    } else {
        JITCompiler::JumpList slowCases;
        slowCases.append(structureCheck.m_jump);
        slowCases.append(slowPathTarget);
        slowPath = slowPathCall(
            slowCases, this, operationGetByIdOptimize, resultGPR, baseGPR,
            identifierUID(identifierNumber), spillMode);
    }
    m_jit.addPropertyAccess(
        PropertyAccessRecord(
            codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, loadWithPatch,
            slowPath.get(), doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR),
            usedRegisters(),
            spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
    addSlowPathGenerator(slowPath.release());
}

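// Emits an inline-cached property store, mirroring cachedGetById: patchable
// structure check, write barrier, patchable store through the butterfly, and a
// slow path that calls the appropriate optimize variant of operationPutById.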
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
    JITCompiler::DataLabelPtr structureToCompare;
    JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
    
    writeBarrier(baseGPR, valueGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);
    
    JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
        m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
    JITCompiler::DataLabel32 storeWithPatch = m_jit.store64WithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
    
    JITCompiler::Label doneLabel = m_jit.label();
    
    V_DFGOperation_EJCI optimizedCall;
    if (m_jit.strictModeFor(m_currentNode->codeOrigin)) {
        if (putKind == Direct)
            optimizedCall = operationPutByIdDirectStrictOptimize;
        else
            optimizedCall = operationPutByIdStrictOptimize;
    } else {
        if (putKind == Direct)
            optimizedCall = operationPutByIdDirectNonStrictOptimize;
        else
            optimizedCall = operationPutByIdNonStrictOptimize;
    }
    OwnPtr<SlowPathGenerator> slowPath;
    if (!slowPathTarget.isSet()) {
        slowPath = slowPathCall(
            structureCheck.m_jump, this, optimizedCall, NoResult, valueGPR, baseGPR,
            identifierUID(identifierNumber));
    } else {
        JITCompiler::JumpList slowCases;
        slowCases.append(structureCheck.m_jump);
        slowCases.append(slowPathTarget);
        slowPath = slowPathCall(
            slowCases, this, optimizedCall, NoResult, valueGPR, baseGPR,
            identifierUID(identifierNumber));
    }
    RegisterSet currentlyUsedRegisters = usedRegisters();
    currentlyUsedRegisters.clear(scratchGPR);
    ASSERT(currentlyUsedRegisters.get(baseGPR));
    ASSERT(currentlyUsedRegisters.get(valueGPR));
    m_jit.addPropertyAccess(
        PropertyAccessRecord(
            codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
            JITCompiler::DataLabelCompact(storeWithPatch.label()), slowPath.get(), doneLabel,
            safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), currentlyUsedRegisters));
    addSlowPathGenerator(slowPath.release());
}

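// Compares a value against null/undefined and produces a boxed boolean. Cells
// compare equal to undefined only when their structure masquerades as undefined
// in the originating global object; non-cells are masked and compared to ValueNull.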
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
{
    JSValueOperand arg(this, operand);
    GPRReg argGPR = arg.gpr();
    
    GPRTemporary result(this, Reuse, arg);
    GPRReg resultGPR = result.gpr();
    
    JITCompiler::Jump notCell;
    
    JITCompiler::Jump notMasqueradesAsUndefined;
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
        
        speculationWatchpointForMasqueradesAsUndefined();
        
        m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR);
        notMasqueradesAsUndefined = m_jit.jump();
    } else {
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);
        
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
        
        m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
        JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined));
        
        m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR);
        notMasqueradesAsUndefined = m_jit.jump();
        
        isMasqueradesAsUndefined.link(&m_jit);
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(operand->codeOrigin)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR);
    }
    
    if (!isKnownCell(operand.node())) {
        JITCompiler::Jump done = m_jit.jump();
        
        notCell.link(&m_jit);
        
        m_jit.move(argGPR, resultGPR);
        m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
        m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
        
        done.link(&m_jit);
    }
    
    notMasqueradesAsUndefined.link(&m_jit);
    
    m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}

void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();
    
    if (taken == nextBlock()) {
        invert = !invert;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg(this, operand);
    GPRReg argGPR = arg.gpr();
    
    GPRTemporary result(this, Reuse, arg);
    GPRReg resultGPR = result.gpr();
    
    JITCompiler::Jump notCell;
    
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
        
        speculationWatchpointForMasqueradesAsUndefined();
        
        jump(invert ? taken : notTaken, ForceJump);
    } else {
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);
        
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
        
        m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
        branchTest8(JITCompiler::Zero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), invert ? taken : notTaken);
        
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(operand->codeOrigin)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
    }
    
    if (!isKnownCell(operand.node())) {
        jump(notTaken, ForceJump);
        
        notCell.link(&m_jit);
        
        m_jit.move(argGPR, resultGPR);
        m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
        branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken);
    }
    
    jump(notTaken);
}

bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        RELEASE_ASSERT(node->adjustedRefCount() == 1);
        
        nonSpeculativePeepholeBranchNull(operand, branchNode, invert);
        
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        
        return true;
    }
    
    nonSpeculativeNonPeepholeCompareNull(operand, invert);
    
    return false;
}

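// Fuses a generic (possibly non-integer) comparison with the branch that consumes
// it. Known-integer operands are compared inline; otherwise the comparison falls
// back to the given helper operation and branches on its boolean result.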
void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        cond = JITCompiler::invert(cond);
        callResultCondition = JITCompiler::Zero;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();
    
    JITCompiler::JumpList slowPath;
    
    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();
        
        arg1.use();
        arg2.use();
        
        flushRegisters();
        callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
        
        branchTest32(callResultCondition, resultGPR, taken);
    } else {
        GPRTemporary result(this, Reuse, arg2);
        GPRReg resultGPR = result.gpr();
        
        arg1.use();
        arg2.use();
        
        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
        
        branch32(cond, arg1GPR, arg2GPR, taken);
        
        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            jump(notTaken, ForceJump);
            
            slowPath.link(&m_jit);
            
            silentSpillAllRegisters(resultGPR);
            callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
            silentFillAllRegisters(resultGPR);
            
            branchTest32(callResultCondition, resultGPR, taken);
        }
    }

    jump(notTaken);

    m_indexInBlock = m_block->size() - 1;
    m_currentNode = branchNode;
}

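// Slow path used by the non-peephole compares: calls the comparison helper, then
// masks and re-tags its integer result as a boxed boolean.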
template<typename JumpType>
class CompareAndBoxBooleanSlowPathGenerator
    : public CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg> {
public:
    CompareAndBoxBooleanSlowPathGenerator(
        JumpType from, SpeculativeJIT* jit,
        S_DFGOperation_EJJ function, GPRReg result, GPRReg arg1, GPRReg arg2)
        : CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg>(
            from, jit, function, NeedToSpill, result)
        , m_arg1(arg1)
        , m_arg2(arg2)
    {
    }
    
protected:
    virtual void generateInternal(SpeculativeJIT* jit)
    {
        this->setUp(jit);
        this->recordCall(jit->callOperation(this->m_function, this->m_result, m_arg1, m_arg2));
        jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
        jit->m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), this->m_result);
        this->tearDown(jit);
    }
    
private:
    GPRReg m_arg1;
    GPRReg m_arg2;
};

void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();
    
    JITCompiler::JumpList slowPath;
    
    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();
        
        arg1.use();
        arg2.use();
        
        flushRegisters();
        callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
        
        m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
        jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
    } else {
        GPRTemporary result(this, Reuse, arg2);
        GPRReg resultGPR = result.gpr();
        
        arg1.use();
        arg2.use();
        
        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
        
        m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
        m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
        
        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            addSlowPathGenerator(adoptPtr(
                new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>(
                    slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR)));
        }
        
        jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
    }
}

void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
    BasicBlock* taken = branchNode->takenBlock();
    BasicBlock* notTaken = branchNode->notTakenBlock();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        invert = !invert;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }
    
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();
    
    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();
    
    arg1.use();
    arg2.use();
    
    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
        
        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);
        
        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
    } else {
        m_jit.or64(arg1GPR, arg2GPR, resultGPR);
        
        JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
        
        JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
        JITCompiler::Jump leftDouble = m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
        leftOK.link(&m_jit);
        JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
        JITCompiler::Jump rightDouble = m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
        rightOK.link(&m_jit);
        
        branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
        jump(notTaken, ForceJump);
        
        twoCellsCase.link(&m_jit);
        branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
        
        leftDouble.link(&m_jit);
        rightDouble.link(&m_jit);
        
        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);
        
        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
    }
    
    jump(notTaken);
}

void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();
    
    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();
    
    arg1.use();
    arg2.use();
    
    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        // FIXME: this should flush registers instead of silent spill/fill.
        JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR);
        
        m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
        
        JITCompiler::Jump done = m_jit.jump();

        notEqualCase.link(&m_jit);
        
        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);
        
        m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR);
        m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
        
        done.link(&m_jit);
    } else {
        m_jit.or64(arg1GPR, arg2GPR, resultGPR);
        
        JITCompiler::JumpList slowPathCases;
        
        JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
        
        JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
        slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
        leftOK.link(&m_jit);
        JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
        slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
        rightOK.link(&m_jit);
        
        m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
        m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
        
        JITCompiler::Jump done = m_jit.jump();
        
        twoCellsCase.link(&m_jit);
        slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR));
        
        m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
        
        addSlowPathGenerator(
            adoptPtr(
                new CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>(
                    slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR,
                    arg2GPR)));
        
        done.link(&m_jit);
    }
    
    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}

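// Emits a JS call or construct: stores the argument count, caller frame, callee,
// and arguments into the callee frame, emits the patchable fast path with an
// inline callee check plus the virtual-call slow path, then records the call site.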
void SpeculativeJIT::emitCall(Node* node)
{
    if (node->op() != Call)
        RELEASE_ASSERT(node->op() == Construct);
    
    // For constructors, the this argument is not passed but we have to make space
    // for it.
    int dummyThisArgument = node->op() == Call ? 0 : 1;
    
    CallLinkInfo::CallType callType = node->op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
    
    Edge calleeEdge = m_jit.graph().m_varArgChildren[node->firstChild()];
    JSValueOperand callee(this, calleeEdge);
    GPRReg calleeGPR = callee.gpr();
    use(calleeEdge);
    
    // The call instruction's first child is the function; the subsequent children are the
    // arguments.
    int numPassedArgs = node->numChildren() - 1;
    
    m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount));
    m_jit.store64(GPRInfo::callFrameRegister, callFrameSlot(JSStack::CallerFrame));
    m_jit.store64(calleeGPR, callFrameSlot(JSStack::Callee));
    
    for (int i = 0; i < numPassedArgs; i++) {
        Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
        JSValueOperand arg(this, argEdge);
        GPRReg argGPR = arg.gpr();
        use(argEdge);
        
        m_jit.store64(argGPR, argumentSlot(i + dummyThisArgument));
    }

    flushRegisters();

    GPRResult result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::DataLabelPtr targetToCheck;
    JITCompiler::JumpList slowPath;

    CallBeginToken token;
    m_jit.beginCall(node->codeOrigin, token);

    m_jit.addPtr(TrustedImm32(-(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register))), GPRInfo::callFrameRegister);

    slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0)));

    m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR);
    m_jit.store64(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));

    CodeOrigin codeOrigin = m_currentNode->codeOrigin;
    JITCompiler::Call fastCall = m_jit.nearCall();
    m_jit.notifyCall(fastCall, codeOrigin, token);

    JITCompiler::Jump done = m_jit.jump();

    slowPath.link(&m_jit);

    m_jit.move(calleeGPR, GPRInfo::nonArgGPR0);
    m_jit.prepareForExceptionCheck();
    JITCompiler::Call slowCall = m_jit.nearCall();
    m_jit.notifyCall(slowCall, codeOrigin, token);

    done.link(&m_jit);

    m_jit.move(GPRInfo::returnValueGPR, resultGPR);

    jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);

    m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, calleeGPR, m_currentNode->codeOrigin);
}

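// Fills the value for an edge as an int32, speculating that it really is one.
// In a strict fill the result is an untagged DataFormatInt32; otherwise a tagged
// DataFormatJSInt32 may be returned. Mis-speculation triggers a speculation check.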
template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecInt@%d ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));
    m_interpreter.filter(value, SpecInt32);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if ((edge->hasConstant() && !isInt32Constant(edge.node())) || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            returnFormat = DataFormatInt32;
            return allocate();
        }
        
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            ASSERT(isInt32Constant(edge.node()));
            m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
            info.fillInt32(*m_stream, gpr);
            returnFormat = DataFormatInt32;
            return gpr;
        }
        
        DataFormat spillFormat = info.spillFormat();
        
        RELEASE_ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32 || spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52);
        
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        
        if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) {
            // If we know this was spilled as an integer we can fill without checking.
            if (strict) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillInt32(*m_stream, gpr);
                returnFormat = DataFormatInt32;
                return gpr;
            }
            if (spillFormat == DataFormatInt32) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
            } else
                m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
            returnFormat = DataFormatJSInt32;
            return gpr;
        }
        if (spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52) {
            // Generally, this can only happen if we've already proved that the
            // value is an int32. That's because if a value originated as a JSValue
            // then we would speculate that it's an int32 before representing it as
            // an int52. Otherwise, if we knowingly produced an int52, then we would
            // be boxing it into a value using Int52ToValue. This assertion is valid
            // only because Int52 is something that we introduce at prediction time.
            // However: we may have an int32-producing node replaced by an
            // int52-producing node due to CSE. So we must do a check.
            RELEASE_ASSERT(!(type & ~SpecMachineInt));
            if (type & SpecInt52) {
                GPRReg temp = allocate();
                m_jit.signExtend32ToPtr(gpr, temp);
                // Currently, we can't supply value profiling information here. :-/
                speculationCheck(
                    BadType, JSValueRegs(), 0,
                    m_jit.branch64(MacroAssembler::NotEqual, gpr, temp));
                unlock(temp);
            }
            if (spillFormat == DataFormatStrictInt52)
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
            else {
                m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
                m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
                m_jit.zeroExtend32ToPtr(gpr, gpr);
            }
            info.fillInt32(*m_stream, gpr);
            returnFormat = DataFormatInt32;
            return gpr;
        }
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        // Fill as JSValue, and fall through.
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        m_gprs.unlock(gpr);
    }

    case DataFormatJS: {
        RELEASE_ASSERT(!(type & SpecInt52));
        // Check the value is an integer.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecInt32)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branch64(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        // If !strict we're done, return.
        if (!strict) {
            returnFormat = DataFormatJSInt32;
            return gpr;
        }
        // else fall through & handle as DataFormatJSInt32.
        m_gprs.unlock(gpr);
    }

    case DataFormatJSInt32: {
        // In a strict fill we need to strip off the value tag.
        if (strict) {
            GPRReg gpr = info.gpr();
            GPRReg result;
            // If the register has already been locked we need to take a copy.
            // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
            if (m_gprs.isLocked(gpr))
                result = allocate();
            else {
                m_gprs.lock(gpr);
                info.fillInt32(*m_stream, gpr);
                result = gpr;
            }
            m_jit.zeroExtend32ToPtr(gpr, result);
            returnFormat = DataFormatInt32;
            return result;
        }

        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatJSInt32;
        return gpr;
    }

    case DataFormatInt32: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatInt32;
        return gpr;
    }
        
    case DataFormatStrictInt52:
    case DataFormatInt52: {
        GPRReg gpr = info.gpr();
        GPRReg result;
        if (m_gprs.isLocked(gpr)) {
            result = allocate();
            m_jit.move(gpr, result);
        } else {
            m_gprs.lock(gpr);
            info.fillInt32(*m_stream, gpr);
            result = gpr;
        }
        RELEASE_ASSERT(!(type & ~SpecMachineInt));
        if (info.registerFormat() == DataFormatInt52)
            m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), result);
        if (type & SpecInt52) {
            GPRReg temp = allocate();
            m_jit.signExtend32ToPtr(result, temp);
            // Currently, we can't supply value profiling information here. :-/
            speculationCheck(
                BadType, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::NotEqual, result, temp));
            unlock(temp);
        }
        m_jit.zeroExtend32ToPtr(result, result);
        returnFormat = DataFormatInt32;
        return result;
    }

    case DataFormatDouble:
    case DataFormatJSDouble: {
        if (edge->hasConstant() && isInt32Constant(edge.node())) {
            GPRReg gpr = allocate();
            ASSERT(isInt32Constant(edge.node()));
            m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
            returnFormat = DataFormatInt32;
            return gpr;
        }
    }
    case DataFormatCell:
    case DataFormatBoolean:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        returnFormat = DataFormatInt32;
        return allocate();
    }

    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
{
    return fillSpeculateInt32Internal<false>(edge, returnFormat);
}

GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
{
    DataFormat mustBeDataFormatInt32;
    GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
    RELEASE_ASSERT(mustBeDataFormatInt32 == DataFormatInt32);
    return result;
}

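// Fills the value for an edge in one of the two Int52 machine representations
// (shifted DataFormatInt52 or unshifted DataFormatStrictInt52), speculating that
// the value is a machine int.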
GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat)
{
    ASSERT(desiredFormat == DataFormatInt52 || desiredFormat == DataFormatStrictInt52);
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    m_interpreter.filter(value, SpecMachineInt);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if ((edge->hasConstant() && !valueOfJSConstant(edge.node()).isMachineInt()) || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return allocate();
        }
        
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = valueOfJSConstant(edge.node());
            ASSERT(jsValue.isMachineInt());
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            int64_t value = jsValue.asMachineInt();
            if (desiredFormat == DataFormatInt52)
                value = value << JSValue::int52ShiftAmount;
            m_jit.move(MacroAssembler::Imm64(value), gpr);
            info.fillGPR(*m_stream, gpr, desiredFormat);
            return gpr;
        }
        
        DataFormat spillFormat = info.spillFormat();
        
        RELEASE_ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32 || spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52);
        
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        
        if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) {
            // If we know this was spilled as an integer we can fill without checking.
            m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
            m_jit.signExtend32ToPtr(gpr, gpr);
            if (desiredFormat == DataFormatStrictInt52) {
                info.fillStrictInt52(*m_stream, gpr);
                return gpr;
            }
            m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
            info.fillInt52(*m_stream, gpr);
            return gpr;
        }
        if (spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52) {
            m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
            if (desiredFormat == DataFormatStrictInt52) {
                if (spillFormat == DataFormatInt52)
                    m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
                info.fillStrictInt52(*m_stream, gpr);
                return gpr;
            }
            if (spillFormat == DataFormatStrictInt52)
                m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
            info.fillInt52(*m_stream, gpr);
            return gpr;
        }
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        // Fill as JSValue, and fall through.
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        m_gprs.unlock(gpr);
    }

    case DataFormatJS: {
        // Check the value is an integer. Note that we would *like* to unbox an Int52
        // at this point but this is too costly. We only *prove* that this is an Int52
        // even though we check if it's an int32.
        GPRReg gpr = info.gpr();
        GPRReg result;
        if (m_gprs.isLocked(gpr)) {
            result = allocate();
            m_jit.move(gpr, result);
        } else {
            m_gprs.lock(gpr);
            result = gpr;
        }
        if (type & ~SpecInt32)
            speculationCheck(BadType, JSValueRegs(result), edge, m_jit.branch64(MacroAssembler::Below, result, GPRInfo::tagTypeNumberRegister));
        if (result == gpr) // The not-already-locked, so fill in-place, case.
            info.fillInt52(*m_stream, gpr, desiredFormat);
        m_jit.signExtend32ToPtr(result, result);
        if (desiredFormat == DataFormatInt52)
            m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), result);
        return result;
    }

    case DataFormatInt32:
    case DataFormatJSInt32: {
        GPRReg gpr = info.gpr();
        GPRReg result;
        if (m_gprs.isLocked(gpr)) {
            result = allocate();
            m_jit.move(gpr, result);
        } else {
            m_gprs.lock(gpr);
            info.fillInt52(*m_stream, gpr, desiredFormat);
            result = gpr;
        }
        m_jit.signExtend32ToPtr(result, result);
        if (desiredFormat == DataFormatInt52)
            m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), result);
        return result;
    }

    case DataFormatStrictInt52: {
        GPRReg gpr = info.gpr();
        bool wasLocked = m_gprs.isLocked(gpr);
        m_gprs.lock(gpr);
        if (desiredFormat == DataFormatStrictInt52)
            return gpr;
        if (wasLocked) {
            GPRReg result = allocate();
            m_jit.move(gpr, result);
            m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), result);
            return result;
        }
        info.fillStrictInt52(*m_stream, gpr);
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
        return gpr;
    }
        
    case DataFormatInt52: {
        GPRReg gpr = info.gpr();
        bool wasLocked = m_gprs.isLocked(gpr);
        m_gprs.lock(gpr);
        if (desiredFormat == DataFormatInt52)
            return gpr;
        if (wasLocked) {
            GPRReg result = allocate();
            m_jit.move(gpr, result);
            m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), result);
            return result;
        }
        info.fillInt52(*m_stream, gpr);
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
        return gpr;
    }

    case DataFormatDouble:
    case DataFormatJSDouble:
        if (edge->hasConstant()) {
            JSValue jsValue = valueOfJSConstant(edge.node());
            if (jsValue.isMachineInt()) {
                int64_t value = jsValue.asMachineInt();
                if (desiredFormat == DataFormatInt52)
                    value = value << JSValue::int52ShiftAmount;
                GPRReg gpr = allocate();
                m_jit.move(MacroAssembler::Imm64(value), gpr);
                return gpr;
            }
        }
        
    case DataFormatCell:
    case DataFormatBoolean:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

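// Fills the value for an edge into an FPR as an unboxed double, converting from
// int32, Int52, or a boxed JSValue as needed and speculating that the value is a
// number.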
FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecDouble@%d ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecFullNumber));
    m_interpreter.filter(value, SpecFullNumber);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    if (info.registerFormat() == DataFormatNone) {
        if (edge->hasConstant()) {
            GPRReg gpr = allocate();

            if (isInt32Constant(edge.node())) {
                FPRReg fpr = fprAllocate();
                m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(static_cast<double>(valueOfInt32Constant(edge.node())))), gpr);
                m_jit.move64ToDouble(gpr, fpr);
                unlock(gpr);

                // Don't fill double here since that will lead to confusion: the
                // register allocator will now think that this is a double while
                // everyone else thinks it's an integer.
                return fpr;
            }
            if (isNumberConstant(edge.node())) {
                FPRReg fpr = fprAllocate();
                m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(edge.node()))), gpr);
                m_jit.move64ToDouble(gpr, fpr);
                unlock(gpr);

                m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
                info.fillDouble(*m_stream, fpr);
                return fpr;
            }
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return fprAllocate();
        }
        
        DataFormat spillFormat = info.spillFormat();
        switch (spillFormat) {
        case DataFormatDouble: {
            FPRReg fpr = fprAllocate();
            m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
            m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
            info.fillDouble(*m_stream, fpr);
            return fpr;
        }
            
        case DataFormatInt32: {
            GPRReg gpr = allocate();
            
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillInt32(*m_stream, gpr);
            unlock(gpr);
            break;
        }
            
        case DataFormatInt52: {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillInt52(*m_stream, gpr);
            unlock(gpr);
            break;
        }
            
        case DataFormatStrictInt52: {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStrictInt52(*m_stream, gpr);
            unlock(gpr);
            break;
        }

        default:
            GPRReg gpr = allocate();

            RELEASE_ASSERT(spillFormat & DataFormatJS);
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillJSValue(*m_stream, gpr, spillFormat);
            unlock(gpr);
            break;
        }
    }

    switch (info.registerFormat()) {
    case DataFormatNone: // Should have filled, above.
    case DataFormatBoolean: // This type never occurs.
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    case DataFormatCell:
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return fprAllocate();

    case DataFormatJSCell:
    case DataFormatJS:
    case DataFormatJSBoolean: {
        GPRReg jsValueGpr = info.gpr();
        m_gprs.lock(jsValueGpr);
        FPRReg fpr = fprAllocate();
        GPRReg tempGpr = allocate();

        JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);

        if (type & ~SpecFullNumber)
            speculationCheck(BadType, JSValueRegs(jsValueGpr), edge, m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));

        // First, if we get here we have a double encoded as a JSValue
        m_jit.move(jsValueGpr, tempGpr);
        unboxDouble(tempGpr, fpr);
        JITCompiler::Jump hasUnboxedDouble = m_jit.jump();

        // Finally, handle integers.
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(jsValueGpr, fpr);
        hasUnboxedDouble.link(&m_jit);

        m_gprs.release(jsValueGpr);
        m_gprs.unlock(jsValueGpr);
        m_gprs.unlock(tempGpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
        info.fillDouble(*m_stream, fpr);
        return fpr;
    }

    case DataFormatJSInt32:
    case DataFormatInt32: {
        FPRReg fpr = fprAllocate();
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        m_jit.convertInt32ToDouble(gpr, fpr);
        m_gprs.unlock(gpr);
        return fpr;
    }
        
    case DataFormatInt52: {
        FPRReg fpr = fprAllocate();
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        GPRReg temp = allocate();
        m_jit.move(gpr, temp);
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), temp);
        m_jit.convertInt64ToDouble(temp, fpr);
        unlock(temp);
        m_gprs.unlock(gpr);
        return fpr;
    }
        
    case DataFormatStrictInt52: {
        FPRReg fpr = fprAllocate();
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        m_jit.convertInt64ToDouble(gpr, fpr);
        m_gprs.unlock(gpr);
        return fpr;
    }

    case DataFormatJSDouble: {
        GPRReg gpr = info.gpr();
        FPRReg fpr = fprAllocate();
        if (m_gprs.isLocked(gpr)) {
            // Make sure we don't trample gpr if it is in use.
            GPRReg temp = allocate();
            m_jit.move(gpr, temp);
            unboxDouble(temp, fpr);
            unlock(temp);
        } else
            unboxDouble(gpr, fpr);

        m_gprs.release(gpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);

        info.fillDouble(*m_stream, fpr);
        return fpr;
    }

    case DataFormatDouble: {
        FPRReg fpr = info.fpr();
        m_fprs.lock(fpr);
        return fpr;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidFPRReg;
    }
}

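// Fills the value for an edge as a cell pointer, speculating that it is a cell and
// emitting a speculation check (or terminating speculative execution) when it
// provably is not.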
GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecCell@%d ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
    m_interpreter.filter(value, SpecCell);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return allocate();
        }
        
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = valueOfJSConstant(edge.node());
            if (jsValue.isCell()) {
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
                m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
                return gpr;
            }
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return gpr;
        }
        RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJS);
        if (type & ~SpecCell)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
        info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatCell:
    case DataFormatJSCell: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
#if DFG_ENABLE(JIT_ASSERT)
        MacroAssembler::Jump checkCell = m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
        m_jit.breakpoint();
        checkCell.link(&m_jit);
#endif
        return gpr;
    }
        
    case DataFormatJS: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecCell)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
        info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatDouble:
    case DataFormatJSBoolean:
    case DataFormatBoolean:
    case DataFormatInt52:
    case DataFormatStrictInt52: {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }
    
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

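// Fills the value for an edge as a boxed boolean, speculating that it really is a
// boolean; the xor/branch pair checks the tag and supports recovery on failure.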
GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecBool@%d ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    m_interpreter.filter(value, SpecBoolean);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return allocate();
        }
        
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = valueOfJSConstant(edge.node());
            if (jsValue.isBoolean()) {
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
                m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
                return gpr;
            }
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return gpr;
        }
        RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJS);
        if (type & ~SpecBoolean) {
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
        }
        info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
        return gpr;
    }

    case DataFormatBoolean:
    case DataFormatJSBoolean: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatJS: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecBoolean) {
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
        }
        info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
        return gpr;
    }

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatDouble:
    case DataFormatJSCell:
    case DataFormatCell:
    case DataFormatInt52:
    case DataFormatStrictInt52: {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }
        
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp)
{
    JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);
    
    JITCompiler::Jump notNumber = m_jit.branchTest64(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);
    
    m_jit.move(value, tmp);
    unboxDouble(tmp, result);
    
    JITCompiler::Jump done = m_jit.jump();
    
    isInteger.link(&m_jit);
    
    m_jit.convertInt32ToDouble(value, result);
    
    done.link(&m_jit);
    
    return notNumber;
}

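// Compiles an object equality: speculates that both children are objects (not
// strings, and not masquerading as undefined unless the watchpoint covers that),
// then compares the two cell pointers and boxes the boolean result.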
void SpeculativeJIT::compileObjectEquality(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    GPRTemporary result(this, Reuse, op1);
    
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();
    
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        GPRTemporary structure(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }
    
    MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
    m_jit.move(TrustedImm32(ValueTrue), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();
    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueFalse), resultGPR);
    done.link(&m_jit);

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}

void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
{
    SpeculateCellOperand op1(this, leftChild);
    JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
    GPRTemporary result(this);
    
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();
    GPRTemporary structure;
    GPRReg structureGPR = InvalidGPRReg;

    bool masqueradesAsUndefinedWatchpointValid =
        masqueradesAsUndefinedWatchpointIsStillValid();

    if (!masqueradesAsUndefinedWatchpointValid) {
        // The masquerades as undefined case will use the structure register, so allocate it here.
        // Do this at the top of the function to avoid branching around a register allocation.
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    if (masqueradesAsUndefinedWatchpointValid) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }
    
    // It seems that most of the time when programs do a == b where b may be either null/undefined
    // or an object, b is usually an object. Balance the branches to make that case fast.
    MacroAssembler::Jump rightNotCell =
        m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
    
    // We know that within this branch, rightChild must be a cell.
    if (masqueradesAsUndefinedWatchpointValid) {
        speculationWatchpointForMasqueradesAsUndefined();
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }
    
    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because both left and right are pointers to objects that have no special equality
    // protocols.
    MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
    MacroAssembler::Jump trueCase = m_jit.jump();
    
    rightNotCell.link(&m_jit);
    
    // We know that within this branch, rightChild must not be a cell. Check if that is enough to
    // prove that it is either null or undefined.
    if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
        m_jit.move(op2GPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
        
        typeCheck(
            JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther,
            m_jit.branch64(
                MacroAssembler::NotEqual, resultGPR,
                MacroAssembler::TrustedImm64(ValueNull)));
    }
    
    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueFalse), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();
    trueCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueTrue), resultGPR);
    done.link(&m_jit);
    
    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}

1717 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
1719 BasicBlock* taken = branchNode->takenBlock();
1720 BasicBlock* notTaken = branchNode->notTakenBlock();
1722 SpeculateCellOperand op1(this, leftChild);
1723 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1724 GPRTemporary result(this);
1726 GPRReg op1GPR = op1.gpr();
1727 GPRReg op2GPR = op2.gpr();
1728 GPRReg resultGPR = result.gpr();
1729 GPRTemporary structure;
1730 GPRReg structureGPR = InvalidGPRReg;
1732 bool masqueradesAsUndefinedWatchpointValid =
1733 masqueradesAsUndefinedWatchpointIsStillValid();
1735 if (!masqueradesAsUndefinedWatchpointValid) {
1736 // The masquerades as undefined case will use the structure register, so allocate it here.
1737 // Do this at the top of the function to avoid branching around a register allocation.
1738 GPRTemporary realStructure(this);
1739 structure.adopt(realStructure);
1740 structureGPR = structure.gpr();
1743 if (masqueradesAsUndefinedWatchpointValid) {
1744 speculationWatchpointForMasqueradesAsUndefined();
1746 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
1747 MacroAssembler::Equal,
1748 MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
1749 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1751 m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
1753 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
1754 MacroAssembler::Equal,
1756 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1757 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1759 MacroAssembler::NonZero,
1760 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
1761 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1764 // It seems that most of the time when programs do a == b where b may be either null/undefined
1765 // or an object, b is usually an object. Balance the branches to make that case fast.
1766 MacroAssembler::Jump rightNotCell =
1767 m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
1769 // We know that within this branch, rightChild must be a cell.
1770 if (masqueradesAsUndefinedWatchpointValid) {
1771 speculationWatchpointForMasqueradesAsUndefined();
1773 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
1774 MacroAssembler::Equal,
1775 MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
1776 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1778 m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
1780 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
1781 MacroAssembler::Equal,
1783 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1784 speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
1786 MacroAssembler::NonZero,
1787 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
1788 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1791 // At this point we know that we can perform a straight-forward equality comparison on pointer
1792 // values because both left and right are pointers to objects that have no special equality protocols.
1794 branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);
1796 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1797 // prove that it is either null or undefined.
1798 if (!needsTypeCheck(rightChild, SpecCell | SpecOther))
1799 rightNotCell.link(&m_jit);
1801 jump(notTaken, ForceJump);
1803 rightNotCell.link(&m_jit);
1804 m_jit.move(op2GPR, resultGPR);
1805 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1808 JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther, m_jit.branch64(
1809 MacroAssembler::NotEqual, resultGPR,
1810 MacroAssembler::TrustedImm64(ValueNull)));
1816 void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
1818 SpeculateInt32Operand op1(this, node->child1());
1819 SpeculateInt32Operand op2(this, node->child2());
1820 GPRTemporary result(this, Reuse, op1, op2);
1822 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
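// compare32 leaves 0 or 1 in the result register; with the boolean encodings assumed here
// (ValueFalse == 0x6, ValueTrue == 0x7), or'ing in ValueFalse below turns that directly into
// the boxed jsBoolean result.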
1824 // If we add a DataFormatBool, we should use it here.
1825 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1826 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
1829 void SpeculativeJIT::compileInt52Compare(Node* node, MacroAssembler::RelationalCondition condition)
1831 SpeculateWhicheverInt52Operand op1(this, node->child1());
1832 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
1833 GPRTemporary result(this, Reuse, op1, op2);
1835 m_jit.compare64(condition, op1.gpr(), op2.gpr(), result.gpr());
1837 // If we add a DataFormatBool, we should use it here.
1838 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1839 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
1842 void SpeculativeJIT::compilePeepHoleInt52Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1844 BasicBlock* taken = branchNode->takenBlock();
1845 BasicBlock* notTaken = branchNode->notTakenBlock();
1847 // The branch instruction will branch to the taken block.
1848 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1849 if (taken == nextBlock()) {
1850 condition = JITCompiler::invert(condition);
1851 BasicBlock* tmp = taken;
1856 SpeculateWhicheverInt52Operand op1(this, node->child1());
1857 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
1859 branch64(condition, op1.gpr(), op2.gpr(), taken);
1863 void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
1865 SpeculateDoubleOperand op1(this, node->child1());
1866 SpeculateDoubleOperand op2(this, node->child2());
1867 GPRTemporary result(this);
1869 m_jit.move(TrustedImm32(ValueTrue), result.gpr());
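// The result is preloaded with ValueTrue; if the double comparison fails we fall through to
// the xor, which flips the low bit and (given the 0x6/0x7 boolean encodings assumed here)
// produces ValueFalse. The branch simply skips the flip when the condition holds.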
1870 MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
1871 m_jit.xor64(TrustedImm32(true), result.gpr());
1872 trueCase.link(&m_jit);
1874 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1877 void SpeculativeJIT::compileValueAdd(Node* node)
1879 JSValueOperand op1(this, node->child1());
1880 JSValueOperand op2(this, node->child2());
1882 GPRReg op1GPR = op1.gpr();
1883 GPRReg op2GPR = op2.gpr();
1887 GPRResult result(this);
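// If either operand is statically known not to be a number, the numeric fast cases cannot
// apply, so call the not-number variant of the add operation directly.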
1888 if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node()))
1889 callOperation(operationValueAddNotNumber, result.gpr(), op1GPR, op2GPR);
1891 callOperation(operationValueAdd, result.gpr(), op1GPR, op2GPR);
1893 jsValueResult(result.gpr(), node);
1896 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
1898 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1899 GPRTemporary result(this);
1900 GPRReg valueGPR = value.gpr();
1901 GPRReg resultGPR = result.gpr();
1902 GPRTemporary structure;
1903 GPRReg structureGPR = InvalidGPRReg;
1905 bool masqueradesAsUndefinedWatchpointValid =
1906 masqueradesAsUndefinedWatchpointIsStillValid();
1908 if (!masqueradesAsUndefinedWatchpointValid) {
1909 // The masquerades as undefined case will use the structure register, so allocate it here.
1910 // Do this at the top of the function to avoid branching around a register allocation.
1911 GPRTemporary realStructure(this);
1912 structure.adopt(realStructure);
1913 structureGPR = structure.gpr();
1916 MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
1917 if (masqueradesAsUndefinedWatchpointValid) {
1918 speculationWatchpointForMasqueradesAsUndefined();
1920 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
1921 MacroAssembler::Equal,
1922 MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
1923 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1925 m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), structureGPR);
1928 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
1929 MacroAssembler::Equal,
1931 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1933 MacroAssembler::Jump isNotMasqueradesAsUndefined =
1935 MacroAssembler::Zero,
1936 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
1937 MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
1939 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
1941 MacroAssembler::Equal,
1942 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
1943 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
1945 isNotMasqueradesAsUndefined.link(&m_jit);
1947 m_jit.move(TrustedImm32(ValueFalse), resultGPR);
1948 MacroAssembler::Jump done = m_jit.jump();
1950 notCell.link(&m_jit);
1952 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
1953 m_jit.move(valueGPR, resultGPR);
1954 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1956 JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
1957 MacroAssembler::NotEqual,
1959 MacroAssembler::TrustedImm64(ValueNull)));
1961 m_jit.move(TrustedImm32(ValueTrue), resultGPR);
1965 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1968 void SpeculativeJIT::compileLogicalNot(Node* node)
1970 switch (node->child1().useKind()) {
1971 case ObjectOrOtherUse: {
1972 compileObjectOrOtherLogicalNot(node->child1());
1977 SpeculateInt32Operand value(this, node->child1());
1978 GPRTemporary result(this, Reuse, value);
1979 m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr());
1980 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1981 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1986 SpeculateDoubleOperand value(this, node->child1());
1987 FPRTemporary scratch(this);
1988 GPRTemporary result(this);
1989 m_jit.move(TrustedImm32(ValueFalse), result.gpr());
1990 MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
1991 m_jit.xor32(TrustedImm32(true), result.gpr());
1992 nonZero.link(&m_jit);
1993 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1998 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
1999 SpeculateBooleanOperand value(this, node->child1());
2000 GPRTemporary result(this, Reuse, value);
2002 m_jit.move(value.gpr(), result.gpr());
2003 m_jit.xor64(TrustedImm32(true), result.gpr());
2005 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
2009 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
2010 GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
2012 m_jit.move(value.gpr(), result.gpr());
2013 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
2015 JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
2016 JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
2017 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
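// Net effect, assuming the 0x6/0x7 boolean encodings: the first xor maps false/true to 0/1,
// the branchTest64 against ~1 OSR-exits if any other bit was set (i.e. the value was not a
// boolean), and the second xor re-boxes the logical negation.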
2019 // If we add a DataFormatBool, we should use it here.
2020 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
2025 JSValueOperand arg1(this, node->child1());
2026 GPRTemporary result(this);
2028 GPRReg arg1GPR = arg1.gpr();
2029 GPRReg resultGPR = result.gpr();
2033 m_jit.move(arg1GPR, resultGPR);
2034 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
2035 JITCompiler::Jump slowCase = m_jit.branchTest64(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
2037 addSlowPathGenerator(
2038 slowPathCall(slowCase, this, dfgConvertJSValueToBoolean, resultGPR, arg1GPR));
2040 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
2041 jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
2046 RELEASE_ASSERT_NOT_REACHED();
2051 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
2053 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
2054 GPRTemporary scratch(this);
2055 GPRReg valueGPR = value.gpr();
2056 GPRReg scratchGPR = scratch.gpr();
2058 MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
2059 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
2060 speculationWatchpointForMasqueradesAsUndefined();
2063 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
2064 MacroAssembler::Equal,
2065 MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
2066 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
2068 m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), scratchGPR);
2071 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
2072 MacroAssembler::Equal,
2074 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
2076 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::Zero, MacroAssembler::Address(scratchGPR, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
2078 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
2080 MacroAssembler::Equal,
2081 MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
2082 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
2084 isNotMasqueradesAsUndefined.link(&m_jit);
2086 jump(taken, ForceJump);
2088 notCell.link(&m_jit);
2090 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
2091 m_jit.move(valueGPR, scratchGPR);
2092 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
2094 JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
2095 MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
2099 noResult(m_currentNode);
2102 void SpeculativeJIT::emitBranch(Node* node)
2104 BasicBlock* taken = node->takenBlock();
2105 BasicBlock* notTaken = node->notTakenBlock();
2107 switch (node->child1().useKind()) {
2108 case ObjectOrOtherUse: {
2109 emitObjectOrOtherBranch(node->child1(), taken, notTaken);
2115 if (node->child1().useKind() == Int32Use) {
2116 bool invert = false;
2118 if (taken == nextBlock()) {
2120 BasicBlock* tmp = taken;
2125 SpeculateInt32Operand value(this, node->child1());
2126 branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
2128 SpeculateDoubleOperand value(this, node->child1());
2129 FPRTemporary scratch(this);
2130 branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
2141 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
2142 GPRReg valueGPR = value.gpr();
2144 if (node->child1().useKind() == BooleanUse) {
2145 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
2146 MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
2148 if (taken == nextBlock()) {
2149 condition = MacroAssembler::Zero;
2150 BasicBlock* tmp = taken;
2155 branchTest32(condition, valueGPR, TrustedImm32(true), taken);
2158 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
2159 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
2161 typeCheck(JSValueRegs(valueGPR), node->child1(), SpecBoolean, m_jit.jump());
2165 GPRTemporary result(this);
2166 GPRReg resultGPR = result.gpr();
2168 if (node->child1()->prediction() & SpecInt32) {
2169 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken);
2170 branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
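// Under the encoding assumed here, int32s are boxed as TagTypeNumber | value, so any value
// unsigned-above-or-equal tagTypeNumberRegister is an int32; having already routed the boxed
// zero to notTaken, every remaining int32 is non-zero and therefore truthy.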
2173 if (node->child1()->prediction() & SpecBoolean) {
2174 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
2175 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
2180 silentSpillAllRegisters(resultGPR);
2181 callOperation(dfgConvertJSValueToBoolean, resultGPR, valueGPR);
2182 silentFillAllRegisters(resultGPR);
2184 branchTest32(MacroAssembler::NonZero, resultGPR, taken);
2188 noResult(node, UseChildrenCalledExplicitly);
2193 RELEASE_ASSERT_NOT_REACHED();
2197 void SpeculativeJIT::compile(Node* node)
2199 NodeType op = node->op();
2201 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
2202 m_jit.clearRegisterAllocationOffsets();
2207 initConstantInfo(node);
2210 case PhantomArguments:
2211 initConstantInfo(node);
2214 case WeakJSConstant:
2215 m_jit.addWeakReference(node->weakConstant());
2216 initConstantInfo(node);
2220 // CSE should always eliminate this.
2221 RELEASE_ASSERT_NOT_REACHED();
2226 SpeculatedType prediction = node->variableAccessData()->prediction();
2227 AbstractValue& value = m_state.variables().operand(node->local());
2229 // If we have no prediction for this local, then don't attempt to compile.
2230 if (prediction == SpecNone) {
2231 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2235 // If the CFA is tracking this variable and it found that the variable
2236 // cannot have been assigned, then don't attempt to proceed.
2237 if (value.isClear()) {
2238 // FIXME: We should trap instead.
2239 // https://bugs.webkit.org/show_bug.cgi?id=110383
2240 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2244 switch (node->variableAccessData()->flushFormat()) {
2245 case FlushedDouble: {
2246 FPRTemporary result(this);
2247 m_jit.loadDouble(JITCompiler::addressFor(node->local()), result.fpr());
2248 VirtualRegister virtualRegister = node->virtualRegister();
2249 m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
2250 generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr());
2254 case FlushedInt32: {
2255 GPRTemporary result(this);
2256 m_jit.load32(JITCompiler::payloadFor(node->local()), result.gpr());
2258 // Like int32Result, but don't useChildren - our children are phi nodes,
2259 // and don't represent values within this dataflow with virtual registers.
2260 VirtualRegister virtualRegister = node->virtualRegister();
2261 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
2262 generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr());
2266 case FlushedInt52: {
2267 GPRTemporary result(this);
2268 m_jit.load64(JITCompiler::addressFor(node->local()), result.gpr());
2270 VirtualRegister virtualRegister = node->virtualRegister();
2271 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
2272 generationInfoFromVirtualRegister(virtualRegister).initInt52(node, node->refCount(), result.gpr());
2277 GPRTemporary result(this);
2278 m_jit.load64(JITCompiler::addressFor(node->local()), result.gpr());
2280 // Like jsValueResult, but don't useChildren - our children are phi nodes,
2281 // and don't represent values within this dataflow with virtual registers.
2282 VirtualRegister virtualRegister = node->virtualRegister();
2283 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
2286 if (isCellSpeculation(value.m_type))
2287 format = DataFormatJSCell;
2288 else if (isBooleanSpeculation(value.m_type))
2289 format = DataFormatJSBoolean;
2291 format = DataFormatJS;
2293 generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), result.gpr(), format);
2299 case GetLocalUnlinked: {
2300 GPRTemporary result(this);
2302 m_jit.load64(JITCompiler::addressFor(node->unlinkedLocal()), result.gpr());
2304 jsValueResult(result.gpr(), node);
2308 case MovHintAndCheck: {
2309 compileMovHintAndCheck(node);
2314 compileInlineStart(node);
2320 RELEASE_ASSERT_NOT_REACHED();
2325 // SetLocal doubles as a hint as to where a node will be stored and
2326 // as a speculation point. So before we speculate make sure that we
2327 // know where the child of this node needs to go in the virtual
2329 compileMovHint(node);
2331 switch (node->variableAccessData()->flushFormat()) {
2332 case FlushedDouble: {
2333 SpeculateDoubleOperand value(this, node->child1());
2334 m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->local()));
2336 // Indicate that it's no longer necessary to retrieve the value of
2337 // this bytecode variable from registers or other locations in the stack,
2338 // but that it is stored as a double.
2339 recordSetLocal(node->local(), ValueSource(DoubleInJSStack));
2343 case FlushedInt32: {
2344 SpeculateInt32Operand value(this, node->child1());
2345 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->local()));
2347 recordSetLocal(node->local(), ValueSource(Int32InJSStack));
2351 case FlushedInt52: {
2352 SpeculateInt52Operand value(this, node->child1());
2353 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->local()));
2355 recordSetLocal(node->local(), ValueSource(Int52InJSStack));
2360 SpeculateCellOperand cell(this, node->child1());
2361 GPRReg cellGPR = cell.gpr();
2362 m_jit.store64(cellGPR, JITCompiler::addressFor(node->local()));
2364 recordSetLocal(node->local(), ValueSource(CellInJSStack));
2368 case FlushedBoolean: {
2369 SpeculateBooleanOperand boolean(this, node->child1());
2370 m_jit.store64(boolean.gpr(), JITCompiler::addressFor(node->local()));
2372 recordSetLocal(node->local(), ValueSource(BooleanInJSStack));
2376 case FlushedJSValue: {
2377 JSValueOperand value(this, node->child1());
2378 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->local()));
2381 recordSetLocal(node->local(), ValueSource(ValueInJSStack));
2383 // If we're storing an arguments object that has been optimized away,
2384 // our variable event stream for OSR exit now reflects the optimized
2385 // value (JSValue()). On the slow path, we want an arguments object
2386 // instead. We add an additional move hint to show OSR exit that it
2387 // needs to reconstruct the arguments object.
2388 if (node->child1()->op() == PhantomArguments)
2389 compileMovHint(node);
2394 RELEASE_ASSERT_NOT_REACHED();
2402 // This is a no-op; it just marks the fact that the argument is being used.
2403 // But it may be profitable to use this as a hook to run speculation checks
2404 // on arguments, thereby allowing us to trivially eliminate such checks if
2405 // the argument is not used.
2411 if (isInt32Constant(node->child1().node())) {
2412 SpeculateInt32Operand op2(this, node->child2());
2413 GPRTemporary result(this, Reuse, op2);
2415 bitOp(op, valueOfInt32Constant(node->child1().node()), op2.gpr(), result.gpr());
2417 int32Result(result.gpr(), node);
2418 } else if (isInt32Constant(node->child2().node())) {
2419 SpeculateInt32Operand op1(this, node->child1());
2420 GPRTemporary result(this, Reuse, op1);
2422 bitOp(op, valueOfInt32Constant(node->child2().node()), op1.gpr(), result.gpr());
2424 int32Result(result.gpr(), node);
2426 SpeculateInt32Operand op1(this, node->child1());
2427 SpeculateInt32Operand op2(this, node->child2());
2428 GPRTemporary result(this, Reuse, op1, op2);
2430 GPRReg reg1 = op1.gpr();
2431 GPRReg reg2 = op2.gpr();
2432 bitOp(op, reg1, reg2, result.gpr());
2434 int32Result(result.gpr(), node);
2441 if (isInt32Constant(node->child2().node())) {
2442 SpeculateInt32Operand op1(this, node->child1());
2443 GPRTemporary result(this, Reuse, op1);
2445 shiftOp(op, op1.gpr(), valueOfInt32Constant(node->child2().node()) & 0x1f, result.gpr());
2447 int32Result(result.gpr(), node);
2449 // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
2450 SpeculateInt32Operand op1(this, node->child1());
2451 SpeculateInt32Operand op2(this, node->child2());
2452 GPRTemporary result(this, Reuse, op1);
2454 GPRReg reg1 = op1.gpr();
2455 GPRReg reg2 = op2.gpr();
2456 shiftOp(op, reg1, reg2, result.gpr());
2458 int32Result(result.gpr(), node);
2462 case UInt32ToNumber: {
2463 compileUInt32ToNumber(node);
2467 case DoubleAsInt32: {
2468 compileDoubleAsInt32(node);
2472 case ValueToInt32: {
2473 compileValueToInt32(node);
2477 case Int32ToDouble: {
2478 compileInt32ToDouble(node);
2482 case Int52ToValue: {
2483 JSValueOperand operand(this, node->child1());
2484 GPRTemporary result(this, Reuse, operand);
2485 m_jit.move(operand.gpr(), result.gpr());
2486 jsValueResult(result.gpr(), node);
2490 case Int52ToDouble: {
2491 SpeculateDoubleOperand operand(this, node->child1());
2492 FPRTemporary result(this, operand);
2493 m_jit.moveDouble(operand.fpr(), result.fpr());
2494 doubleResult(result.fpr(), node);
2504 compileMakeRope(node);
2508 compileArithSub(node);
2512 compileArithNegate(node);
2516 compileArithMul(node);
2520 compileArithIMul(node);
2524 compileArithDiv(node);
2529 compileArithMod(node);
2534 switch (node->child1().useKind()) {
2536 SpeculateStrictInt32Operand op1(this, node->child1());
2537 GPRTemporary result(this);
2538 GPRTemporary scratch(this);
2540 m_jit.move(op1.gpr(), result.gpr());
2541 m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
2542 m_jit.add32(scratch.gpr(), result.gpr());
2543 m_jit.xor32(scratch.gpr(), result.gpr());
2544 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2545 int32Result(result.gpr(), node);
2550 SpeculateDoubleOperand op1(this, node->child1());
2551 FPRTemporary result(this);
2553 m_jit.absDouble(op1.fpr(), result.fpr());
2554 doubleResult(result.fpr(), node);
2559 RELEASE_ASSERT_NOT_REACHED();
2567 switch (node->binaryUseKind()) {
2569 SpeculateStrictInt32Operand op1(this, node->child1());
2570 SpeculateStrictInt32Operand op2(this, node->child2());
2571 GPRTemporary result(this, Reuse, op1);
2573 MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
2574 m_jit.move(op2.gpr(), result.gpr());
2575 if (op1.gpr() != result.gpr()) {
2576 MacroAssembler::Jump done = m_jit.jump();
2577 op1Less.link(&m_jit);
2578 m_jit.move(op1.gpr(), result.gpr());
2581 op1Less.link(&m_jit);
2583 int32Result(result.gpr(), node);
2588 SpeculateDoubleOperand op1(this, node->child1());
2589 SpeculateDoubleOperand op2(this, node->child2());
2590 FPRTemporary result(this, op1);
2592 FPRReg op1FPR = op1.fpr();
2593 FPRReg op2FPR = op2.fpr();
2594 FPRReg resultFPR = result.fpr();
2596 MacroAssembler::JumpList done;
2598 MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR);
2600 // op2 is either the lesser one or one of them is NaN.
2601 MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR);
2603 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2604 // op1 + op2 and putting it into result.
2605 m_jit.addDouble(op1FPR, op2FPR, resultFPR);
2606 done.append(m_jit.jump());
2608 op2Less.link(&m_jit);
2609 m_jit.moveDouble(op2FPR, resultFPR);
2611 if (op1FPR != resultFPR) {
2612 done.append(m_jit.jump());
2614 op1Less.link(&m_jit);
2615 m_jit.moveDouble(op1FPR, resultFPR);
2617 op1Less.link(&m_jit);
2621 doubleResult(resultFPR, node);
2626 RELEASE_ASSERT_NOT_REACHED();
2633 SpeculateDoubleOperand op1(this, node->child1());
2634 FPRTemporary result(this, op1);
2636 m_jit.sqrtDouble(op1.fpr(), result.fpr());
2638 doubleResult(result.fpr(), node);
2643 compileLogicalNot(node);
2647 if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
2652 if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
2656 case CompareGreater:
2657 if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
2661 case CompareGreaterEq:
2662 if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
2666 case CompareEqConstant:
2667 ASSERT(isNullConstant(node->child2().node()));
2668 if (nonSpeculativeCompareNull(node, node->child1()))
2673 if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
2677 case CompareStrictEqConstant:
2678 if (compileStrictEqForConstant(node, node->child1(), valueOfJSConstant(node->child2().node())))
2682 case CompareStrictEq:
2683 if (compileStrictEq(node))
2687 case StringCharCodeAt: {
2688 compileGetCharCodeAt(node);
2692 case StringCharAt: {
2693 // Relies on StringCharAt node having same basic layout as GetByVal
2694 compileGetByValOnString(node);
2698 case StringFromCharCode: {
2699 compileFromCharCode(node);
2709 case ArrayifyToStructure: {
2715 switch (node->arrayMode().type()) {
2716 case Array::SelectUsingPredictions:
2717 case Array::ForceExit:
2718 RELEASE_ASSERT_NOT_REACHED();
2719 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2721 case Array::Generic: {
2722 JSValueOperand base(this, node->child1());
2723 JSValueOperand property(this, node->child2());
2724 GPRReg baseGPR = base.gpr();
2725 GPRReg propertyGPR = property.gpr();
2728 GPRResult result(this);
2729 callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR);
2731 jsValueResult(result.gpr(), node);
2735 case Array::Contiguous: {
2736 if (node->arrayMode().isInBounds()) {
2737 SpeculateStrictInt32Operand property(this, node->child2());
2738 StorageOperand storage(this, node->child3());
2740 GPRReg propertyReg = property.gpr();
2741 GPRReg storageReg = storage.gpr();
2746 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2748 GPRTemporary result(this);
2749 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
2750 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
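// Empty slots in an Int32/Contiguous butterfly hold the empty JSValue, which encodes as all
// zero bits, so a zero load means we read a hole and must OSR exit.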
2751 jsValueResult(result.gpr(), node, node->arrayMode().type() == Array::Int32 ? DataFormatJSInt32 : DataFormatJS);
2755 SpeculateCellOperand base(this, node->child1());
2756 SpeculateStrictInt32Operand property(this, node->child2());
2757 StorageOperand storage(this, node->child3());
2759 GPRReg baseReg = base.gpr();
2760 GPRReg propertyReg = property.gpr();
2761 GPRReg storageReg = storage.gpr();
2766 GPRTemporary result(this);
2767 GPRReg resultReg = result.gpr();
2769 MacroAssembler::JumpList slowCases;
2771 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2773 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2774 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2776 addSlowPathGenerator(
2778 slowCases, this, operationGetByValArrayInt,
2779 result.gpr(), baseReg, propertyReg));
2781 jsValueResult(resultReg, node);
2785 case Array::Double: {
2786 if (node->arrayMode().isInBounds()) {
2787 if (node->arrayMode().isSaneChain()) {
2788 JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
2789 ASSERT(globalObject->arrayPrototypeChainIsSane());
2791 speculationWatchpoint(),
2792 globalObject->arrayPrototype()->structure()->transitionWatchpointSet());
2794 speculationWatchpoint(),
2795 globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2798 SpeculateStrictInt32Operand property(this, node->child2());
2799 StorageOperand storage(this, node->child3());
2801 GPRReg propertyReg = property.gpr();
2802 GPRReg storageReg = storage.gpr();
2807 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2809 FPRTemporary result(this);
2810 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr());
2811 if (!node->arrayMode().isSaneChain())
2812 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr()));
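// Double butterflies mark holes with NaN (stores into them speculate SpecFullRealNumber, so
// no genuine NaN is ever written); an unordered self-comparison therefore detects a hole load.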
2813 doubleResult(result.fpr(), node);
2817 SpeculateCellOperand base(this, node->child1());
2818 SpeculateStrictInt32Operand property(this, node->child2());
2819 StorageOperand storage(this, node->child3());
2821 GPRReg baseReg = base.gpr();
2822 GPRReg propertyReg = property.gpr();
2823 GPRReg storageReg = storage.gpr();
2828 GPRTemporary result(this);
2829 FPRTemporary temp(this);
2830 GPRReg resultReg = result.gpr();
2831 FPRReg tempReg = temp.fpr();
2833 MacroAssembler::JumpList slowCases;
2835 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2837 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
2838 slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg));
2839 boxDouble(tempReg, resultReg);
2841 addSlowPathGenerator(
2843 slowCases, this, operationGetByValArrayInt,
2844 result.gpr(), baseReg, propertyReg));
2846 jsValueResult(resultReg, node);
2850 case Array::ArrayStorage:
2851 case Array::SlowPutArrayStorage: {
2852 if (node->arrayMode().isInBounds()) {
2853 SpeculateStrictInt32Operand property(this, node->child2());
2854 StorageOperand storage(this, node->child3());
2856 GPRReg propertyReg = property.gpr();
2857 GPRReg storageReg = storage.gpr();
2862 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2864 GPRTemporary result(this);
2865 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
2866 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2868 jsValueResult(result.gpr(), node);
2872 SpeculateCellOperand base(this, node->child1());
2873 SpeculateStrictInt32Operand property(this, node->child2());
2874 StorageOperand storage(this, node->child3());
2876 GPRReg baseReg = base.gpr();
2877 GPRReg propertyReg = property.gpr();
2878 GPRReg storageReg = storage.gpr();
2883 GPRTemporary result(this);
2884 GPRReg resultReg = result.gpr();
2886 MacroAssembler::JumpList slowCases;
2888 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2890 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
2891 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2893 addSlowPathGenerator(
2895 slowCases, this, operationGetByValArrayInt,
2896 result.gpr(), baseReg, propertyReg));
2898 jsValueResult(resultReg, node);
2902 compileGetByValOnString(node);
2904 case Array::Arguments:
2905 compileGetByValOnArguments(node);
2908 TypedArrayType type = node->arrayMode().typedArrayType();
2910 compileGetByValOnIntTypedArray(node, type);
2912 compileGetByValOnFloatTypedArray(node, type);
2918 case PutByValAlias: {
2919 Edge child1 = m_jit.graph().varArgChild(node, 0);
2920 Edge child2 = m_jit.graph().varArgChild(node, 1);
2921 Edge child3 = m_jit.graph().varArgChild(node, 2);
2922 Edge child4 = m_jit.graph().varArgChild(node, 3);
2924 ArrayMode arrayMode = node->arrayMode().modeForPut();
2925 bool alreadyHandled = false;
2927 switch (arrayMode.type()) {
2928 case Array::SelectUsingPredictions:
2929 case Array::ForceExit:
2930 RELEASE_ASSERT_NOT_REACHED();
2931 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2932 alreadyHandled = true;
2934 case Array::Generic: {
2935 RELEASE_ASSERT(node->op() == PutByVal);
2937 JSValueOperand arg1(this, child1);
2938 JSValueOperand arg2(this, child2);
2939 JSValueOperand arg3(this, child3);
2940 GPRReg arg1GPR = arg1.gpr();
2941 GPRReg arg2GPR = arg2.gpr();
2942 GPRReg arg3GPR = arg3.gpr();
2945 callOperation(m_jit.strictModeFor(node->codeOrigin) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
2948 alreadyHandled = true;
2958 // FIXME: the base may not be necessary for some array access modes. But we have to
2959 // keep it alive to this point, so it's likely to be in a register anyway. Likely
2960 // no harm in locking it here.
2961 SpeculateCellOperand base(this, child1);
2962 SpeculateStrictInt32Operand property(this, child2);
2964 GPRReg baseReg = base.gpr();
2965 GPRReg propertyReg = property.gpr();
2967 switch (arrayMode.type()) {
2969 case Array::Contiguous: {
2970 JSValueOperand value(this, child3, ManualOperandSpeculation);
2972 GPRReg valueReg = value.gpr();
2977 if (arrayMode.type() == Array::Int32) {
2979 JSValueRegs(valueReg), child3, SpecInt32,
2981 MacroAssembler::Below, valueReg, GPRInfo::tagTypeNumberRegister));
2984 if (arrayMode.type() == Array::Contiguous && Heap::isWriteBarrierEnabled()) {
2985 GPRTemporary scratch(this);
2986 writeBarrier(baseReg, value.gpr(), child3, WriteBarrierForPropertyAccess, scratch.gpr());
2989 StorageOperand storage(this, child4);
2990 GPRReg storageReg = storage.gpr();
2992 if (node->op() == PutByValAlias) {
2993 // Store the value to the array.
2994 GPRReg propertyReg = property.gpr();
2995 GPRReg valueReg = value.gpr();
2996 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
3002 GPRTemporary temporary;
3003 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
3005 MacroAssembler::Jump slowCase;
3007 if (arrayMode.isInBounds()) {
3009 StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
3010 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
3012 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
3014 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
3016 if (!arrayMode.isOutOfBounds())
3017 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
3019 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
3020 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
3022 inBounds.link(&m_jit);
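// To summarize the out-of-bounds-capable path: a store below publicLength is plainly in
// bounds; a store at or beyond vectorLength goes to the slow path (or OSR-exits if
// out-of-bounds stores were not anticipated); anything in between grows publicLength to
// index + 1 before the store below.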
3025 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
3032 if (arrayMode.isOutOfBounds()) {
3033 addSlowPathGenerator(
3036 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
3037 NoResult, baseReg, propertyReg, valueReg));
3040 noResult(node, UseChildrenCalledExplicitly);
3044 case Array::Double: {
3045 compileDoublePutByVal(node, base, property);
3049 case Array::ArrayStorage:
3050 case Array::SlowPutArrayStorage: {
3051 JSValueOperand value(this, child3);
3053 GPRReg valueReg = value.gpr();
3058 if (Heap::isWriteBarrierEnabled()) {
3059 GPRTemporary scratch(this);
3060 writeBarrier(baseReg, value.gpr(), child3, WriteBarrierForPropertyAccess, scratch.gpr());
3063 StorageOperand storage(this, child4);
3064 GPRReg storageReg = storage.gpr();
3066 if (node->op() == PutByValAlias) {
3067 // Store the value to the array.
3068 GPRReg propertyReg = property.gpr();
3069 GPRReg valueReg = value.gpr();
3070 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3076 GPRTemporary temporary;
3077 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
3079 MacroAssembler::JumpList slowCases;
3081 MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
3082 if (!arrayMode.isOutOfBounds())
3083 speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
3085 slowCases.append(beyondArrayBounds);
3087 // Check if we're writing to a hole; if so increment m_numValuesInVector.
3088 if (arrayMode.isInBounds()) {
3090 StoreToHole, JSValueRegs(), 0,
3091 m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
3093 MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3094 if (arrayMode.isSlowPut()) {
3095 // This is sort of strange. If we wanted to optimize this code path, we would invert
3096 // the above branch. But it's simply not worth it since this only happens if we're
3097 // already having a bad time.
3098 slowCases.append(m_jit.jump());
3100 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
3102 // If we're writing to a hole we might be growing the array; if so, the length needs updating too.
3103 MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
3104 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
3105 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
3107 lengthDoesNotNeedUpdate.link(&m_jit);
3109 notHoleValue.link(&m_jit);
3112 // Store the value to the array.
3113 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3120 if (!slowCases.empty()) {
3121 addSlowPathGenerator(
3124 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
3125 NoResult, baseReg, propertyReg, valueReg));
3128 noResult(node, UseChildrenCalledExplicitly);
3132 case Array::Arguments: {
3133 JSValueOperand value(this, child3);
3134 GPRTemporary scratch(this);
3135 GPRTemporary scratch2(this);
3137 GPRReg valueReg = value.gpr();
3138 GPRReg scratchReg = scratch.gpr();
3139 GPRReg scratch2Reg = scratch2.gpr();
3144 // Two really lame checks.
3146 Uncountable, JSValueSource(), 0,
3148 MacroAssembler::AboveOrEqual, propertyReg,
3149 MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_numArguments))));
3151 Uncountable, JSValueSource(), 0,
3152 m_jit.branchTestPtr(
3153 MacroAssembler::NonZero,
3154 MacroAssembler::Address(
3155 baseReg, OBJECT_OFFSETOF(Arguments, m_slowArguments))));
3157 m_jit.move(propertyReg, scratch2Reg);
3158 m_jit.signExtend32ToPtr(scratch2Reg, scratch2Reg);
3160 MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_registers)),
3165 MacroAssembler::BaseIndex(
3166 scratchReg, scratch2Reg, MacroAssembler::TimesEight,
3167 CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register)));
3174 TypedArrayType type = arrayMode.typedArrayType();
3176 compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type);
3178 compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type);
3185 if (compileRegExpExec(node))
3187 if (!node->adjustedRefCount()) {
3188 SpeculateCellOperand base(this, node->child1());
3189 SpeculateCellOperand argument(this, node->child2());
3190 GPRReg baseGPR = base.gpr();
3191 GPRReg argumentGPR = argument.gpr();
3194 GPRResult result(this);
3195 callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
3197 // Must use jsValueResult because otherwise we screw up register
3198 // allocation, which thinks that this node has a result.
3199 jsValueResult(result.gpr(), node);
3203 SpeculateCellOperand base(this, node->child1());
3204 SpeculateCellOperand argument(this, node->child2());
3205 GPRReg baseGPR = base.gpr();
3206 GPRReg argumentGPR = argument.gpr();
3209 GPRResult result(this);
3210 callOperation(operationRegExpExec, result.gpr(), baseGPR, argumentGPR);
3212 jsValueResult(result.gpr(), node);
3217 SpeculateCellOperand base(this, node->child1());
3218 SpeculateCellOperand argument(this, node->child2());
3219 GPRReg baseGPR = base.gpr();
3220 GPRReg argumentGPR = argument.gpr();
3223 GPRResult result(this);
3224 callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
3226 // If we add a DataFormatBool, we should use it here.
3227 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
3228 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
3233 ASSERT(node->arrayMode().isJSArray());
3235 SpeculateCellOperand base(this, node->child1());
3236 GPRTemporary storageLength(this);
3238 GPRReg baseGPR = base.gpr();
3239 GPRReg storageLengthGPR = storageLength.gpr();
3241 StorageOperand storage(this, node->child3());
3242 GPRReg storageGPR = storage.gpr();
3244 switch (node->arrayMode().type()) {
3246 case Array::Contiguous: {
3247 JSValueOperand value(this, node->child2(), ManualOperandSpeculation);
3248 GPRReg valueGPR = value.gpr();
3250 if (node->arrayMode().type() == Array::Int32) {
3252 JSValueRegs(valueGPR), node->child2(), SpecInt32,
3254 MacroAssembler::Below, valueGPR, GPRInfo::tagTypeNumberRegister));
3257 if (node->arrayMode().type() != Array::Int32 && Heap::isWriteBarrierEnabled()) {
3258 GPRTemporary scratch(this);
3259 writeBarrier(baseGPR, valueGPR, node->child2(), WriteBarrierForPropertyAccess, scratch.gpr(), storageLengthGPR);
3262 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
3263 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
3264 m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3265 m_jit.add32(TrustedImm32(1), storageLengthGPR);
3266 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3267 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
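// push() returns the new length; or'ing in tagTypeNumberRegister boxes the 32-bit length as
// an int32 JSValue for the result.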
3269 addSlowPathGenerator(
3271 slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
3272 valueGPR, baseGPR));
3274 jsValueResult(storageLengthGPR, node);
3278 case Array::Double: {
3279 SpeculateDoubleOperand value(this, node->child2());
3280 FPRReg valueFPR = value.fpr();
3283 JSValueRegs(), node->child2(), SpecFullRealNumber,
3284 m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR));
3286 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
3287 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
3288 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3289 m_jit.add32(TrustedImm32(1), storageLengthGPR);
3290 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3291 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
3293 addSlowPathGenerator(
3295 slowPath, this, operationArrayPushDouble, NoResult, storageLengthGPR,
3296 valueFPR, baseGPR));
3298 jsValueResult(storageLengthGPR, node);
3302 case Array::ArrayStorage: {
3303 JSValueOperand value(this, node->child2());
3304 GPRReg valueGPR = value.gpr();
3306 if (Heap::isWriteBarrierEnabled()) {
3307 GPRTemporary scratch(this);
3308 writeBarrier(baseGPR, valueGPR, node->child2(), WriteBarrierForPropertyAccess, scratch.gpr(), storageLengthGPR);
3311 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
3313 // Refuse to handle bizarre lengths.
3314 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
3316 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
3318 m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3320 m_jit.add32(TrustedImm32(1), storageLengthGPR);
3321 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
3322 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
3323 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
3325 addSlowPathGenerator(
3327 slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
3328 valueGPR, baseGPR));
3330 jsValueResult(storageLengthGPR, node);
3342 ASSERT(node->arrayMode().isJSArray());
3344 SpeculateCellOperand base(this, node->child1());
3345 StorageOperand storage(this, node->child2());
3346 GPRTemporary value(this);
3347 GPRTemporary storageLength(this);
3348 FPRTemporary temp(this); // This is kind of lame, since we don't always need it. I'm relying on the fact that we don't have FPR pressure, especially in code that uses pop().
3350 GPRReg baseGPR = base.gpr();
3351 GPRReg storageGPR = storage.gpr();
3352 GPRReg valueGPR = value.gpr();
3353 GPRReg storageLengthGPR = storageLength.gpr();
3354 FPRReg tempFPR = temp.fpr();
3356 switch (node->arrayMode().type()) {
3359 case Array::Contiguous: {
3361 MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
3362 MacroAssembler::Jump undefinedCase =
3363 m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
3364 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
3366 storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3367 MacroAssembler::Jump slowCase;
3368 if (node->arrayMode().type() == Array::Double) {
3370 MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
3372 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3373 // length and the new length.
3375 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3376 slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR);
3377 boxDouble(tempFPR, valueGPR);
3380 MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
3382 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3383 // length and the new length.
3385 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3386 slowCase = m_jit.branchTest64(MacroAssembler::Zero, valueGPR);
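// A zero load means we popped a hole; the slow path redoes the pop generically and, as the
// operation name suggests, first restores the length we already decremented and stored above.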
3389 addSlowPathGenerator(
3391 undefinedCase, this,
3392 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
3393 addSlowPathGenerator(
3395 slowCase, this, operationArrayPopAndRecoverLength, valueGPR, baseGPR));
3397 // We can't know for sure that the result is an int because of the slow paths. :-/
3398 jsValueResult(valueGPR, node);
3402 case Array::ArrayStorage: {
3403 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
3405 JITCompiler::Jump undefinedCase =
3406 m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
3408 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
3410 JITCompiler::JumpList slowCases;
3411 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
3413 m_jit.load64(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
3414 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, valueGPR));
3416 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
3418 m_jit.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3419 m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
3421 addSlowPathGenerator(
3423 undefinedCase, this,
3424 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
3426 addSlowPathGenerator(
3428 slowCases, this, operationArrayPop, valueGPR, baseGPR));
3430 jsValueResult(valueGPR, node);
3442 jump(node->takenBlock());
3456 ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1);
3457 ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
3458 ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
3460 #if DFG_ENABLE(SUCCESS_STATS)
3461 static SamplingCounter counter("SpeculativeJIT");
3462 m_jit.emitCount(counter);
3465 // Return the result in returnValueGPR.
3466 JSValueOperand op1(this, node->child1());
3467 m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);
3469 // Grab the return address.
3470 m_jit.emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, GPRInfo::regT1);
3471 // Restore our caller's "r".
3472 m_jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, GPRInfo::callFrameRegister);
3474 m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
3482 case ThrowReferenceError: {
3483 // We expect that throw statements are rare and are intended to exit the code block
3484 // anyway, so we just OSR back to the old JIT for now.
3485 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
3490 RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3491 JSValueOperand op1(this, node->child1());
3492 GPRTemporary result(this, Reuse, op1);
3494 GPRReg op1GPR = op1.gpr();
3495 GPRReg resultGPR = result.gpr();
3499 if (!(m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean)))
3500 m_jit.move(op1GPR, resultGPR);
3502 MacroAssembler::Jump alreadyPrimitive = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
3503 MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()));
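// Non-cells (numbers, booleans, null, undefined) are already primitive, and among cells only
// strings are treated as primitive here; any other cell falls through to the slow call to
// operationToPrimitive below.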
3505 alreadyPrimitive.link(&m_jit);
3506 m_jit.move(op1GPR, resultGPR);
3508 addSlowPathGenerator(
3509 slowPathCall(notPrimitive, this, operationToPrimitive, resultGPR, op1GPR));
3512 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
3517 if (node->child1().useKind() == UntypedUse) {
3518 JSValueOperand op1(this, node->child1());
3519 GPRReg op1GPR = op1.gpr();
3521 GPRResult result(this);
3522 GPRReg resultGPR = result.gpr();
3526 JITCompiler::Jump done;
3527 if (node->child1()->prediction() & SpecString) {
3528 JITCompiler::Jump slowPath1 = m_jit.branchTest64(
3529 JITCompiler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
3530 JITCompiler::Jump slowPath2 = m_jit.branchPtr(
3531 JITCompiler::NotEqual,
3532 JITCompiler::Address(op1GPR, JSCell::structureOffset()),
3533 TrustedImmPtr(m_jit.vm()->stringStructure.get()));
3534 m_jit.move(op1GPR, resultGPR);
3535 done = m_jit.jump();
3536 slowPath1.link(&m_jit);
3537 slowPath2.link(&m_jit);
3539 callOperation(operationToString, resultGPR, op1GPR);
3542 cellResult(resultGPR, node);
3546 compileToStringOnCell(node);
3550 case NewStringObject: {
3551 compileNewStringObject(node);
3556 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin);
3557 if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) {
3559 speculationWatchpoint(),
3560 globalObject->havingABadTimeWatchpoint());
3562 Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
3563 RELEASE_ASSERT(structure->indexingType() == node->indexingType());
3565 hasUndecided(structure->indexingType())
3566 || hasInt32(structure->indexingType())
3567 || hasDouble(structure->indexingType())
3568 || hasContiguous(structure->indexingType()));
3570 unsigned numElements = node->numChildren();
3572 GPRTemporary result(this);
3573 GPRTemporary storage(this);
3575 GPRReg resultGPR = result.gpr();
3576 GPRReg storageGPR = storage.gpr();
3578 emitAllocateJSArray(resultGPR, structure, storageGPR, numElements);
3580 // At this point, one way or another, resultGPR and storageGPR have pointers to
3581 // the JSArray and the Butterfly, respectively.
3583 ASSERT(!hasUndecided(structure->indexingType()) || !node->numChildren());
3585 for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) {
3586 Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx];
3587 switch (node->indexingType()) {
3588 case ALL_BLANK_INDEXING_TYPES:
3589 case ALL_UNDECIDED_INDEXING_TYPES:
3592 case ALL_DOUBLE_INDEXING_TYPES: {
3593 SpeculateDoubleOperand operand(this, use);
3594 FPRReg opFPR = operand.fpr();
3596 JSValueRegs(), use, SpecFullRealNumber,
3598 MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
3599 m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx));
3602 case ALL_INT32_INDEXING_TYPES:
3603 case ALL_CONTIGUOUS_INDEXING_TYPES: {
3604 JSValueOperand operand(this, use, ManualOperandSpeculation);
3605 GPRReg opGPR = operand.gpr();
3606 if (hasInt32(node->indexingType())) {
3608 JSValueRegs(opGPR), use, SpecInt32,
3610 MacroAssembler::Below, opGPR, GPRInfo::tagTypeNumberRegister));
3612 m_jit.store64(opGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx));
3621 // Yuck, we should *really* have a way of also returning the storageGPR. But
3622 // that's the least of what's wrong with this code. We really shouldn't be
3623 // allocating the array after having computed - and probably spilled to the
3624 // stack - all of the things that will go into the array. The solution to that
3625 // bigger problem will also likely fix the redundancy in reloading the storage
3626 // pointer that we currently have.
3628 cellResult(resultGPR, node);
3632 if (!node->numChildren()) {
3634 GPRResult result(this);
3635 callOperation(operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
3636 cellResult(result.gpr(), node);
3640 size_t scratchSize = sizeof(EncodedJSValue) * node->numChildren();