/*
 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
 * Copyright (C) 2011 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "DFGSpeculativeJIT.h"

#include "ArrayPrototype.h"
#include "CallFrameShuffler.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGOperations.h"
#include "DFGSlowPathGenerator.h"
#include "DirectArguments.h"
#include "GetterSetter.h"
#include "HasOwnPropertyCache.h"
#include "HashMapImpl.h"
#include "JSEnvironmentRecord.h"
#include "JSPropertyNameEnumerator.h"
#include "ObjectPrototype.h"
#include "JSCInlines.h"
#include "SetupVarargsFrame.h"
#include "TypeProfilerLog.h"

namespace JSC { namespace DFG {
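
// Fill the JSValue for this edge into a tag/payload GPR pair, the unboxed form used by the
// JSVALUE32_64 value representation that this file targets. (The FPR parameter is not used
// for doubles yet; see the FIXME below.)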
bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr)
    // FIXME: For double we could fill with a FPR.

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (edge->hasConstant()) {
            payloadGPR = allocate();
            JSValue value = edge->asJSValue();
            m_jit.move(Imm32(value.tag()), tagGPR);
            m_jit.move(Imm32(value.payload()), payloadGPR);
            m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant);
            m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant);
            info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS);

            DataFormat spillFormat = info.spillFormat();
            ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage);
            payloadGPR = allocate();
            switch (spillFormat) {
                m_jit.move(TrustedImm32(JSValue::Int32Tag), tagGPR);
                spillFormat = DataFormatJSInt32; // This will be used as the new register format.
                m_jit.move(TrustedImm32(JSValue::CellTag), tagGPR);
                spillFormat = DataFormatJSCell; // This will be used as the new register format.
            case DataFormatBoolean:
                m_jit.move(TrustedImm32(JSValue::BooleanTag), tagGPR);
                spillFormat = DataFormatJSBoolean; // This will be used as the new register format.
                m_jit.load32(JITCompiler::tagFor(virtualRegister), tagGPR);
            m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR);
            m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled);
            m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled);
            info.fillJSValue(*m_stream, tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat);

    case DataFormatInt32:
    case DataFormatBoolean: {
        GPRReg gpr = info.gpr();
        // If the register has already been locked we need to take a copy.
        if (m_gprs.isLocked(gpr)) {
            payloadGPR = allocate();
            m_jit.move(gpr, payloadGPR);

        int32_t tag = JSValue::EmptyValueTag;
        DataFormat fillFormat = DataFormatJS;
        switch (info.registerFormat()) {
        case DataFormatInt32:
            tag = JSValue::Int32Tag;
            fillFormat = DataFormatJSInt32;
            tag = JSValue::CellTag;
            fillFormat = DataFormatJSCell;
        case DataFormatBoolean:
            tag = JSValue::BooleanTag;
            fillFormat = DataFormatJSBoolean;
            RELEASE_ASSERT_NOT_REACHED();

        m_jit.move(TrustedImm32(tag), tagGPR);
        m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
        info.fillJSValue(*m_stream, tagGPR, payloadGPR, fillFormat);

    case DataFormatJSDouble:
    case DataFormatJSInt32:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        tagGPR = info.tagGPR();
        payloadGPR = info.payloadGPR();
        m_gprs.lock(payloadGPR);

    case DataFormatStorage:
    case DataFormatDouble:
        // this type currently never occurs
        RELEASE_ASSERT_NOT_REACHED();

        RELEASE_ASSERT_NOT_REACHED();
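
// The JSValueRegs overload just unpacks the tag and payload registers and forwards to the
// full GPR-based implementation below.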
void SpeculativeJIT::cachedGetById(CodeOrigin origin, JSValueRegs base, JSValueRegs result, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode mode, AccessType type)
    cachedGetById(origin, base.tagGPR(), base.payloadGPR(), result.tagGPR(), result.payloadGPR(), identifierNumber, slowPathTarget, mode, type);

void SpeculativeJIT::cachedGetById(
    CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR,
    unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode, AccessType type)
    // This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens
    // in the case of GetByIdFlush, which has a relatively expensive register allocation story already so we probably don't need to
    // trip over one move instruction.
    if (basePayloadGPR == resultTagGPR) {
        RELEASE_ASSERT(basePayloadGPR != resultPayloadGPR);

        if (baseTagGPROrNone == resultPayloadGPR) {
            m_jit.swap(basePayloadGPR, baseTagGPROrNone);
            baseTagGPROrNone = resultTagGPR;
            m_jit.move(basePayloadGPR, resultPayloadGPR);
            basePayloadGPR = resultPayloadGPR;

    RegisterSet usedRegisters = this->usedRegisters();
    if (spillMode == DontSpill) {
        // We've already flushed registers to the stack, we don't need to spill these.
        usedRegisters.set(JSValueRegs(baseTagGPROrNone, basePayloadGPR), false);
        usedRegisters.set(JSValueRegs(resultTagGPR, resultPayloadGPR), false);

    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    JITGetByIdGenerator gen(
        m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
        JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(resultTagGPR, resultPayloadGPR), type);

    gen.generateFastPath(m_jit);

    JITCompiler::JumpList slowCases;
    if (slowPathTarget.isSet())
        slowCases.append(slowPathTarget);
    slowCases.append(gen.slowPathJump());

    J_JITOperation_ESsiJI getByIdFunction;
    if (type == AccessType::Get)
        getByIdFunction = operationGetByIdOptimize;
        getByIdFunction = operationTryGetByIdOptimize;

    std::unique_ptr<SlowPathGenerator> slowPath;
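    // A baseTagGPROrNone of InvalidGPRReg means the base is statically known to be a cell, so the
    // slow path call passes a constant CellTag in place of a tag register.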
    if (baseTagGPROrNone == InvalidGPRReg) {
        slowPath = slowPathCall(
            slowCases, this, getByIdFunction,
            JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(),
            static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
            identifierUID(identifierNumber));
        slowPath = slowPathCall(
            slowCases, this, getByIdFunction,
            JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), JSValueRegs(baseTagGPROrNone, basePayloadGPR), identifierUID(identifierNumber));

    m_jit.addGetById(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

void SpeculativeJIT::cachedGetByIdWithThis(
    CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg thisTagGPR, GPRReg thisPayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR,
    unsigned identifierNumber, JITCompiler::JumpList slowPathTarget)
    RegisterSet usedRegisters = this->usedRegisters();

    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    JITGetByIdWithThisGenerator gen(
        m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
        JSValueRegs(resultTagGPR, resultPayloadGPR), JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(thisTagGPR, thisPayloadGPR), AccessType::GetWithThis);

    gen.generateFastPath(m_jit);

    JITCompiler::JumpList slowCases;
    if (!slowPathTarget.empty())
        slowCases.append(slowPathTarget);
    slowCases.append(gen.slowPathJump());

    std::unique_ptr<SlowPathGenerator> slowPath;
    if (baseTagGPROrNone == InvalidGPRReg && thisTagGPR == InvalidGPRReg) {
        slowPath = slowPathCall(
            slowCases, this, operationGetByIdWithThisOptimize,
            JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(),
            static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
            static_cast<int32_t>(JSValue::CellTag), thisPayloadGPR,
            identifierUID(identifierNumber));
        ASSERT(baseTagGPROrNone != InvalidGPRReg);
        ASSERT(thisTagGPR != InvalidGPRReg);

        slowPath = slowPathCall(
            slowCases, this, operationGetByIdWithThisOptimize,
            JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(thisTagGPR, thisPayloadGPR), identifierUID(identifierNumber));

    m_jit.addGetByIdWithThis(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
    RegisterSet usedRegisters = this->usedRegisters();
    if (spillMode == DontSpill) {
        // We've already flushed registers to the stack, we don't need to spill these.
        usedRegisters.set(basePayloadGPR, false);
        usedRegisters.set(JSValueRegs(valueTagGPR, valuePayloadGPR), false);

    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    JITPutByIdGenerator gen(
        m_jit.codeBlock(), codeOrigin, callSite, usedRegisters,
        JSValueRegs::payloadOnly(basePayloadGPR), JSValueRegs(valueTagGPR, valuePayloadGPR),
        scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);

    gen.generateFastPath(m_jit);

    JITCompiler::JumpList slowCases;
    if (slowPathTarget.isSet())
        slowCases.append(slowPathTarget);
    slowCases.append(gen.slowPathJump());

    auto slowPath = slowPathCall(
        slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), JSValueRegs(valueTagGPR, valuePayloadGPR),
        basePayloadGPR, identifierUID(identifierNumber));

    m_jit.addPutById(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand)
    JSValueOperand arg(this, operand, ManualOperandSpeculation);
    GPRReg argTagGPR = arg.tagGPR();
    GPRReg argPayloadGPR = arg.payloadGPR();

    GPRTemporary resultPayload(this, Reuse, arg, PayloadWord);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    JITCompiler::Jump notCell;
    JITCompiler::Jump notMasqueradesAsUndefined;
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(arg.jsValueRegs());

        m_jit.move(TrustedImm32(0), resultPayloadGPR);
        notMasqueradesAsUndefined = m_jit.jump();
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);

        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(arg.jsValueRegs());

        JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
            JITCompiler::NonZero,
            JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
            JITCompiler::TrustedImm32(MasqueradesAsUndefined));

        m_jit.move(TrustedImm32(0), resultPayloadGPR);
        notMasqueradesAsUndefined = m_jit.jump();

        isMasqueradesAsUndefined.link(&m_jit);
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultPayloadGPR);
        m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR);

    if (!isKnownCell(operand.node())) {
        JITCompiler::Jump done = m_jit.jump();

        notCell.link(&m_jit);
        // null or undefined?
        COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
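        // Setting the low tag bit maps UndefinedTag onto NullTag, so a single compare against
        // NullTag accepts both null and undefined.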
        m_jit.or32(TrustedImm32(1), argTagGPR, resultPayloadGPR);
        m_jit.compare32(JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR);

    notMasqueradesAsUndefined.link(&m_jit);

    booleanResult(resultPayloadGPR, m_currentNode);

void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode)
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
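
    // If the taken block is the fall-through block, the sense of the test is inverted (via the
    // local 'invert' flag) and the successors are swapped so that the common case falls through.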
    if (taken == nextBlock()) {
        BasicBlock* tmp = taken;

    JSValueOperand arg(this, operand, ManualOperandSpeculation);
    GPRReg argTagGPR = arg.tagGPR();
    GPRReg argPayloadGPR = arg.payloadGPR();

    GPRTemporary result(this, Reuse, arg, TagWord);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell;

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(arg.jsValueRegs());

        jump(invert ? taken : notTaken, ForceJump);
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);

        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(arg.jsValueRegs());

        branchTest8(JITCompiler::Zero,
            JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
            JITCompiler::TrustedImm32(MasqueradesAsUndefined),
            invert ? taken : notTaken);

        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultGPR);
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);

    if (!isKnownCell(operand.node())) {
        jump(notTaken, ForceJump);

        notCell.link(&m_jit);
        // null or undefined?
        COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
        m_jit.or32(TrustedImm32(1), argTagGPR, resultGPR);
        branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken);

void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        cond = JITCompiler::invert(cond);
        callResultCondition = JITCompiler::Zero;
        BasicBlock* tmp = taken;

    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    JSValueRegs arg1Regs = arg1.jsValueRegs();
    JSValueRegs arg2Regs = arg2.jsValueRegs();
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        callOperation(helperFunction, resultGPR, arg1Regs, arg2Regs);
        m_jit.exceptionCheck();

        branchTest32(callResultCondition, resultGPR, taken);
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));

        branch32(cond, arg1PayloadGPR, arg2PayloadGPR, taken);

        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            jump(notTaken, ForceJump);

            slowPath.link(&m_jit);

            silentSpillAllRegisters(resultGPR);
            callOperation(helperFunction, resultGPR, arg1Regs, arg2Regs);
            m_jit.exceptionCheck();
            silentFillAllRegisters(resultGPR);

            branchTest32(callResultCondition, resultGPR, taken);

    m_indexInBlock = m_block->size() - 1;
    m_currentNode = branchNode;
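
// Slow path generator used by the non-peephole compare below: it calls the C++ comparison
// helper and masks the returned value down to a 0/1 boolean payload.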
template<typename JumpType>
class CompareAndBoxBooleanSlowPathGenerator
    : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
    CompareAndBoxBooleanSlowPathGenerator(
        JumpType from, SpeculativeJIT* jit,
        S_JITOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload,
        GPRReg arg2Tag, GPRReg arg2Payload)
        : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
            from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result)
        , m_arg1Payload(arg1Payload)
        , m_arg2Payload(arg2Payload)

    virtual void generateInternal(SpeculativeJIT* jit)
            this->m_function, this->m_result, JSValueRegs(m_arg1Tag, m_arg1Payload), JSValueRegs(m_arg2Tag, m_arg2Payload)));
        jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);

    GPRReg m_arg1Payload;
    GPRReg m_arg2Payload;

void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        GPRFlushedCallResult result(this);
        GPRReg resultPayloadGPR = result.gpr();

        callOperation(helperFunction, resultPayloadGPR, arg1.jsValueRegs(), arg2.jsValueRegs());
        m_jit.exceptionCheck();

        booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
        GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
        GPRReg resultPayloadGPR = resultPayload.gpr();

        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));

        m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);

        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
                slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR,
                arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR));

        booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);

void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        BasicBlock* tmp = taken;

    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();
    JSValueRegs arg1Regs = arg1.jsValueRegs();
    JSValueRegs arg2Regs = arg2.jsValueRegs();

    GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR, invert ? notTaken : taken);

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1Regs, arg2Regs);
        m_jit.exceptionCheck();
        silentFillAllRegisters(resultPayloadGPR);

        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
        // FIXME: Add fast paths for twoCells, number etc.

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEq, resultPayloadGPR, arg1Regs, arg2Regs);
        m_jit.exceptionCheck();
        silentFillAllRegisters(resultPayloadGPR);

        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);

void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();
    JSValueRegs arg1Regs = arg1.jsValueRegs();
    JSValueRegs arg2Regs = arg2.jsValueRegs();

    GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        // FIXME: this should flush registers instead of silent spill/fill.
        JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR);

        m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR);
        JITCompiler::Jump done = m_jit.jump();

        notEqualCase.link(&m_jit);

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1Regs, arg2Regs);
        m_jit.exceptionCheck();
        silentFillAllRegisters(resultPayloadGPR);

        m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);

        // FIXME: Add fast paths.

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEq, resultPayloadGPR, arg1Regs, arg2Regs);
        silentFillAllRegisters(resultPayloadGPR);
        m_jit.exceptionCheck();

        m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);

    booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);

void SpeculativeJIT::compileMiscStrictEq(Node* node)
    JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
    JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
    GPRTemporary result(this);

    if (node->child1().useKind() == MiscUse)
        speculateMisc(node->child1(), op1.jsValueRegs());
    if (node->child2().useKind() == MiscUse)
        speculateMisc(node->child2(), op2.jsValueRegs());

    m_jit.move(TrustedImm32(0), result.gpr());
    JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR());
    m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr());
    notEqual.link(&m_jit);
    booleanResult(result.gpr(), node);
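
// Emits code for the DFG's call-like nodes: plain calls, tail calls, the varargs and
// forward-varargs variants, constructors, eval, and the direct (known-executable) forms.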
void SpeculativeJIT::emitCall(Node* node)
    CallLinkInfo::CallType callType;
    bool isVarargs = false;
    bool isForwardVarargs = false;
    bool isDirect = false;
    bool isEmulatedTail = false;
    switch (node->op()) {
        callType = CallLinkInfo::Call;
        callType = CallLinkInfo::TailCall;
    case TailCallInlinedCaller:
        callType = CallLinkInfo::Call;
        isEmulatedTail = true;
        callType = CallLinkInfo::Construct;
        callType = CallLinkInfo::CallVarargs;
    case TailCallVarargs:
        callType = CallLinkInfo::TailCallVarargs;
    case TailCallVarargsInlinedCaller:
        callType = CallLinkInfo::CallVarargs;
        isEmulatedTail = true;
    case ConstructVarargs:
        callType = CallLinkInfo::ConstructVarargs;
    case CallForwardVarargs:
        callType = CallLinkInfo::CallVarargs;
        isForwardVarargs = true;
    case TailCallForwardVarargs:
        callType = CallLinkInfo::TailCallVarargs;
        isForwardVarargs = true;
    case TailCallForwardVarargsInlinedCaller:
        callType = CallLinkInfo::CallVarargs;
        isEmulatedTail = true;
        isForwardVarargs = true;
    case ConstructForwardVarargs:
        callType = CallLinkInfo::ConstructVarargs;
        isForwardVarargs = true;
        callType = CallLinkInfo::DirectCall;
    case DirectConstruct:
        callType = CallLinkInfo::DirectConstruct;
        callType = CallLinkInfo::DirectTailCall;
    case DirectTailCallInlinedCaller:
        callType = CallLinkInfo::DirectCall;
        isEmulatedTail = true;
        DFG_CRASH(m_jit.graph(), node, "bad node type");

    Edge calleeEdge = m_jit.graph().child(node, 0);
    GPRReg calleeTagGPR = InvalidGPRReg;
    GPRReg calleePayloadGPR = InvalidGPRReg;
    CallFrameShuffleData shuffleData;

    ExecutableBase* executable = nullptr;
    FunctionExecutable* functionExecutable = nullptr;
        executable = node->castOperand<ExecutableBase*>();
        functionExecutable = jsDynamicCast<FunctionExecutable*>(*m_jit.vm(), executable);

    unsigned numPassedArgs = 0;
    unsigned numAllocatedArgs = 0;

    // Gotta load the arguments somehow. Varargs is trickier.
    if (isVarargs || isForwardVarargs) {
        RELEASE_ASSERT(!isDirect);
        CallVarargsData* data = node->callVarargsData();

        unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;
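
        // Forward varargs re-forwards the arguments of the (possibly inlined) caller frame
        // directly; the non-forwarding path instead sizes and fills the callee frame through
        // the operationSizeFrameForVarargs / operationSetupVarargsFrame slow paths.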
        if (isForwardVarargs) {
            scratchGPR1 = JITCompiler::selectScratchGPR();
            scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
            scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);

            m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
            JITCompiler::JumpList slowCase;
            InlineCallFrame* inlineCallFrame;
                inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame;
                inlineCallFrame = node->origin.semantic.inlineCallFrame;
            // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
            emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
            JITCompiler::Jump done = m_jit.jump();
            slowCase.link(&m_jit);
            callOperation(operationThrowStackOverflowForVarargs);
            m_jit.exceptionCheck();
            m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
            GPRReg argumentsPayloadGPR;
            GPRReg argumentsTagGPR;

            auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
                if (reservedGPR != InvalidGPRReg)
                JSValueOperand arguments(this, node->child3());
                argumentsTagGPR = arguments.tagGPR();
                argumentsPayloadGPR = arguments.payloadGPR();
                if (reservedGPR != InvalidGPRReg)

                scratchGPR1 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, reservedGPR);
                scratchGPR2 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, reservedGPR);
                scratchGPR3 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, scratchGPR2, reservedGPR);

            loadArgumentsGPR(InvalidGPRReg);

            DFG_ASSERT(m_jit.graph(), node, isFlushed());

            // Right now, arguments is in argumentsTagGPR/argumentsPayloadGPR and the register file is
            callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, JSValueRegs(argumentsTagGPR, argumentsPayloadGPR), numUsedStackSlots, data->firstVarArgOffset);
            m_jit.exceptionCheck();

            // Now we have the argument count of the callee frame, but we've lost the arguments operand.
            // Reconstruct the arguments operand while preserving the callee frame.
            loadArgumentsGPR(GPRInfo::returnValueGPR);
            m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
            emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
            m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);

            callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, JSValueRegs(argumentsTagGPR, argumentsPayloadGPR), data->firstVarArgOffset, GPRInfo::returnValueGPR);
            m_jit.exceptionCheck();
            m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, JITCompiler::stackPointerRegister);

        DFG_ASSERT(m_jit.graph(), node, isFlushed());

        // We don't need the arguments array anymore.

        // Now set up the "this" argument.
        JSValueOperand thisArgument(this, node->child2());
        GPRReg thisArgumentTagGPR = thisArgument.tagGPR();
        GPRReg thisArgumentPayloadGPR = thisArgument.payloadGPR();

        m_jit.store32(thisArgumentTagGPR, JITCompiler::calleeArgumentTagSlot(0));
        m_jit.store32(thisArgumentPayloadGPR, JITCompiler::calleeArgumentPayloadSlot(0));
        // The call instruction's first child is either the function (normal call) or the
        // receiver (method call). subsequent children are the arguments.
        numPassedArgs = node->numChildren() - 1;
        numAllocatedArgs = numPassedArgs;

        if (functionExecutable) {
            // Allocate more args if this would let us avoid arity checks. This is throttled by
            // CallLinkInfo's limit. It's probably good to throttle it - if the callee wants a
            // ginormous amount of argument space then it's better for them to do it so that when we
            // make calls to other things, we don't waste space.
            unsigned desiredNumAllocatedArgs = static_cast<unsigned>(functionExecutable->parameterCount()) + 1;
            if (desiredNumAllocatedArgs <= Options::maximumDirectCallStackSize()) {
                numAllocatedArgs = std::max(numAllocatedArgs, desiredNumAllocatedArgs);

                // Whoever converts to DirectCall should do this adjustment. It's too late for us to
                // do this adjustment now since we will have already emitted code that relied on the
                // value of m_parameterSlots.
                    Graph::parameterSlotsForArgCount(numAllocatedArgs)
                    <= m_jit.graph().m_parameterSlots);

            JSValueOperand callee(this, calleeEdge);
            calleeTagGPR = callee.tagGPR();
            calleePayloadGPR = callee.payloadGPR();
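
            // For tail calls the current frame is shuffled into the callee frame in place, so the
            // arguments are described as ValueRecoveries rather than being stored eagerly.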
            shuffleData.numLocals = m_jit.graph().frameRegisterCount();
            shuffleData.callee = ValueRecovery::inPair(calleeTagGPR, calleePayloadGPR);
            shuffleData.args.resize(numAllocatedArgs);
            shuffleData.numPassedArgs = numPassedArgs;

            for (unsigned i = 0; i < numPassedArgs; ++i) {
                Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
                GenerationInfo& info = generationInfo(argEdge.node());
                shuffleData.args[i] = info.recovery(argEdge->virtualRegister());

            for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                shuffleData.args[i] = ValueRecovery::constant(jsUndefined());
            m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(CallFrameSlot::argumentCount));

            for (unsigned i = 0; i < numPassedArgs; i++) {
                Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
                JSValueOperand arg(this, argEdge);
                GPRReg argTagGPR = arg.tagGPR();
                GPRReg argPayloadGPR = arg.payloadGPR();

                m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i));
                m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i));

            for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                m_jit.storeTrustedValue(jsUndefined(), JITCompiler::calleeArgumentSlot(i));

    if (!isTail || isVarargs || isForwardVarargs) {
        JSValueOperand callee(this, calleeEdge);
        calleeTagGPR = callee.tagGPR();
        calleePayloadGPR = callee.payloadGPR();
        m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(CallFrameSlot::callee));
        m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(CallFrameSlot::callee));

    JITCompiler::DataLabelPtr targetToCheck;
    JITCompiler::JumpList slowPath;

    CodeOrigin staticOrigin = node->origin.semantic;
    ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame->getCallerSkippingTailCalls());
    ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame && staticOrigin.inlineCallFrame->getCallerSkippingTailCalls()));
    CodeOrigin dynamicOrigin =
        isEmulatedTail ? *staticOrigin.inlineCallFrame->getCallerSkippingTailCalls() : staticOrigin;
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(dynamicOrigin, m_stream->size());

    CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo();
    info->setUpCall(callType, node->origin.semantic, calleePayloadGPR);

    auto setResultAndResetStack = [&] () {
        GPRFlushedCallResult resultPayload(this);
        GPRFlushedCallResult2 resultTag(this);
        GPRReg resultPayloadGPR = resultPayload.gpr();
        GPRReg resultTagGPR = resultTag.gpr();

        m_jit.setupResults(resultPayloadGPR, resultTagGPR);

        jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly);
        // After the calls are done, we need to reestablish our stack
        // pointer. We rely on this for varargs calls, calls with arity
        // mismatch (the callframe is slided) and tail calls.
        m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);

    if (node->op() == CallEval) {
        // We want to call operationCallEval but we don't want to overwrite the parameter area in
        // which we have created a prototypical eval call frame. This means that we have to
        // subtract stack to make room for the call. Lucky for us, at this point we have the whole
        // register file to ourselves.

        m_jit.emitStoreCallSiteIndex(callSite);
        m_jit.addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), JITCompiler::stackPointerRegister, GPRInfo::regT0);
        m_jit.storePtr(GPRInfo::callFrameRegister, JITCompiler::Address(GPRInfo::regT0, CallFrame::callerFrameOffset()));

        // Now we need to make room for:
        // - The caller frame and PC of a call to operationCallEval.
        // - Potentially two arguments on the stack.
        unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2;
        requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
        m_jit.subPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister);
        m_jit.setupArgumentsWithExecState(GPRInfo::regT0);
        prepareForExternalCall();
        m_jit.appendCall(operationCallEval);
        m_jit.exceptionCheck();
        JITCompiler::Jump done = m_jit.branch32(JITCompiler::NotEqual, GPRInfo::returnValueGPR2, TrustedImm32(JSValue::EmptyValueTag));

        // This is the part where we meant to make a normal call. Oops.
        m_jit.addPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister);
        m_jit.load32(JITCompiler::calleeFrameSlot(CallFrameSlot::callee).withOffset(PayloadOffset), GPRInfo::regT0);
        m_jit.load32(JITCompiler::calleeFrameSlot(CallFrameSlot::callee).withOffset(TagOffset), GPRInfo::regT1);
        m_jit.emitDumbVirtualCall(info);

        setResultAndResetStack();
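
    // Direct calls know the callee executable at compile time, so the slow path links the call
    // through operationLinkDirectCall rather than going through the generic virtual call path.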
        info->setExecutableDuringCompilation(executable);
        info->setMaxNumArguments(numAllocatedArgs);

            RELEASE_ASSERT(node->op() == DirectTailCall);

            JITCompiler::PatchableJump patchableJump = m_jit.patchableJump();
            JITCompiler::Label mainPath = m_jit.label();

            m_jit.emitStoreCallSiteIndex(callSite);

            info->setFrameShuffleData(shuffleData);
            CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();

            JITCompiler::Call call = m_jit.nearTailCall();

            JITCompiler::Label slowPath = m_jit.label();
            patchableJump.m_jump.linkTo(slowPath, &m_jit);

            silentSpillAllRegisters(InvalidGPRReg);
            callOperation(operationLinkDirectCall, info, calleePayloadGPR);
            silentFillAllRegisters(InvalidGPRReg);
            m_jit.exceptionCheck();
            m_jit.jump().linkTo(mainPath, &m_jit);

            m_jit.addJSDirectTailCall(patchableJump, call, slowPath, info);

        JITCompiler::Label mainPath = m_jit.label();

        m_jit.emitStoreCallSiteIndex(callSite);

        JITCompiler::Call call = m_jit.nearCall();
        JITCompiler::Jump done = m_jit.jump();

        JITCompiler::Label slowPath = m_jit.label();
        m_jit.pop(JITCompiler::selectScratchGPR(calleePayloadGPR));

        callOperation(operationLinkDirectCall, info, calleePayloadGPR);
        m_jit.exceptionCheck();
        m_jit.jump().linkTo(mainPath, &m_jit);

        setResultAndResetStack();

        m_jit.addJSDirectCall(call, slowPath, info);

    m_jit.emitStoreCallSiteIndex(callSite);

    slowPath.append(m_jit.branchIfNotCell(JSValueRegs(calleeTagGPR, calleePayloadGPR)));
    slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));

    if (node->op() == TailCall) {
        info->setFrameShuffleData(shuffleData);
        CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
        m_jit.emitRestoreCalleeSaves();
        m_jit.prepareForTailCallSlow();

    JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall();

    JITCompiler::Jump done = m_jit.jump();

    slowPath.link(&m_jit);

    if (node->op() == TailCall) {
        CallFrameShuffler callFrameShuffler(m_jit, shuffleData);
        callFrameShuffler.setCalleeJSValueRegs(JSValueRegs(
            GPRInfo::regT1, GPRInfo::regT0));
        callFrameShuffler.prepareForSlowPath();
        // Callee payload needs to be in regT0, tag in regT1
        if (calleeTagGPR == GPRInfo::regT0) {
            if (calleePayloadGPR == GPRInfo::regT1)
                m_jit.swap(GPRInfo::regT1, GPRInfo::regT0);
                m_jit.move(calleeTagGPR, GPRInfo::regT1);
                m_jit.move(calleePayloadGPR, GPRInfo::regT0);
            m_jit.move(calleePayloadGPR, GPRInfo::regT0);
            m_jit.move(calleeTagGPR, GPRInfo::regT1);

        m_jit.emitRestoreCalleeSaves();

    m_jit.move(TrustedImmPtr(info), GPRInfo::regT2);
    JITCompiler::Call slowCall = m_jit.nearCall();

        m_jit.abortWithReason(JITDidReturnFromTailCall);
        setResultAndResetStack();

    m_jit.addJSCall(fastCall, slowCall, targetToCheck, info);
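
// Fills the edge's value into a GPR as an unboxed int32, emitting a speculation check against
// Int32Tag whenever the abstract value does not already prove that the edge is an int32.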
template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32Only));

    m_interpreter.filter(value, SpecInt32Only);
    if (value.isClear()) {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        returnFormat = DataFormatInt32;

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (edge->hasConstant()) {
            ASSERT(edge->isInt32Constant());
            GPRReg gpr = allocate();
            m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            info.fillInt32(*m_stream, gpr);
            returnFormat = DataFormatInt32;

        DataFormat spillFormat = info.spillFormat();

        ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);

        // If we know this was spilled as an integer we can fill without checking.
        if (type & ~SpecInt32Only)
            speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));

        GPRReg gpr = allocate();
        m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        info.fillInt32(*m_stream, gpr);
        returnFormat = DataFormatInt32;

    case DataFormatJSInt32:
    case DataFormatJS: {
        // Check the value is an integer.
        GPRReg tagGPR = info.tagGPR();
        GPRReg payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
        if (type & ~SpecInt32Only)
            speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
        m_gprs.unlock(tagGPR);
        m_gprs.release(tagGPR);
        m_gprs.release(payloadGPR);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
        info.fillInt32(*m_stream, payloadGPR);
        // If !strict we're done, return.
        returnFormat = DataFormatInt32;

    case DataFormatInt32: {
        GPRReg gpr = info.gpr();
        returnFormat = DataFormatInt32;

    case DataFormatCell:
    case DataFormatBoolean:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatJSBoolean:
    case DataFormatDouble:
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;

GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
    return fillSpeculateInt32Internal<false>(edge, returnFormat);

GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
    DataFormat mustBeDataFormatInt32;
    GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
    ASSERT(mustBeDataFormatInt32 == DataFormatInt32);
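
// Fills the edge's value into an FPR. On 32-bit, double values live unboxed in FPRs, so the only
// register formats expected here are DataFormatNone (constant or spilled double) and
// DataFormatDouble, as the asserts below enforce.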
FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
    ASSERT(isDouble(edge.useKind()));
    ASSERT(edge->hasDoubleResult());
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    if (info.registerFormat() == DataFormatNone) {
        if (edge->hasConstant()) {
            RELEASE_ASSERT(edge->isNumberConstant());
            FPRReg fpr = fprAllocate();
            m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(edge.node())), fpr);
            m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
            info.fillDouble(*m_stream, fpr);

        RELEASE_ASSERT(info.spillFormat() == DataFormatDouble);
        FPRReg fpr = fprAllocate();
        m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
        info.fillDouble(*m_stream, fpr);

    RELEASE_ASSERT(info.registerFormat() == DataFormatDouble);
    FPRReg fpr = info.fpr();
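
// Fills the edge's payload into a GPR as a cell pointer, checking the tag against CellTag when
// the abstract value does not already prove that the edge is a cell.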
GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));

    m_interpreter.filter(value, SpecCell);
    if (value.isClear()) {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (edge->hasConstant()) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            m_jit.move(TrustedImmPtr(edge->constant()), gpr);
            info.fillCell(*m_stream, gpr);

        ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell);
        if (type & ~SpecCell) {
                JSValueSource(JITCompiler::addressFor(virtualRegister)),
                    MacroAssembler::NotEqual,
                    JITCompiler::tagFor(virtualRegister),
                    TrustedImm32(JSValue::CellTag)));

        GPRReg gpr = allocate();
        m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        info.fillCell(*m_stream, gpr);

    case DataFormatCell: {
        GPRReg gpr = info.gpr();

    case DataFormatJSCell:
    case DataFormatJS: {
        GPRReg tagGPR = info.tagGPR();
        GPRReg payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
        if (type & ~SpecCell) {
                BadType, JSValueRegs(tagGPR, payloadGPR), edge,
                m_jit.branchIfNotCell(info.jsValueRegs()));

        m_gprs.unlock(tagGPR);
        m_gprs.release(tagGPR);
        m_gprs.release(payloadGPR);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell);
        info.fillCell(*m_stream, payloadGPR);

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatJSBoolean:
    case DataFormatBoolean:
    case DataFormatDouble:
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    RELEASE_ASSERT_NOT_REACHED();
    return InvalidGPRReg;

GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean));

    m_interpreter.filter(value, SpecBoolean);
    if (value.isClear()) {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (edge->hasConstant()) {
            JSValue jsValue = edge->asJSValue();
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr);
            info.fillBoolean(*m_stream, gpr);

        ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatBoolean);

        if (type & ~SpecBoolean)
            speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));

        GPRReg gpr = allocate();
        m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        info.fillBoolean(*m_stream, gpr);

    case DataFormatBoolean: {
        GPRReg gpr = info.gpr();

    case DataFormatJSBoolean:
    case DataFormatJS: {
        GPRReg tagGPR = info.tagGPR();
        GPRReg payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
        if (type & ~SpecBoolean)
            speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag)));

        m_gprs.unlock(tagGPR);
        m_gprs.release(tagGPR);
        m_gprs.release(payloadGPR);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderBoolean);
        info.fillBoolean(*m_stream, payloadGPR);

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatCell:
    case DataFormatDouble:
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    RELEASE_ASSERT_NOT_REACHED();
    return InvalidGPRReg;
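
// Compares two known-cell operands as objects. If the masquerades-as-undefined watchpoint is no
// longer valid, each operand must also be checked for the MasqueradesAsUndefined type flag before
// a raw pointer comparison is meaningful.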
void SpeculativeJIT::compileObjectEquality(Node* node)
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

    GPRTemporary resultPayload(this, Reuse, op2);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
    m_jit.move(TrustedImm32(1), resultPayloadGPR);
    MacroAssembler::Jump done = m_jit.jump();
    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultPayloadGPR);

    booleanResult(resultPayloadGPR, node);

void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
    SpeculateCellOperand op1(this, objectChild);
    JSValueOperand op2(this, otherChild);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.payloadGPR();

    DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));

    GPRTemporary resultPayload(this, Reuse, op1);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    MacroAssembler::Jump op2CellJump = m_jit.branchIfCell(op2.jsValueRegs());

    m_jit.move(TrustedImm32(0), resultPayloadGPR);
    MacroAssembler::Jump op2NotCellJump = m_jit.jump();

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because we are doing strict equality.
    op2CellJump.link(&m_jit);
    m_jit.compare32(MacroAssembler::Equal, op1GPR, op2GPR, resultPayloadGPR);

    op2NotCellJump.link(&m_jit);
    booleanResult(resultPayloadGPR, m_currentNode);

void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    SpeculateCellOperand op1(this, objectChild);
    JSValueOperand op2(this, otherChild);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.payloadGPR();

    DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));

    branch32(MacroAssembler::NotEqual, op2.tagGPR(), TrustedImm32(JSValue::CellTag), notTaken);

    if (taken == nextBlock()) {
        branch32(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
        branch32(MacroAssembler::Equal, op1GPR, op2GPR, taken);

void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
    SpeculateCellOperand op1(this, leftChild);
    JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
    GPRTemporary result(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2TagGPR = op2.tagGPR();
    GPRReg op2PayloadGPR = op2.payloadGPR();
    GPRReg resultGPR = result.gpr();

    bool masqueradesAsUndefinedWatchpointValid =
        masqueradesAsUndefinedWatchpointIsStillValid();

    if (masqueradesAsUndefinedWatchpointValid) {
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

    // It seems that most of the time when programs do a == b where b may be either null/undefined
    // or an object, b is usually an object. Balance the branches to make that case fast.
    MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs());

    // We know that within this branch, rightChild must be a cell.
    if (masqueradesAsUndefinedWatchpointValid) {
            JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR));
            JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR));
        speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild,
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because both left and right are pointers to objects that have no special equality
    MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2PayloadGPR);
    MacroAssembler::Jump trueCase = m_jit.jump();

    rightNotCell.link(&m_jit);

    // We know that within this branch, rightChild must not be a cell. Check if that is enough to
    // prove that it is either null or undefined.
    if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
        m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR);

            JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther,
                MacroAssembler::NotEqual, resultGPR,
                MacroAssembler::TrustedImm32(JSValue::NullTag)));

    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();
    trueCase.link(&m_jit);
    m_jit.move(TrustedImm32(1), resultGPR);

    booleanResult(resultGPR, m_currentNode);

void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    SpeculateCellOperand op1(this, leftChild);
    JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
    GPRTemporary result(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2TagGPR = op2.tagGPR();
    GPRReg op2PayloadGPR = op2.payloadGPR();
    GPRReg resultGPR = result.gpr();

    bool masqueradesAsUndefinedWatchpointValid =
        masqueradesAsUndefinedWatchpointIsStillValid();

    if (masqueradesAsUndefinedWatchpointValid) {
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

    // It seems that most of the time when programs do a == b where b may be either null/undefined
    // or an object, b is usually an object. Balance the branches to make that case fast.
    MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs());

    // We know that within this branch, rightChild must be a cell.
    if (masqueradesAsUndefinedWatchpointValid) {
            JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
            m_jit.branchIfNotObject(op2PayloadGPR));
            JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
            m_jit.branchIfNotObject(op2PayloadGPR));
        speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild,
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because both left and right are pointers to objects that have no special equality
    branch32(MacroAssembler::Equal, op1GPR, op2PayloadGPR, taken);

    // We know that within this branch, rightChild must not be a cell. Check if that is enough to
    // prove that it is either null or undefined.
    if (!needsTypeCheck(rightChild, SpecCell | SpecOther))
        rightNotCell.link(&m_jit);

        jump(notTaken, ForceJump);

        rightNotCell.link(&m_jit);
        m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR);
            JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther,
                MacroAssembler::NotEqual, resultGPR,
                MacroAssembler::TrustedImm32(JSValue::NullTag)));
1667 void SpeculativeJIT::compileSymbolUntypedEquality(Node* node, Edge symbolEdge, Edge untypedEdge)
1669 SpeculateCellOperand symbol(this, symbolEdge);
1670 JSValueOperand untyped(this, untypedEdge);
1672 GPRReg symbolGPR = symbol.gpr();
1673 GPRReg untypedGPR = untyped.payloadGPR();
1675 speculateSymbol(symbolEdge, symbolGPR);
1677 GPRTemporary resultPayload(this, Reuse, symbol);
1678 GPRReg resultPayloadGPR = resultPayload.gpr();
1680 MacroAssembler::Jump untypedCellJump = m_jit.branchIfCell(untyped.jsValueRegs());
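// A non-cell can never be equal to a symbol under strict equality, so the result is false.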
1682 m_jit.move(TrustedImm32(0), resultPayloadGPR);
1683 MacroAssembler::Jump untypedNotCellJump = m_jit.jump();
1685 // At this point we know that we can perform a straight-forward equality comparison on pointer
1686 // values because we are doing strict equality.
1687 untypedCellJump.link(&m_jit);
1688 m_jit.compare32(MacroAssembler::Equal, symbolGPR, untypedGPR, resultPayloadGPR);
1690 untypedNotCellJump.link(&m_jit);
1691 booleanResult(resultPayloadGPR, node);
1694 void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
1696 SpeculateInt32Operand op1(this, node->child1());
1697 SpeculateInt32Operand op2(this, node->child2());
1698 GPRTemporary resultPayload(this);
1700 m_jit.compare32(condition, op1.gpr(), op2.gpr(), resultPayload.gpr());
1702 // If we add a DataFormatBool, we should use it here.
1703 booleanResult(resultPayload.gpr(), node);
1706 void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
1708 SpeculateDoubleOperand op1(this, node->child1());
1709 SpeculateDoubleOperand op2(this, node->child2());
1710 GPRTemporary resultPayload(this);
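// Preload true into the result; if the double comparison fails we fall through and overwrite it with false.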
1712 m_jit.move(TrustedImm32(1), resultPayload.gpr());
1713 MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
1714 m_jit.move(TrustedImm32(0), resultPayload.gpr());
1715 trueCase.link(&m_jit);
1717 booleanResult(resultPayload.gpr(), node);
1720 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
1722 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1723 GPRTemporary resultPayload(this);
1724 GPRReg valueTagGPR = value.tagGPR();
1725 GPRReg valuePayloadGPR = value.payloadGPR();
1726 GPRReg resultPayloadGPR = resultPayload.gpr();
1727 GPRTemporary structure;
1728 GPRReg structureGPR = InvalidGPRReg;
1730 bool masqueradesAsUndefinedWatchpointValid =
1731 masqueradesAsUndefinedWatchpointIsStillValid();
1733 if (!masqueradesAsUndefinedWatchpointValid) {
1734 // The masquerades as undefined case will use the structure register, so allocate it here.
1735 // Do this at the top of the function to avoid branching around a register allocation.
1736 GPRTemporary realStructure(this);
1737 structure.adopt(realStructure);
1738 structureGPR = structure.gpr();
1741 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs());
1742 if (masqueradesAsUndefinedWatchpointValid) {
1744 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
1745 m_jit.branchIfNotObject(valuePayloadGPR));
1748 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
1749 m_jit.branchIfNotObject(valuePayloadGPR));
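// Note: an object flagged MasqueradesAsUndefined behaves like undefined when observed from its own
// global object, so if the flag is set and the structure's global object matches ours we must OSR exit
// rather than treat the cell as an ordinary (truthy) object.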
1751 MacroAssembler::Jump isNotMasqueradesAsUndefined =
1753 MacroAssembler::Zero,
1754 MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()),
1755 MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
1757 m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), structureGPR);
1758 speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse,
1760 MacroAssembler::Equal,
1761 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
1762 TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
1764 isNotMasqueradesAsUndefined.link(&m_jit);
1766 m_jit.move(TrustedImm32(0), resultPayloadGPR);
1767 MacroAssembler::Jump done = m_jit.jump();
1769 notCell.link(&m_jit);
1771 COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
1772 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
1773 m_jit.or32(TrustedImm32(1), valueTagGPR, resultPayloadGPR);
1775 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther,
1777 MacroAssembler::NotEqual,
1779 TrustedImm32(JSValue::NullTag)));
1781 m_jit.move(TrustedImm32(1), resultPayloadGPR);
1785 booleanResult(resultPayloadGPR, m_currentNode);
1788 void SpeculativeJIT::compileLogicalNot(Node* node)
1790 switch (node->child1().useKind()) {
1792 case KnownBooleanUse: {
1793 SpeculateBooleanOperand value(this, node->child1());
1794 GPRTemporary result(this, Reuse, value);
1795 m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr());
1796 booleanResult(result.gpr(), node);
1800 case ObjectOrOtherUse: {
1801 compileObjectOrOtherLogicalNot(node->child1());
1806 SpeculateInt32Operand value(this, node->child1());
1807 GPRTemporary resultPayload(this, Reuse, value);
1808 m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), resultPayload.gpr());
1809 booleanResult(resultPayload.gpr(), node);
1813 case DoubleRepUse: {
1814 SpeculateDoubleOperand value(this, node->child1());
1815 FPRTemporary scratch(this);
1816 GPRTemporary resultPayload(this);
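// Preload false; only when branchDoubleNonZero does not take the branch (the value is zero)
// do we overwrite the result with true.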
1817 m_jit.move(TrustedImm32(0), resultPayload.gpr());
1818 MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
1819 m_jit.move(TrustedImm32(1), resultPayload.gpr());
1820 nonZero.link(&m_jit);
1821 booleanResult(resultPayload.gpr(), node);
1826 JSValueOperand arg1(this, node->child1());
1827 GPRTemporary result(this);
1828 GPRTemporary temp(this);
1829 FPRTemporary valueFPR(this);
1830 FPRTemporary tempFPR(this);
1832 GPRReg resultGPR = result.gpr();
1834 bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
1835 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
1836 bool negateResult = true;
1837 m_jit.emitConvertValueToBoolean(arg1.jsValueRegs(), resultGPR, temp.gpr(), valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult);
1838 booleanResult(resultGPR, node);
1842 return compileStringZeroLength(node);
1844 case StringOrOtherUse:
1845 return compileLogicalNotStringOrOther(node);
1848 RELEASE_ASSERT_NOT_REACHED();
1853 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
1855 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1856 GPRTemporary scratch(this);
1857 GPRReg valueTagGPR = value.tagGPR();
1858 GPRReg valuePayloadGPR = value.payloadGPR();
1859 GPRReg scratchGPR = scratch.gpr();
1861 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs());
1862 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1864 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
1865 m_jit.branchIfNotObject(valuePayloadGPR));
1868 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
1869 m_jit.branchIfNotObject(valuePayloadGPR));
1871 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
1873 MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()),
1874 TrustedImm32(MasqueradesAsUndefined));
1876 m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), scratchGPR);
1877 speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse,
1879 MacroAssembler::Equal,
1880 MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
1881 TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
1883 isNotMasqueradesAsUndefined.link(&m_jit);
1885 jump(taken, ForceJump);
1887 notCell.link(&m_jit);
1889 COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
1890 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
1891 m_jit.or32(TrustedImm32(1), valueTagGPR, scratchGPR);
1893 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther,
1894 m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
1899 noResult(m_currentNode);
1902 void SpeculativeJIT::emitBranch(Node* node)
1904 BasicBlock* taken = node->branchData()->taken.block;
1905 BasicBlock* notTaken = node->branchData()->notTaken.block;
1907 switch (node->child1().useKind()) {
1909 case KnownBooleanUse: {
1910 SpeculateBooleanOperand value(this, node->child1());
1911 MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
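// If the taken block is the fall-through block, invert the condition and swap the targets
// so the common case falls straight through.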
1913 if (taken == nextBlock()) {
1914 condition = MacroAssembler::Zero;
1915 BasicBlock* tmp = taken;
1920 branchTest32(condition, value.gpr(), TrustedImm32(1), taken);
1927 case ObjectOrOtherUse: {
1928 emitObjectOrOtherBranch(node->child1(), taken, notTaken);
1933 emitStringBranch(node->child1(), taken, notTaken);
1937 case StringOrOtherUse: {
1938 emitStringOrOtherBranch(node->child1(), taken, notTaken);
1944 if (node->child1().useKind() == Int32Use) {
1945 bool invert = false;
1947 if (taken == nextBlock()) {
1949 BasicBlock* tmp = taken;
1954 SpeculateInt32Operand value(this, node->child1());
1955 branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
1957 SpeculateDoubleOperand value(this, node->child1());
1958 FPRTemporary scratch(this);
1959 branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
1969 JSValueOperand value(this, node->child1());
1970 FPRTemporary valueFPR(this);
1971 FPRTemporary tempFPR(this);
1972 GPRTemporary result(this);
1973 GPRTemporary temp(this);
1975 JSValueRegs valueRegs = value.jsValueRegs();
1976 GPRReg resultGPR = result.gpr();
1978 use(node->child1());
1980 bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
1981 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
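// Convert the untyped operand to a boolean in resultGPR, then branch: zero goes to notTaken,
// anything else falls through to the unconditional jump to taken.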
1982 m_jit.emitConvertValueToBoolean(valueRegs, resultGPR, temp.gpr(), valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject);
1983 branchTest32(JITCompiler::Zero, resultGPR, notTaken);
1984 jump(taken, ForceJump);
1986 noResult(node, UseChildrenCalledExplicitly);
1991 RELEASE_ASSERT_NOT_REACHED();
1996 template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType>
1997 void SpeculativeJIT::compileContiguousPutByVal(Node* node, BaseOperandType& base, PropertyOperandType& property, ValueOperandType& value, GPRReg valuePayloadReg, TagType valueTag)
1999 Edge child4 = m_jit.graph().varArgChild(node, 3);
2001 ArrayMode arrayMode = node->arrayMode();
2003 GPRReg baseReg = base.gpr();
2004 GPRReg propertyReg = property.gpr();
2006 StorageOperand storage(this, child4);
2007 GPRReg storageReg = storage.gpr();
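// On 32-bit, each JSValue slot in the butterfly is eight bytes: a 32-bit payload and a 32-bit tag,
// addressed below via BaseIndex with TimesEight plus the tag/payload offsets.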
2009 if (node->op() == PutByValAlias) {
2010 // Store the value to the array.
2011 GPRReg propertyReg = property.gpr();
2012 m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
2013 m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
2019 MacroAssembler::Jump slowCase;
2021 if (arrayMode.isInBounds()) {
2023 OutOfBounds, JSValueRegs(), 0,
2024 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2026 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2028 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2030 if (!arrayMode.isOutOfBounds())
2031 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
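// Writing past the public length grows the array: temporarily bump the index to index + 1,
// store it as the new public length, then restore the index for the store below.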
2033 m_jit.add32(TrustedImm32(1), propertyReg);
2034 m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2035 m_jit.sub32(TrustedImm32(1), propertyReg);
2037 inBounds.link(&m_jit);
2040 m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
2041 m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
2048 if (arrayMode.isOutOfBounds()) {
2049 if (node->op() == PutByValDirect) {
2050 addSlowPathGenerator(slowPathCall(
2052 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
2053 NoResult, baseReg, propertyReg, valueTag, valuePayloadReg));
2055 addSlowPathGenerator(slowPathCall(
2057 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
2058 NoResult, baseReg, propertyReg, valueTag, valuePayloadReg));
2062 noResult(node, UseChildrenCalledExplicitly);
2065 void SpeculativeJIT::compile(Node* node)
2067 NodeType op = node->op();
2069 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
2070 m_jit.clearRegisterAllocationOffsets();
2075 case DoubleConstant:
2076 case PhantomDirectArguments:
2077 case PhantomClonedArguments:
2078 initConstantInfo(node);
2081 case LazyJSConstant:
2082 compileLazyJSConstant(node);
2086 speculate(node, node->child1());
2087 switch (node->child1().useKind()) {
2089 case DoubleRepRealUse: {
2090 SpeculateDoubleOperand op(this, node->child1());
2091 doubleResult(op.fpr(), node);
2096 case DoubleRepAnyIntUse: {
2097 RELEASE_ASSERT_NOT_REACHED();
2101 JSValueOperand op(this, node->child1());
2102 GPRTemporary resultTag(this, Reuse, op, TagWord);
2103 GPRTemporary resultPayload(this, Reuse, op, PayloadWord);
2104 GPRReg sourceTag = op.tagGPR();
2105 GPRReg sourcePayload = op.payloadGPR();
2106 GPRReg resultTagGPR = resultTag.gpr();
2107 GPRReg resultPayloadGPR = resultPayload.gpr();
2108 m_jit.move(sourceTag, resultTagGPR);
2109 m_jit.move(sourcePayload, resultPayloadGPR);
2110 jsValueResult(resultTagGPR, resultPayloadGPR, node);
2118 AbstractValue& value = m_state.variables().operand(node->local());
2120 // If the CFA is tracking this variable and it found that the variable
2121 // cannot have been assigned, then don't attempt to proceed.
2122 if (value.isClear()) {
2123 m_compileOkay = false;
2127 switch (node->variableAccessData()->flushFormat()) {
2128 case FlushedDouble: {
2129 FPRTemporary result(this);
2130 m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr());
2131 VirtualRegister virtualRegister = node->virtualRegister();
2132 m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
2133 generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr());
2137 case FlushedInt32: {
2138 GPRTemporary result(this);
2139 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
2141 // Like int32Result, but don't useChildren - our children are phi nodes,
2142 // and don't represent values within this dataflow with virtual registers.
2143 VirtualRegister virtualRegister = node->virtualRegister();
2144 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
2145 generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr());
2150 GPRTemporary result(this);
2151 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
2153 // Like cellResult, but don't useChildren - our children are phi nodes,
2154 // and don't represent values within this dataflow with virtual registers.
2155 VirtualRegister virtualRegister = node->virtualRegister();
2156 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell);
2157 generationInfoFromVirtualRegister(virtualRegister).initCell(node, node->refCount(), result.gpr());
2161 case FlushedBoolean: {
2162 GPRTemporary result(this);
2163 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
2165 // Like booleanResult, but don't useChildren - our children are phi nodes,
2166 // and don't represent values within this dataflow with virtual registers.
2167 VirtualRegister virtualRegister = node->virtualRegister();
2168 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean);
2169 generationInfoFromVirtualRegister(virtualRegister).initBoolean(node, node->refCount(), result.gpr());
2173 case FlushedJSValue: {
2174 GPRTemporary result(this);
2175 GPRTemporary tag(this);
2176 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
2177 m_jit.load32(JITCompiler::tagFor(node->machineLocal()), tag.gpr());
2179 // Like jsValueResult, but don't useChildren - our children are phi nodes,
2180 // and don't represent values within this dataflow with virtual registers.
2181 VirtualRegister virtualRegister = node->virtualRegister();
2182 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
2183 m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS);
2185 generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), tag.gpr(), result.gpr(), DataFormatJS);
2190 RELEASE_ASSERT_NOT_REACHED();
2195 case GetLocalUnlinked: {
2196 GPRTemporary payload(this);
2197 GPRTemporary tag(this);
2198 m_jit.load32(JITCompiler::payloadFor(node->unlinkedMachineLocal()), payload.gpr());
2199 m_jit.load32(JITCompiler::tagFor(node->unlinkedMachineLocal()), tag.gpr());
2200 jsValueResult(tag.gpr(), payload.gpr(), node);
2205 compileMovHint(m_currentNode);
2211 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
2222 switch (node->variableAccessData()->flushFormat()) {
2223 case FlushedDouble: {
2224 SpeculateDoubleOperand value(this, node->child1());
2225 m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal()));
2227 // Indicate that it's no longer necessary to retrieve the value of
2228 // this bytecode variable from registers or other locations in the stack,
2229 // but that it is stored as a double.
2230 recordSetLocal(DataFormatDouble);
2234 case FlushedInt32: {
2235 SpeculateInt32Operand value(this, node->child1());
2236 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
2238 recordSetLocal(DataFormatInt32);
2243 SpeculateCellOperand cell(this, node->child1());
2244 GPRReg cellGPR = cell.gpr();
2245 m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node->machineLocal()));
2247 recordSetLocal(DataFormatCell);
2251 case FlushedBoolean: {
2252 SpeculateBooleanOperand value(this, node->child1());
2253 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
2255 recordSetLocal(DataFormatBoolean);
2259 case FlushedJSValue: {
2260 JSValueOperand value(this, node->child1());
2261 m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node->machineLocal()));
2262 m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node->machineLocal()));
2264 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2269 RELEASE_ASSERT_NOT_REACHED();
2276 // This is a no-op; it just marks the fact that the argument is being used.
2277 // But it may be profitable to use this as a hook to run speculation checks
2278 // on arguments, thereby allowing us to trivially eliminate such checks if
2279 // the argument is not used.
2280 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2286 compileBitwiseOp(node);
2292 compileShiftOp(node);
2295 case UInt32ToNumber: {
2296 compileUInt32ToNumber(node);
2300 case DoubleAsInt32: {
2301 compileDoubleAsInt32(node);
2305 case ValueToInt32: {
2306 compileValueToInt32(node);
2311 compileDoubleRep(node);
2316 compileValueRep(node);
2321 compileValueAdd(node);
2325 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2326 JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
2327 JSValueOperand op3(this, node->child3(), ManualOperandSpeculation);
2329 JSValueRegs op1Regs = op1.jsValueRegs();
2330 JSValueRegs op2Regs = op2.jsValueRegs();
2331 JSValueRegs op3Regs;
2334 op3Regs = op3.jsValueRegs();
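// When the node has a third child, operationStrCat3 concatenates all three operands;
// otherwise operationStrCat2 handles the two-operand case.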
2338 GPRFlushedCallResult result(this);
2340 callOperation(operationStrCat3, result.gpr(), op1Regs, op2Regs, op3Regs);
2342 callOperation(operationStrCat2, result.gpr(), op1Regs, op2Regs);
2343 m_jit.exceptionCheck();
2345 cellResult(result.gpr(), node);
2350 compileArithAdd(node);
2354 compileArithClz32(node);
2358 compileMakeRope(node);
2362 compileArithSub(node);
2366 compileArithNegate(node);
2370 compileArithMul(node);
2374 compileArithDiv(node);
2379 compileArithMod(node);
2384 compileArithPow(node);
2389 compileArithAbs(node);
2394 switch (node->binaryUseKind()) {
2396 SpeculateStrictInt32Operand op1(this, node->child1());
2397 SpeculateStrictInt32Operand op2(this, node->child2());
2398 GPRTemporary result(this, Reuse, op1);
2400 GPRReg op1GPR = op1.gpr();
2401 GPRReg op2GPR = op2.gpr();
2402 GPRReg resultGPR = result.gpr();
2404 MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1GPR, op2GPR);
2405 m_jit.move(op2GPR, resultGPR);
2406 if (op1GPR != resultGPR) {
2407 MacroAssembler::Jump done = m_jit.jump();
2408 op1Less.link(&m_jit);
2409 m_jit.move(op1GPR, resultGPR);
2412 op1Less.link(&m_jit);
2414 int32Result(resultGPR, node);
2418 case DoubleRepUse: {
2419 SpeculateDoubleOperand op1(this, node->child1());
2420 SpeculateDoubleOperand op2(this, node->child2());
2421 FPRTemporary result(this, op1);
2423 FPRReg op1FPR = op1.fpr();
2424 FPRReg op2FPR = op2.fpr();
2425 FPRReg resultFPR = result.fpr();
2427 MacroAssembler::JumpList done;
2429 MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR);
2431 // op2 is either the lesser one, or one of them is NaN
2432 MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR);
2434 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2435 // op1 + op2 and putting it into result.
2436 m_jit.addDouble(op1FPR, op2FPR, resultFPR);
2437 done.append(m_jit.jump());
2439 op2Less.link(&m_jit);
2440 m_jit.moveDouble(op2FPR, resultFPR);
2442 if (op1FPR != resultFPR) {
2443 done.append(m_jit.jump());
2445 op1Less.link(&m_jit);
2446 m_jit.moveDouble(op1FPR, resultFPR);
2448 op1Less.link(&m_jit);
2452 doubleResult(resultFPR, node);
2457 RELEASE_ASSERT_NOT_REACHED();
2464 compileArithSqrt(node);
2468 compileArithFRound(node);
2472 compileArithRandom(node);
2479 compileArithRounding(node);
2483 compileArithSin(node);
2487 compileArithCos(node);
2491 compileArithTan(node);
2495 compileArithLog(node);
2499 compileLogicalNot(node);
2503 if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
2508 if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
2512 case CompareGreater:
2513 if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
2517 case CompareGreaterEq:
2518 if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
2523 if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
2527 case CompareStrictEq:
2528 if (compileStrictEq(node))
2533 compileCompareEqPtr(node);
2536 case StringCharCodeAt: {
2537 compileGetCharCodeAt(node);
2541 case StringCharAt: {
2542 // Relies on the StringCharAt node having the same basic layout as GetByVal
2543 compileGetByValOnString(node);
2547 case StringFromCharCode: {
2548 compileFromCharCode(node);
2558 case ArrayifyToStructure: {
2564 switch (node->arrayMode().type()) {
2565 case Array::SelectUsingPredictions:
2566 case Array::ForceExit:
2567 RELEASE_ASSERT_NOT_REACHED();
2568 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
2569 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2572 case Array::Undecided: {
2573 SpeculateStrictInt32Operand index(this, node->child2());
2574 GPRTemporary resultTag(this, Reuse, index);
2575 GPRTemporary resultPayload(this);
2577 GPRReg indexGPR = index.gpr();
2578 GPRReg resultTagGPR = resultTag.gpr();
2579 GPRReg resultPayloadGPR = resultPayload.gpr();
2581 speculationCheck(OutOfBounds, JSValueRegs(), node,
2582 m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0)));
2584 use(node->child1());
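// Reads from an Array::Undecided array always produce undefined (nothing has been stored yet),
// so after the bounds check we just materialize (UndefinedTag, 0).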
2587 m_jit.move(MacroAssembler::TrustedImm32(JSValue::UndefinedTag), resultTagGPR);
2588 m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR);
2589 jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
2592 case Array::Generic: {
2593 SpeculateCellOperand base(this, node->child1()); // Save a register, speculate cell. We'll probably be right.
2594 JSValueOperand property(this, node->child2());
2595 GPRReg baseGPR = base.gpr();
2596 JSValueRegs propertyRegs = property.jsValueRegs();
2599 GPRFlushedCallResult2 resultTag(this);
2600 GPRFlushedCallResult resultPayload(this);
2601 callOperation(operationGetByValCell, JSValueRegs(resultTag.gpr(), resultPayload.gpr()), baseGPR, propertyRegs);
2602 m_jit.exceptionCheck();
2604 jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
2608 case Array::Contiguous: {
2609 if (node->arrayMode().isInBounds()) {
2610 SpeculateStrictInt32Operand property(this, node->child2());
2611 StorageOperand storage(this, node->child3());
2613 GPRReg propertyReg = property.gpr();
2614 GPRReg storageReg = storage.gpr();
2619 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2621 GPRTemporary resultPayload(this);
2622 if (node->arrayMode().type() == Array::Int32) {
2623 ASSERT(!node->arrayMode().isSaneChain());
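// Holes in contiguous storage carry EmptyValueTag in the tag word; reading one must bail out,
// so check the tag before loading the payload.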
2626 OutOfBounds, JSValueRegs(), 0,
2628 MacroAssembler::Equal,
2629 MacroAssembler::BaseIndex(
2630 storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset),
2631 TrustedImm32(JSValue::EmptyValueTag)));
2633 MacroAssembler::BaseIndex(
2634 storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset),
2635 resultPayload.gpr());
2636 int32Result(resultPayload.gpr(), node);
2640 GPRTemporary resultTag(this);
2642 MacroAssembler::BaseIndex(
2643 storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset),
2646 MacroAssembler::BaseIndex(
2647 storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset),
2648 resultPayload.gpr());
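// With a sane prototype chain, a hole reads as undefined, so rewrite the empty value to
// (UndefinedTag, 0) inline instead of OSR exiting.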
2649 if (node->arrayMode().isSaneChain()) {
2650 JITCompiler::Jump notHole = m_jit.branch32(
2651 MacroAssembler::NotEqual, resultTag.gpr(),
2652 TrustedImm32(JSValue::EmptyValueTag));
2653 m_jit.move(TrustedImm32(JSValue::UndefinedTag), resultTag.gpr());
2654 m_jit.move(TrustedImm32(0), resultPayload.gpr());
2655 notHole.link(&m_jit);
2658 LoadFromHole, JSValueRegs(), 0,
2660 MacroAssembler::Equal, resultTag.gpr(),
2661 TrustedImm32(JSValue::EmptyValueTag)));
2663 jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
2667 SpeculateCellOperand base(this, node->child1());
2668 SpeculateStrictInt32Operand property(this, node->child2());
2669 StorageOperand storage(this, node->child3());
2671 GPRReg baseReg = base.gpr();
2672 GPRReg propertyReg = property.gpr();
2673 GPRReg storageReg = storage.gpr();
2678 GPRTemporary resultTag(this);
2679 GPRTemporary resultPayload(this);
2680 GPRReg resultTagReg = resultTag.gpr();
2681 GPRReg resultPayloadReg = resultPayload.gpr();
2683 MacroAssembler::JumpList slowCases;
2685 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2687 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg);
2688 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg);
2689 slowCases.append(m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag)));
2691 addSlowPathGenerator(
2693 slowCases, this, operationGetByValArrayInt,
2694 JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg));
2696 jsValueResult(resultTagReg, resultPayloadReg, node);
2699 case Array::Double: {
2700 if (node->arrayMode().isInBounds()) {
2701 SpeculateStrictInt32Operand property(this, node->child2());
2702 StorageOperand storage(this, node->child3());
2704 GPRReg propertyReg = property.gpr();
2705 GPRReg storageReg = storage.gpr();
2710 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2712 FPRTemporary result(this);
2713 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr());
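// Double arrays store holes as NaN, so the unordered self-comparison below detects a hole;
// when the chain is sane we skip that check.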
2714 if (!node->arrayMode().isSaneChain())
2715 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr()));
2716 doubleResult(result.fpr(), node);
2720 SpeculateCellOperand base(this, node->child1());
2721 SpeculateStrictInt32Operand property(this, node->child2());
2722 StorageOperand storage(this, node->child3());
2724 GPRReg baseReg = base.gpr();
2725 GPRReg propertyReg = property.gpr();
2726 GPRReg storageReg = storage.gpr();
2731 GPRTemporary resultTag(this);
2732 GPRTemporary resultPayload(this);
2733 FPRTemporary temp(this);
2734 GPRReg resultTagReg = resultTag.gpr();
2735 GPRReg resultPayloadReg = resultPayload.gpr();
2736 FPRReg tempReg = temp.fpr();
2738 MacroAssembler::JumpList slowCases;
2740 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2742 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
2743 slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg));
2744 boxDouble(tempReg, resultTagReg, resultPayloadReg);
2746 addSlowPathGenerator(
2748 slowCases, this, operationGetByValArrayInt,
2749 JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg));
2751 jsValueResult(resultTagReg, resultPayloadReg, node);
2754 case Array::ArrayStorage:
2755 case Array::SlowPutArrayStorage: {
2756 if (node->arrayMode().isInBounds()) {
2757 SpeculateStrictInt32Operand property(this, node->child2());
2758 StorageOperand storage(this, node->child3());
2759 GPRReg propertyReg = property.gpr();
2760 GPRReg storageReg = storage.gpr();
2765 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2767 GPRTemporary resultTag(this);
2768 GPRTemporary resultPayload(this);
2770 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr());
2771 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)));
2772 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr());
2774 jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
2778 SpeculateCellOperand base(this, node->child1());
2779 SpeculateStrictInt32Operand property(this, node->child2());
2780 StorageOperand storage(this, node->child3());
2781 GPRReg propertyReg = property.gpr();
2782 GPRReg storageReg = storage.gpr();
2783 GPRReg baseReg = base.gpr();
2788 GPRTemporary resultTag(this);
2789 GPRTemporary resultPayload(this);
2790 GPRReg resultTagReg = resultTag.gpr();
2791 GPRReg resultPayloadReg = resultPayload.gpr();
2793 JITCompiler::Jump outOfBounds = m_jit.branch32(
2794 MacroAssembler::AboveOrEqual, propertyReg,
2795 MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
2797 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg);
2798 JITCompiler::Jump hole = m_jit.branch32(
2799 MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag));
2800 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg);
2802 JITCompiler::JumpList slowCases;
2803 slowCases.append(outOfBounds);
2804 slowCases.append(hole);
2805 addSlowPathGenerator(
2807 slowCases, this, operationGetByValArrayInt,
2808 JSValueRegs(resultTagReg, resultPayloadReg),
2809 baseReg, propertyReg));
2811 jsValueResult(resultTagReg, resultPayloadReg, node);
2815 compileGetByValOnString(node);
2817 case Array::DirectArguments:
2818 compileGetByValOnDirectArguments(node);
2820 case Array::ScopedArguments:
2821 compileGetByValOnScopedArguments(node);
2824 TypedArrayType type = node->arrayMode().typedArrayType();
2826 compileGetByValOnIntTypedArray(node, type);
2828 compileGetByValOnFloatTypedArray(node, type);
2834 compileToLowerCase(node);
2838 case GetByValWithThis: {
2839 JSValueOperand base(this, node->child1());
2840 JSValueRegs baseRegs = base.jsValueRegs();
2841 JSValueOperand thisValue(this, node->child2());
2842 JSValueRegs thisValueRegs = thisValue.jsValueRegs();
2843 JSValueOperand subscript(this, node->child3());
2844 JSValueRegs subscriptRegs = subscript.jsValueRegs();
2846 GPRFlushedCallResult resultPayload(this);
2847 GPRFlushedCallResult2 resultTag(this);
2848 GPRReg resultPayloadGPR = resultPayload.gpr();
2849 GPRReg resultTagGPR = resultTag.gpr();
2852 callOperation(operationGetByValWithThis, JSValueRegs(resultTagGPR, resultPayloadGPR), baseRegs, thisValueRegs, subscriptRegs);
2853 m_jit.exceptionCheck();
2855 jsValueResult(resultTagGPR, resultPayloadGPR, node);
2859 case PutByValDirect:
2861 case PutByValAlias: {
2862 Edge child1 = m_jit.graph().varArgChild(node, 0);
2863 Edge child2 = m_jit.graph().varArgChild(node, 1);
2864 Edge child3 = m_jit.graph().varArgChild(node, 2);
2865 Edge child4 = m_jit.graph().varArgChild(node, 3);
2867 ArrayMode arrayMode = node->arrayMode().modeForPut();
2868 bool alreadyHandled = false;
2870 switch (arrayMode.type()) {
2871 case Array::SelectUsingPredictions:
2872 case Array::ForceExit:
2873 RELEASE_ASSERT_NOT_REACHED();
2874 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
2875 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2876 alreadyHandled = true;
2879 case Array::Generic: {
2880 ASSERT(node->op() == PutByVal || node->op() == PutByValDirect);
2882 SpeculateCellOperand base(this, child1); // Save a register, speculate cell. We'll probably be right.
2883 JSValueOperand property(this, child2);
2884 JSValueOperand value(this, child3);
2885 GPRReg baseGPR = base.gpr();
2886 JSValueRegs propertyRegs = property.jsValueRegs();
2887 JSValueRegs valueRegs = value.jsValueRegs();
2890 if (node->op() == PutByValDirect)
2891 callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict, baseGPR, propertyRegs, valueRegs);
2893 callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyRegs, valueRegs);
2894 m_jit.exceptionCheck();
2897 alreadyHandled = true;
2907 SpeculateCellOperand base(this, child1);
2908 SpeculateStrictInt32Operand property(this, child2);
2910 GPRReg baseReg = base.gpr();
2911 GPRReg propertyReg = property.gpr();
2913 switch (arrayMode.type()) {
2914 case Array::Int32: {
2915 SpeculateInt32Operand value(this, child3);
2917 GPRReg valuePayloadReg = value.gpr();
2922 compileContiguousPutByVal(node, base, property, value, valuePayloadReg, TrustedImm32(JSValue::Int32Tag));
2925 case Array::Contiguous: {
2926 JSValueOperand value(this, child3);
2928 GPRReg valueTagReg = value.tagGPR();
2929 GPRReg valuePayloadReg = value.payloadGPR();
2934 compileContiguousPutByVal(node, base, property, value, valuePayloadReg, valueTagReg);
2937 case Array::Double: {
2938 compileDoublePutByVal(node, base, property);
2941 case Array::ArrayStorage:
2942 case Array::SlowPutArrayStorage: {
2943 JSValueOperand value(this, child3);
2945 GPRReg valueTagReg = value.tagGPR();
2946 GPRReg valuePayloadReg = value.payloadGPR();
2951 StorageOperand storage(this, child4);
2952 GPRReg storageReg = storage.gpr();
2954 if (node->op() == PutByValAlias) {
2955 // Store the value to the array.
2956 GPRReg propertyReg = property.gpr();
2957 m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
2958 m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
2964 MacroAssembler::JumpList slowCases;
2966 MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
2967 if (!arrayMode.isOutOfBounds())
2968 speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
2970 slowCases.append(beyondArrayBounds);
2972 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2973 if (arrayMode.isInBounds()) {
2975 StoreToHole, JSValueRegs(), 0,
2976 m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)));
2978 MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
2979 if (arrayMode.isSlowPut()) {
2980 // This is sort of strange. If we wanted to optimize this code path, we would invert
2981 // the above branch. But it's simply not worth it since this only happens if we're
2982 // already having a bad time.
2983 slowCases.append(m_jit.jump());
2985 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
2987 // If we're writing to a hole we might be growing the array; update the length if so.
2988 MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
2989 m_jit.add32(TrustedImm32(1), propertyReg);
2990 m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
2991 m_jit.sub32(TrustedImm32(1), propertyReg);
2993 lengthDoesNotNeedUpdate.link(&m_jit);
2995 notHoleValue.link(&m_jit);
2998 // Store the value to the array.
2999 m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
3000 m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
3007 if (!slowCases.empty()) {
3008 if (node->op() == PutByValDirect) {
3009 addSlowPathGenerator(slowPathCall(
3011 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
3012 NoResult, baseReg, propertyReg, JSValueRegs(valueTagReg, valuePayloadReg)));
3014 addSlowPathGenerator(slowPathCall(
3016 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
3017 NoResult, baseReg, propertyReg, JSValueRegs(valueTagReg, valuePayloadReg)));
3021 noResult(node, UseChildrenCalledExplicitly);
3026 TypedArrayType type = arrayMode.typedArrayType();
3028 compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type);
3030 compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type);
3035 case PutByValWithThis: {
3037 // We don't have enough registers on X86 to do this
3038 // without setting up the call frame incrementally.
3040 m_jit.poke(GPRInfo::callFrameRegister, index++);
3043 JSValueOperand base(this, m_jit.graph().varArgChild(node, 0));
3044 GPRReg baseTag = base.tagGPR();
3045 GPRReg basePayload = base.payloadGPR();
3047 JSValueOperand thisValue(this, m_jit.graph().varArgChild(node, 1));
3048 GPRReg thisValueTag = thisValue.tagGPR();
3049 GPRReg thisValuePayload = thisValue.payloadGPR();
3051 JSValueOperand property(this, m_jit.graph().varArgChild(node, 2));
3052 GPRReg propertyTag = property.tagGPR();
3053 GPRReg propertyPayload = property.payloadGPR();
3055 m_jit.poke(basePayload, index++);
3056 m_jit.poke(baseTag, index++);
3058 m_jit.poke(thisValuePayload, index++);
3059 m_jit.poke(thisValueTag, index++);
3061 m_jit.poke(propertyPayload, index++);
3062 m_jit.poke(propertyTag, index++);
3067 JSValueOperand value(this, m_jit.graph().varArgChild(node, 3));
3068 GPRReg valueTag = value.tagGPR();
3069 GPRReg valuePayload = value.payloadGPR();
3070 m_jit.poke(valuePayload, index++);
3071 m_jit.poke(valueTag, index++);
3074 appendCall(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis);
3075 m_jit.exceptionCheck();
3077 // We don't have enough registers on MIPS either but the ABI is a little different.
3079 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
3081 JSValueOperand base(this, m_jit.graph().varArgChild(node, 0));
3082 GPRReg baseTag = base.tagGPR();
3083 GPRReg basePayload = base.payloadGPR();
3085 JSValueOperand thisValue(this, m_jit.graph().varArgChild(node, 1));
3086 GPRReg thisValueTag = thisValue.tagGPR();
3087 GPRReg thisValuePayload = thisValue.payloadGPR();
3089 JSValueOperand property(this, m_jit.graph().varArgChild(node, 2));
3090 GPRReg propertyTag = property.tagGPR();
3091 GPRReg propertyPayload = property.payloadGPR();
3093 // For operationPutByValWithThis[Strict](), base is a 64-bit
3094 // argument, so it should be double-word aligned on the stack.
3095 // This requirement still applies when it is passed in argument
3096 // registers instead of on the stack.
3097 m_jit.move(basePayload, GPRInfo::argumentGPR2);
3098 m_jit.move(baseTag, GPRInfo::argumentGPR3);
3100 m_jit.poke(thisValuePayload, index++);
3101 m_jit.poke(thisValueTag, index++);
3103 m_jit.poke(propertyPayload, index++);
3104 m_jit.poke(propertyTag, index++);
3109 JSValueOperand value(this, m_jit.graph().varArgChild(node, 3));
3110 GPRReg valueTag = value.tagGPR();
3111 GPRReg valuePayload = value.payloadGPR();
3112 m_jit.poke(valuePayload, index++);
3113 m_jit.poke(valueTag, index++);
3116 appendCall(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis);
3117 m_jit.exceptionCheck();
3119 static_assert(GPRInfo::numberOfRegisters >= 8, "We are assuming we have enough registers to make this call without incrementally setting up the arguments.");
3121 JSValueOperand base(this, m_jit.graph().varArgChild(node, 0));
3122 JSValueRegs baseRegs = base.jsValueRegs();
3124 JSValueOperand thisValue(this, m_jit.graph().varArgChild(node, 1));
3125 JSValueRegs thisRegs = thisValue.jsValueRegs();
3127 JSValueOperand property(this, m_jit.graph().varArgChild(node, 2));
3128 JSValueRegs propertyRegs = property.jsValueRegs();
3130 JSValueOperand value(this, m_jit.graph().varArgChild(node, 3));
3131 JSValueRegs valueRegs = value.jsValueRegs();
3134 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis,
3135 NoResult, baseRegs, thisRegs, propertyRegs, valueRegs);
3136 m_jit.exceptionCheck();
3144 SpeculateCellOperand globalObject(this, node->child1());
3145 GPRReg globalObjectGPR = globalObject.gpr();
3147 if (node->child2().useKind() == RegExpObjectUse) {
3148 if (node->child3().useKind() == StringUse) {
3149 SpeculateCellOperand base(this, node->child2());
3150 SpeculateCellOperand argument(this, node->child3());
3151 GPRReg baseGPR = base.gpr();
3152 GPRReg argumentGPR = argument.gpr();
3153 speculateRegExpObject(node->child2(), baseGPR);
3154 speculateString(node->child3(), argumentGPR);
3157 GPRFlushedCallResult2 resultTag(this);
3158 GPRFlushedCallResult resultPayload(this);
3160 operationRegExpExecString, JSValueRegs(resultTag.gpr(), resultPayload.gpr()),
3161 globalObjectGPR, baseGPR, argumentGPR);
3162 m_jit.exceptionCheck();