2 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3 * Copyright (C) 2011 Intel Corporation. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "DFGSpeculativeJIT.h"
32 #include "ArrayPrototype.h"
33 #include "CallFrameShuffler.h"
34 #include "DFGAbstractInterpreterInlines.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGOperations.h"
37 #include "DFGSlowPathGenerator.h"
39 #include "DirectArguments.h"
40 #include "GetterSetter.h"
41 #include "JSEnvironmentRecord.h"
42 #include "JSLexicalEnvironment.h"
43 #include "JSPropertyNameEnumerator.h"
44 #include "ObjectPrototype.h"
45 #include "JSCInlines.h"
46 #include "SetupVarargsFrame.h"
47 #include "TypeProfilerLog.h"
50 namespace JSC { namespace DFG {
// Fills the JSValue for |edge| into a (tag, payload) GPR pair — the 32-bit
// JSVALUE32_64 representation where tag and payload live in separate GPRs.
// The FPR out-param is currently unused for doubles (see FIXME below).
// NOTE(review): this chunk is a partial extraction — interior lines
// (allocations, breaks, closing braces, some case labels) are not visible
// here; comments describe only the visible logic.
54 bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr)
56 // FIXME: For double we could fill with a FPR.
59 VirtualRegister virtualRegister = edge->virtualRegister();
60 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
62 switch (info.registerFormat()) {
// Value is not currently in any register: materialize a constant, or reload
// from the stack spill slot.
63 case DataFormatNone: {
65 if (edge->hasConstant()) {
// Constant path: move the tag and payload words in as immediates and record
// both GPRs as holding this virtual register.
67 payloadGPR = allocate();
68 JSValue value = edge->asJSValue();
69 m_jit.move(Imm32(value.tag()), tagGPR);
70 m_jit.move(Imm32(value.payload()), payloadGPR);
71 m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant);
72 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant);
73 info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS);
// Spilled path: reload from the stack. For statically-typed spill formats the
// tag is a known constant, so it is synthesized with a move instead of a load.
75 DataFormat spillFormat = info.spillFormat();
76 ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage);
78 payloadGPR = allocate();
79 switch (spillFormat) {
81 m_jit.move(TrustedImm32(JSValue::Int32Tag), tagGPR);
82 spillFormat = DataFormatJSInt32; // This will be used as the new register format.
85 m_jit.move(TrustedImm32(JSValue::CellTag), tagGPR);
86 spillFormat = DataFormatJSCell; // This will be used as the new register format.
88 case DataFormatBoolean:
89 m_jit.move(TrustedImm32(JSValue::BooleanTag), tagGPR);
90 spillFormat = DataFormatJSBoolean; // This will be used as the new register format.
// Default/untyped spill: the tag word must actually be loaded from the stack.
93 m_jit.load32(JITCompiler::tagFor(virtualRegister), tagGPR);
96 m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR);
97 m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled);
98 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled);
99 info.fillJSValue(*m_stream, tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat);
// Payload-only register formats: box the value by pairing the existing payload
// GPR with a freshly materialized constant tag.
105 case DataFormatInt32:
107 case DataFormatBoolean: {
108 GPRReg gpr = info.gpr();
109 // If the register has already been locked we need to take a copy.
110 if (m_gprs.isLocked(gpr)) {
111 payloadGPR = allocate();
112 m_jit.move(gpr, payloadGPR);
118 int32_t tag = JSValue::EmptyValueTag;
119 DataFormat fillFormat = DataFormatJS;
120 switch (info.registerFormat()) {
121 case DataFormatInt32:
122 tag = JSValue::Int32Tag;
123 fillFormat = DataFormatJSInt32;
126 tag = JSValue::CellTag;
127 fillFormat = DataFormatJSCell;
129 case DataFormatBoolean:
130 tag = JSValue::BooleanTag;
131 fillFormat = DataFormatJSBoolean;
134 RELEASE_ASSERT_NOT_REACHED();
137 m_jit.move(TrustedImm32(tag), tagGPR);
139 m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
140 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
141 info.fillJSValue(*m_stream, tagGPR, payloadGPR, fillFormat);
// Already held as a tag/payload pair: just hand back the existing registers
// (locking visible for the payload; tag lock presumably on a missing line).
145 case DataFormatJSDouble:
147 case DataFormatJSInt32:
148 case DataFormatJSCell:
149 case DataFormatJSBoolean: {
150 tagGPR = info.tagGPR();
151 payloadGPR = info.payloadGPR();
153 m_gprs.lock(payloadGPR);
157 case DataFormatStorage:
158 case DataFormatDouble:
159 // this type currently never occurs
160 RELEASE_ASSERT_NOT_REACHED();
163 RELEASE_ASSERT_NOT_REACHED();
// Emits a patchable inline-cache GetById: a fast path generated by
// JITGetByIdGenerator plus a slow-path call to operationGetByIdOptimize.
// baseTagGPROrNone may be InvalidGPRReg when the base is statically known to
// be a cell, in which case a constant CellTag is passed to the slow path.
168 void SpeculativeJIT::cachedGetById(
169 CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR,
170 unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
172 // This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens
173 // in the case of GetByIdFlush, which has a relatively expensive register allocation story already so we probably don't need to
174 // trip over one move instruction.
175 if (basePayloadGPR == resultTagGPR) {
176 RELEASE_ASSERT(basePayloadGPR != resultPayloadGPR);
178 if (baseTagGPROrNone == resultPayloadGPR) {
179 m_jit.swap(basePayloadGPR, baseTagGPROrNone);
180 baseTagGPROrNone = resultTagGPR;
182 m_jit.move(basePayloadGPR, resultPayloadGPR);
183 basePayloadGPR = resultPayloadGPR;
186 RegisterSet usedRegisters = this->usedRegisters();
187 if (spillMode == DontSpill) {
188 // We've already flushed registers to the stack, we don't need to spill these.
189 usedRegisters.set(JSValueRegs(baseTagGPROrNone, basePayloadGPR), false);
190 usedRegisters.set(JSValueRegs(resultTagGPR, resultPayloadGPR), false);
193 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
194 JITGetByIdGenerator gen(
195 m_jit.codeBlock(), codeOrigin, callSite, usedRegisters,
196 JSValueRegs(baseTagGPROrNone, basePayloadGPR),
197 JSValueRegs(resultTagGPR, resultPayloadGPR));
199 gen.generateFastPath(m_jit);
// Collect the jumps that route to the out-of-line slow path: any caller-
// provided jump plus the generator's own patchable slow-path jump.
201 JITCompiler::JumpList slowCases;
202 if (slowPathTarget.isSet())
203 slowCases.append(slowPathTarget);
204 slowCases.append(gen.slowPathJump());
206 std::unique_ptr<SlowPathGenerator> slowPath;
207 if (baseTagGPROrNone == InvalidGPRReg) {
// Known-cell base: pass CellTag as an immediate instead of a tag register.
208 slowPath = slowPathCall(
209 slowCases, this, operationGetByIdOptimize,
210 JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(),
211 static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
212 identifierUID(identifierNumber));
214 slowPath = slowPathCall(
215 slowCases, this, operationGetByIdOptimize,
216 JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), baseTagGPROrNone,
217 basePayloadGPR, identifierUID(identifierNumber));
// Register the IC and its slow path so the JIT can link/patch them later.
220 m_jit.addGetById(gen, slowPath.get());
221 addSlowPathGenerator(WTFMove(slowPath));
// Emits a patchable inline-cache PutById: fast path via JITPutByIdGenerator,
// slow path calling the generator's chosen slow-path function. The base is
// payload-only (known cell); the value is a full tag/payload pair.
224 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
226 RegisterSet usedRegisters = this->usedRegisters();
227 if (spillMode == DontSpill) {
228 // We've already flushed registers to the stack, we don't need to spill these.
229 usedRegisters.set(basePayloadGPR, false);
230 usedRegisters.set(JSValueRegs(valueTagGPR, valuePayloadGPR), false);
232 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
233 JITPutByIdGenerator gen(
234 m_jit.codeBlock(), codeOrigin, callSite, usedRegisters,
235 JSValueRegs::payloadOnly(basePayloadGPR), JSValueRegs(valueTagGPR, valuePayloadGPR),
236 scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);
238 gen.generateFastPath(m_jit);
// Route caller-provided and generator slow cases to the out-of-line call.
240 JITCompiler::JumpList slowCases;
241 if (slowPathTarget.isSet())
242 slowCases.append(slowPathTarget);
243 slowCases.append(gen.slowPathJump());
245 auto slowPath = slowPathCall(
246 slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueTagGPR,
247 valuePayloadGPR, basePayloadGPR, identifierUID(identifierNumber));
// Register the IC and its slow path for later linking/patching.
249 m_jit.addPutById(gen, slowPath.get());
250 addSlowPathGenerator(WTFMove(slowPath));
// Produces a boolean answering "is |operand| null or undefined?" (loose-
// equality semantics), including the document.all MasqueradesAsUndefined
// quirk: when the watchpoint is invalid, a cell masquerading as undefined
// from the *same* global object also compares equal.
253 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand)
255 JSValueOperand arg(this, operand, ManualOperandSpeculation);
256 GPRReg argTagGPR = arg.tagGPR();
257 GPRReg argPayloadGPR = arg.payloadGPR();
259 GPRTemporary resultPayload(this, Reuse, arg, PayloadWord);
260 GPRReg resultPayloadGPR = resultPayload.gpr();
262 JITCompiler::Jump notCell;
263 JITCompiler::Jump notMasqueradesAsUndefined;
264 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
// Watchpoint valid: no cell can masquerade as undefined, so any cell
// compares false.
265 if (!isKnownCell(operand.node()))
266 notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
268 m_jit.move(TrustedImm32(0), resultPayloadGPR);
269 notMasqueradesAsUndefined = m_jit.jump();
271 GPRTemporary localGlobalObject(this);
272 GPRTemporary remoteGlobalObject(this);
274 if (!isKnownCell(operand.node()))
275 notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
// Test the cell's type-info flags for the MasqueradesAsUndefined bit.
277 JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
278 JITCompiler::NonZero,
279 JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
280 JITCompiler::TrustedImm32(MasqueradesAsUndefined));
282 m_jit.move(TrustedImm32(0), resultPayloadGPR);
283 notMasqueradesAsUndefined = m_jit.jump();
// Masquerading cell: it equals undefined only when its structure's global
// object matches the global object of the current code origin.
285 isMasqueradesAsUndefined.link(&m_jit);
286 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
287 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
288 m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
289 m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultPayloadGPR);
290 m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
291 m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR);
294 if (!isKnownCell(operand.node())) {
295 JITCompiler::Jump done = m_jit.jump();
297 notCell.link(&m_jit);
298 // null or undefined?
// Tag trick: OR-ing 1 into UndefinedTag yields NullTag, so one compare
// covers both null and undefined.
299 COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
300 m_jit.or32(TrustedImm32(1), argTagGPR, resultPayloadGPR);
301 m_jit.compare32(JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR);
306 notMasqueradesAsUndefined.link(&m_jit);
308 booleanResult(resultPayloadGPR, m_currentNode);
// Peephole-fused variant of the null/undefined comparison: instead of
// materializing a boolean, branches directly to the taken/notTaken blocks of
// |branchNode|. Handles the MasqueradesAsUndefined quirk like the non-
// peephole version. NOTE(review): |invert| is used below but its declaration
// and the taken/notTaken swap body are on lines missing from this chunk —
// presumably invert is set when taken == nextBlock() so the code can fall
// through; confirm against the full file.
311 void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode)
313 BasicBlock* taken = branchNode->branchData()->taken.block;
314 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
317 if (taken == nextBlock()) {
319 BasicBlock* tmp = taken;
324 JSValueOperand arg(this, operand, ManualOperandSpeculation);
325 GPRReg argTagGPR = arg.tagGPR();
326 GPRReg argPayloadGPR = arg.payloadGPR();
328 GPRTemporary result(this, Reuse, arg, TagWord);
329 GPRReg resultGPR = result.gpr();
331 JITCompiler::Jump notCell;
333 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
// Watchpoint valid: a cell can never equal null/undefined.
334 if (!isKnownCell(operand.node()))
335 notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
337 jump(invert ? taken : notTaken, ForceJump);
339 GPRTemporary localGlobalObject(this);
340 GPRTemporary remoteGlobalObject(this);
342 if (!isKnownCell(operand.node()))
343 notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
// Non-masquerading cells are not equal to null/undefined.
345 branchTest8(JITCompiler::Zero,
346 JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
347 JITCompiler::TrustedImm32(MasqueradesAsUndefined),
348 invert ? taken : notTaken);
// Masquerading cell: equal only if its structure's global object matches the
// current code origin's global object.
350 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
351 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
352 m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
353 m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultGPR);
354 m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
355 branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
358 if (!isKnownCell(operand.node())) {
359 jump(notTaken, ForceJump);
361 notCell.link(&m_jit);
362 // null or undefined?
// Same tag trick as the non-peephole version: UndefinedTag | 1 == NullTag.
363 COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
364 m_jit.or32(TrustedImm32(1), argTagGPR, resultGPR);
365 branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken);
// Emits a compare-and-branch fused with the following Branch node. Fast path:
// if both operands are (possibly) int32, compare payloads directly; otherwise
// (or on tag mismatch) call |helperFunction| and branch on its boolean result.
371 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
373 BasicBlock* taken = branchNode->branchData()->taken.block;
374 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
376 JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
378 // The branch instruction will branch to the taken block.
379 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
380 if (taken == nextBlock()) {
381 cond = JITCompiler::invert(cond);
382 callResultCondition = JITCompiler::Zero;
383 BasicBlock* tmp = taken;
388 JSValueOperand arg1(this, node->child1());
389 JSValueOperand arg2(this, node->child2());
390 GPRReg arg1TagGPR = arg1.tagGPR();
391 GPRReg arg1PayloadGPR = arg1.payloadGPR();
392 GPRReg arg2TagGPR = arg2.tagGPR();
393 GPRReg arg2PayloadGPR = arg2.payloadGPR();
395 JITCompiler::JumpList slowPath;
397 if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
// At least one operand is definitely not an int32: go straight to the helper
// call (registers are flushed; result arrives in a flushed call-result GPR).
398 GPRFlushedCallResult result(this);
399 GPRReg resultGPR = result.gpr();
405 callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
406 m_jit.exceptionCheck();
408 branchTest32(callResultCondition, resultGPR, taken);
410 GPRTemporary result(this);
411 GPRReg resultGPR = result.gpr();
// Int32 fast path: guard each operand's tag (unless statically known int),
// then compare the payloads directly.
416 if (!isKnownInteger(node->child1().node()))
417 slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
418 if (!isKnownInteger(node->child2().node()))
419 slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
421 branch32(cond, arg1PayloadGPR, arg2PayloadGPR, taken);
423 if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
424 jump(notTaken, ForceJump);
// Slow path: silently spill, call the helper, refill, then branch on result.
426 slowPath.link(&m_jit);
428 silentSpillAllRegisters(resultGPR);
429 callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
430 m_jit.exceptionCheck();
431 silentFillAllRegisters(resultGPR);
433 branchTest32(callResultCondition, resultGPR, taken);
// The fused branch consumed the rest of the block; advance the cursor to the
// branch node so codegen bookkeeping stays consistent.
439 m_indexInBlock = m_block->size() - 1;
440 m_currentNode = branchNode;
// Slow-path generator for non-peephole compares: calls the comparison helper
// and masks the result to a single boolean bit (and32 with 1) so it can be
// boxed as a JS boolean payload. NOTE(review): access specifiers, member
// initializers for the tag members, and the call-emission lines are missing
// from this chunk.
443 template<typename JumpType>
444 class CompareAndBoxBooleanSlowPathGenerator
445 : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
447 CompareAndBoxBooleanSlowPathGenerator(
448 JumpType from, SpeculativeJIT* jit,
449 S_JITOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload,
450 GPRReg arg2Tag, GPRReg arg2Payload)
451 : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
452 from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result)
454 , m_arg1Payload(arg1Payload)
456 , m_arg2Payload(arg2Payload)
// Emits the out-of-line call and normalizes the int result to 0/1.
461 virtual void generateInternal(SpeculativeJIT* jit)
466 this->m_function, this->m_result, m_arg1Tag, m_arg1Payload, m_arg2Tag,
468 jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
474 GPRReg m_arg1Payload;
476 GPRReg m_arg2Payload;
// Non-fused compare producing a boolean result. Mirrors the peephole version:
// int32 fast path with tag guards, deferred slow path (via the
// CompareAndBoxBoolean generator above) when a tag check can fail, or a direct
// helper call when an operand is known not to be an int32.
479 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
481 JSValueOperand arg1(this, node->child1());
482 JSValueOperand arg2(this, node->child2());
483 GPRReg arg1TagGPR = arg1.tagGPR();
484 GPRReg arg1PayloadGPR = arg1.payloadGPR();
485 GPRReg arg2TagGPR = arg2.tagGPR();
486 GPRReg arg2PayloadGPR = arg2.payloadGPR();
488 JITCompiler::JumpList slowPath;
490 if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
// Definitely not an int32 compare: call the helper directly.
491 GPRFlushedCallResult result(this);
492 GPRReg resultPayloadGPR = result.gpr();
498 callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
499 m_jit.exceptionCheck();
501 booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
503 GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
504 GPRReg resultPayloadGPR = resultPayload.gpr();
// Int32 fast path: guard tags, compare payloads into the result register.
509 if (!isKnownInteger(node->child1().node()))
510 slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
511 if (!isKnownInteger(node->child2().node()))
512 slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
514 m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);
516 if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
// Defer the helper call to an out-of-line stub that boxes its result.
517 addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
518 slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR,
519 arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR));
522 booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
// Strict-equality compare fused with a branch. Known-cell/known-cell pairs get
// a pointer-identity fast path before falling back to
// operationCompareStrictEqCell; everything else calls operationCompareStrictEq.
// |invert| flips taken/notTaken semantics (!== vs ===).
526 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
528 BasicBlock* taken = branchNode->branchData()->taken.block;
529 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
531 // The branch instruction will branch to the taken block.
532 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
533 if (taken == nextBlock()) {
535 BasicBlock* tmp = taken;
540 JSValueOperand arg1(this, node->child1());
541 JSValueOperand arg2(this, node->child2());
542 GPRReg arg1TagGPR = arg1.tagGPR();
543 GPRReg arg1PayloadGPR = arg1.payloadGPR();
544 GPRReg arg2TagGPR = arg2.tagGPR();
545 GPRReg arg2PayloadGPR = arg2.payloadGPR();
547 GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
548 GPRReg resultPayloadGPR = resultPayload.gpr();
553 if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
554 // see if we get lucky: if the arguments are cells and they reference the same
555 // cell, then they must be strictly equal.
556 branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR, invert ? notTaken : taken);
// Different cells may still be strictly equal (e.g. via specialized cell
// comparison); ask the cell-specific runtime helper.
558 silentSpillAllRegisters(resultPayloadGPR);
559 callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
560 m_jit.exceptionCheck();
561 silentFillAllRegisters(resultPayloadGPR);
563 branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
565 // FIXME: Add fast paths for twoCells, number etc.
// General case: full strict-equality runtime call.
567 silentSpillAllRegisters(resultPayloadGPR);
568 callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
569 m_jit.exceptionCheck();
570 silentFillAllRegisters(resultPayloadGPR);
572 branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
// Strict-equality compare producing a boolean result (non-fused). Same
// structure as the peephole version, but materializes 0/1 in a payload
// register and boxes it via booleanResult.
578 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
580 JSValueOperand arg1(this, node->child1());
581 JSValueOperand arg2(this, node->child2());
582 GPRReg arg1TagGPR = arg1.tagGPR();
583 GPRReg arg1PayloadGPR = arg1.payloadGPR();
584 GPRReg arg2TagGPR = arg2.tagGPR();
585 GPRReg arg2PayloadGPR = arg2.payloadGPR();
587 GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
588 GPRReg resultPayloadGPR = resultPayload.gpr();
593 if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
594 // see if we get lucky: if the arguments are cells and they reference the same
595 // cell, then they must be strictly equal.
596 // FIXME: this should flush registers instead of silent spill/fill.
597 JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR);
// Identical cell pointers: result is statically !invert.
599 m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR);
600 JITCompiler::Jump done = m_jit.jump();
602 notEqualCase.link(&m_jit);
604 silentSpillAllRegisters(resultPayloadGPR);
605 callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
606 m_jit.exceptionCheck();
607 silentFillAllRegisters(resultPayloadGPR);
// Mask the helper's int result down to a single boolean bit.
609 m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
613 // FIXME: Add fast paths.
615 silentSpillAllRegisters(resultPayloadGPR);
616 callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
617 silentFillAllRegisters(resultPayloadGPR);
// NOTE(review): here exceptionCheck() appears *after* silentFillAllRegisters,
// whereas the cell branch above checks before refilling — ordering difference
// is preserved as-is; confirm against the full file before changing.
618 m_jit.exceptionCheck();
620 m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
623 booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
// Strict equality for "misc" values (null/undefined/boolean and friends):
// after optional MiscUse speculation on each child, two JSValues are strictly
// equal iff both their tag and payload words match. Result defaults to 0; the
// payload compare runs only when the tags already matched.
626 void SpeculativeJIT::compileMiscStrictEq(Node* node)
628 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
629 JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
630 GPRTemporary result(this);
632 if (node->child1().useKind() == MiscUse)
633 speculateMisc(node->child1(), op1.jsValueRegs());
634 if (node->child2().useKind() == MiscUse)
635 speculateMisc(node->child2(), op2.jsValueRegs());
637 m_jit.move(TrustedImm32(0), result.gpr());
638 JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR());
639 m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr());
640 notEqual.link(&m_jit);
641 booleanResult(result.gpr(), node);
// Emits a polymorphic call for all DFG call-shaped nodes: plain calls,
// constructs, tail calls (real and emulated), and the (forward-)varargs
// variants. Sets up the callee frame, emits a patchable fast path keyed on the
// callee cell, and a slow path through the CallLinkInfo virtual-call stub.
// NOTE(review): this chunk is missing many interior lines (case labels,
// braces, some statements, and declarations such as isTail/calleeTagGPR/
// scratch GPRs); comments describe only the visible structure.
644 void SpeculativeJIT::emitCall(Node* node)
// --- Classify the node into a CallLinkInfo::CallType plus varargs/tail flags.
646 CallLinkInfo::CallType callType;
647 bool isVarargs = false;
648 bool isForwardVarargs = false;
650 bool isEmulatedTail = false;
651 switch (node->op()) {
653 callType = CallLinkInfo::Call;
656 callType = CallLinkInfo::TailCall;
659 case TailCallInlinedCaller:
// Tail call from inlined code behaves like a normal call at machine level,
// but the recorded call-site origin is the caller's (emulated tail).
660 callType = CallLinkInfo::Call;
661 isEmulatedTail = true;
664 callType = CallLinkInfo::Construct;
667 callType = CallLinkInfo::CallVarargs;
670 case TailCallVarargs:
671 callType = CallLinkInfo::TailCallVarargs;
675 case TailCallVarargsInlinedCaller:
676 callType = CallLinkInfo::CallVarargs;
678 isEmulatedTail = true;
680 case ConstructVarargs:
681 callType = CallLinkInfo::ConstructVarargs;
684 case CallForwardVarargs:
685 callType = CallLinkInfo::CallVarargs;
686 isForwardVarargs = true;
688 case TailCallForwardVarargs:
689 callType = CallLinkInfo::TailCallVarargs;
691 isForwardVarargs = true;
693 case TailCallForwardVarargsInlinedCaller:
694 callType = CallLinkInfo::CallVarargs;
695 isEmulatedTail = true;
696 isForwardVarargs = true;
698 case ConstructForwardVarargs:
699 callType = CallLinkInfo::ConstructVarargs;
700 isForwardVarargs = true;
703 DFG_CRASH(m_jit.graph(), node, "bad node type");
707 Edge calleeEdge = m_jit.graph().child(node, 0);
709 GPRReg calleePayloadGPR;
710 CallFrameShuffleData shuffleData;
// --- Argument setup. Varargs builds the callee frame via runtime helpers;
// the non-varargs path stores arguments into callee-frame slots directly.
712 // Gotta load the arguments somehow. Varargs is trickier.
713 if (isVarargs || isForwardVarargs) {
714 CallVarargsData* data = node->callVarargsData();
717 unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;
719 if (isForwardVarargs) {
// Forward-varargs: reuse the caller's arguments in place via the inline fast
// case; on overflow, throw stack-overflow through the runtime.
727 scratchGPR1 = JITCompiler::selectScratchGPR();
728 scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
729 scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);
731 m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
732 JITCompiler::JumpList slowCase;
733 emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
734 JITCompiler::Jump done = m_jit.jump();
735 slowCase.link(&m_jit);
736 callOperation(operationThrowStackOverflowForVarargs);
737 m_jit.exceptionCheck();
738 m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
740 resultGPR = scratchGPR2;
742 GPRReg argumentsPayloadGPR;
743 GPRReg argumentsTagGPR;
// Helper to (re)load the arguments operand while keeping |reservedGPR|
// (e.g. the frame-size result) out of the scratch set.
748 auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
749 if (reservedGPR != InvalidGPRReg)
751 JSValueOperand arguments(this, node->child2());
752 argumentsTagGPR = arguments.tagGPR();
753 argumentsPayloadGPR = arguments.payloadGPR();
754 if (reservedGPR != InvalidGPRReg)
758 scratchGPR1 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, reservedGPR);
759 scratchGPR2 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, reservedGPR);
760 scratchGPR3 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, scratchGPR2, reservedGPR);
763 loadArgumentsGPR(InvalidGPRReg);
765 DFG_ASSERT(m_jit.graph(), node, isFlushed());
// Non-forward varargs: size the callee frame, then fill it, via two runtime
// calls, reloading the arguments operand in between.
767 // Right now, arguments is in argumentsTagGPR/argumentsPayloadGPR and the register file is
769 callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, numUsedStackSlots, data->firstVarArgOffset);
770 m_jit.exceptionCheck();
772 // Now we have the argument count of the callee frame, but we've lost the arguments operand.
773 // Reconstruct the arguments operand while preserving the callee frame.
774 loadArgumentsGPR(GPRInfo::returnValueGPR);
775 m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
776 emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
777 m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);
779 callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsTagGPR, argumentsPayloadGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR);
780 m_jit.exceptionCheck();
781 resultGPR = GPRInfo::returnValueGPR;
784 m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister);
786 DFG_ASSERT(m_jit.graph(), node, isFlushed());
788 // We don't need the arguments array anymore.
// Store "this" into callee-argument slot 0 of the varargs frame.
792 // Now set up the "this" argument.
793 JSValueOperand thisArgument(this, node->child3());
794 GPRReg thisArgumentTagGPR = thisArgument.tagGPR();
795 GPRReg thisArgumentPayloadGPR = thisArgument.payloadGPR();
798 m_jit.store32(thisArgumentTagGPR, JITCompiler::calleeArgumentTagSlot(0));
799 m_jit.store32(thisArgumentPayloadGPR, JITCompiler::calleeArgumentPayloadSlot(0));
801 // The call instruction's first child is either the function (normal call) or the
802 // receiver (method call). subsequent children are the arguments.
803 int numPassedArgs = node->numChildren() - 1;
805 if (node->op() == TailCall) {
// Tail call: record value recoveries so the CallFrameShuffler can rebuild the
// caller frame into the callee frame instead of storing args now.
806 JSValueOperand callee(this, calleeEdge);
807 calleeTagGPR = callee.tagGPR();
808 calleePayloadGPR = callee.payloadGPR();
811 shuffleData.numLocals = m_jit.graph().frameRegisterCount();
812 shuffleData.callee = ValueRecovery::inPair(calleeTagGPR, calleePayloadGPR);
813 shuffleData.args.resize(numPassedArgs);
815 for (int i = 0; i < numPassedArgs; ++i) {
816 Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
817 GenerationInfo& info = generationInfo(argEdge.node());
819 shuffleData.args[i] = info.recovery(argEdge->virtualRegister());
// Non-tail: write the argument count and each argument's tag/payload into
// the callee frame slots.
822 m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(JSStack::ArgumentCount));
824 for (int i = 0; i < numPassedArgs; i++) {
825 Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
826 JSValueOperand arg(this, argEdge);
827 GPRReg argTagGPR = arg.tagGPR();
828 GPRReg argPayloadGPR = arg.payloadGPR();
831 m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i));
832 m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i));
837 if (node->op() != TailCall) {
838 JSValueOperand callee(this, calleeEdge);
839 calleeTagGPR = callee.tagGPR();
840 calleePayloadGPR = callee.payloadGPR();
842 m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(JSStack::Callee));
843 m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(JSStack::Callee));
849 GPRFlushedCallResult resultPayload(this);
850 GPRFlushedCallResult2 resultTag(this);
851 GPRReg resultPayloadGPR = resultPayload.gpr();
852 GPRReg resultTagGPR = resultTag.gpr();
854 JITCompiler::DataLabelPtr targetToCheck;
855 JITCompiler::JumpList slowPath;
// --- Call-site bookkeeping: for emulated tail calls the recorded origin is
// the caller skipping tail calls, otherwise the node's own origin.
857 CodeOrigin staticOrigin = node->origin.semantic;
858 ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame->getCallerSkippingTailCalls());
859 ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame && staticOrigin.inlineCallFrame->getCallerSkippingTailCalls()));
860 CodeOrigin dynamicOrigin =
861 isEmulatedTail ? *staticOrigin.inlineCallFrame->getCallerSkippingTailCalls() : staticOrigin;
862 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(dynamicOrigin, m_stream->size());
863 m_jit.emitStoreCallSiteIndex(callSite);
865 CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo();
// Fast path guard: callee must be a cell and match the patchable target.
867 slowPath.append(m_jit.branchIfNotCell(JSValueRegs(calleeTagGPR, calleePayloadGPR)));
868 slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
871 if (node->op() == TailCall) {
872 info->setFrameShuffleData(shuffleData);
873 CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
875 m_jit.emitRestoreCalleeSaves();
876 m_jit.prepareForTailCallSlow();
880 JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall();
882 JITCompiler::Jump done = m_jit.jump();
// --- Slow path: route through the virtual call stub, with the callee
// arranged in regT1 (tag) / regT0 (payload) as the stub expects.
884 slowPath.link(&m_jit);
886 if (node->op() == TailCall) {
887 CallFrameShuffler callFrameShuffler(m_jit, shuffleData);
888 callFrameShuffler.setCalleeJSValueRegs(JSValueRegs(
889 GPRInfo::regT1, GPRInfo::regT0));
890 callFrameShuffler.prepareForSlowPath();
892 // Callee payload needs to be in regT0, tag in regT1
893 if (calleeTagGPR == GPRInfo::regT0) {
894 if (calleePayloadGPR == GPRInfo::regT1)
895 m_jit.swap(GPRInfo::regT1, GPRInfo::regT0);
897 m_jit.move(calleeTagGPR, GPRInfo::regT1);
898 m_jit.move(calleePayloadGPR, GPRInfo::regT0);
901 m_jit.move(calleePayloadGPR, GPRInfo::regT0);
902 m_jit.move(calleeTagGPR, GPRInfo::regT1);
906 m_jit.emitRestoreCalleeSaves();
// regT2 carries the CallLinkInfo for the virtual-call thunk.
909 m_jit.move(MacroAssembler::TrustedImmPtr(info), GPRInfo::regT2);
910 JITCompiler::Call slowCall = m_jit.nearCall();
// A true tail call never returns here.
915 m_jit.abortWithReason(JITDidReturnFromTailCall);
917 m_jit.setupResults(resultPayloadGPR, resultTagGPR);
919 jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly);
920 // After the calls are done, we need to reestablish our stack
921 // pointer. We rely on this for varargs calls, calls with arity
922 // mismatch (the callframe is slided) and tail calls.
923 m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
926 info->setUpCall(callType, node->origin.semantic, calleePayloadGPR);
927 m_jit.addJSCall(fastCall, slowCall, targetToCheck, info);
// Fills |edge| into a GPR as an unboxed int32, emitting a speculation check
// (OSR exit on failure) against the Int32 tag whenever the abstract type is
// not already proven to be SpecInt32. Sets |returnFormat| to DataFormatInt32
// on every path visible here.
// NOTE(review): this listing is elided (braces/else/break lines are missing
// from view); comments describe only the statements that are visible.
930 template<bool strict>
931 GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
933 AbstractValue& value = m_state.forNode(edge);
934 SpeculatedType type = value.m_type;
935 ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));
// Narrow the abstract value to SpecInt32; if nothing can flow through, this
// code path is dynamically unreachable — terminate speculative execution.
937 m_interpreter.filter(value, SpecInt32);
938 if (value.isClear()) {
939 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
940 returnFormat = DataFormatInt32;
944 VirtualRegister virtualRegister = edge->virtualRegister();
945 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
947 switch (info.registerFormat()) {
// Not in a register yet: either materialize the int32 constant, or reload
// the value from its spill slot (checking the tag word if needed).
948 case DataFormatNone: {
949 if (edge->hasConstant()) {
950 ASSERT(edge->isInt32Constant());
951 GPRReg gpr = allocate();
952 m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
953 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
954 info.fillInt32(*m_stream, gpr);
955 returnFormat = DataFormatInt32;
959 DataFormat spillFormat = info.spillFormat();
961 ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);
963 // If we know this was spilled as an integer we can fill without checking.
964 if (type & ~SpecInt32)
965 speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
// Tag verified (or statically known): only the payload word needs loading.
967 GPRReg gpr = allocate();
968 m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
969 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
970 info.fillInt32(*m_stream, gpr);
971 returnFormat = DataFormatInt32;
// Boxed in a tag/payload register pair: check the tag if necessary, then
// release the tag register and keep only the payload as the int32.
975 case DataFormatJSInt32:
977 // Check the value is an integer.
978 GPRReg tagGPR = info.tagGPR();
979 GPRReg payloadGPR = info.payloadGPR();
981 m_gprs.lock(payloadGPR);
982 if (type & ~SpecInt32)
983 speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
984 m_gprs.unlock(tagGPR);
985 m_gprs.release(tagGPR);
986 m_gprs.release(payloadGPR);
987 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
988 info.fillInt32(*m_stream, payloadGPR);
989 // If !strict we're done, return.
990 returnFormat = DataFormatInt32;
// Already an unboxed int32 in a register — nothing to convert.
994 case DataFormatInt32: {
995 GPRReg gpr = info.gpr();
997 returnFormat = DataFormatInt32;
// Any other format reaching an Int32 fill is a compiler bug.
1001 case DataFormatCell:
1002 case DataFormatBoolean:
1003 case DataFormatJSDouble:
1004 case DataFormatJSCell:
1005 case DataFormatJSBoolean:
1006 case DataFormatDouble:
1007 case DataFormatStorage:
1009 RELEASE_ASSERT_NOT_REACHED();
1010 return InvalidGPRReg;
// Non-strict int32 fill: delegates to the shared template with strict=false
// and reports the resulting format through |returnFormat|.
1014 GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
1016 return fillSpeculateInt32Internal<false>(edge, returnFormat);
// Strict int32 fill: delegates with strict=true and asserts that the fill
// always yields DataFormatInt32 (the out-param is otherwise unused).
1019 GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
1021 DataFormat mustBeDataFormatInt32;
1022 GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
1023 ASSERT(mustBeDataFormatInt32 == DataFormatInt32);
// Fills a DoubleRep |edge| into an FPR. Because the edge has a double result,
// no speculation check is needed here — only constant materialization or a
// reload from the spill slot.
// NOTE(review): listing is elided; return statements between cases are not
// visible in this view.
1027 FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
1029 ASSERT(isDouble(edge.useKind()));
1030 ASSERT(edge->hasDoubleResult());
1031 VirtualRegister virtualRegister = edge->virtualRegister();
1032 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1034 if (info.registerFormat() == DataFormatNone) {
// Constants are loaded from the code block's double-constant pool.
1036 if (edge->hasConstant()) {
1037 RELEASE_ASSERT(edge->isNumberConstant());
1038 FPRReg fpr = fprAllocate();
1039 m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(edge.node())), fpr);
1040 m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
1041 info.fillDouble(*m_stream, fpr);
// Spilled values must have been spilled as doubles.
1045 RELEASE_ASSERT(info.spillFormat() == DataFormatDouble);
1046 FPRReg fpr = fprAllocate();
1047 m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
1048 m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
1049 info.fillDouble(*m_stream, fpr);
// Already live in an FPR.
1053 RELEASE_ASSERT(info.registerFormat() == DataFormatDouble);
1054 FPRReg fpr = info.fpr();
// Fills |edge| into a GPR as an unboxed cell pointer, speculating (with an
// OSR-exit check against the Cell tag) when the abstract type is not already
// proven to be SpecCell.
// NOTE(review): listing is elided (braces/else/break/return lines missing
// from view); comments cover only the visible statements.
1059 GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
1061 AbstractValue& value = m_state.forNode(edge);
1062 SpeculatedType type = value.m_type;
1063 ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
// If filtering to SpecCell leaves nothing, this path can never execute.
1065 m_interpreter.filter(value, SpecCell);
1066 if (value.isClear()) {
1067 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1071 VirtualRegister virtualRegister = edge->virtualRegister();
1072 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1074 switch (info.registerFormat()) {
// Not in a register: materialize a cell constant or reload from the spill
// slot, tag-checking the spilled value if needed.
1075 case DataFormatNone: {
1076 if (edge->hasConstant()) {
1077 JSValue jsValue = edge->asJSValue();
1078 GPRReg gpr = allocate();
1079 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1080 m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
1081 info.fillCell(*m_stream, gpr);
1085 ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell);
1086 if (type & ~SpecCell) {
1089 JSValueSource(JITCompiler::addressFor(virtualRegister)),
1092 MacroAssembler::NotEqual,
1093 JITCompiler::tagFor(virtualRegister),
1094 TrustedImm32(JSValue::CellTag)));
// Tag verified (or statically known): only the payload word is needed.
1096 GPRReg gpr = allocate();
1097 m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
1098 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1099 info.fillCell(*m_stream, gpr);
// Already an unboxed cell in a register.
1103 case DataFormatCell: {
1104 GPRReg gpr = info.gpr();
// Boxed tag/payload pair: check cell-ness if necessary, then drop the tag
// register and keep the payload as the cell pointer.
1109 case DataFormatJSCell:
1110 case DataFormatJS: {
1111 GPRReg tagGPR = info.tagGPR();
1112 GPRReg payloadGPR = info.payloadGPR();
1113 m_gprs.lock(tagGPR);
1114 m_gprs.lock(payloadGPR);
1115 if (type & ~SpecCell) {
1117 BadType, JSValueRegs(tagGPR, payloadGPR), edge,
1118 m_jit.branchIfNotCell(info.jsValueRegs()));
1120 m_gprs.unlock(tagGPR);
1121 m_gprs.release(tagGPR);
1122 m_gprs.release(payloadGPR);
1123 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell);
1124 info.fillCell(*m_stream, payloadGPR);
// Any other format reaching a cell fill is a compiler bug.
1128 case DataFormatJSInt32:
1129 case DataFormatInt32:
1130 case DataFormatJSDouble:
1131 case DataFormatJSBoolean:
1132 case DataFormatBoolean:
1133 case DataFormatDouble:
1134 case DataFormatStorage:
1135 RELEASE_ASSERT_NOT_REACHED();
1138 RELEASE_ASSERT_NOT_REACHED();
1139 return InvalidGPRReg;
// Fills |edge| into a GPR as an unboxed boolean (payload 0/1), speculating
// (with an OSR-exit check against the Boolean tag) when the abstract type is
// not already proven to be SpecBoolean. Mirrors fillSpeculateCell above.
// NOTE(review): listing is elided; comments cover only visible statements.
1143 GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
1145 AbstractValue& value = m_state.forNode(edge);
1146 SpeculatedType type = value.m_type;
1147 ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean));
// If filtering to SpecBoolean leaves nothing, this path can never execute.
1149 m_interpreter.filter(value, SpecBoolean);
1150 if (value.isClear()) {
1151 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1155 VirtualRegister virtualRegister = edge->virtualRegister();
1156 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1158 switch (info.registerFormat()) {
// Not in a register: materialize the boolean constant or reload the spill,
// tag-checking the spilled value if needed.
1159 case DataFormatNone: {
1160 if (edge->hasConstant()) {
1161 JSValue jsValue = edge->asJSValue();
1162 GPRReg gpr = allocate();
1163 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1164 m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr);
1165 info.fillBoolean(*m_stream, gpr);
1169 ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatBoolean);
1171 if (type & ~SpecBoolean)
1172 speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1174 GPRReg gpr = allocate();
1175 m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
1176 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1177 info.fillBoolean(*m_stream, gpr);
// Already an unboxed boolean in a register.
1181 case DataFormatBoolean: {
1182 GPRReg gpr = info.gpr();
// Boxed tag/payload pair: check the Boolean tag if necessary, then drop the
// tag register and keep the payload.
1187 case DataFormatJSBoolean:
1188 case DataFormatJS: {
1189 GPRReg tagGPR = info.tagGPR();
1190 GPRReg payloadGPR = info.payloadGPR();
1191 m_gprs.lock(tagGPR);
1192 m_gprs.lock(payloadGPR);
1193 if (type & ~SpecBoolean)
1194 speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag)));
1196 m_gprs.unlock(tagGPR);
1197 m_gprs.release(tagGPR);
1198 m_gprs.release(payloadGPR);
1199 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderBoolean);
1200 info.fillBoolean(*m_stream, payloadGPR);
// Any other format reaching a boolean fill is a compiler bug.
1204 case DataFormatJSInt32:
1205 case DataFormatInt32:
1206 case DataFormatJSDouble:
1207 case DataFormatJSCell:
1208 case DataFormatCell:
1209 case DataFormatDouble:
1210 case DataFormatStorage:
1211 RELEASE_ASSERT_NOT_REACHED();
1214 RELEASE_ASSERT_NOT_REACHED();
1215 return InvalidGPRReg;
// Emits a GC write barrier for storing |valueEdge| into |baseEdge|. Only
// called when the value is not statically known to be a non-cell (asserted),
// since non-cell stores need no barrier. Uses two scratch registers.
1219 void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge)
1221 ASSERT(!isKnownNotCell(valueEdge.node()));
1223 SpeculateCellOperand base(this, baseEdge);
1224 JSValueOperand value(this, valueEdge);
1225 GPRTemporary scratch1(this);
1226 GPRTemporary scratch2(this);
// On 32-bit, the tag register is enough to decide cell-ness in the barrier.
1228 writeBarrier(base.gpr(), value.tagGPR(), valueEdge, scratch1.gpr(), scratch2.gpr());
// Compiles Object==Object equality: speculates both children are objects
// (and, when the masquerades-as-undefined watchpoint is invalid, also that
// neither is MasqueradesAsUndefined), then compares the two cell pointers.
// Produces an unboxed boolean result.
1231 void SpeculativeJIT::compileObjectEquality(Node* node)
1233 SpeculateCellOperand op1(this, node->child1());
1234 SpeculateCellOperand op2(this, node->child2());
1235 GPRReg op1GPR = op1.gpr();
1236 GPRReg op2GPR = op2.gpr();
// Fast case: watchpoint valid, so a plain is-object check suffices per side.
1238 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1240 JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
1242 JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
// Slow case: additionally OSR-exit if either object masquerades as undefined.
1245 JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
1246 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1248 MacroAssembler::NonZero,
1249 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1250 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1253 JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
1254 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1256 MacroAssembler::NonZero,
1257 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1258 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
// Pointer identity decides equality for non-masquerading objects.
1261 GPRTemporary resultPayload(this, Reuse, op2);
1262 GPRReg resultPayloadGPR = resultPayload.gpr();
1264 MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
1265 m_jit.move(TrustedImm32(1), resultPayloadGPR);
1266 MacroAssembler::Jump done = m_jit.jump();
1267 falseCase.link(&m_jit);
1268 m_jit.move(TrustedImm32(0), resultPayloadGPR);
1271 booleanResult(resultPayloadGPR, node);
// Compiles strict equality (===) where one side is speculated to be an
// object and the other is an arbitrary JSValue. A non-cell right operand can
// never strict-equal an object, so it yields false without comparison.
1274 void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
1276 SpeculateCellOperand op1(this, objectChild);
1277 JSValueOperand op2(this, otherChild);
1279 GPRReg op1GPR = op1.gpr();
1280 GPRReg op2GPR = op2.payloadGPR();
1282 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1284 GPRTemporary resultPayload(this, Reuse, op1);
1285 GPRReg resultPayloadGPR = resultPayload.gpr();
1287 MacroAssembler::Jump op2CellJump = m_jit.branchIfCell(op2.jsValueRegs());
// Right operand is not a cell: strict equality with an object is false.
1289 m_jit.move(TrustedImm32(0), resultPayloadGPR);
1290 MacroAssembler::Jump op2NotCellJump = m_jit.jump();
1292 // At this point we know that we can perform a straight-forward equality comparison on pointer
1293 // values because we are doing strict equality.
1294 op2CellJump.link(&m_jit);
1295 m_jit.compare32(MacroAssembler::Equal, op1GPR, op2GPR, resultPayloadGPR);
1297 op2NotCellJump.link(&m_jit);
1298 booleanResult(resultPayloadGPR, m_currentNode);
// Peephole-fused form of compileObjectStrictEquality: instead of producing a
// boolean, branches directly to the Branch node's taken/notTaken blocks.
// A non-cell right operand jumps straight to notTaken.
1301 void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
1303 BasicBlock* taken = branchNode->branchData()->taken.block;
1304 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1306 SpeculateCellOperand op1(this, objectChild);
1307 JSValueOperand op2(this, otherChild);
1309 GPRReg op1GPR = op1.gpr();
1310 GPRReg op2GPR = op2.payloadGPR();
1312 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1314 branch32(MacroAssembler::NotEqual, op2.tagGPR(), TrustedImm32(JSValue::CellTag), notTaken);
// When the taken block is the fall-through, invert the comparison so the
// common path needs no jump.
1316 if (taken == nextBlock()) {
1317 branch32(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
1320 branch32(MacroAssembler::Equal, op1GPR, op2GPR, taken);
// Compiles equality where the left child is speculated Object and the right
// is speculated Object-or-Other (null/undefined). Object vs object compares
// pointers; object vs null/undefined is false. Produces an unboxed boolean.
// NOTE(review): listing is elided; comments cover only visible statements.
1325 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
1327 SpeculateCellOperand op1(this, leftChild);
1328 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1329 GPRTemporary result(this);
1331 GPRReg op1GPR = op1.gpr();
1332 GPRReg op2TagGPR = op2.tagGPR();
1333 GPRReg op2PayloadGPR = op2.payloadGPR();
1334 GPRReg resultGPR = result.gpr();
1336 bool masqueradesAsUndefinedWatchpointValid =
1337 masqueradesAsUndefinedWatchpointIsStillValid();
// Left side: must be an object; if the watchpoint is invalid, also OSR-exit
// on MasqueradesAsUndefined.
1339 if (masqueradesAsUndefinedWatchpointValid) {
1341 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1344 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1345 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1347 MacroAssembler::NonZero,
1348 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1349 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1353 // It seems that most of the time when programs do a == b where b may be either null/undefined
1354 // or an object, b is usually an object. Balance the branches to make that case fast.
1355 MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs());
1357 // We know that within this branch, rightChild must be a cell.
1358 if (masqueradesAsUndefinedWatchpointValid) {
1360 JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR));
1363 JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR));
1364 speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild,
1366 MacroAssembler::NonZero,
1367 MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()),
1368 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1371 // At this point we know that we can perform a straight-forward equality comparison on pointer
1372 // values because both left and right are pointers to objects that have no special equality
1374 MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2PayloadGPR);
1375 MacroAssembler::Jump trueCase = m_jit.jump();
1377 rightNotCell.link(&m_jit);
1379 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1380 // prove that it is either null or undefined.
1381 if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
// OR-ing the tag with 1 folds UndefinedTag into NullTag, so a single
// compare against NullTag checks for both null and undefined.
1382 m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR);
1385 JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther,
1387 MacroAssembler::NotEqual, resultGPR,
1388 MacroAssembler::TrustedImm32(JSValue::NullTag)));
// Merge: object!=object and object==null/undefined both produce false.
1391 falseCase.link(&m_jit);
1392 m_jit.move(TrustedImm32(0), resultGPR);
1393 MacroAssembler::Jump done = m_jit.jump();
1394 trueCase.link(&m_jit);
1395 m_jit.move(TrustedImm32(1), resultGPR);
1398 booleanResult(resultGPR, m_currentNode);
// Peephole-fused form of compileObjectToObjectOrOtherEquality: branches
// straight to taken/notTaken instead of materializing a boolean result.
// NOTE(review): listing is elided; comments cover only visible statements.
1401 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
1403 BasicBlock* taken = branchNode->branchData()->taken.block;
1404 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1406 SpeculateCellOperand op1(this, leftChild);
1407 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1408 GPRTemporary result(this);
1410 GPRReg op1GPR = op1.gpr();
1411 GPRReg op2TagGPR = op2.tagGPR();
1412 GPRReg op2PayloadGPR = op2.payloadGPR();
1413 GPRReg resultGPR = result.gpr();
1415 bool masqueradesAsUndefinedWatchpointValid =
1416 masqueradesAsUndefinedWatchpointIsStillValid();
// Left side: must be an object; if the watchpoint is invalid, also OSR-exit
// on MasqueradesAsUndefined.
1418 if (masqueradesAsUndefinedWatchpointValid) {
1420 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1423 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1424 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1426 MacroAssembler::NonZero,
1427 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1428 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1431 // It seems that most of the time when programs do a == b where b may be either null/undefined
1432 // or an object, b is usually an object. Balance the branches to make that case fast.
1433 MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs());
1435 // We know that within this branch, rightChild must be a cell.
1436 if (masqueradesAsUndefinedWatchpointValid) {
1438 JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
1439 m_jit.branchIfNotObject(op2PayloadGPR));
1442 JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
1443 m_jit.branchIfNotObject(op2PayloadGPR));
1444 speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild,
1446 MacroAssembler::NonZero,
1447 MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()),
1448 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1451 // At this point we know that we can perform a straight-forward equality comparison on pointer
1452 // values because both left and right are pointers to objects that have no special equality
1454 branch32(MacroAssembler::Equal, op1GPR, op2PayloadGPR, taken);
1456 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1457 // prove that it is either null or undefined.
// If no type check is required, the not-cell case needs no tag inspection
// and can fall through to the notTaken jump directly.
1458 if (!needsTypeCheck(rightChild, SpecCell | SpecOther))
1459 rightNotCell.link(&m_jit);
1461 jump(notTaken, ForceJump);
1463 rightNotCell.link(&m_jit);
// OR with 1 folds UndefinedTag into NullTag (single compare covers both).
1464 m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR);
1467 JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther,
1469 MacroAssembler::NotEqual, resultGPR,
1470 MacroAssembler::TrustedImm32(JSValue::NullTag)));
// Compiles an int32 relational comparison (|condition| selects <, <=, ==,
// etc.) of the node's two children into an unboxed boolean result.
1476 void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
1478 SpeculateInt32Operand op1(this, node->child1());
1479 SpeculateInt32Operand op2(this, node->child2());
1480 GPRTemporary resultPayload(this);
1482 m_jit.compare32(condition, op1.gpr(), op2.gpr(), resultPayload.gpr());
1484 // If we add a DataFormatBool, we should use it here.
1485 booleanResult(resultPayload.gpr(), node);
// Compiles a double comparison into an unboxed boolean: result starts at 1,
// and is overwritten with 0 only when the branch on |condition| is NOT taken
// (the taken branch skips the zeroing).
1488 void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
1490 SpeculateDoubleOperand op1(this, node->child1());
1491 SpeculateDoubleOperand op2(this, node->child2());
1492 GPRTemporary resultPayload(this);
1494 m_jit.move(TrustedImm32(1), resultPayload.gpr());
1495 MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
1496 m_jit.move(TrustedImm32(0), resultPayload.gpr());
1497 trueCase.link(&m_jit);
1499 booleanResult(resultPayload.gpr(), node);
// Compiles LogicalNot for an ObjectOrOther use: objects are truthy (result
// 0) unless they masquerade as undefined for this global object; null and
// undefined are falsy (result 1). Produces an unboxed boolean.
// NOTE(review): listing is elided; comments cover only visible statements.
1502 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
1504 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1505 GPRTemporary resultPayload(this);
1506 GPRReg valueTagGPR = value.tagGPR();
1507 GPRReg valuePayloadGPR = value.payloadGPR();
1508 GPRReg resultPayloadGPR = resultPayload.gpr();
// Deliberately unallocated here; adopted below only when actually needed.
1509 GPRTemporary structure;
1510 GPRReg structureGPR = InvalidGPRReg;
1512 bool masqueradesAsUndefinedWatchpointValid =
1513 masqueradesAsUndefinedWatchpointIsStillValid();
1515 if (!masqueradesAsUndefinedWatchpointValid) {
1516 // The masquerades as undefined case will use the structure register, so allocate it here.
1517 // Do this at the top of the function to avoid branching around a register allocation.
1518 GPRTemporary realStructure(this);
1519 structure.adopt(realStructure);
1520 structureGPR = structure.gpr();
1523 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs());
1524 if (masqueradesAsUndefinedWatchpointValid) {
1526 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
1527 m_jit.branchIfNotObject(valuePayloadGPR));
1530 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
1531 m_jit.branchIfNotObject(valuePayloadGPR));
// Watchpoint invalid: OSR-exit only if the object masquerades as undefined
// for the *current* global object; other globals' masqueraders stay truthy.
1533 MacroAssembler::Jump isNotMasqueradesAsUndefined =
1535 MacroAssembler::Zero,
1536 MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()),
1537 MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
1539 m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), structureGPR);
1540 speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse,
1542 MacroAssembler::Equal,
1543 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
1544 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
1546 isNotMasqueradesAsUndefined.link(&m_jit);
// Object case: !object is false.
1548 m_jit.move(TrustedImm32(0), resultPayloadGPR);
1549 MacroAssembler::Jump done = m_jit.jump();
1551 notCell.link(&m_jit);
1553 COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
1554 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
// OR with 1 folds UndefinedTag into NullTag; one compare checks both.
1555 m_jit.or32(TrustedImm32(1), valueTagGPR, resultPayloadGPR);
1557 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther,
1559 MacroAssembler::NotEqual,
1561 TrustedImm32(JSValue::NullTag)));
// Null/undefined case: !value is true.
1563 m_jit.move(TrustedImm32(1), resultPayloadGPR);
1567 booleanResult(resultPayloadGPR, m_currentNode);
// Compiles LogicalNot, dispatching on the child's use kind: boolean (xor 1),
// object-or-other, int32 (compare against 0), double (branch on non-zero),
// string (zero length), and the generic JSValue path with a slow call to
// operationConvertJSValueToBoolean.
// NOTE(review): listing is elided (case labels/breaks missing from view).
1570 void SpeculativeJIT::compileLogicalNot(Node* node)
1572 switch (node->child1().useKind()) {
// Boolean: flip the low bit.
1574 case KnownBooleanUse: {
1575 SpeculateBooleanOperand value(this, node->child1());
1576 GPRTemporary result(this, Reuse, value);
1577 m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr());
1578 booleanResult(result.gpr(), node);
1582 case ObjectOrOtherUse: {
1583 compileObjectOrOtherLogicalNot(node->child1());
// Int32: !x is (x == 0).
1588 SpeculateInt32Operand value(this, node->child1());
1589 GPRTemporary resultPayload(this, Reuse, value);
1590 m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), resultPayload.gpr());
1591 booleanResult(resultPayload.gpr(), node);
// Double: result is 1 unless the value compares non-zero.
1595 case DoubleRepUse: {
1596 SpeculateDoubleOperand value(this, node->child1());
1597 FPRTemporary scratch(this);
1598 GPRTemporary resultPayload(this);
1599 m_jit.move(TrustedImm32(0), resultPayload.gpr());
1600 MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
1601 m_jit.move(TrustedImm32(1), resultPayload.gpr());
1602 nonZero.link(&m_jit);
1603 booleanResult(resultPayload.gpr(), node);
// Generic JSValue: fast path when the tag is Boolean; otherwise fall back
// to a slow-path call that converts the value to a boolean.
1608 JSValueOperand arg1(this, node->child1());
1609 GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
1610 GPRReg arg1TagGPR = arg1.tagGPR();
1611 GPRReg arg1PayloadGPR = arg1.payloadGPR();
1612 GPRReg resultPayloadGPR = resultPayload.gpr();
1616 JITCompiler::Jump slowCase = m_jit.branch32(JITCompiler::NotEqual, arg1TagGPR, TrustedImm32(JSValue::BooleanTag));
1618 m_jit.move(arg1PayloadGPR, resultPayloadGPR);
1620 addSlowPathGenerator(
1622 slowCase, this, operationConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR,
1623 arg1PayloadGPR, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
// The xor applies after either path, inverting the truthiness bit.
1625 m_jit.xor32(TrustedImm32(1), resultPayloadGPR);
1626 booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
1630 return compileStringZeroLength(node);
1633 RELEASE_ASSERT_NOT_REACHED();
// Emits a branch for an ObjectOrOther condition: objects jump to |taken|
// (after the masquerades-as-undefined checks when the watchpoint is
// invalid); null/undefined fall through toward |notTaken|.
// NOTE(review): listing is elided; comments cover only visible statements.
1638 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
1640 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1641 GPRTemporary scratch(this);
1642 GPRReg valueTagGPR = value.tagGPR();
1643 GPRReg valuePayloadGPR = value.payloadGPR();
1644 GPRReg scratchGPR = scratch.gpr();
1646 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs());
1647 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1649 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
1650 m_jit.branchIfNotObject(valuePayloadGPR));
1653 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
1654 m_jit.branchIfNotObject(valuePayloadGPR));
// Watchpoint invalid: OSR-exit only if this object masquerades as undefined
// for the current global object.
1656 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
1658 MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()),
1659 TrustedImm32(MasqueradesAsUndefined));
1661 m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), scratchGPR);
1662 speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse,
1664 MacroAssembler::Equal,
1665 MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
1666 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
1668 isNotMasqueradesAsUndefined.link(&m_jit);
// Object case: truthy, so branch to the taken block.
1670 jump(taken, ForceJump);
1672 notCell.link(&m_jit);
1674 COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
1675 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
// OR with 1 folds UndefinedTag into NullTag; one compare checks both.
1676 m_jit.or32(TrustedImm32(1), valueTagGPR, scratchGPR);
1678 JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther,
1679 m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
1684 noResult(m_currentNode);
// Emits code for a Branch node, dispatching on the condition's use kind:
// boolean test, object-or-other, string, int32/double zero tests, and the
// generic JSValue path with a slow-path boolean conversion call.
// NOTE(review): listing is elided (case labels/breaks/else lines missing
// from view); comments cover only the statements that are visible.
1687 void SpeculativeJIT::emitBranch(Node* node)
1689 BasicBlock* taken = node->branchData()->taken.block;
1690 BasicBlock* notTaken = node->branchData()->notTaken.block;
1692 switch (node->child1().useKind()) {
// Boolean condition: test the low bit; invert the condition (and swap the
// targets) when the taken block is the fall-through, saving a jump.
1694 case KnownBooleanUse: {
1695 SpeculateBooleanOperand value(this, node->child1());
1696 MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
1698 if (taken == nextBlock()) {
1699 condition = MacroAssembler::Zero;
1700 BasicBlock* tmp = taken;
1705 branchTest32(condition, value.gpr(), TrustedImm32(1), taken);
1712 case ObjectOrOtherUse: {
1713 emitObjectOrOtherBranch(node->child1(), taken, notTaken);
1718 emitStringBranch(node->child1(), taken, notTaken);
// Numeric conditions: branch on (non-)zero.
1724 if (node->child1().useKind() == Int32Use) {
1725 bool invert = false;
1727 if (taken == nextBlock()) {
1729 BasicBlock* tmp = taken;
1734 SpeculateInt32Operand value(this, node->child1());
1735 branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
1737 SpeculateDoubleOperand value(this, node->child1());
1738 FPRTemporary scratch(this);
1739 branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
// Generic JSValue: fast paths for Int32 and Boolean tags; anything else
// takes the slow call to convert the value to a boolean.
1749 JSValueOperand value(this, node->child1());
1751 GPRReg valueTagGPR = value.tagGPR();
1752 GPRReg valuePayloadGPR = value.payloadGPR();
1754 GPRTemporary result(this);
1755 GPRReg resultGPR = result.gpr();
// Children are consumed explicitly before the register state is perturbed
// by the silent spill/fill around the call.
1757 use(node->child1());
1759 JITCompiler::Jump fastPath = m_jit.branch32(JITCompiler::Equal, valueTagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag));
1760 JITCompiler::Jump slowPath = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::BooleanTag));
1762 fastPath.link(&m_jit);
1763 branchTest32(JITCompiler::Zero, valuePayloadGPR, notTaken);
1764 jump(taken, ForceJump);
1766 slowPath.link(&m_jit);
1767 silentSpillAllRegisters(resultGPR);
1768 callOperation(operationConvertJSValueToBoolean, resultGPR, valueTagGPR, valuePayloadGPR);
1769 silentFillAllRegisters(resultGPR);
1771 branchTest32(JITCompiler::NonZero, resultGPR, taken);
1774 noResult(node, UseChildrenCalledExplicitly);
1779 RELEASE_ASSERT_NOT_REACHED();
// Compiles PutByVal into contiguous (or alias/direct) storage on 32-bit:
// stores the tag and payload words side by side in the butterfly. Handles
// in-bounds stores, length-extending appends within vector capacity, and
// out-of-bounds slow-path calls.
// NOTE(review): listing is elided; comments cover only visible statements.
1784 template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType>
1785 void SpeculativeJIT::compileContiguousPutByVal(Node* node, BaseOperandType& base, PropertyOperandType& property, ValueOperandType& value, GPRReg valuePayloadReg, TagType valueTag)
1787 Edge child4 = m_jit.graph().varArgChild(node, 3);
1789 ArrayMode arrayMode = node->arrayMode();
1791 GPRReg baseReg = base.gpr();
1792 GPRReg propertyReg = property.gpr();
1794 StorageOperand storage(this, child4);
1795 GPRReg storageReg = storage.gpr();
// PutByValAlias: bounds were already proven by a prior access; store blind.
1797 if (node->op() == PutByValAlias) {
1798 // Store the value to the array.
1799 GPRReg propertyReg = property.gpr();
1800 m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
1801 m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
1807 MacroAssembler::Jump slowCase;
// In-bounds mode: index >= publicLength is an OSR exit. Otherwise, indices
// between publicLength and vectorLength grow the array in place; beyond
// vectorLength goes to the slow case.
1809 if (arrayMode.isInBounds()) {
1811 OutOfBounds, JSValueRegs(), 0,
1812 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1814 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1816 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1818 if (!arrayMode.isOutOfBounds())
1819 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Append within capacity: publicLength becomes index + 1. The register is
// restored immediately since propertyReg is still needed for the store.
1821 m_jit.add32(TrustedImm32(1), propertyReg);
1822 m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1823 m_jit.sub32(TrustedImm32(1), propertyReg);
1825 inBounds.link(&m_jit);
1828 m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
1829 m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
// Out-of-bounds stores call into the runtime; the operation chosen depends
// on direct-vs-normal put and the code block's strictness.
1836 if (arrayMode.isOutOfBounds()) {
1837 if (node->op() == PutByValDirect) {
1838 addSlowPathGenerator(slowPathCall(
1840 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
1841 NoResult, baseReg, propertyReg, valueTag, valuePayloadReg));
1843 addSlowPathGenerator(slowPathCall(
1845 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
1846 NoResult, baseReg, propertyReg, valueTag, valuePayloadReg));
1850 noResult(node, UseChildrenCalledExplicitly);
1853 void SpeculativeJIT::compile(Node* node)
1855 NodeType op = node->op();
1857 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1858 m_jit.clearRegisterAllocationOffsets();
1863 case DoubleConstant:
1864 case PhantomDirectArguments:
1865 case PhantomClonedArguments:
1866 initConstantInfo(node);
1870 speculate(node, node->child1());
1871 switch (node->child1().useKind()) {
1873 case DoubleRepRealUse: {
1874 SpeculateDoubleOperand op(this, node->child1());
1875 doubleResult(op.fpr(), node);
1880 case DoubleRepMachineIntUse: {
1881 RELEASE_ASSERT_NOT_REACHED();
1885 JSValueOperand op(this, node->child1());
1886 jsValueResult(op.tagGPR(), op.payloadGPR(), node);
1894 AbstractValue& value = m_state.variables().operand(node->local());
1896 // If the CFA is tracking this variable and it found that the variable
1897 // cannot have been assigned, then don't attempt to proceed.
1898 if (value.isClear()) {
1899 m_compileOkay = false;
1903 switch (node->variableAccessData()->flushFormat()) {
1904 case FlushedDouble: {
1905 FPRTemporary result(this);
1906 m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr());
1907 VirtualRegister virtualRegister = node->virtualRegister();
1908 m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
1909 generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr());
1913 case FlushedInt32: {
1914 GPRTemporary result(this);
1915 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
1917 // Like int32Result, but don't useChildren - our children are phi nodes,
1918 // and don't represent values within this dataflow with virtual registers.
1919 VirtualRegister virtualRegister = node->virtualRegister();
1920 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
1921 generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr());
1926 GPRTemporary result(this);
1927 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
1929 // Like cellResult, but don't useChildren - our children are phi nodes,
1930 // and don't represent values within this dataflow with virtual registers.
1931 VirtualRegister virtualRegister = node->virtualRegister();
1932 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell);
1933 generationInfoFromVirtualRegister(virtualRegister).initCell(node, node->refCount(), result.gpr());
1937 case FlushedBoolean: {
1938 GPRTemporary result(this);
1939 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
1941 // Like booleanResult, but don't useChildren - our children are phi nodes,
1942 // and don't represent values within this dataflow with virtual registers.
1943 VirtualRegister virtualRegister = node->virtualRegister();
1944 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean);
1945 generationInfoFromVirtualRegister(virtualRegister).initBoolean(node, node->refCount(), result.gpr());
1949 case FlushedJSValue: {
1950 GPRTemporary result(this);
1951 GPRTemporary tag(this);
1952 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
1953 m_jit.load32(JITCompiler::tagFor(node->machineLocal()), tag.gpr());
1955 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1956 // and don't represent values within this dataflow with virtual registers.
1957 VirtualRegister virtualRegister = node->virtualRegister();
1958 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
1959 m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS);
1961 generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), tag.gpr(), result.gpr(), DataFormatJS);
1966 RELEASE_ASSERT_NOT_REACHED();
1971 case GetLocalUnlinked: {
1972 GPRTemporary payload(this);
1973 GPRTemporary tag(this);
1974 m_jit.load32(JITCompiler::payloadFor(node->unlinkedMachineLocal()), payload.gpr());
1975 m_jit.load32(JITCompiler::tagFor(node->unlinkedMachineLocal()), tag.gpr());
1976 jsValueResult(tag.gpr(), payload.gpr(), node);
1981 compileMovHint(m_currentNode);
1987 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
1998 switch (node->variableAccessData()->flushFormat()) {
1999 case FlushedDouble: {
2000 SpeculateDoubleOperand value(this, node->child1());
2001 m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal()));
2003 // Indicate that it's no longer necessary to retrieve the value of
2004 // this bytecode variable from registers or other locations in the stack,
2005 // but that it is stored as a double.
2006 recordSetLocal(DataFormatDouble);
2010 case FlushedInt32: {
2011 SpeculateInt32Operand value(this, node->child1());
2012 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
2014 recordSetLocal(DataFormatInt32);
2019 SpeculateCellOperand cell(this, node->child1());
2020 GPRReg cellGPR = cell.gpr();
2021 m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node->machineLocal()));
2023 recordSetLocal(DataFormatCell);
2027 case FlushedBoolean: {
2028 SpeculateBooleanOperand value(this, node->child1());
2029 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
2031 recordSetLocal(DataFormatBoolean);
2035 case FlushedJSValue: {
2036 JSValueOperand value(this, node->child1());
2037 m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node->machineLocal()));
2038 m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node->machineLocal()));
2040 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2045 RELEASE_ASSERT_NOT_REACHED();
2052 // This is a no-op; it just marks the fact that the argument is being used.
2053 // But it may be profitable to use this as a hook to run speculation checks
2054 // on arguments, thereby allowing us to trivially eliminate such checks if
2055 // the argument is not used.
2056 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2062 compileBitwiseOp(node);
2068 compileShiftOp(node);
2071 case UInt32ToNumber: {
2072 compileUInt32ToNumber(node);
2076 case DoubleAsInt32: {
2077 compileDoubleAsInt32(node);
2081 case ValueToInt32: {
2082 compileValueToInt32(node);
2087 compileDoubleRep(node);
2092 compileValueRep(node);
2097 compileValueAdd(node);
2101 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2102 JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
2103 JSValueOperand op3(this, node->child3(), ManualOperandSpeculation);
2105 GPRReg op1TagGPR = op1.tagGPR();
2106 GPRReg op1PayloadGPR = op1.payloadGPR();
2107 GPRReg op2TagGPR = op2.tagGPR();
2108 GPRReg op2PayloadGPR = op2.payloadGPR();
2110 GPRReg op3PayloadGPR;
2111 if (node->child3()) {
2112 op3TagGPR = op3.tagGPR();
2113 op3PayloadGPR = op3.payloadGPR();
2115 op3TagGPR = InvalidGPRReg;
2116 op3PayloadGPR = InvalidGPRReg;
2121 GPRFlushedCallResult result(this);
2123 callOperation(operationStrCat3, result.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR, op3TagGPR, op3PayloadGPR);
2125 callOperation(operationStrCat2, result.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
2126 m_jit.exceptionCheck();
2128 cellResult(result.gpr(), node);
2133 compileArithAdd(node);
2137 compileArithClz32(node);
2141 compileMakeRope(node);
2145 compileArithSub(node);
2149 compileArithNegate(node);
2153 compileArithMul(node);
2157 compileArithDiv(node);
2162 compileArithMod(node);
2167 compileArithPow(node);
2172 switch (node->child1().useKind()) {
2174 SpeculateStrictInt32Operand op1(this, node->child1());
2175 GPRTemporary result(this, Reuse, op1);
2176 GPRTemporary scratch(this);
2178 m_jit.move(op1.gpr(), result.gpr());
2179 m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
2180 m_jit.add32(scratch.gpr(), result.gpr());
2181 m_jit.xor32(scratch.gpr(), result.gpr());
2182 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2183 int32Result(result.gpr(), node);
2188 case DoubleRepUse: {
2189 SpeculateDoubleOperand op1(this, node->child1());
2190 FPRTemporary result(this);
2192 m_jit.absDouble(op1.fpr(), result.fpr());
2193 doubleResult(result.fpr(), node);
2198 RELEASE_ASSERT_NOT_REACHED();
2206 switch (node->binaryUseKind()) {
2208 SpeculateStrictInt32Operand op1(this, node->child1());
2209 SpeculateStrictInt32Operand op2(this, node->child2());
2210 GPRTemporary result(this, Reuse, op1);
2212 GPRReg op1GPR = op1.gpr();
2213 GPRReg op2GPR = op2.gpr();
2214 GPRReg resultGPR = result.gpr();
2216 MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1GPR, op2GPR);
2217 m_jit.move(op2GPR, resultGPR);
2218 if (op1GPR != resultGPR) {
2219 MacroAssembler::Jump done = m_jit.jump();
2220 op1Less.link(&m_jit);
2221 m_jit.move(op1GPR, resultGPR);
2224 op1Less.link(&m_jit);
2226 int32Result(resultGPR, node);
2230 case DoubleRepUse: {
2231 SpeculateDoubleOperand op1(this, node->child1());
2232 SpeculateDoubleOperand op2(this, node->child2());
2233 FPRTemporary result(this, op1);
2235 FPRReg op1FPR = op1.fpr();
2236 FPRReg op2FPR = op2.fpr();
2237 FPRReg resultFPR = result.fpr();
2239 MacroAssembler::JumpList done;
2241 MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR);
2243 // op2 is either the lesser one or one of them is NaN
2244 MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR);
2246 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2247 // op1 + op2 and putting it into result.
2248 m_jit.addDouble(op1FPR, op2FPR, resultFPR);
2249 done.append(m_jit.jump());
2251 op2Less.link(&m_jit);
2252 m_jit.moveDouble(op2FPR, resultFPR);
2254 if (op1FPR != resultFPR) {
2255 done.append(m_jit.jump());
2257 op1Less.link(&m_jit);
2258 m_jit.moveDouble(op1FPR, resultFPR);
2260 op1Less.link(&m_jit);
2264 doubleResult(resultFPR, node);
2269 RELEASE_ASSERT_NOT_REACHED();
2276 compileArithSqrt(node);
2280 SpeculateDoubleOperand op1(this, node->child1());
2281 FPRTemporary result(this, op1);
2283 m_jit.convertDoubleToFloat(op1.fpr(), result.fpr());
2284 m_jit.convertFloatToDouble(result.fpr(), result.fpr());
2286 doubleResult(result.fpr(), node);
2291 compileArithRandom(node);
2297 compileArithRounding(node);
2301 SpeculateDoubleOperand op1(this, node->child1());
2302 FPRReg op1FPR = op1.fpr();
2306 FPRResult result(this);
2307 callOperation(sin, result.fpr(), op1FPR);
2308 doubleResult(result.fpr(), node);
2313 SpeculateDoubleOperand op1(this, node->child1());
2314 FPRReg op1FPR = op1.fpr();
2318 FPRResult result(this);
2319 callOperation(cos, result.fpr(), op1FPR);
2320 doubleResult(result.fpr(), node);
2325 compileArithLog(node);
2329 compileLogicalNot(node);
2333 if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
2338 if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
2342 case CompareGreater:
2343 if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
2347 case CompareGreaterEq:
2348 if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
2353 if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
2357 case CompareStrictEq:
2358 if (compileStrictEq(node))
2362 case StringCharCodeAt: {
2363 compileGetCharCodeAt(node);
2367 case StringCharAt: {
2368 // Relies on StringCharAt node having same basic layout as GetByVal
2369 compileGetByValOnString(node);
2373 case StringFromCharCode: {
2374 compileFromCharCode(node);
2384 case ArrayifyToStructure: {
2390 switch (node->arrayMode().type()) {
2391 case Array::SelectUsingPredictions:
2392 case Array::ForceExit:
2393 RELEASE_ASSERT_NOT_REACHED();
2394 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
2395 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2398 case Array::Undecided: {
2399 SpeculateStrictInt32Operand index(this, node->child2());
2400 GPRTemporary resultTag(this, Reuse, index);
2401 GPRTemporary resultPayload(this);
2403 GPRReg indexGPR = index.gpr();
2404 GPRReg resultTagGPR = resultTag.gpr();
2405 GPRReg resultPayloadGPR = resultPayload.gpr();
2407 use(node->child1());
2410 speculationCheck(OutOfBounds, JSValueRegs(), node,
2411 m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0)));
2413 m_jit.move(MacroAssembler::TrustedImm32(JSValue::UndefinedTag), resultTagGPR);
2414 m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR);
2415 jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
2418 case Array::Generic: {
2419 SpeculateCellOperand base(this, node->child1()); // Save a register, speculate cell. We'll probably be right.
2420 JSValueOperand property(this, node->child2());
2421 GPRReg baseGPR = base.gpr();
2422 GPRReg propertyTagGPR = property.tagGPR();
2423 GPRReg propertyPayloadGPR = property.payloadGPR();
2426 GPRFlushedCallResult2 resultTag(this);
2427 GPRFlushedCallResult resultPayload(this);
2428 callOperation(operationGetByValCell, resultTag.gpr(), resultPayload.gpr(), baseGPR, propertyTagGPR, propertyPayloadGPR);
2429 m_jit.exceptionCheck();
2431 jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
2435 case Array::Contiguous: {
2436 if (node->arrayMode().isInBounds()) {
2437 SpeculateStrictInt32Operand property(this, node->child2());
2438 StorageOperand storage(this, node->child3());
2440 GPRReg propertyReg = property.gpr();
2441 GPRReg storageReg = storage.gpr();
2446 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2448 GPRTemporary resultPayload(this);
2449 if (node->arrayMode().type() == Array::Int32) {
2450 ASSERT(!node->arrayMode().isSaneChain());
2453 OutOfBounds, JSValueRegs(), 0,
2455 MacroAssembler::Equal,
2456 MacroAssembler::BaseIndex(
2457 storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset),
2458 TrustedImm32(JSValue::EmptyValueTag)));
2460 MacroAssembler::BaseIndex(
2461 storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset),
2462 resultPayload.gpr());
2463 int32Result(resultPayload.gpr(), node);
2467 GPRTemporary resultTag(this);
2469 MacroAssembler::BaseIndex(
2470 storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset),
2473 MacroAssembler::BaseIndex(
2474 storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset),
2475 resultPayload.gpr());
2476 if (node->arrayMode().isSaneChain()) {
2477 JITCompiler::Jump notHole = m_jit.branch32(
2478 MacroAssembler::NotEqual, resultTag.gpr(),
2479 TrustedImm32(JSValue::EmptyValueTag));
2480 m_jit.move(TrustedImm32(JSValue::UndefinedTag), resultTag.gpr());
2481 m_jit.move(TrustedImm32(0), resultPayload.gpr());
2482 notHole.link(&m_jit);
2485 LoadFromHole, JSValueRegs(), 0,
2487 MacroAssembler::Equal, resultTag.gpr(),
2488 TrustedImm32(JSValue::EmptyValueTag)));
2490 jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
2494 SpeculateCellOperand base(this, node->child1());
2495 SpeculateStrictInt32Operand property(this, node->child2());
2496 StorageOperand storage(this, node->child3());
2498 GPRReg baseReg = base.gpr();
2499 GPRReg propertyReg = property.gpr();
2500 GPRReg storageReg = storage.gpr();
2505 GPRTemporary resultTag(this);
2506 GPRTemporary resultPayload(this);
2507 GPRReg resultTagReg = resultTag.gpr();
2508 GPRReg resultPayloadReg = resultPayload.gpr();
2510 MacroAssembler::JumpList slowCases;
2512 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2514 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg);
2515 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg);
2516 slowCases.append(m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag)));
2518 addSlowPathGenerator(
2520 slowCases, this, operationGetByValArrayInt,
2521 JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg));
2523 jsValueResult(resultTagReg, resultPayloadReg, node);
2526 case Array::Double: {
2527 if (node->arrayMode().isInBounds()) {
2528 SpeculateStrictInt32Operand property(this, node->child2());
2529 StorageOperand storage(this, node->child3());
2531 GPRReg propertyReg = property.gpr();
2532 GPRReg storageReg = storage.gpr();
2537 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2539 FPRTemporary result(this);
2540 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr());
2541 if (!node->arrayMode().isSaneChain())
2542 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr()));
2543 doubleResult(result.fpr(), node);
2547 SpeculateCellOperand base(this, node->child1());
2548 SpeculateStrictInt32Operand property(this, node->child2());
2549 StorageOperand storage(this, node->child3());
2551 GPRReg baseReg = base.gpr();
2552 GPRReg propertyReg = property.gpr();
2553 GPRReg storageReg = storage.gpr();
2558 GPRTemporary resultTag(this);
2559 GPRTemporary resultPayload(this);
2560 FPRTemporary temp(this);
2561 GPRReg resultTagReg = resultTag.gpr();
2562 GPRReg resultPayloadReg = resultPayload.gpr();
2563 FPRReg tempReg = temp.fpr();
2565 MacroAssembler::JumpList slowCases;
2567 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2569 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
2570 slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg));
2571 boxDouble(tempReg, resultTagReg, resultPayloadReg);
2573 addSlowPathGenerator(
2575 slowCases, this, operationGetByValArrayInt,
2576 JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg));
2578 jsValueResult(resultTagReg, resultPayloadReg, node);
2581 case Array::ArrayStorage:
2582 case Array::SlowPutArrayStorage: {
2583 if (node->arrayMode().isInBounds()) {
2584 SpeculateStrictInt32Operand property(this, node->child2());
2585 StorageOperand storage(this, node->child3());
2586 GPRReg propertyReg = property.gpr();
2587 GPRReg storageReg = storage.gpr();
2592 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2594 GPRTemporary resultTag(this);
2595 GPRTemporary resultPayload(this);
2597 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr());
2598 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)));
2599 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr());
2601 jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
2605 SpeculateCellOperand base(this, node->child1());
2606 SpeculateStrictInt32Operand property(this, node->child2());
2607 StorageOperand storage(this, node->child3());
2608 GPRReg propertyReg = property.gpr();
2609 GPRReg storageReg = storage.gpr();
2610 GPRReg baseReg = base.gpr();
2615 GPRTemporary resultTag(this);
2616 GPRTemporary resultPayload(this);
2617 GPRReg resultTagReg = resultTag.gpr();
2618 GPRReg resultPayloadReg = resultPayload.gpr();
2620 JITCompiler::Jump outOfBounds = m_jit.branch32(
2621 MacroAssembler::AboveOrEqual, propertyReg,
2622 MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
2624 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg);
2625 JITCompiler::Jump hole = m_jit.branch32(
2626 MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag));
2627 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg);
2629 JITCompiler::JumpList slowCases;
2630 slowCases.append(outOfBounds);
2631 slowCases.append(hole);
2632 addSlowPathGenerator(
2634 slowCases, this, operationGetByValArrayInt,
2635 JSValueRegs(resultTagReg, resultPayloadReg),
2636 baseReg, propertyReg));
2638 jsValueResult(resultTagReg, resultPayloadReg, node);
2642 compileGetByValOnString(node);
2644 case Array::DirectArguments:
2645 compileGetByValOnDirectArguments(node);
2647 case Array::ScopedArguments:
2648 compileGetByValOnScopedArguments(node);
2651 TypedArrayType type = node->arrayMode().typedArrayType();
2653 compileGetByValOnIntTypedArray(node, type);
2655 compileGetByValOnFloatTypedArray(node, type);
2660 case PutByValDirect:
2662 case PutByValAlias: {
2663 Edge child1 = m_jit.graph().varArgChild(node, 0);
2664 Edge child2 = m_jit.graph().varArgChild(node, 1);
2665 Edge child3 = m_jit.graph().varArgChild(node, 2);
2666 Edge child4 = m_jit.graph().varArgChild(node, 3);
2668 ArrayMode arrayMode = node->arrayMode().modeForPut();
2669 bool alreadyHandled = false;
2671 switch (arrayMode.type()) {
2672 case Array::SelectUsingPredictions:
2673 case Array::ForceExit:
2674 RELEASE_ASSERT_NOT_REACHED();
2675 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
2676 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
2677 alreadyHandled = true;
2680 case Array::Generic: {
2681 ASSERT(node->op() == PutByVal || node->op() == PutByValDirect);
2683 SpeculateCellOperand base(this, child1); // Save a register, speculate cell. We'll probably be right.
2684 JSValueOperand property(this, child2);
2685 JSValueOperand value(this, child3);
2686 GPRReg baseGPR = base.gpr();
2687 GPRReg propertyTagGPR = property.tagGPR();
2688 GPRReg propertyPayloadGPR = property.payloadGPR();
2689 GPRReg valueTagGPR = value.tagGPR();
2690 GPRReg valuePayloadGPR = value.payloadGPR();
2693 if (node->op() == PutByValDirect)
2694 callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR);
2696 callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR);
2697 m_jit.exceptionCheck();
2700 alreadyHandled = true;
2710 SpeculateCellOperand base(this, child1);
2711 SpeculateStrictInt32Operand property(this, child2);
2713 GPRReg baseReg = base.gpr();
2714 GPRReg propertyReg = property.gpr();
2716 switch (arrayMode.type()) {
2717 case Array::Int32: {
2718 SpeculateInt32Operand value(this, child3);
2720 GPRReg valuePayloadReg = value.gpr();
2725 compileContiguousPutByVal(node, base, property, value, valuePayloadReg, TrustedImm32(JSValue::Int32Tag));
2728 case Array::Contiguous: {
2729 JSValueOperand value(this, child3);
2731 GPRReg valueTagReg = value.tagGPR();
2732 GPRReg valuePayloadReg = value.payloadGPR();
2737 compileContiguousPutByVal(node, base, property, value, valuePayloadReg, valueTagReg);
2740 case Array::Double: {
2741 compileDoublePutByVal(node, base, property);
2744 case Array::ArrayStorage:
2745 case Array::SlowPutArrayStorage: {
2746 JSValueOperand value(this, child3);
2748 GPRReg valueTagReg = value.tagGPR();
2749 GPRReg valuePayloadReg = value.payloadGPR();
2754 StorageOperand storage(this, child4);
2755 GPRReg storageReg = storage.gpr();
2757 if (node->op() == PutByValAlias) {
2758 // Store the value to the array.
2759 GPRReg propertyReg = property.gpr();
2760 m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
2761 m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
2767 MacroAssembler::JumpList slowCases;
2769 MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
2770 if (!arrayMode.isOutOfBounds())
2771 speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
2773 slowCases.append(beyondArrayBounds);
2775 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2776 if (arrayMode.isInBounds()) {
2778 StoreToHole, JSValueRegs(), 0,
2779 m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)));
2781 MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
2782 if (arrayMode.isSlowPut()) {
2783 // This is sort of strange. If we wanted to optimize this code path, we would invert
2784 // the above branch. But it's simply not worth it since this only happens if we're
2785 // already having a bad time.
2786 slowCases.append(m_jit.jump());
2788 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
2790 // If we're writing to a hole we might be growing the array;
2791 MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
2792 m_jit.add32(TrustedImm32(1), propertyReg);
2793 m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
2794 m_jit.sub32(TrustedImm32(1), propertyReg);
2796 lengthDoesNotNeedUpdate.link(&m_jit);
2798 notHoleValue.link(&m_jit);
2801 // Store the value to the array.
2802 m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
2803 m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
2810 if (!slowCases.empty()) {
2811 if (node->op() == PutByValDirect) {
2812 addSlowPathGenerator(slowPathCall(
2814 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
2815 NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg));
2817 addSlowPathGenerator(slowPathCall(
2819 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
2820 NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg));
2824 noResult(node, UseChildrenCalledExplicitly);
2829 TypedArrayType type = arrayMode.typedArrayType();
2831 compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type);
2833 compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type);
2839 SpeculateCellOperand base(this, node->child1());
2840 SpeculateCellOperand argument(this, node->child2());
2841 GPRReg baseGPR = base.gpr();
2842 GPRReg argumentGPR = argument.gpr();
2845 GPRFlushedCallResult2 resultTag(this);
2846 GPRFlushedCallResult resultPayload(this);
2847 callOperation(operationRegExpExec, resultTag.gpr(), resultPayload.gpr(), baseGPR, argumentGPR);
2848 m_jit.exceptionCheck();
2850 jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
2855 SpeculateCellOperand base(this, node->child1());
2856 SpeculateCellOperand argument(this, node->child2());
2857 GPRReg baseGPR = base.gpr();
2858 GPRReg argumentGPR = argument.gpr();
2861 GPRFlushedCallResult result(this);
2862 callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
2863 m_jit.exceptionCheck();
2865 // If we add a DataFormatBool, we should use it here.
2866 booleanResult(result.gpr(), node);
2871 ASSERT(node->arrayMode().isJSArray());
2873 SpeculateCellOperand base(this, node->child1());
2874 GPRTemporary storageLength(this);
2876 GPRReg baseGPR = base.gpr();
2877 GPRReg storageLengthGPR = storageLength.gpr();
2879 StorageOperand storage(this, node->child3());
2880 GPRReg storageGPR = storage.gpr();
2882 switch (node->arrayMode().type()) {
2883 case Array::Int32: {
2884 SpeculateInt32Operand value(this, node->child2());
2885 GPRReg valuePayloadGPR = value.gpr();
2887 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
2888 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
2889 m_jit.store32(TrustedImm32(JSValue::Int32Tag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
2890 m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
2891 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2892 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
2893 m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
2895 addSlowPathGenerator(
2897 slowPath, this, operationArrayPush,
2898 JSValueRegs(storageGPR, storageLengthGPR),
2899 TrustedImm32(JSValue::Int32Tag), valuePayloadGPR, baseGPR));
2901 jsValueResult(storageGPR, storageLengthGPR, node);
2905 case Array::Contiguous: {
2906 JSValueOperand value(this, node->child2());
2907 GPRReg valueTagGPR = value.tagGPR();
2908 GPRReg valuePayloadGPR = value.payloadGPR();
2910 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
2911 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
2912 m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
2913 m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
2914 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2915 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
2916 m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
2918 addSlowPathGenerator(
2920 slowPath, this, operationArrayPush,
2921 JSValueRegs(storageGPR, storageLengthGPR),
2922 valueTagGPR, valuePayloadGPR, baseGPR));
2924 jsValueResult(storageGPR, storageLengthGPR, node);
2928 case Array::Double: {
2929 SpeculateDoubleOperand value(this, node->child2());
2930 FPRReg valueFPR = value.fpr();
2933 JSValueRegs(), node->child2(), SpecDoubleReal,
2934 m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR));
2936 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
2937 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
2938 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
2939 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2940 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
2941 m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
2943 addSlowPathGenerator(
2945 slowPath, this, operationArrayPushDouble,
2946 JSValueRegs(storageGPR, storageLengthGPR),
2947 valueFPR, baseGPR));
2949 jsValueResult(storageGPR, storageLengthGPR, node);
// ArrayPush on an ArrayStorage-shaped butterfly. The pushed value is a full
// 32-bit tag/payload JSValue pair (this is the 32_64 backend).
2953 case Array::ArrayStorage: {
2954 JSValueOperand value(this, node->child2());
2955 GPRReg valueTagGPR = value.tagGPR();
2956 GPRReg valuePayloadGPR = value.payloadGPR();
2958 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
2960 // Refuse to handle bizarre lengths.
2961 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
// No vector room left: take the slow path.
2963 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
// Store tag and payload halves of the value into m_vector[length].
2965 m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
2966 m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
// Publish the new length and keep m_numValuesInVector in sync.
2968 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2969 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
2970 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
// Result (new length) boxed as int32: tag in storageGPR, payload in storageLengthGPR.
2971 m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
2973 addSlowPathGenerator(slowPathCall(slowPath, this, operationArrayPush, JSValueRegs(storageGPR, storageLengthGPR), valueTagGPR, valuePayloadGPR, baseGPR));
2975 jsValueResult(storageGPR, storageLengthGPR, node);
// ArrayPop (surrounding case label elided in this excerpt). Pops the last
// element of a JSArray, returning it — or jsUndefined() when the array is
// empty or the slot is a hole.
2987 ASSERT(node->arrayMode().isJSArray());
2989 SpeculateCellOperand base(this, node->child1());
2990 StorageOperand storage(this, node->child2());
2991 GPRTemporary valueTag(this);
2992 GPRTemporary valuePayload(this);
2994 GPRReg baseGPR = base.gpr();
2995 GPRReg valueTagGPR = valueTag.gpr();
2996 GPRReg valuePayloadGPR = valuePayload.gpr();
2997 GPRReg storageGPR = storage.gpr();
2999 switch (node->arrayMode().type()) {
3001 case Array::Contiguous: {
// Load publicLength; empty array pops to undefined.
3003 MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR);
3004 MacroAssembler::Jump undefinedCase =
3005 m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR);
// Shrink length by one; valuePayloadGPR now holds the index of the last slot.
3006 m_jit.sub32(TrustedImm32(1), valuePayloadGPR);
3008 valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
// Load the tag of the popped slot; a hole (EmptyValueTag) means the
// prototype chain may matter, so take the slow path.
3010 MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
3012 MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
// Clear the vacated slot (store EmptyValueTag) and load the payload.
3014 MacroAssembler::TrustedImm32(JSValue::EmptyValueTag),
3015 MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
3017 MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)),
// Empty-array path: materialize jsUndefined() into the result registers.
3020 addSlowPathGenerator(
3022 undefinedCase, this,
3023 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
3024 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
// Hole path: the length was already decremented above, so the slow call
// must also recover it — hence operationArrayPopAndRecoverLength.
3025 addSlowPathGenerator(
3027 slowCase, this, operationArrayPopAndRecoverLength,
3028 JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
3030 jsValueResult(valueTagGPR, valuePayloadGPR, node);
3034 case Array::Double: {
3035 FPRTemporary temp(this);
3036 FPRReg tempFPR = temp.fpr();
// Same shape as the Contiguous path, but elements are unboxed doubles.
3039 MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR);
3040 MacroAssembler::Jump undefinedCase =
3041 m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR);
3042 m_jit.sub32(TrustedImm32(1), valuePayloadGPR);
3044 valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3046 MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight),
// In Double storage a hole is represented by NaN; a self-unequal compare
// detects it and diverts to the slow path.
3048 MacroAssembler::Jump slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR);
// Refill the vacated slot with the canonical NaN hole value (both halves).
3049 JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN);
3051 MacroAssembler::TrustedImm32(nan.u.asBits.tag),
3052 MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
3054 MacroAssembler::TrustedImm32(nan.u.asBits.payload),
3055 MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
// Box the popped double into the tag/payload result pair.
3056 boxDouble(tempFPR, valueTagGPR, valuePayloadGPR);
3058 addSlowPathGenerator(
3060 undefinedCase, this,
3061 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
3062 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
3063 addSlowPathGenerator(
3065 slowCase, this, operationArrayPopAndRecoverLength,
3066 JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
3068 jsValueResult(valueTagGPR, valuePayloadGPR, node);
3072 case Array::ArrayStorage: {
3073 GPRTemporary storageLength(this);
3074 GPRReg storageLengthGPR = storageLength.gpr();
3076 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
// Both "array is empty" and "popped slot is a hole" produce undefined.
3078 JITCompiler::JumpList setUndefinedCases;
3079 setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR));
3081 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
// If the last index is beyond the vector (sparse tail), go to the slow path.
3083 MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
3085 m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR);
3086 m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR);
// Commit the shrunken length before deciding hole vs. value.
3088 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
3090 setUndefinedCases.append(m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR));
// Clear the vacated slot and keep m_numValuesInVector in sync.
3092 m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
3094 m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
3096 addSlowPathGenerator(
3098 setUndefinedCases, this,
3099 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
3100 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
// Unlike the fast cases above, here the length store happens after the
// slow branch, so the plain operationArrayPop suffices.
3102 addSlowPathGenerator(
3104 slowCase, this, operationArrayPop,
3105 JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
3107 jsValueResult(valueTagGPR, valuePayloadGPR, node);
3119 jump(node->targetBlock());
// Return node (case label and some branch lines elided in this excerpt):
// moves the returned JSValue into the ABI return-register pair, then restores
// callee saves and emits the function epilogue.
3133 ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT2);
3134 ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
3135 ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
3137 // Return the result in returnValueGPR.
3138 JSValueOperand op1(this, node->child1());
// Double-format operand: box it straight into the return pair
// (tag in returnValueGPR2, payload in returnValueGPR).
3141 boxDouble(op1.fpr(), GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR);
// Tag/payload operand: shuffle into the return registers without clobbering.
// The three cases cover full swap, partial overlap, and no overlap.
3143 if (op1.payloadGPR() == GPRInfo::returnValueGPR2 && op1.tagGPR() == GPRInfo::returnValueGPR)
3144 m_jit.swap(GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
3145 else if (op1.payloadGPR() == GPRInfo::returnValueGPR2) {
3146 m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR);
3147 m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2);
3149 m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2);
3150 m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR);
3154 m_jit.emitRestoreCalleeSaves();
3155 m_jit.emitFunctionEpilogue();
3163 case ThrowReferenceError: {
3164 // We expect that throw statements are rare and are intended to exit the code block
3165 // anyway, so we just OSR back to the old JIT for now.
3166 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
// BooleanToNumber: converts a boolean to 0/1. In the 32_64 value encoding the
// boolean payload already IS 0 or 1, so only the tag needs adjusting.
3170 case BooleanToNumber: {
3171 switch (node->child1().useKind()) {
// BooleanUse path (case label elided): payload is already 0/1 — just copy it.
3173 SpeculateBooleanOperand value(this, node->child1());
3174 GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
3176 m_jit.move(value.gpr(), result.gpr());
3178 int32Result(result.gpr(), node);
// UntypedUse path (case label elided).
3183 JSValueOperand value(this, node->child1());
// If abstract interpretation proves the value is already a boolean or a
// bool-like int32, no tag check is needed: the payload is the answer.
3185 if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) {
3186 GPRTemporary result(this);
3188 GPRReg valueGPR = value.payloadGPR();
3189 GPRReg resultGPR = result.gpr();
3191 m_jit.move(valueGPR, resultGPR);
3192 int32Result(result.gpr(), node);
// General path: pass non-booleans through unchanged; retag booleans as int32.
3196 GPRTemporary resultTag(this);
3197 GPRTemporary resultPayload(this);
3199 GPRReg valueTagGPR = value.tagGPR();
3200 GPRReg valuePayloadGPR = value.payloadGPR();
3201 GPRReg resultTagGPR = resultTag.gpr();
3202 GPRReg resultPayloadGPR = resultPayload.gpr();
3204 m_jit.move(valuePayloadGPR, resultPayloadGPR);
3205 JITCompiler::Jump isBoolean = m_jit.branch32(
3206 JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::BooleanTag));
// Not a boolean: keep the original tag (value passes through unchanged).
3207 m_jit.move(valueTagGPR, resultTagGPR);
3208 JITCompiler::Jump done = m_jit.jump();
3209 isBoolean.link(&m_jit);
// Boolean: same payload (0/1), but retagged as Int32.
3210 m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
3213 jsValueResult(resultTagGPR, resultPayloadGPR, node);
3218 RELEASE_ASSERT_NOT_REACHED();
// ToPrimitive (case label elided): non-cells and non-object cells (e.g.
// strings) pass through unchanged; objects go to the slow-path operation.
3225 RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3226 JSValueOperand op1(this, node->child1());
3227 GPRTemporary resultTag(this, Reuse, op1, TagWord);
3228 GPRTemporary resultPayload(this, Reuse, op1, PayloadWord);
3230 GPRReg op1TagGPR = op1.tagGPR();
3231 GPRReg op1PayloadGPR = op1.payloadGPR();
3232 GPRReg resultTagGPR = resultTag.gpr();
3233 GPRReg resultPayloadGPR = resultPayload.gpr();
// Non-cells are already primitive; cells that are objects need the slow path.
3237 MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(op1.jsValueRegs());
3238 MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(op1PayloadGPR);
3240 alreadyPrimitive.link(&m_jit);
// Fast path: the input is its own primitive value.
3241 m_jit.move(op1TagGPR, resultTagGPR);
3242 m_jit.move(op1PayloadGPR, resultPayloadGPR);
3244 addSlowPathGenerator(
3246 notPrimitive, this, operationToPrimitive,
3247 JSValueRegs(resultTagGPR, resultPayloadGPR), op1TagGPR, op1PayloadGPR));
3249 jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
// ToString / CallStringConstructor (the ToString case label is elided; the
// two ops share this body and are distinguished by `op` below).
3254 case CallStringConstructor: {
3255 if (node->child1().useKind() == UntypedUse) {
3256 JSValueOperand op1(this, node->child1());
3257 GPRReg op1PayloadGPR = op1.payloadGPR();
3258 GPRReg op1TagGPR = op1.tagGPR();
3260 GPRFlushedCallResult result(this);
3261 GPRReg resultGPR = result.gpr();
// If profiling says strings are likely, check for a string cell inline and
// skip the call entirely when the input already is one.
3265 JITCompiler::Jump done;
3266 if (node->child1()->prediction() & SpecString) {
3267 JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(op1.jsValueRegs());
3268 JITCompiler::Jump slowPath2 = m_jit.branchIfNotString(op1PayloadGPR);
3269 m_jit.move(op1PayloadGPR, resultGPR);
3270 done = m_jit.jump();
3271 slowPath1.link(&m_jit);
3272 slowPath2.link(&m_jit);
// Call the operation matching the node op (ToString branch elided above).
3275 callOperation(operationToString, resultGPR, op1TagGPR, op1PayloadGPR);
3277 ASSERT(op == CallStringConstructor);
3278 callOperation(operationCallStringConstructor, resultGPR, op1TagGPR, op1PayloadGPR);
3280 m_jit.exceptionCheck();
3283 cellResult(resultGPR, node);
// Non-Untyped use kinds (e.g. cell-speculated) take the shared cell path.
3287 compileToStringOrCallStringConstructorOnCell(node);
// NewStringObject: delegated entirely to the shared helper.
3291 case NewStringObject: {
3292 compileNewStringObject(node);
// NewArray (case label elided). Fast path: inline-allocate the JSArray and
// store the children directly into the butterfly. Slow path: spill all
// children to a scratch buffer and call operationNewArray.
3297 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
// Inline allocation is only valid while structures are stable ("having a bad
// time" means array prototypes were mutated) and for non-ArrayStorage shapes.
3298 if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
3299 Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
3300 ASSERT(structure->indexingType() == node->indexingType());
3302 hasUndecided(structure->indexingType())
3303 || hasInt32(structure->indexingType())
3304 || hasDouble(structure->indexingType())
3305 || hasContiguous(structure->indexingType()));
3307 unsigned numElements = node->numChildren();
3309 GPRTemporary result(this);
3310 GPRTemporary storage(this);
3312 GPRReg resultGPR = result.gpr();
3313 GPRReg storageGPR = storage.gpr();
3315 emitAllocateJSArray(resultGPR, structure, storageGPR, numElements);
3317 // At this point, one way or another, resultGPR and storageGPR have pointers to
3318 // the JSArray and the Butterfly, respectively.
3320 ASSERT(!hasUndecided(structure->indexingType()) || !node->numChildren());
// Store each child into its butterfly slot, typed per the indexing shape.
3322 for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) {
3323 Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx];
3324 switch (node->indexingType()) {
3325 case ALL_BLANK_INDEXING_TYPES:
3326 case ALL_UNDECIDED_INDEXING_TYPES:
3329 case ALL_DOUBLE_INDEXING_TYPES: {
3330 SpeculateDoubleOperand operand(this, use);
3331 FPRReg opFPR = operand.fpr();
// Double storage cannot hold NaN (it encodes holes); speculate real numbers.
3333 JSValueRegs(), use, SpecDoubleReal,
3334 m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
3336 m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx));
// Int32 storage: store a boxed int32 (tag + payload halves).
3339 case ALL_INT32_INDEXING_TYPES: {
3340 SpeculateInt32Operand operand(this, use);
3341 m_jit.store32(TrustedImm32(JSValue::Int32Tag), MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
3342 m_jit.store32(operand.gpr(), MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
// Contiguous storage: store the full JSValue as-is.
3345 case ALL_CONTIGUOUS_INDEXING_TYPES: {
3346 JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]);
3347 GPRReg opTagGPR = operand.tagGPR();
3348 GPRReg opPayloadGPR = operand.payloadGPR();
3349 m_jit.store32(opTagGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
3350 m_jit.store32(opPayloadGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
3359 // Yuck, we should *really* have a way of also returning the storageGPR. But
3360 // that's the least of what's wrong with this code. We really shouldn't be
3361 // allocating the array after having computed - and probably spilled to the
3362 // stack - all of the things that will go into the array. The solution to that
3363 // bigger problem will also likely fix the redundancy in reloading the storage
3364 // pointer that we currently have.
3366 cellResult(resultGPR, node);
// Slow path. Zero children: just allocate an empty array via the operation.
3370 if (!node->numChildren()) {
3372 GPRFlushedCallResult result(this);
3374 operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
3375 m_jit.exceptionCheck();
3376 cellResult(result.gpr(), node);
// Otherwise spill every child as an EncodedJSValue into a VM scratch buffer.
3380 size_t scratchSize = sizeof(EncodedJSValue) * node->numChildren();
3381 ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(scratchSize);
3382 EncodedJSValue* buffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
3384 for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) {
3385 // Need to perform the speculations that this node promises to perform. If we're
3386 // emitting code here and the indexing type is not array storage then there is
3387 // probably something hilarious going on and we're already failing at all the
3388 // things, but at least we're going to be sound.
3389 Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx];
3390 switch (node->indexingType()) {
3391 case ALL_BLANK_INDEXING_TYPES:
3392 case ALL_UNDECIDED_INDEXING_TYPES:
3395 case ALL_DOUBLE_INDEXING_TYPES: {
3396 SpeculateDoubleOperand operand(this, use);
3397 FPRReg opFPR = operand.fpr();
3399 JSValueRegs(), use, SpecFullRealNumber,
3400 m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
3402 m_jit.storeDouble(opFPR, TrustedImmPtr(reinterpret_cast<char*>(buffer + operandIdx)));
3405 case ALL_INT32_INDEXING_TYPES: {
3406 SpeculateInt32Operand operand(this, use);
3407 GPRReg opGPR = operand.gpr();
3408 m_jit.store32(TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
3409 m_jit.store32(opGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
3412 case ALL_CONTIGUOUS_INDEXING_TYPES:
3413 case ALL_ARRAY_STORAGE_INDEXING_TYPES: {
3414 JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]);
3415 GPRReg opTagGPR = operand.tagGPR();
3416 GPRReg opPayloadGPR = operand.payloadGPR();
3418 m_jit.store32(opTagGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
3419 m_jit.store32(opPayloadGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
3429 switch (node->indexingType()) {
3430 case ALL_DOUBLE_INDEXING_TYPES:
3431 case ALL_INT32_INDEXING_TYPES:
3441 GPRTemporary scratch(this);
3443 // Tell GC mark phase how much of the scratch buffer is active during call.
3444 m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr());
3445 m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr());
3448 GPRFlushedCallResult result(this);
3451 operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
3452 static_cast<void*>(buffer), node->numChildren());
3453 m_jit.exceptionCheck();
// Deactivate the scratch buffer for the GC once the call is done.
3456 GPRTemporary scratch(this);
3458 m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr());
3459 m_jit.storePtr(TrustedImmPtr(0), scratch.gpr());
3462 cellResult(result.gpr(), node, UseChildrenCalledExplicitly);
// NewArrayWithSize: allocate a JSArray of a runtime-determined length.
// Fast path inline-allocates butterfly + object; large or ArrayStorage
// requests go through operationNewArrayWithSize.
3466 case NewArrayWithSize: {
3467 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
3468 if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
3469 SpeculateStrictInt32Operand size(this, node->child1());
3470 GPRTemporary result(this);
3471 GPRTemporary storage(this);
3472 GPRTemporary scratch(this);
3473 GPRTemporary scratch2(this);
3475 GPRReg sizeGPR = size.gpr();
3476 GPRReg resultGPR = result.gpr();
3477 GPRReg storageGPR = storage.gpr();
3478 GPRReg scratchGPR = scratch.gpr();
3479 GPRReg scratch2GPR = scratch2.gpr();
// Lengths at/above the ArrayStorage construction threshold (this also
// catches negative sizes via the unsigned compare) take the slow path.
3481 MacroAssembler::JumpList slowCases;
3482 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
// Butterfly byte size = size * sizeof(JSValue) + sizeof(IndexingHeader).
3484 ASSERT((1 << 3) == sizeof(JSValue));
3485 m_jit.move(sizeGPR, scratchGPR);
3486 m_jit.lshift32(TrustedImm32(3), scratchGPR);
3487 m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratchGPR, resultGPR);
3489 emitAllocateBasicStorage(resultGPR, storageGPR));
// Back the pointer up to the start of the butterfly payload.
3490 m_jit.subPtr(scratchGPR, storageGPR);
3491 Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
3492 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
3494 m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3495 m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
// Double storage must be pre-filled with the NaN hole value; loop backwards
// from size down to zero writing both 32-bit halves.
3497 if (hasDouble(node->indexingType())) {
3498 JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN);
3500 m_jit.move(sizeGPR, scratchGPR);
3501 MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR);
3502 MacroAssembler::Label loop = m_jit.label();
3503 m_jit.sub32(TrustedImm32(1), scratchGPR);
3504 m_jit.store32(TrustedImm32(nan.u.asBits.tag), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
3505 m_jit.store32(TrustedImm32(nan.u.asBits.payload), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
3506 m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
// Slow path picks the requested structure, or the ArrayStorage structure
// for oversized lengths.
3510 addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
3511 slowCases, this, operationNewArrayWithSize, resultGPR,
3512 globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
3513 globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage),
3516 cellResult(resultGPR, node);
// Fully-slow path (bad time / ArrayStorage): pick the structure at runtime
// based on the size threshold, then always call the operation.
3520 SpeculateStrictInt32Operand size(this, node->child1());
3521 GPRReg sizeGPR = size.gpr();
3523 GPRFlushedCallResult result(this);
3524 GPRReg resultGPR = result.gpr();
3525 GPRReg structureGPR = selectScratchGPR(sizeGPR);
3526 MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
3527 m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR);
3528 MacroAssembler::Jump done = m_jit.jump();
3529 bigLength.link(&m_jit);
3530 m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)), structureGPR);
3533 operationNewArrayWithSize, resultGPR, structureGPR, sizeGPR);
3534 m_jit.exceptionCheck();
3535 cellResult(resultGPR, node);
// NewArrayBuffer: materialize an array literal from the code block's constant
// buffer. Fast path inline-allocates and copies constants with immediate
// stores; otherwise call operationNewArrayBuffer.
3539 case NewArrayBuffer: {
3540 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
3541 IndexingType indexingType = node->indexingType();
3542 if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingType)) {
3543 unsigned numElements = node->numConstants();
3545 GPRTemporary result(this);
3546 GPRTemporary storage(this);
3548 GPRReg resultGPR = result.gpr();
3549 GPRReg storageGPR = storage.gpr();
3551 emitAllocateJSArray(resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), storageGPR, numElements);
// Double arrays: write each constant's raw double bits as two 32-bit
// halves (a union — declaration elided in this excerpt — splits them).
3553 if (node->indexingType() == ArrayWithDouble) {
3554 JSValue* data = m_jit.codeBlock()->constantBuffer(node->startConstant());
3555 for (unsigned index = 0; index < node->numConstants(); ++index) {
3560 u.value = data[index].asNumber();
3561 m_jit.store32(Imm32(u.halves[0]), MacroAssembler::Address(storageGPR, sizeof(double) * index));
3562 m_jit.store32(Imm32(u.halves[1]), MacroAssembler::Address(storageGPR, sizeof(double) * index + sizeof(int32_t)));
// Non-double arrays: copy the constants as raw 32-bit words, two per value.
3565 int32_t* data = bitwise_cast<int32_t*>(m_jit.codeBlock()->constantBuffer(node->startConstant()));
3566 for (unsigned index = 0; index < node->numConstants() * 2; ++index) {
3568 Imm32(data[index]), MacroAssembler::Address(storageGPR, sizeof(int32_t) * index));
3572 cellResult(resultGPR, node);
// Slow path: let the runtime build the array from the constant buffer.
3577 GPRFlushedCallResult result(this);
3579 callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants());
3580 m_jit.exceptionCheck();
3582 cellResult(result.gpr(), node);
// NewTypedArray (body continues past this excerpt): dispatch on the length
// operand's use kind; the int32 path delegates to the shared helper.
3586 case NewTypedArray: {
3587 switch (node->child1().useKind()) {
3589 compileNewTypedArray(node);