[JSC] Make the NegZero backward propagated flags of ArithMod stricter
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGSaneStringGetByValSlowPathGenerator.h"
38 #include "DFGSlowPathGenerator.h"
39 #include "DirectArguments.h"
40 #include "JSCInlines.h"
41 #include "JSEnvironmentRecord.h"
42 #include "JSLexicalEnvironment.h"
43 #include "LinkBuffer.h"
44 #include "ScopedArguments.h"
45 #include "ScratchRegisterAllocator.h"
46 #include "WriteBarrierBuffer.h"
47 #include <wtf/MathExtras.h>
48
49 namespace JSC { namespace DFG {
50
51 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
52     : m_compileOkay(true)
53     , m_jit(jit)
54     , m_currentNode(0)
55     , m_lastGeneratedNode(LastNodeType)
56     , m_indexInBlock(0)
57     , m_generationInfo(m_jit.graph().frameRegisterCount())
58     , m_state(m_jit.graph())
59     , m_interpreter(m_jit.graph(), m_state)
60     , m_stream(&jit.jitCode()->variableEventStream)
61     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
62     , m_isCheckingArgumentTypes(false)
63 {
64 }
65
66 SpeculativeJIT::~SpeculativeJIT()
67 {
68 }
69
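// Inline (fast-path) allocation of a JSArray with a known structure and element count:
// allocates the butterfly and the cell, initializes the public and vector lengths, fills
// any unused slots of double arrays with PNaN (the hole value), and falls back to
// operationNewArrayWithSize on the slow path.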
70 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
71 {
72     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
73     
74     GPRTemporary scratch(this);
75     GPRTemporary scratch2(this);
76     GPRReg scratchGPR = scratch.gpr();
77     GPRReg scratch2GPR = scratch2.gpr();
78     
79     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
80     
81     JITCompiler::JumpList slowCases;
82     
83     slowCases.append(
84         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
85     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
86     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
87     
88     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
89     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
90     
91     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
92 #if USE(JSVALUE64)
93         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
94         for (unsigned i = numElements; i < vectorLength; ++i)
95             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
96 #else
97         EncodedValueDescriptor value;
98         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
99         for (unsigned i = numElements; i < vectorLength; ++i) {
100             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
101             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
102         }
103 #endif
104     }
105     
106     // I want a slow path that also loads out the storage pointer, and that's
107     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
108     // of work for a very small piece of functionality. :-/
109     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
110         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
111         structure, numElements));
112 }
113
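// Loads the argument count (optionally excluding |this|) into lengthGPR. For a non-varargs
// inline call frame the count is a compile-time constant; otherwise it is read from the
// frame's ArgumentCount slot.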
114 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
115 {
116     if (inlineCallFrame && !inlineCallFrame->isVarargs())
117         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
118     else {
119         VirtualRegister argumentCountRegister;
120         if (!inlineCallFrame)
121             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
122         else
123             argumentCountRegister = inlineCallFrame->argumentCountRegister;
124         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
125         if (!includeThis)
126             m_jit.sub32(TrustedImm32(1), lengthGPR);
127     }
128 }
129
130 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
131 {
132     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
133 }
134
135 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
136 {
137     if (origin.inlineCallFrame) {
138         if (origin.inlineCallFrame->isClosureCall) {
139             m_jit.loadPtr(
140                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
141                 calleeGPR);
142         } else {
143             m_jit.move(
144                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
145                 calleeGPR);
146         }
147     } else
148         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
149 }
150
151 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
152 {
153     m_jit.addPtr(
154         TrustedImm32(
155             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
156         GPRInfo::callFrameRegister, startGPR);
157 }
158
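// The speculationCheck() family registers an OSR exit that fires when the given jump (or
// jump list) is taken, capturing the current variable event stream position so the exit
// can reconstruct bytecode state.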
159 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
160 {
161     if (!m_compileOkay)
162         return;
163     ASSERT(m_isCheckingArgumentTypes || m_canExit);
164     m_jit.appendExitInfo(jumpToFail);
165     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
166 }
167
168 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
169 {
170     if (!m_compileOkay)
171         return;
172     ASSERT(m_isCheckingArgumentTypes || m_canExit);
173     m_jit.appendExitInfo(jumpsToFail);
174     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
175 }
176
177 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
178 {
179     if (!m_compileOkay)
180         return OSRExitJumpPlaceholder();
181     ASSERT(m_isCheckingArgumentTypes || m_canExit);
182     unsigned index = m_jit.jitCode()->osrExit.size();
183     m_jit.appendExitInfo();
184     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
185     return OSRExitJumpPlaceholder(index);
186 }
187
188 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
189 {
190     ASSERT(m_isCheckingArgumentTypes || m_canExit);
191     return speculationCheck(kind, jsValueSource, nodeUse.node());
192 }
193
194 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
195 {
196     ASSERT(m_isCheckingArgumentTypes || m_canExit);
197     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
198 }
199
200 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
201 {
202     ASSERT(m_isCheckingArgumentTypes || m_canExit);
203     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
204 }
205
206 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
207 {
208     if (!m_compileOkay)
209         return;
210     ASSERT(m_isCheckingArgumentTypes || m_canExit);
211     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
212     m_jit.appendExitInfo(jumpToFail);
213     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
214 }
215
216 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
217 {
218     ASSERT(m_isCheckingArgumentTypes || m_canExit);
219     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
220 }
221
222 void SpeculativeJIT::emitInvalidationPoint(Node* node)
223 {
224     if (!m_compileOkay)
225         return;
226     ASSERT(m_canExit);
227     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
228     m_jit.jitCode()->appendOSRExit(OSRExit(
229         UncountableInvalidation, JSValueSource(),
230         m_jit.graph().methodOfGettingAValueProfileFor(node),
231         this, m_stream->size()));
232     info.m_replacementSource = m_jit.watchpointLabel();
233     ASSERT(info.m_replacementSource.isSet());
234     noResult(node);
235 }
236
237 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
238 {
239     ASSERT(m_isCheckingArgumentTypes || m_canExit);
240     if (!m_compileOkay)
241         return;
242     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
243     m_compileOkay = false;
244     if (verboseCompilationEnabled())
245         dataLog("Bailing compilation.\n");
246 }
247
248 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
249 {
250     ASSERT(m_isCheckingArgumentTypes || m_canExit);
251     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
252 }
253
254 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
255 {
256     ASSERT(needsTypeCheck(edge, typesPassedThrough));
257     m_interpreter.filter(edge, typesPassedThrough);
258     speculationCheck(BadType, source, edge.node(), jumpToFail);
259 }
260
261 RegisterSet SpeculativeJIT::usedRegisters()
262 {
263     RegisterSet result;
264     
265     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
266         GPRReg gpr = GPRInfo::toRegister(i);
267         if (m_gprs.isInUse(gpr))
268             result.set(gpr);
269     }
270     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
271         FPRReg fpr = FPRInfo::toRegister(i);
272         if (m_fprs.isInUse(fpr))
273             result.set(fpr);
274     }
275     
276     result.merge(RegisterSet::specialRegisters());
277     
278     return result;
279 }
280
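// Slow path generators accumulated here are emitted out of line after the main-path code
// has been generated; see runSlowPathGenerators() below.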
281 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
282 {
283     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
284 }
285
286 void SpeculativeJIT::runSlowPathGenerators()
287 {
288     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
289         m_slowPathGenerators[i]->generate(this);
290 }
291
292 // On Windows we need to wrap fmod; on other platforms we can call it directly.
293 // On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
294 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
295 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
296 {
297     return fmod(x, y);
298 }
299 #else
300 #define fmodAsDFGOperation fmod
301 #endif
302
303 void SpeculativeJIT::clearGenerationInfo()
304 {
305     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
306         m_generationInfo[i] = GenerationInfo();
307     m_gprs = RegisterBank<GPRInfo>();
308     m_fprs = RegisterBank<FPRInfo>();
309 }
310
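// "Silent" spill/fill plans describe how to save a live register before a call that
// clobbers it and how to restore it afterwards, without disturbing the register
// allocator's bookkeeping (hence "silent").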
311 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
312 {
313     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
314     Node* node = info.node();
315     DataFormat registerFormat = info.registerFormat();
316     ASSERT(registerFormat != DataFormatNone);
317     ASSERT(registerFormat != DataFormatDouble);
318         
319     SilentSpillAction spillAction;
320     SilentFillAction fillAction;
321         
322     if (!info.needsSpill())
323         spillAction = DoNothingForSpill;
324     else {
325 #if USE(JSVALUE64)
326         ASSERT(info.gpr() == source);
327         if (registerFormat == DataFormatInt32)
328             spillAction = Store32Payload;
329         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
330             spillAction = StorePtr;
331         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
332             spillAction = Store64;
333         else {
334             ASSERT(registerFormat & DataFormatJS);
335             spillAction = Store64;
336         }
337 #elif USE(JSVALUE32_64)
338         if (registerFormat & DataFormatJS) {
339             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
340             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
341         } else {
342             ASSERT(info.gpr() == source);
343             spillAction = Store32Payload;
344         }
345 #endif
346     }
347         
348     if (registerFormat == DataFormatInt32) {
349         ASSERT(info.gpr() == source);
350         ASSERT(isJSInt32(info.registerFormat()));
351         if (node->hasConstant()) {
352             ASSERT(node->isInt32Constant());
353             fillAction = SetInt32Constant;
354         } else
355             fillAction = Load32Payload;
356     } else if (registerFormat == DataFormatBoolean) {
357 #if USE(JSVALUE64)
358         RELEASE_ASSERT_NOT_REACHED();
359 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
360         fillAction = DoNothingForFill;
361 #endif
362 #elif USE(JSVALUE32_64)
363         ASSERT(info.gpr() == source);
364         if (node->hasConstant()) {
365             ASSERT(node->isBooleanConstant());
366             fillAction = SetBooleanConstant;
367         } else
368             fillAction = Load32Payload;
369 #endif
370     } else if (registerFormat == DataFormatCell) {
371         ASSERT(info.gpr() == source);
372         if (node->hasConstant()) {
373             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
374             node->asCell(); // To get the assertion.
375             fillAction = SetCellConstant;
376         } else {
377 #if USE(JSVALUE64)
378             fillAction = LoadPtr;
379 #else
380             fillAction = Load32Payload;
381 #endif
382         }
383     } else if (registerFormat == DataFormatStorage) {
384         ASSERT(info.gpr() == source);
385         fillAction = LoadPtr;
386     } else if (registerFormat == DataFormatInt52) {
387         if (node->hasConstant())
388             fillAction = SetInt52Constant;
389         else if (info.spillFormat() == DataFormatInt52)
390             fillAction = Load64;
391         else if (info.spillFormat() == DataFormatStrictInt52)
392             fillAction = Load64ShiftInt52Left;
393         else if (info.spillFormat() == DataFormatNone)
394             fillAction = Load64;
395         else {
396             RELEASE_ASSERT_NOT_REACHED();
397 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
398             fillAction = Load64; // Make GCC happy.
399 #endif
400         }
401     } else if (registerFormat == DataFormatStrictInt52) {
402         if (node->hasConstant())
403             fillAction = SetStrictInt52Constant;
404         else if (info.spillFormat() == DataFormatInt52)
405             fillAction = Load64ShiftInt52Right;
406         else if (info.spillFormat() == DataFormatStrictInt52)
407             fillAction = Load64;
408         else if (info.spillFormat() == DataFormatNone)
409             fillAction = Load64;
410         else {
411             RELEASE_ASSERT_NOT_REACHED();
412 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
413             fillAction = Load64; // Make GCC happy.
414 #endif
415         }
416     } else {
417         ASSERT(registerFormat & DataFormatJS);
418 #if USE(JSVALUE64)
419         ASSERT(info.gpr() == source);
420         if (node->hasConstant()) {
421             if (node->isCellConstant())
422                 fillAction = SetTrustedJSConstant;
423             else
424                 fillAction = SetJSConstant;
425         } else if (info.spillFormat() == DataFormatInt32) {
426             ASSERT(registerFormat == DataFormatJSInt32);
427             fillAction = Load32PayloadBoxInt;
428         } else
429             fillAction = Load64;
430 #else
431         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
432         if (node->hasConstant())
433             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
434         else if (info.payloadGPR() == source)
435             fillAction = Load32Payload;
436         else { // Fill the Tag
437             switch (info.spillFormat()) {
438             case DataFormatInt32:
439                 ASSERT(registerFormat == DataFormatJSInt32);
440                 fillAction = SetInt32Tag;
441                 break;
442             case DataFormatCell:
443                 ASSERT(registerFormat == DataFormatJSCell);
444                 fillAction = SetCellTag;
445                 break;
446             case DataFormatBoolean:
447                 ASSERT(registerFormat == DataFormatJSBoolean);
448                 fillAction = SetBooleanTag;
449                 break;
450             default:
451                 fillAction = Load32Tag;
452                 break;
453             }
454         }
455 #endif
456     }
457         
458     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
459 }
460     
461 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
462 {
463     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
464     Node* node = info.node();
465     ASSERT(info.registerFormat() == DataFormatDouble);
466
467     SilentSpillAction spillAction;
468     SilentFillAction fillAction;
469         
470     if (!info.needsSpill())
471         spillAction = DoNothingForSpill;
472     else {
473         ASSERT(!node->hasConstant());
474         ASSERT(info.spillFormat() == DataFormatNone);
475         ASSERT(info.fpr() == source);
476         spillAction = StoreDouble;
477     }
478         
479 #if USE(JSVALUE64)
480     if (node->hasConstant()) {
481         node->asNumber(); // To get the assertion.
482         fillAction = SetDoubleConstant;
483     } else {
484         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
485         fillAction = LoadDouble;
486     }
487 #elif USE(JSVALUE32_64)
488     ASSERT(info.registerFormat() == DataFormatDouble);
489     if (node->hasConstant()) {
490         node->asNumber(); // To get the assertion.
491         fillAction = SetDoubleConstant;
492     } else
493         fillAction = LoadDouble;
494 #endif
495
496     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
497 }
498     
499 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
500 {
501     switch (plan.spillAction()) {
502     case DoNothingForSpill:
503         break;
504     case Store32Tag:
505         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
506         break;
507     case Store32Payload:
508         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
509         break;
510     case StorePtr:
511         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
512         break;
513 #if USE(JSVALUE64)
514     case Store64:
515         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
516         break;
517 #endif
518     case StoreDouble:
519         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
520         break;
521     default:
522         RELEASE_ASSERT_NOT_REACHED();
523     }
524 }
525     
526 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
527 {
528 #if USE(JSVALUE32_64)
529     UNUSED_PARAM(canTrample);
530 #endif
531     switch (plan.fillAction()) {
532     case DoNothingForFill:
533         break;
534     case SetInt32Constant:
535         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
536         break;
537 #if USE(JSVALUE64)
538     case SetInt52Constant:
539         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
540         break;
541     case SetStrictInt52Constant:
542         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
543         break;
544 #endif // USE(JSVALUE64)
545     case SetBooleanConstant:
546         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
547         break;
548     case SetCellConstant:
549         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
550         break;
551 #if USE(JSVALUE64)
552     case SetTrustedJSConstant:
553         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
554         break;
555     case SetJSConstant:
556         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
557         break;
558     case SetDoubleConstant:
559         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
560         m_jit.move64ToDouble(canTrample, plan.fpr());
561         break;
562     case Load32PayloadBoxInt:
563         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
564         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
565         break;
566     case Load32PayloadConvertToInt52:
567         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
568         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
569         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
570         break;
571     case Load32PayloadSignExtend:
572         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
573         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
574         break;
575 #else
576     case SetJSConstantTag:
577         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
578         break;
579     case SetJSConstantPayload:
580         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
581         break;
582     case SetInt32Tag:
583         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
584         break;
585     case SetCellTag:
586         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
587         break;
588     case SetBooleanTag:
589         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
590         break;
591     case SetDoubleConstant:
592         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
593         break;
594 #endif
595     case Load32Tag:
596         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
597         break;
598     case Load32Payload:
599         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
600         break;
601     case LoadPtr:
602         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
603         break;
604 #if USE(JSVALUE64)
605     case Load64:
606         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
607         break;
608     case Load64ShiftInt52Right:
609         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
610         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
611         break;
612     case Load64ShiftInt52Left:
613         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
614         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
615         break;
616 #endif
617     case LoadDouble:
618         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
619         break;
620     default:
621         RELEASE_ASSERT_NOT_REACHED();
622     }
623 }
624     
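// Given the base cell's indexing type byte already loaded into tempGPR, returns a jump
// (or jump list) that is taken when the indexing type does not match what the ArrayMode
// speculates.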
625 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
626 {
627     switch (arrayMode.arrayClass()) {
628     case Array::OriginalArray: {
629         CRASH();
630 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
631         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
632         return result;
633 #endif
634     }
635         
636     case Array::Array:
637         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
638         return m_jit.branch32(
639             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
640         
641     case Array::NonArray:
642     case Array::OriginalNonArray:
643         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
644         return m_jit.branch32(
645             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
646         
647     case Array::PossiblyArray:
648         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
649         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
650     }
651     
652     RELEASE_ASSERT_NOT_REACHED();
653     return JITCompiler::Jump();
654 }
655
656 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
657 {
658     JITCompiler::JumpList result;
659     
660     switch (arrayMode.type()) {
661     case Array::Int32:
662         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
663
664     case Array::Double:
665         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
666
667     case Array::Contiguous:
668         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
669
670     case Array::ArrayStorage:
671     case Array::SlowPutArrayStorage: {
672         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
673         
674         if (arrayMode.isJSArray()) {
675             if (arrayMode.isSlowPut()) {
676                 result.append(
677                     m_jit.branchTest32(
678                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
679                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
680                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
681                 result.append(
682                     m_jit.branch32(
683                         MacroAssembler::Above, tempGPR,
684                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
685                 break;
686             }
687             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
688             result.append(
689                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
690             break;
691         }
692         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
693         if (arrayMode.isSlowPut()) {
694             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
695             result.append(
696                 m_jit.branch32(
697                     MacroAssembler::Above, tempGPR,
698                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
699             break;
700         }
701         result.append(
702             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
703         break;
704     }
705     default:
706         CRASH();
707         break;
708     }
709     
710     return result;
711 }
712
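// CheckArray: if abstract interpretation already proves the ArrayMode, this is a no-op;
// otherwise speculate on the indexing-type byte (or on the cell's JSType for arguments
// objects and typed arrays) and OSR exit on a mismatch.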
713 void SpeculativeJIT::checkArray(Node* node)
714 {
715     ASSERT(node->arrayMode().isSpecific());
716     ASSERT(!node->arrayMode().doesConversion());
717     
718     SpeculateCellOperand base(this, node->child1());
719     GPRReg baseReg = base.gpr();
720     
721     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
722         noResult(m_currentNode);
723         return;
724     }
725     
726     const ClassInfo* expectedClassInfo = 0;
727     
728     switch (node->arrayMode().type()) {
729     case Array::String:
730         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
731         break;
732     case Array::Int32:
733     case Array::Double:
734     case Array::Contiguous:
735     case Array::ArrayStorage:
736     case Array::SlowPutArrayStorage: {
737         GPRTemporary temp(this);
738         GPRReg tempGPR = temp.gpr();
739         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
740         speculationCheck(
741             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
742             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
743         
744         noResult(m_currentNode);
745         return;
746     }
747     case Array::DirectArguments:
748         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
749         noResult(m_currentNode);
750         return;
751     case Array::ScopedArguments:
752         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
753         noResult(m_currentNode);
754         return;
755     default:
756         speculateCellTypeWithoutTypeFiltering(
757             node->child1(), baseReg,
758             typeForTypedArrayType(node->arrayMode().typedArrayType()));
759         noResult(m_currentNode);
760         return;
761     }
762     
763     RELEASE_ASSERT(expectedClassInfo);
764     
765     GPRTemporary temp(this);
766     GPRTemporary temp2(this);
767     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
768     speculationCheck(
769         BadType, JSValueSource::unboxedCell(baseReg), node,
770         m_jit.branchPtr(
771             MacroAssembler::NotEqual,
772             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
773             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
774     
775     noResult(m_currentNode);
776 }
777
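// Arrayify/ArrayifyToStructure: the fast path checks whether the object already has the
// desired structure or indexing shape; otherwise the ArrayifySlowPathGenerator converts
// the object's storage.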
778 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
779 {
780     ASSERT(node->arrayMode().doesConversion());
781     
782     GPRTemporary temp(this);
783     GPRTemporary structure;
784     GPRReg tempGPR = temp.gpr();
785     GPRReg structureGPR = InvalidGPRReg;
786     
787     if (node->op() != ArrayifyToStructure) {
788         GPRTemporary realStructure(this);
789         structure.adopt(realStructure);
790         structureGPR = structure.gpr();
791     }
792         
793     // We can skip all that comes next if we already have array storage.
794     MacroAssembler::JumpList slowPath;
795     
796     if (node->op() == ArrayifyToStructure) {
797         slowPath.append(m_jit.branchWeakStructure(
798             JITCompiler::NotEqual,
799             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
800             node->structure()));
801     } else {
802         m_jit.load8(
803             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
804         
805         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
806     }
807     
808     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
809         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
810     
811     noResult(m_currentNode);
812 }
813
814 void SpeculativeJIT::arrayify(Node* node)
815 {
816     ASSERT(node->arrayMode().isSpecific());
817     
818     SpeculateCellOperand base(this, node->child1());
819     
820     if (!node->child2()) {
821         arrayify(node, base.gpr(), InvalidGPRReg);
822         return;
823     }
824     
825     SpeculateInt32Operand property(this, node->child2());
826     
827     arrayify(node, base.gpr(), property.gpr());
828 }
829
830 GPRReg SpeculativeJIT::fillStorage(Edge edge)
831 {
832     VirtualRegister virtualRegister = edge->virtualRegister();
833     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
834     
835     switch (info.registerFormat()) {
836     case DataFormatNone: {
837         if (info.spillFormat() == DataFormatStorage) {
838             GPRReg gpr = allocate();
839             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
840             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
841             info.fillStorage(*m_stream, gpr);
842             return gpr;
843         }
844         
845         // Must be a cell; fill it as a cell and then return the pointer.
846         return fillSpeculateCell(edge);
847     }
848         
849     case DataFormatStorage: {
850         GPRReg gpr = info.gpr();
851         m_gprs.lock(gpr);
852         return gpr;
853     }
854         
855     default:
856         return fillSpeculateCell(edge);
857     }
858 }
859
860 void SpeculativeJIT::useChildren(Node* node)
861 {
862     if (node->flags() & NodeHasVarArgs) {
863         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
864             if (!!m_jit.graph().m_varArgChildren[childIdx])
865                 use(m_jit.graph().m_varArgChildren[childIdx]);
866         }
867     } else {
868         Edge child1 = node->child1();
869         if (!child1) {
870             ASSERT(!node->child2() && !node->child3());
871             return;
872         }
873         use(child1);
874         
875         Edge child2 = node->child2();
876         if (!child2) {
877             ASSERT(!node->child3());
878             return;
879         }
880         use(child2);
881         
882         Edge child3 = node->child3();
883         if (!child3)
884             return;
885         use(child3);
886     }
887 }
888
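// 'in' operator: when the property is a constant atomic string we emit a patchable jump
// plus a StructureStubInfo so the check can be inlined by repatching (slow path:
// operationInOptimize); otherwise we call operationGenericIn.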
889 void SpeculativeJIT::compileIn(Node* node)
890 {
891     SpeculateCellOperand base(this, node->child2());
892     GPRReg baseGPR = base.gpr();
893     
894     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
895         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
896             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
897             
898             GPRTemporary result(this);
899             GPRReg resultGPR = result.gpr();
900
901             use(node->child1());
902             
903             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
904             MacroAssembler::Label done = m_jit.label();
905             
906             auto slowPath = slowPathCall(
907                 jump.m_jump, this, operationInOptimize,
908                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
909                 string->tryGetValueImpl());
910             
911             stubInfo->codeOrigin = node->origin.semantic;
912             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
913             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
914             stubInfo->patch.usedRegisters = usedRegisters();
915             stubInfo->patch.spillMode = NeedToSpill;
916
917             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
918             addSlowPathGenerator(WTF::move(slowPath));
919
920             base.use();
921
922             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
923             return;
924         }
925     }
926
927     JSValueOperand key(this, node->child1());
928     JSValueRegs regs = key.jsValueRegs();
929         
930     GPRFlushedCallResult result(this);
931     GPRReg resultGPR = result.gpr();
932         
933     base.use();
934     key.use();
935         
936     flushRegisters();
937     callOperation(
938         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
939         baseGPR, regs);
940     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
941 }
942
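// If this compare's only use is the immediately following Branch, fuse the two (peephole)
// and return true so the caller skips the branch node; otherwise emit a standalone
// boolean result and return false.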
943 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
944 {
945     unsigned branchIndexInBlock = detectPeepHoleBranch();
946     if (branchIndexInBlock != UINT_MAX) {
947         Node* branchNode = m_block->at(branchIndexInBlock);
948
949         ASSERT(node->adjustedRefCount() == 1);
950         
951         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
952     
953         m_indexInBlock = branchIndexInBlock;
954         m_currentNode = branchNode;
955         
956         return true;
957     }
958     
959     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
960     
961     return false;
962 }
963
964 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
965 {
966     unsigned branchIndexInBlock = detectPeepHoleBranch();
967     if (branchIndexInBlock != UINT_MAX) {
968         Node* branchNode = m_block->at(branchIndexInBlock);
969
970         ASSERT(node->adjustedRefCount() == 1);
971         
972         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
973     
974         m_indexInBlock = branchIndexInBlock;
975         m_currentNode = branchNode;
976         
977         return true;
978     }
979     
980     nonSpeculativeNonPeepholeStrictEq(node, invert);
981     
982     return false;
983 }
984
985 static const char* dataFormatString(DataFormat format)
986 {
987     // These values correspond to the DataFormat enum.
988     const char* strings[] = {
989         "[  ]",
990         "[ i]",
991         "[ d]",
992         "[ c]",
993         "Err!",
994         "Err!",
995         "Err!",
996         "Err!",
997         "[J ]",
998         "[Ji]",
999         "[Jd]",
1000         "[Jc]",
1001         "Err!",
1002         "Err!",
1003         "Err!",
1004         "Err!",
1005     };
1006     return strings[format];
1007 }
1008
1009 void SpeculativeJIT::dump(const char* label)
1010 {
1011     if (label)
1012         dataLogF("<%s>\n", label);
1013
1014     dataLogF("  gprs:\n");
1015     m_gprs.dump();
1016     dataLogF("  fprs:\n");
1017     m_fprs.dump();
1018     dataLogF("  VirtualRegisters:\n");
1019     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1020         GenerationInfo& info = m_generationInfo[i];
1021         if (info.alive())
1022             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1023         else
1024             dataLogF("    % 3d:[__][__]", i);
1025         if (info.registerFormat() == DataFormatDouble)
1026             dataLogF(":fpr%d\n", info.fpr());
1027         else if (info.registerFormat() != DataFormatNone
1028 #if USE(JSVALUE32_64)
1029             && !(info.registerFormat() & DataFormatJS)
1030 #endif
1031             ) {
1032             ASSERT(info.gpr() != InvalidGPRReg);
1033             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1034         } else
1035             dataLogF("\n");
1036     }
1037     if (label)
1038         dataLogF("</%s>\n", label);
1039 }
1040
1041 GPRTemporary::GPRTemporary()
1042     : m_jit(0)
1043     , m_gpr(InvalidGPRReg)
1044 {
1045 }
1046
1047 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1048     : m_jit(jit)
1049     , m_gpr(InvalidGPRReg)
1050 {
1051     m_gpr = m_jit->allocate();
1052 }
1053
1054 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1055     : m_jit(jit)
1056     , m_gpr(InvalidGPRReg)
1057 {
1058     m_gpr = m_jit->allocate(specific);
1059 }
1060
1061 #if USE(JSVALUE32_64)
1062 GPRTemporary::GPRTemporary(
1063     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1064     : m_jit(jit)
1065     , m_gpr(InvalidGPRReg)
1066 {
1067     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1068         m_gpr = m_jit->reuse(op1.gpr(which));
1069     else
1070         m_gpr = m_jit->allocate();
1071 }
1072 #endif // USE(JSVALUE32_64)
1073
1074 JSValueRegsTemporary::JSValueRegsTemporary() { }
1075
1076 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1077 #if USE(JSVALUE64)
1078     : m_gpr(jit)
1079 #else
1080     : m_payloadGPR(jit)
1081     , m_tagGPR(jit)
1082 #endif
1083 {
1084 }
1085
1086 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1087
1088 JSValueRegs JSValueRegsTemporary::regs()
1089 {
1090 #if USE(JSVALUE64)
1091     return JSValueRegs(m_gpr.gpr());
1092 #else
1093     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1094 #endif
1095 }
1096
1097 void GPRTemporary::adopt(GPRTemporary& other)
1098 {
1099     ASSERT(!m_jit);
1100     ASSERT(m_gpr == InvalidGPRReg);
1101     ASSERT(other.m_jit);
1102     ASSERT(other.m_gpr != InvalidGPRReg);
1103     m_jit = other.m_jit;
1104     m_gpr = other.m_gpr;
1105     other.m_jit = 0;
1106     other.m_gpr = InvalidGPRReg;
1107 }
1108
1109 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1110     : m_jit(jit)
1111     , m_fpr(InvalidFPRReg)
1112 {
1113     m_fpr = m_jit->fprAllocate();
1114 }
1115
1116 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1117     : m_jit(jit)
1118     , m_fpr(InvalidFPRReg)
1119 {
1120     if (m_jit->canReuse(op1.node()))
1121         m_fpr = m_jit->reuse(op1.fpr());
1122     else
1123         m_fpr = m_jit->fprAllocate();
1124 }
1125
1126 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1127     : m_jit(jit)
1128     , m_fpr(InvalidFPRReg)
1129 {
1130     if (m_jit->canReuse(op1.node()))
1131         m_fpr = m_jit->reuse(op1.fpr());
1132     else if (m_jit->canReuse(op2.node()))
1133         m_fpr = m_jit->reuse(op2.fpr());
1134     else
1135         m_fpr = m_jit->fprAllocate();
1136 }
1137
1138 #if USE(JSVALUE32_64)
1139 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1140     : m_jit(jit)
1141     , m_fpr(InvalidFPRReg)
1142 {
1143     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1144         m_fpr = m_jit->reuse(op1.fpr());
1145     else
1146         m_fpr = m_jit->fprAllocate();
1147 }
1148 #endif
1149
1150 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1151 {
1152     BasicBlock* taken = branchNode->branchData()->taken.block;
1153     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1154     
1155     SpeculateDoubleOperand op1(this, node->child1());
1156     SpeculateDoubleOperand op2(this, node->child2());
1157     
1158     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1159     jump(notTaken);
1160 }
1161
1162 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1163 {
1164     BasicBlock* taken = branchNode->branchData()->taken.block;
1165     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1166
1167     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1168     
1169     if (taken == nextBlock()) {
1170         condition = MacroAssembler::NotEqual;
1171         BasicBlock* tmp = taken;
1172         taken = notTaken;
1173         notTaken = tmp;
1174     }
1175
1176     SpeculateCellOperand op1(this, node->child1());
1177     SpeculateCellOperand op2(this, node->child2());
1178     
1179     GPRReg op1GPR = op1.gpr();
1180     GPRReg op2GPR = op2.gpr();
1181     
1182     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1183         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1184             speculationCheck(
1185                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1186         }
1187         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1188             speculationCheck(
1189                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1190         }
1191     } else {
1192         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1193             speculationCheck(
1194                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1195                 m_jit.branchIfNotObject(op1GPR));
1196         }
1197         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1198             m_jit.branchTest8(
1199                 MacroAssembler::NonZero, 
1200                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1201                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1202
1203         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1204             speculationCheck(
1205                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1206                 m_jit.branchIfNotObject(op2GPR));
1207         }
1208         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1209             m_jit.branchTest8(
1210                 MacroAssembler::NonZero, 
1211                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1212                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1213     }
1214
1215     branchPtr(condition, op1GPR, op2GPR, taken);
1216     jump(notTaken);
1217 }
1218
1219 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1220 {
1221     BasicBlock* taken = branchNode->branchData()->taken.block;
1222     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1223
1224     // The branch instruction will branch to the taken block.
1225     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1226     if (taken == nextBlock()) {
1227         condition = JITCompiler::invert(condition);
1228         BasicBlock* tmp = taken;
1229         taken = notTaken;
1230         notTaken = tmp;
1231     }
1232
1233     if (node->child1()->isBooleanConstant()) {
1234         bool imm = node->child1()->asBoolean();
1235         SpeculateBooleanOperand op2(this, node->child2());
1236         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1237     } else if (node->child2()->isBooleanConstant()) {
1238         SpeculateBooleanOperand op1(this, node->child1());
1239         bool imm = node->child2()->asBoolean();
1240         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1241     } else {
1242         SpeculateBooleanOperand op1(this, node->child1());
1243         SpeculateBooleanOperand op2(this, node->child2());
1244         branch32(condition, op1.gpr(), op2.gpr(), taken);
1245     }
1246
1247     jump(notTaken);
1248 }
1249
1250 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1251 {
1252     BasicBlock* taken = branchNode->branchData()->taken.block;
1253     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1254
1255     // The branch instruction will branch to the taken block.
1256     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1257     if (taken == nextBlock()) {
1258         condition = JITCompiler::invert(condition);
1259         BasicBlock* tmp = taken;
1260         taken = notTaken;
1261         notTaken = tmp;
1262     }
1263
1264     if (node->child1()->isInt32Constant()) {
1265         int32_t imm = node->child1()->asInt32();
1266         SpeculateInt32Operand op2(this, node->child2());
1267         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1268     } else if (node->child2()->isInt32Constant()) {
1269         SpeculateInt32Operand op1(this, node->child1());
1270         int32_t imm = node->child2()->asInt32();
1271         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1272     } else {
1273         SpeculateInt32Operand op1(this, node->child1());
1274         SpeculateInt32Operand op2(this, node->child2());
1275         branch32(condition, op1.gpr(), op2.gpr(), taken);
1276     }
1277
1278     jump(notTaken);
1279 }
1280
1281 // Returns true if the compare is fused with a subsequent branch.
1282 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1283 {
1284     // Fused compare & branch.
1285     unsigned branchIndexInBlock = detectPeepHoleBranch();
1286     if (branchIndexInBlock != UINT_MAX) {
1287         Node* branchNode = m_block->at(branchIndexInBlock);
1288
1289         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1290         // so there can be no intervening nodes that also reference the compare.
1291         ASSERT(node->adjustedRefCount() == 1);
1292
1293         if (node->isBinaryUseKind(Int32Use))
1294             compilePeepHoleInt32Branch(node, branchNode, condition);
1295 #if USE(JSVALUE64)
1296         else if (node->isBinaryUseKind(Int52RepUse))
1297             compilePeepHoleInt52Branch(node, branchNode, condition);
1298 #endif // USE(JSVALUE64)
1299         else if (node->isBinaryUseKind(DoubleRepUse))
1300             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1301         else if (node->op() == CompareEq) {
1302             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1303                 // Use non-peephole comparison, for now.
1304                 return false;
1305             }
1306             if (node->isBinaryUseKind(BooleanUse))
1307                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1308             else if (node->isBinaryUseKind(ObjectUse))
1309                 compilePeepHoleObjectEquality(node, branchNode);
1310             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1311                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1312             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1313                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1314             else {
1315                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1316                 return true;
1317             }
1318         } else {
1319             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1320             return true;
1321         }
1322
1323         use(node->child1());
1324         use(node->child2());
1325         m_indexInBlock = branchIndexInBlock;
1326         m_currentNode = branchNode;
1327         return true;
1328     }
1329     return false;
1330 }
1331
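// Records in the variable event stream that this node's value has become available in its
// virtual register, so OSR exit can recover it.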
1332 void SpeculativeJIT::noticeOSRBirth(Node* node)
1333 {
1334     if (!node->hasVirtualRegister())
1335         return;
1336     
1337     VirtualRegister virtualRegister = node->virtualRegister();
1338     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1339     
1340     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1341 }
1342
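// MovHint: tells OSR exit which bytecode variable the child's value corresponds to; no
// code is emitted, only a variable event.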
1343 void SpeculativeJIT::compileMovHint(Node* node)
1344 {
1345     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1346     
1347     Node* child = node->child1().node();
1348     noticeOSRBirth(child);
1349     
1350     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1351 }
1352
1353 void SpeculativeJIT::bail(AbortReason reason)
1354 {
1355     if (verboseCompilationEnabled())
1356         dataLog("Bailing compilation.\n");
1357     m_compileOkay = true;
1358     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1359     clearGenerationInfo();
1360 }
1361
1362 void SpeculativeJIT::compileCurrentBlock()
1363 {
1364     ASSERT(m_compileOkay);
1365     
1366     if (!m_block)
1367         return;
1368     
1369     ASSERT(m_block->isReachable);
1370     
1371     m_jit.blockHeads()[m_block->index] = m_jit.label();
1372
1373     if (!m_block->intersectionOfCFAHasVisited) {
1374         // Don't generate code for basic blocks that are unreachable according to CFA.
1375         // But to be sure that nobody has generated a jump to this block, drop in a
1376         // breakpoint here.
1377         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1378         return;
1379     }
1380
1381     m_stream->appendAndLog(VariableEvent::reset());
1382     
1383     m_jit.jitAssertHasValidCallFrame();
1384     m_jit.jitAssertTagsInPlace();
1385     m_jit.jitAssertArgumentCountSane();
1386
1387     m_state.reset();
1388     m_state.beginBasicBlock(m_block);
1389     
1390     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1391         int operand = m_block->variablesAtHead.operandForIndex(i);
1392         Node* node = m_block->variablesAtHead[i];
1393         if (!node)
1394             continue; // No need to record dead SetLocals.
1395         
1396         VariableAccessData* variable = node->variableAccessData();
1397         DataFormat format;
1398         if (!node->refCount())
1399             continue; // No need to record dead SetLocals.
1400         format = dataFormatFor(variable->flushFormat());
1401         m_stream->appendAndLog(
1402             VariableEvent::setLocal(
1403                 VirtualRegister(operand),
1404                 variable->machineLocal(),
1405                 format));
1406     }
1407     
1408     m_codeOriginForExitTarget = CodeOrigin();
1409     m_codeOriginForExitProfile = CodeOrigin();
1410     
1411     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1412         m_currentNode = m_block->at(m_indexInBlock);
1413         
1414         // We may have hit a contradiction that the CFA was aware of but that the JIT
1415         // didn't cause directly.
1416         if (!m_state.isValid()) {
1417             bail(DFGBailedAtTopOfBlock);
1418             return;
1419         }
1420
1421         if (ASSERT_DISABLED)
1422             m_canExit = true; // Essentially disable the assertions.
1423         else
1424             m_canExit = mayExit(m_jit.graph(), m_currentNode);
1425         
1426         m_interpreter.startExecuting();
1427         m_jit.setForNode(m_currentNode);
1428         m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1429         m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1430         m_lastGeneratedNode = m_currentNode->op();
1431         
1432         ASSERT(m_currentNode->shouldGenerate());
1433         
1434         if (verboseCompilationEnabled()) {
1435             dataLogF(
1436                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1437                 (int)m_currentNode->index(),
1438                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1439             dataLog("\n");
1440         }
1441         
1442         compile(m_currentNode);
1443         
1444         if (belongsInMinifiedGraph(m_currentNode->op()))
1445             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1446         
1447 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1448         m_jit.clearRegisterAllocationOffsets();
1449 #endif
1450         
1451         if (!m_compileOkay) {
1452             bail(DFGBailedAtEndOfNode);
1453             return;
1454         }
1455         
1456         // Make sure that the abstract state is rematerialized for the next node.
1457         m_interpreter.executeEffects(m_indexInBlock);
1458     }
1459     
1460     // Perform the most basic verification that children have been used correctly.
1461     if (!ASSERT_DISABLED) {
1462         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1463             GenerationInfo& info = m_generationInfo[index];
1464             RELEASE_ASSERT(!info.alive());
1465         }
1466     }
1467 }
1468
1469 // If we are making type predictions about our arguments then
1470 // we need to check that they are correct on function entry.
1471 void SpeculativeJIT::checkArgumentTypes()
1472 {
1473     ASSERT(!m_currentNode);
1474     m_isCheckingArgumentTypes = true;
1475     m_codeOriginForExitTarget = CodeOrigin(0);
1476     m_codeOriginForExitProfile = CodeOrigin(0);
1477
1478     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1479         Node* node = m_jit.graph().m_arguments[i];
1480         if (!node) {
1481             // The argument is dead. We don't do any checks for such arguments.
1482             continue;
1483         }
1484         
1485         ASSERT(node->op() == SetArgument);
1486         ASSERT(node->shouldGenerate());
1487
1488         VariableAccessData* variableAccessData = node->variableAccessData();
1489         FlushFormat format = variableAccessData->flushFormat();
1490         
1491         if (format == FlushedJSValue)
1492             continue;
1493         
1494         VirtualRegister virtualRegister = variableAccessData->local();
1495
1496         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1497         
1498 #if USE(JSVALUE64)
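        // On 64-bit we check the argument slot in place using the JSValue64 encoding:
        // boxed int32s have all sixteen high tag bits set (e.g. the int32 5 is encoded
        // as 0xFFFF000000000005), so anything Below the tag-type-number register is not
        // an int32; booleans are ValueFalse/ValueTrue (0x06/0x07), which differ only in
        // the low bit, so xoring with ValueFalse and testing against ~1 rejects
        // non-booleans; cells have none of the tag-mask bits set.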
1499         switch (format) {
1500         case FlushedInt32: {
1501             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1502             break;
1503         }
1504         case FlushedBoolean: {
1505             GPRTemporary temp(this);
1506             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1507             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1508             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1509             break;
1510         }
1511         case FlushedCell: {
1512             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1513             break;
1514         }
1515         default:
1516             RELEASE_ASSERT_NOT_REACHED();
1517             break;
1518         }
1519 #else
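        // On 32-bit the tag and payload live in separate words, so it is enough to
        // compare the tag word against Int32Tag / BooleanTag / CellTag directly.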
1520         switch (format) {
1521         case FlushedInt32: {
1522             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1523             break;
1524         }
1525         case FlushedBoolean: {
1526             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1527             break;
1528         }
1529         case FlushedCell: {
1530             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1531             break;
1532         }
1533         default:
1534             RELEASE_ASSERT_NOT_REACHED();
1535             break;
1536         }
1537 #endif
1538     }
1539     m_isCheckingArgumentTypes = false;
1540 }
1541
1542 bool SpeculativeJIT::compile()
1543 {
1544     checkArgumentTypes();
1545     
1546     ASSERT(!m_currentNode);
1547     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1548         m_jit.setForBlockIndex(blockIndex);
1549         m_block = m_jit.graph().block(blockIndex);
1550         compileCurrentBlock();
1551     }
1552     linkBranches();
1553     return true;
1554 }
1555
1556 void SpeculativeJIT::createOSREntries()
1557 {
1558     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1559         BasicBlock* block = m_jit.graph().block(blockIndex);
1560         if (!block)
1561             continue;
1562         if (!block->isOSRTarget)
1563             continue;
1564         
1565         // Currently we don't have OSR entry trampolines. We could add them
1566         // here if need be.
1567         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1568     }
1569 }
1570
1571 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1572 {
1573     unsigned osrEntryIndex = 0;
1574     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1575         BasicBlock* block = m_jit.graph().block(blockIndex);
1576         if (!block)
1577             continue;
1578         if (!block->isOSRTarget)
1579             continue;
1580         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1581     }
1582     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1583 }
1584
1585 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1586 {
1587     Edge child3 = m_jit.graph().varArgChild(node, 2);
1588     Edge child4 = m_jit.graph().varArgChild(node, 3);
1589
1590     ArrayMode arrayMode = node->arrayMode();
1591     
1592     GPRReg baseReg = base.gpr();
1593     GPRReg propertyReg = property.gpr();
1594     
1595     SpeculateDoubleOperand value(this, child3);
1596
1597     FPRReg valueReg = value.fpr();
1598     
1599     DFG_TYPE_CHECK(
1600         JSValueRegs(), child3, SpecFullRealNumber,
1601         m_jit.branchDouble(
1602             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1603     
1604     if (!m_compileOkay)
1605         return;
1606     
1607     StorageOperand storage(this, child4);
1608     GPRReg storageReg = storage.gpr();
1609
1610     if (node->op() == PutByValAlias) {
1611         // Store the value to the array.
1612         GPRReg propertyReg = property.gpr();
1613         FPRReg valueReg = value.fpr();
1614         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1615         
1616         noResult(m_currentNode);
1617         return;
1618     }
1619     
1620     GPRTemporary temporary;
1621     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1622
1623     MacroAssembler::Jump slowCase;
1624     
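    // Bounds handling: for in-bounds array modes we simply speculate that the index is
    // below the public length. Otherwise, indices beyond the vector length either fail
    // speculation or take the out-of-bounds slow path added below, while indices between
    // the public length and the vector length are stored in place and the public length
    // is bumped to index + 1.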
1625     if (arrayMode.isInBounds()) {
1626         speculationCheck(
1627             OutOfBounds, JSValueRegs(), 0,
1628             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1629     } else {
1630         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1631         
1632         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1633         
1634         if (!arrayMode.isOutOfBounds())
1635             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1636         
1637         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1638         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1639         
1640         inBounds.link(&m_jit);
1641     }
1642     
1643     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1644
1645     base.use();
1646     property.use();
1647     value.use();
1648     storage.use();
1649     
1650     if (arrayMode.isOutOfBounds()) {
1651         addSlowPathGenerator(
1652             slowPathCall(
1653                 slowCase, this,
1654                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1655                 NoResult, baseReg, propertyReg, valueReg));
1656     }
1657
1658     noResult(m_currentNode, UseChildrenCalledExplicitly);
1659 }
1660
1661 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1662 {
1663     SpeculateCellOperand string(this, node->child1());
1664     SpeculateStrictInt32Operand index(this, node->child2());
1665     StorageOperand storage(this, node->child3());
1666
1667     GPRReg stringReg = string.gpr();
1668     GPRReg indexReg = index.gpr();
1669     GPRReg storageReg = storage.gpr();
1670     
1671     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1672
1673     // Unsigned comparison so we can filter out negative indices and indices that are too large.
1674     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1675
1676     GPRTemporary scratch(this);
1677     GPRReg scratchReg = scratch.gpr();
1678
1679     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1680
1681     // Load the character into scratchReg
1682     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1683
1684     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1685     JITCompiler::Jump cont8Bit = m_jit.jump();
1686
1687     is16Bit.link(&m_jit);
1688
1689     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1690
1691     cont8Bit.link(&m_jit);
1692
1693     int32Result(scratchReg, m_currentNode);
1694 }
1695
1696 void SpeculativeJIT::compileGetByValOnString(Node* node)
1697 {
1698     SpeculateCellOperand base(this, node->child1());
1699     SpeculateStrictInt32Operand property(this, node->child2());
1700     StorageOperand storage(this, node->child3());
1701     GPRReg baseReg = base.gpr();
1702     GPRReg propertyReg = property.gpr();
1703     GPRReg storageReg = storage.gpr();
1704
1705     GPRTemporary scratch(this);
1706     GPRReg scratchReg = scratch.gpr();
1707 #if USE(JSVALUE32_64)
1708     GPRTemporary resultTag;
1709     GPRReg resultTagReg = InvalidGPRReg;
1710     if (node->arrayMode().isOutOfBounds()) {
1711         GPRTemporary realResultTag(this);
1712         resultTag.adopt(realResultTag);
1713         resultTagReg = resultTag.gpr();
1714     }
1715 #endif
1716
1717     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1718
1719     // Unsigned comparison so we can filter out negative indices and indices that are too large.
1720     JITCompiler::Jump outOfBounds = m_jit.branch32(
1721         MacroAssembler::AboveOrEqual, propertyReg,
1722         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1723     if (node->arrayMode().isInBounds())
1724         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1725
1726     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1727
1728     // Load the character into scratchReg
1729     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1730
1731     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1732     JITCompiler::Jump cont8Bit = m_jit.jump();
1733
1734     is16Bit.link(&m_jit);
1735
1736     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1737
1738     JITCompiler::Jump bigCharacter =
1739         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1740
1741     // 8 bit string values don't need the isASCII check.
1742     cont8Bit.link(&m_jit);
1743
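    // The character code is now in scratchReg; turn it into a JSString* by indexing the
    // VM's single-character string table (scale by pointer size, add the table base,
    // load). Characters >= 0x100 branch to the operationSingleCharacterString slow path
    // instead.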
1744     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1745     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1746     m_jit.loadPtr(scratchReg, scratchReg);
1747
1748     addSlowPathGenerator(
1749         slowPathCall(
1750             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1751
1752     if (node->arrayMode().isOutOfBounds()) {
1753 #if USE(JSVALUE32_64)
1754         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1755 #endif
1756
1757         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1758         if (globalObject->stringPrototypeChainIsSane()) {
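            // When the String and Object prototypes have no indexed properties, a
            // non-negative out-of-bounds read can only observe undefined, so we can use
            // the cheap sane-chain slow path and watch both prototype structures so this
            // code is invalidated if that assumption ever breaks.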
1759             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1760             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1761             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1762             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1763             // indexed properties either.
1764             // https://bugs.webkit.org/show_bug.cgi?id=144668
1765             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1766             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1767             
1768 #if USE(JSVALUE64)
1769             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1770                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1771 #else
1772             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1773                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1774                 baseReg, propertyReg));
1775 #endif
1776         } else {
1777 #if USE(JSVALUE64)
1778             addSlowPathGenerator(
1779                 slowPathCall(
1780                     outOfBounds, this, operationGetByValStringInt,
1781                     scratchReg, baseReg, propertyReg));
1782 #else
1783             addSlowPathGenerator(
1784                 slowPathCall(
1785                     outOfBounds, this, operationGetByValStringInt,
1786                     resultTagReg, scratchReg, baseReg, propertyReg));
1787 #endif
1788         }
1789         
1790 #if USE(JSVALUE64)
1791         jsValueResult(scratchReg, m_currentNode);
1792 #else
1793         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1794 #endif
1795     } else
1796         cellResult(scratchReg, m_currentNode);
1797 }
1798
1799 void SpeculativeJIT::compileFromCharCode(Node* node)
1800 {
1801     SpeculateStrictInt32Operand property(this, node->child1());
1802     GPRReg propertyReg = property.gpr();
1803     GPRTemporary smallStrings(this);
1804     GPRTemporary scratch(this);
1805     GPRReg scratchReg = scratch.gpr();
1806     GPRReg smallStringsReg = smallStrings.gpr();
1807
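    // Fast path: char codes below 0xff are looked up in the VM's single-character string
    // table. A code that is too large, or a null table entry, falls back to
    // operationStringFromCharCode.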
1808     JITCompiler::JumpList slowCases;
1809     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1810     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1811     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1812
1813     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1814     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1815     cellResult(scratchReg, m_currentNode);
1816 }
1817
1818 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1819 {
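    // Decide how ValueToInt32 should consume this operand based on the format it
    // currently occupies: operands already known to be int32 take the integer fast path,
    // booleans and cells cannot be converted here and terminate speculative execution,
    // and everything else goes through the generic JSValue path.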
1820     VirtualRegister virtualRegister = node->virtualRegister();
1821     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1822
1823     switch (info.registerFormat()) {
1824     case DataFormatStorage:
1825         RELEASE_ASSERT_NOT_REACHED();
1826
1827     case DataFormatBoolean:
1828     case DataFormatCell:
1829         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1830         return GeneratedOperandTypeUnknown;
1831
1832     case DataFormatNone:
1833     case DataFormatJSCell:
1834     case DataFormatJS:
1835     case DataFormatJSBoolean:
1836     case DataFormatJSDouble:
1837         return GeneratedOperandJSValue;
1838
1839     case DataFormatJSInt32:
1840     case DataFormatInt32:
1841         return GeneratedOperandInteger;
1842
1843     default:
1844         RELEASE_ASSERT_NOT_REACHED();
1845         return GeneratedOperandTypeUnknown;
1846     }
1847 }
1848
1849 void SpeculativeJIT::compileValueToInt32(Node* node)
1850 {
1851     switch (node->child1().useKind()) {
1852 #if USE(JSVALUE64)
1853     case Int52RepUse: {
1854         SpeculateStrictInt52Operand op1(this, node->child1());
1855         GPRTemporary result(this, Reuse, op1);
1856         GPRReg op1GPR = op1.gpr();
1857         GPRReg resultGPR = result.gpr();
1858         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1859         int32Result(resultGPR, node, DataFormatInt32);
1860         return;
1861     }
1862 #endif // USE(JSVALUE64)
1863         
1864     case DoubleRepUse: {
1865         GPRTemporary result(this);
1866         SpeculateDoubleOperand op1(this, node->child1());
1867         FPRReg fpr = op1.fpr();
1868         GPRReg gpr = result.gpr();
1869         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1870         
1871         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1872         
1873         int32Result(gpr, node);
1874         return;
1875     }
1876     
1877     case NumberUse:
1878     case NotCellUse: {
1879         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1880         case GeneratedOperandInteger: {
1881             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1882             GPRTemporary result(this, Reuse, op1);
1883             m_jit.move(op1.gpr(), result.gpr());
1884             int32Result(result.gpr(), node, op1.format());
1885             return;
1886         }
1887         case GeneratedOperandJSValue: {
1888             GPRTemporary result(this);
1889 #if USE(JSVALUE64)
1890             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1891
1892             GPRReg gpr = op1.gpr();
1893             GPRReg resultGpr = result.gpr();
1894             FPRTemporary tempFpr(this);
1895             FPRReg fpr = tempFpr.fpr();
1896
1897             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1898             JITCompiler::JumpList converted;
1899
1900             if (node->child1().useKind() == NumberUse) {
1901                 DFG_TYPE_CHECK(
1902                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1903                     m_jit.branchTest64(
1904                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1905             } else {
1906                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1907                 
1908                 DFG_TYPE_CHECK(
1909                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1910                 
1911                 // It's not a cell: so true turns into 1 and all else turns into 0.
1912                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1913                 converted.append(m_jit.jump());
1914                 
1915                 isNumber.link(&m_jit);
1916             }
1917
1918             // First, if we get here we have a double encoded as a JSValue
1919             m_jit.move(gpr, resultGpr);
1920             unboxDouble(resultGpr, fpr);
1921
1922             silentSpillAllRegisters(resultGpr);
1923             callOperation(toInt32, resultGpr, fpr);
1924             silentFillAllRegisters(resultGpr);
1925
1926             converted.append(m_jit.jump());
1927
1928             isInteger.link(&m_jit);
1929             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1930
1931             converted.link(&m_jit);
1932 #else
1933             Node* childNode = node->child1().node();
1934             VirtualRegister virtualRegister = childNode->virtualRegister();
1935             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1936
1937             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1938
1939             GPRReg payloadGPR = op1.payloadGPR();
1940             GPRReg resultGpr = result.gpr();
1941         
1942             JITCompiler::JumpList converted;
1943
1944             if (info.registerFormat() == DataFormatJSInt32)
1945                 m_jit.move(payloadGPR, resultGpr);
1946             else {
1947                 GPRReg tagGPR = op1.tagGPR();
1948                 FPRTemporary tempFpr(this);
1949                 FPRReg fpr = tempFpr.fpr();
1950                 FPRTemporary scratch(this);
1951
1952                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
1953
1954                 if (node->child1().useKind() == NumberUse) {
1955                     DFG_TYPE_CHECK(
1956                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
1957                         m_jit.branch32(
1958                             MacroAssembler::AboveOrEqual, tagGPR,
1959                             TrustedImm32(JSValue::LowestTag)));
1960                 } else {
1961                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
1962                     
1963                     DFG_TYPE_CHECK(
1964                         op1.jsValueRegs(), node->child1(), ~SpecCell,
1965                         m_jit.branchIfCell(op1.jsValueRegs()));
1966                     
1967                     // It's not a cell: so true turns into 1 and all else turns into 0.
1968                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
1969                     m_jit.move(TrustedImm32(0), resultGpr);
1970                     converted.append(m_jit.jump());
1971                     
1972                     isBoolean.link(&m_jit);
1973                     m_jit.move(payloadGPR, resultGpr);
1974                     converted.append(m_jit.jump());
1975                     
1976                     isNumber.link(&m_jit);
1977                 }
1978
1979                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
1980
1981                 silentSpillAllRegisters(resultGpr);
1982                 callOperation(toInt32, resultGpr, fpr);
1983                 silentFillAllRegisters(resultGpr);
1984
1985                 converted.append(m_jit.jump());
1986
1987                 isInteger.link(&m_jit);
1988                 m_jit.move(payloadGPR, resultGpr);
1989
1990                 converted.link(&m_jit);
1991             }
1992 #endif
1993             int32Result(resultGpr, node);
1994             return;
1995         }
1996         case GeneratedOperandTypeUnknown:
1997             RELEASE_ASSERT(!m_compileOkay);
1998             return;
1999         }
2000         RELEASE_ASSERT_NOT_REACHED();
2001         return;
2002     }
2003     
2004     default:
2005         ASSERT(!m_compileOkay);
2006         return;
2007     }
2008 }
2009
2010 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2011 {
2012     if (doesOverflow(node->arithMode())) {
2013         // We know that this sometimes produces doubles. So produce a double every
2014         // time. This at least allows subsequent code to not have weird conditionals.
2015             
2016         SpeculateInt32Operand op1(this, node->child1());
2017         FPRTemporary result(this);
2018             
2019         GPRReg inputGPR = op1.gpr();
2020         FPRReg outputFPR = result.fpr();
2021             
2022         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2023             
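        // convertInt32ToDouble treated the bits as signed; if the sign bit was set, the
        // true unsigned value is the signed result plus 2^32, so compensate here.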
2024         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2025         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2026         positive.link(&m_jit);
2027             
2028         doubleResult(outputFPR, node);
2029         return;
2030     }
2031     
2032     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2033
2034     SpeculateInt32Operand op1(this, node->child1());
2035     GPRTemporary result(this);
2036
2037     m_jit.move(op1.gpr(), result.gpr());
2038
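    // In CheckOverflow mode we speculate that the unsigned value fits in an int32, so a
    // negative signed value (top bit set) means it does not and we must exit.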
2039     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2040
2041     int32Result(result.gpr(), node, op1.format());
2042 }
2043
2044 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2045 {
2046     SpeculateDoubleOperand op1(this, node->child1());
2047     FPRTemporary scratch(this);
2048     GPRTemporary result(this);
2049     
2050     FPRReg valueFPR = op1.fpr();
2051     FPRReg scratchFPR = scratch.fpr();
2052     GPRReg resultGPR = result.gpr();
2053
2054     JITCompiler::JumpList failureCases;
2055     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2056     m_jit.branchConvertDoubleToInt32(
2057         valueFPR, resultGPR, failureCases, scratchFPR,
2058         shouldCheckNegativeZero(node->arithMode()));
2059     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2060
2061     int32Result(resultGPR, node);
2062 }
2063
2064 void SpeculativeJIT::compileDoubleRep(Node* node)
2065 {
2066     switch (node->child1().useKind()) {
2067     case NumberUse: {
2068         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2069     
2070         if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2071             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2072             FPRTemporary result(this);
2073             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2074             doubleResult(result.fpr(), node);
2075             return;
2076         }
2077     
2078         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2079         FPRTemporary result(this);
2080     
2081 #if USE(JSVALUE64)
2082         GPRTemporary temp(this);
2083
2084         GPRReg op1GPR = op1.gpr();
2085         GPRReg tempGPR = temp.gpr();
2086         FPRReg resultFPR = result.fpr();
2087     
2088         JITCompiler::Jump isInteger = m_jit.branch64(
2089             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2090     
2091         if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2092             typeCheck(
2093                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2094                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2095         }
2096     
2097         m_jit.move(op1GPR, tempGPR);
2098         unboxDouble(tempGPR, resultFPR);
2099         JITCompiler::Jump done = m_jit.jump();
2100     
2101         isInteger.link(&m_jit);
2102         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2103         done.link(&m_jit);
2104 #else // USE(JSVALUE64) -> this is the 32_64 case
2105         FPRTemporary temp(this);
2106     
2107         GPRReg op1TagGPR = op1.tagGPR();
2108         GPRReg op1PayloadGPR = op1.payloadGPR();
2109         FPRReg tempFPR = temp.fpr();
2110         FPRReg resultFPR = result.fpr();
2111     
2112         JITCompiler::Jump isInteger = m_jit.branch32(
2113             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2114     
2115         if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2116             typeCheck(
2117                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2118                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2119         }
2120     
2121         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2122         JITCompiler::Jump done = m_jit.jump();
2123     
2124         isInteger.link(&m_jit);
2125         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2126         done.link(&m_jit);
2127 #endif // USE(JSVALUE64)
2128     
2129         doubleResult(resultFPR, node);
2130         return;
2131     }
2132         
2133 #if USE(JSVALUE64)
2134     case Int52RepUse: {
2135         SpeculateStrictInt52Operand value(this, node->child1());
2136         FPRTemporary result(this);
2137         
2138         GPRReg valueGPR = value.gpr();
2139         FPRReg resultFPR = result.fpr();
2140
2141         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2142         
2143         doubleResult(resultFPR, node);
2144         return;
2145     }
2146 #endif // USE(JSVALUE64)
2147         
2148     default:
2149         RELEASE_ASSERT_NOT_REACHED();
2150         return;
2151     }
2152 }
2153
2154 void SpeculativeJIT::compileValueRep(Node* node)
2155 {
2156     switch (node->child1().useKind()) {
2157     case DoubleRepUse: {
2158         SpeculateDoubleOperand value(this, node->child1());
2159         JSValueRegsTemporary result(this);
2160         
2161         FPRReg valueFPR = value.fpr();
2162         JSValueRegs resultRegs = result.regs();
2163         
2164         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2165         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2166         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2167         // local was purified.
2168         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2169             m_jit.purifyNaN(valueFPR);
2170
2171         boxDouble(valueFPR, resultRegs);
2172         
2173         jsValueResult(resultRegs, node);
2174         return;
2175     }
2176         
2177 #if USE(JSVALUE64)
2178     case Int52RepUse: {
2179         SpeculateStrictInt52Operand value(this, node->child1());
2180         GPRTemporary result(this);
2181         
2182         GPRReg valueGPR = value.gpr();
2183         GPRReg resultGPR = result.gpr();
2184         
2185         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2186         
2187         jsValueResult(resultGPR, node);
2188         return;
2189     }
2190 #endif // USE(JSVALUE64)
2191         
2192     default:
2193         RELEASE_ASSERT_NOT_REACHED();
2194         return;
2195     }
2196 }
2197
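// Clamp a double to the [0, 255] range used by clamped byte stores. The !(d > 0) test
// also catches NaN (which clamps to 0), and the +0.5 makes the caller's toInt32()
// truncation round to the nearest integer.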
2198 static double clampDoubleToByte(double d)
2199 {
2200     d += 0.5;
2201     if (!(d > 0))
2202         d = 0;
2203     else if (d > 255)
2204         d = 255;
2205     return d;
2206 }
2207
2208 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2209 {
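    // The unsigned BelowOrEqual catches values already in [0, 255]; the signed
    // GreaterThan catches values above 255, which clamp to 255; what falls through is
    // negative and is zeroed.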
2210     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2211     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2212     jit.xorPtr(result, result);
2213     MacroAssembler::Jump clamped = jit.jump();
2214     tooBig.link(&jit);
2215     jit.move(JITCompiler::TrustedImm32(255), result);
2216     clamped.link(&jit);
2217     inBounds.link(&jit);
2218 }
2219
2220 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2221 {
2222     // Unordered compare so we pick up NaN
2223     static const double zero = 0;
2224     static const double byteMax = 255;
2225     static const double half = 0.5;
2226     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2227     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2228     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2229     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2230     
2231     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2232     // FIXME: This should probably just use a floating point round!
2233     // https://bugs.webkit.org/show_bug.cgi?id=72054
2234     jit.addDouble(source, scratch);
2235     jit.truncateDoubleToInt32(scratch, result);   
2236     MacroAssembler::Jump truncatedInt = jit.jump();
2237     
2238     tooSmall.link(&jit);
2239     jit.xorPtr(result, result);
2240     MacroAssembler::Jump zeroed = jit.jump();
2241     
2242     tooBig.link(&jit);
2243     jit.move(JITCompiler::TrustedImm32(255), result);
2244     
2245     truncatedInt.link(&jit);
2246     zeroed.link(&jit);
2247
2248 }
2249
2250 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2251 {
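    // PutByValAlias means an earlier access already performed the bounds check, so no
    // jump is needed. If the view and index are compile-time known and provably in
    // bounds we can also skip the check; with a known view we compare against its
    // constant length, otherwise against the length loaded from the view itself.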
2252     if (node->op() == PutByValAlias)
2253         return JITCompiler::Jump();
2254     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2255         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2256     if (view) {
2257         uint32_t length = view->length();
2258         Node* indexNode = m_jit.graph().child(node, 1).node();
2259         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2260             return JITCompiler::Jump();
2261         return m_jit.branch32(
2262             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2263     }
2264     return m_jit.branch32(
2265         MacroAssembler::AboveOrEqual, indexGPR,
2266         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2267 }
2268
2269 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2270 {
2271     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2272     if (!jump.isSet())
2273         return;
2274     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2275 }
2276
2277 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2278 {
2279     ASSERT(isInt(type));
2280     
2281     SpeculateCellOperand base(this, node->child1());
2282     SpeculateStrictInt32Operand property(this, node->child2());
2283     StorageOperand storage(this, node->child3());
2284
2285     GPRReg baseReg = base.gpr();
2286     GPRReg propertyReg = property.gpr();
2287     GPRReg storageReg = storage.gpr();
2288
2289     GPRTemporary result(this);
2290     GPRReg resultReg = result.gpr();
2291
2292     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2293
2294     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2295     switch (elementSize(type)) {
2296     case 1:
2297         if (isSigned(type))
2298             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2299         else
2300             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2301         break;
2302     case 2:
2303         if (isSigned(type))
2304             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2305         else
2306             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2307         break;
2308     case 4:
2309         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2310         break;
2311     default:
2312         CRASH();
2313     }
2314     if (elementSize(type) < 4 || isSigned(type)) {
2315         int32Result(resultReg, node);
2316         return;
2317     }
2318     
2319     ASSERT(elementSize(type) == 4 && !isSigned(type));
2320     if (node->shouldSpeculateInt32()) {
2321         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2322         int32Result(resultReg, node);
2323         return;
2324     }
2325     
2326 #if USE(JSVALUE64)
2327     if (node->shouldSpeculateMachineInt()) {
2328         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2329         strictInt52Result(resultReg, node);
2330         return;
2331     }
2332 #endif
2333     
2334     FPRTemporary fresult(this);
2335     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2336     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2337     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2338     positive.link(&m_jit);
2339     doubleResult(fresult.fpr(), node);
2340 }
2341
2342 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2343 {
2344     ASSERT(isInt(type));
2345     
2346     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2347     GPRReg storageReg = storage.gpr();
2348     
2349     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2350     
2351     GPRTemporary value;
2352     GPRReg valueGPR = InvalidGPRReg;
2353     
2354     if (valueUse->isConstant()) {
2355         JSValue jsValue = valueUse->asJSValue();
2356         if (!jsValue.isNumber()) {
2357             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2358             noResult(node);
2359             return;
2360         }
2361         double d = jsValue.asNumber();
2362         if (isClamped(type)) {
2363             ASSERT(elementSize(type) == 1);
2364             d = clampDoubleToByte(d);
2365         }
2366         GPRTemporary scratch(this);
2367         GPRReg scratchReg = scratch.gpr();
2368         m_jit.move(Imm32(toInt32(d)), scratchReg);
2369         value.adopt(scratch);
2370         valueGPR = scratchReg;
2371     } else {
2372         switch (valueUse.useKind()) {
2373         case Int32Use: {
2374             SpeculateInt32Operand valueOp(this, valueUse);
2375             GPRTemporary scratch(this);
2376             GPRReg scratchReg = scratch.gpr();
2377             m_jit.move(valueOp.gpr(), scratchReg);
2378             if (isClamped(type)) {
2379                 ASSERT(elementSize(type) == 1);
2380                 compileClampIntegerToByte(m_jit, scratchReg);
2381             }
2382             value.adopt(scratch);
2383             valueGPR = scratchReg;
2384             break;
2385         }
2386             
2387 #if USE(JSVALUE64)
2388         case Int52RepUse: {
2389             SpeculateStrictInt52Operand valueOp(this, valueUse);
2390             GPRTemporary scratch(this);
2391             GPRReg scratchReg = scratch.gpr();
2392             m_jit.move(valueOp.gpr(), scratchReg);
2393             if (isClamped(type)) {
2394                 ASSERT(elementSize(type) == 1);
2395                 MacroAssembler::Jump inBounds = m_jit.branch64(
2396                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2397                 MacroAssembler::Jump tooBig = m_jit.branch64(
2398                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2399                 m_jit.move(TrustedImm32(0), scratchReg);
2400                 MacroAssembler::Jump clamped = m_jit.jump();
2401                 tooBig.link(&m_jit);
2402                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2403                 clamped.link(&m_jit);
2404                 inBounds.link(&m_jit);
2405             }
2406             value.adopt(scratch);
2407             valueGPR = scratchReg;
2408             break;
2409         }
2410 #endif // USE(JSVALUE64)
2411             
2412         case DoubleRepUse: {
2413             if (isClamped(type)) {
2414                 ASSERT(elementSize(type) == 1);
2415                 SpeculateDoubleOperand valueOp(this, valueUse);
2416                 GPRTemporary result(this);
2417                 FPRTemporary floatScratch(this);
2418                 FPRReg fpr = valueOp.fpr();
2419                 GPRReg gpr = result.gpr();
2420                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2421                 value.adopt(result);
2422                 valueGPR = gpr;
2423             } else {
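                // Non-clamped integer stores take ToInt32 of the double: NaN stores 0,
                // doubles the inline truncation can handle are truncated toward zero,
                // and everything else falls back to the toInt32 operation.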
2424                 SpeculateDoubleOperand valueOp(this, valueUse);
2425                 GPRTemporary result(this);
2426                 FPRReg fpr = valueOp.fpr();
2427                 GPRReg gpr = result.gpr();
2428                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2429                 m_jit.xorPtr(gpr, gpr);
2430                 MacroAssembler::Jump fixed = m_jit.jump();
2431                 notNaN.link(&m_jit);
2432                 
2433                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2434                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2435                 
2436                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2437                 
2438                 fixed.link(&m_jit);
2439                 value.adopt(result);
2440                 valueGPR = gpr;
2441             }
2442             break;
2443         }
2444             
2445         default:
2446             RELEASE_ASSERT_NOT_REACHED();
2447             break;
2448         }
2449     }
2450     
2451     ASSERT_UNUSED(valueGPR, valueGPR != property);
2452     ASSERT(valueGPR != base);
2453     ASSERT(valueGPR != storageReg);
2454     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2455     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2456         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2457         outOfBounds = MacroAssembler::Jump();
2458     }
2459
2460     switch (elementSize(type)) {
2461     case 1:
2462         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2463         break;
2464     case 2:
2465         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2466         break;
2467     case 4:
2468         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2469         break;
2470     default:
2471         CRASH();
2472     }
2473     if (outOfBounds.isSet())
2474         outOfBounds.link(&m_jit);
2475     noResult(node);
2476 }
2477
2478 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2479 {
2480     ASSERT(isFloat(type));
2481     
2482     SpeculateCellOperand base(this, node->child1());
2483     SpeculateStrictInt32Operand property(this, node->child2());
2484     StorageOperand storage(this, node->child3());
2485
2486     GPRReg baseReg = base.gpr();
2487     GPRReg propertyReg = property.gpr();
2488     GPRReg storageReg = storage.gpr();
2489
2490     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2491
2492     FPRTemporary result(this);
2493     FPRReg resultReg = result.fpr();
2494     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2495     switch (elementSize(type)) {
2496     case 4:
2497         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2498         m_jit.convertFloatToDouble(resultReg, resultReg);
2499         break;
2500     case 8: {
2501         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2502         break;
2503     }
2504     default:
2505         RELEASE_ASSERT_NOT_REACHED();
2506     }
2507     
2508     doubleResult(resultReg, node);
2509 }
2510
2511 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2512 {
2513     ASSERT(isFloat(type));
2514     
2515     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2516     GPRReg storageReg = storage.gpr();
2517     
2518     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2519     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2520
2521     SpeculateDoubleOperand valueOp(this, valueUse);
2522     FPRTemporary scratch(this);
2523     FPRReg valueFPR = valueOp.fpr();
2524     FPRReg scratchFPR = scratch.fpr();
2525
2526     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2527     
2528     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2529     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2530         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2531         outOfBounds = MacroAssembler::Jump();
2532     }
2533     
2534     switch (elementSize(type)) {
2535     case 4: {
2536         m_jit.moveDouble(valueFPR, scratchFPR);
2537         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2538         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2539         break;
2540     }
2541     case 8:
2542         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2543         break;
2544     default:
2545         RELEASE_ASSERT_NOT_REACHED();
2546     }
2547     if (outOfBounds.isSet())
2548         outOfBounds.link(&m_jit);
2549     noResult(node);
2550 }
2551
2552 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2553 {
2554     // Check that prototype is an object.
2555     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2556     
2557     // Initialize scratchReg with the value being checked.
2558     m_jit.move(valueReg, scratchReg);
2559     
2560     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
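    // The loop ends either when we find prototypeReg (instanceof is true) or when the
    // loaded prototype is no longer a cell (null on 64-bit, a zero payload on 32-bit),
    // meaning we fell off the end of the chain and the result is false.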
2561     MacroAssembler::Label loop(&m_jit);
2562     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2563     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2564     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2565 #if USE(JSVALUE64)
2566     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2567 #else
2568     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2569 #endif
2570     
2571     // No match - result is false.
2572 #if USE(JSVALUE64)
2573     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2574 #else
2575     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2576 #endif
2577     MacroAssembler::Jump putResult = m_jit.jump();
2578     
2579     isInstance.link(&m_jit);
2580 #if USE(JSVALUE64)
2581     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2582 #else
2583     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2584 #endif
2585     
2586     putResult.link(&m_jit);
2587 }
2588
2589 void SpeculativeJIT::compileInstanceOf(Node* node)
2590 {
2591     if (node->child1().useKind() == UntypedUse) {
2592         // It might not be a cell. Speculate less aggressively.
2593         // Or: it might only be used once (i.e. by us), so we get zero benefit
2594         // from speculating any more aggressively than we absolutely need to.
2595         
2596         JSValueOperand value(this, node->child1());
2597         SpeculateCellOperand prototype(this, node->child2());
2598         GPRTemporary scratch(this);
2599         GPRTemporary scratch2(this);
2600         
2601         GPRReg prototypeReg = prototype.gpr();
2602         GPRReg scratchReg = scratch.gpr();
2603         GPRReg scratch2Reg = scratch2.gpr();
2604         
2605         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2606         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2607         moveFalseTo(scratchReg);
2608
2609         MacroAssembler::Jump done = m_jit.jump();
2610         
2611         isCell.link(&m_jit);
2612         
2613         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2614         
2615         done.link(&m_jit);
2616
2617         blessedBooleanResult(scratchReg, node);
2618         return;
2619     }
2620     
2621     SpeculateCellOperand value(this, node->child1());
2622     SpeculateCellOperand prototype(this, node->child2());
2623     
2624     GPRTemporary scratch(this);
2625     GPRTemporary scratch2(this);
2626     
2627     GPRReg valueReg = value.gpr();
2628     GPRReg prototypeReg = prototype.gpr();
2629     GPRReg scratchReg = scratch.gpr();
2630     GPRReg scratch2Reg = scratch2.gpr();
2631     
2632     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2633
2634     blessedBooleanResult(scratchReg, node);
2635 }
2636
2637 void SpeculativeJIT::compileAdd(Node* node)
2638 {
2639     switch (node->binaryUseKind()) {
2640     case Int32Use: {
2641         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2642         
2643         if (node->child1()->isInt32Constant()) {
2644             int32_t imm1 = node->child1()->asInt32();
2645             SpeculateInt32Operand op2(this, node->child2());
2646             GPRTemporary result(this);
2647
2648             if (!shouldCheckOverflow(node->arithMode())) {
2649                 m_jit.move(op2.gpr(), result.gpr());
2650                 m_jit.add32(Imm32(imm1), result.gpr());
2651             } else
2652                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2653
2654             int32Result(result.gpr(), node);
2655             return;
2656         }
2657         
2658         if (node->child2()->isInt32Constant()) {
2659             SpeculateInt32Operand op1(this, node->child1());
2660             int32_t imm2 = node->child2()->asInt32();
2661             GPRTemporary result(this);
2662                 
2663             if (!shouldCheckOverflow(node->arithMode())) {
2664                 m_jit.move(op1.gpr(), result.gpr());
2665                 m_jit.add32(Imm32(imm2), result.gpr());
2666             } else
2667                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2668
2669             int32Result(result.gpr(), node);
2670             return;
2671         }
2672                 
2673         SpeculateInt32Operand op1(this, node->child1());
2674         SpeculateInt32Operand op2(this, node->child2());
2675         GPRTemporary result(this, Reuse, op1, op2);
2676
2677         GPRReg gpr1 = op1.gpr();
2678         GPRReg gpr2 = op2.gpr();
2679         GPRReg gprResult = result.gpr();
2680
2681         if (!shouldCheckOverflow(node->arithMode())) {
2682             if (gpr1 == gprResult)
2683                 m_jit.add32(gpr2, gprResult);
2684             else {
2685                 m_jit.move(gpr2, gprResult);
2686                 m_jit.add32(gpr1, gprResult);
2687             }
2688         } else {
2689             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2690                 
2691             if (gpr1 == gprResult)
2692                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2693             else if (gpr2 == gprResult)
2694                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2695             else
2696                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2697         }
2698
2699         int32Result(gprResult, node);
2700         return;
2701     }
2702         
2703 #if USE(JSVALUE64)
2704     case Int52RepUse: {
2705         ASSERT(shouldCheckOverflow(node->arithMode()));
2706         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2707
2708         // Will we need an overflow check? If we can prove that neither input can be
2709         // Int52 then the overflow check will not be necessary.
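        // (SpecInt52 covers machine ints outside the int32 range; if neither operand can
        // be one of those, the sum needs at most 33 bits and cannot overflow an int52.)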
2710         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2711             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2712             SpeculateWhicheverInt52Operand op1(this, node->child1());
2713             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2714             GPRTemporary result(this, Reuse, op1);
2715             m_jit.move(op1.gpr(), result.gpr());
2716             m_jit.add64(op2.gpr(), result.gpr());
2717             int52Result(result.gpr(), node, op1.format());
2718             return;
2719         }
2720         
2721         SpeculateInt52Operand op1(this, node->child1());
2722         SpeculateInt52Operand op2(this, node->child2());
2723         GPRTemporary result(this);
2724         m_jit.move(op1.gpr(), result.gpr());
2725         speculationCheck(
2726             Int52Overflow, JSValueRegs(), 0,
2727             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2728         int52Result(result.gpr(), node);
2729         return;
2730     }
2731 #endif // USE(JSVALUE64)
2732     
2733     case DoubleRepUse: {
2734         SpeculateDoubleOperand op1(this, node->child1());
2735         SpeculateDoubleOperand op2(this, node->child2());
2736         FPRTemporary result(this, op1, op2);
2737
2738         FPRReg reg1 = op1.fpr();
2739         FPRReg reg2 = op2.fpr();
2740         m_jit.addDouble(reg1, reg2, result.fpr());
2741
2742         doubleResult(result.fpr(), node);
2743         return;
2744     }
2745         
2746     default:
2747         RELEASE_ASSERT_NOT_REACHED();
2748         break;
2749     }
2750 }
2751
2752 void SpeculativeJIT::compileMakeRope(Node* node)
2753 {
2754     ASSERT(node->child1().useKind() == KnownStringUse);
2755     ASSERT(node->child2().useKind() == KnownStringUse);
2756     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2757     
2758     SpeculateCellOperand op1(this, node->child1());
2759     SpeculateCellOperand op2(this, node->child2());
2760     SpeculateCellOperand op3(this, node->child3());
2761     GPRTemporary result(this);
2762     GPRTemporary allocator(this);
2763     GPRTemporary scratch(this);
2764     
2765     GPRReg opGPRs[3];
2766     unsigned numOpGPRs;
2767     opGPRs[0] = op1.gpr();
2768     opGPRs[1] = op2.gpr();
2769     if (node->child3()) {
2770         opGPRs[2] = op3.gpr();
2771         numOpGPRs = 3;
2772     } else {
2773         opGPRs[2] = InvalidGPRReg;
2774         numOpGPRs = 2;
2775     }
2776     GPRReg resultGPR = result.gpr();
2777     GPRReg allocatorGPR = allocator.gpr();
2778     GPRReg scratchGPR = scratch.gpr();
2779     
2780     JITCompiler::JumpList slowPath;
2781     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2782     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2783     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2784         
2785     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2786     for (unsigned i = 0; i < numOpGPRs; ++i)
2787         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2788     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2789         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2790     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2791     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2792     if (!ASSERT_DISABLED) {
2793         JITCompiler::Jump ok = m_jit.branch32(
2794             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2795         m_jit.abortWithReason(DFGNegativeStringLength);
2796         ok.link(&m_jit);
2797     }
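     // A rope is 8-bit only if every fiber is 8-bit, so the fiber flags are ANDed
     // together below; the fiber lengths are summed in allocatorGPR with an
     // overflow check, since the combined length must still fit in 32 bits.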
2798     for (unsigned i = 1; i < numOpGPRs; ++i) {
2799         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2800         speculationCheck(
2801             Uncountable, JSValueSource(), nullptr,
2802             m_jit.branchAdd32(
2803                 JITCompiler::Overflow,
2804                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2805     }
2806     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2807     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2808     if (!ASSERT_DISABLED) {
2809         JITCompiler::Jump ok = m_jit.branch32(
2810             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2811         m_jit.abortWithReason(DFGNegativeStringLength);
2812         ok.link(&m_jit);
2813     }
2814     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2815     
2816     switch (numOpGPRs) {
2817     case 2:
2818         addSlowPathGenerator(slowPathCall(
2819             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2820         break;
2821     case 3:
2822         addSlowPathGenerator(slowPathCall(
2823             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2824         break;
2825     default:
2826         RELEASE_ASSERT_NOT_REACHED();
2827         break;
2828     }
2829         
2830     cellResult(resultGPR, node);
2831 }
2832
2833 void SpeculativeJIT::compileArithClz32(Node* node)
2834 {
2835     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
2836     SpeculateInt32Operand value(this, node->child1());
2837     GPRTemporary result(this, Reuse, value);
2838     GPRReg valueReg = value.gpr();
2839     GPRReg resultReg = result.gpr();
2840     m_jit.countLeadingZeros32(valueReg, resultReg);
2841     int32Result(resultReg, node);
2842 }
2843
2844 void SpeculativeJIT::compileArithSub(Node* node)
2845 {
2846     switch (node->binaryUseKind()) {
2847     case Int32Use: {
2848         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2849         
2850         if (node->child2()->isNumberConstant()) {
2851             SpeculateInt32Operand op1(this, node->child1());
2852             int32_t imm2 = node->child2()->asInt32();
2853             GPRTemporary result(this);
2854
2855             if (!shouldCheckOverflow(node->arithMode())) {
2856                 m_jit.move(op1.gpr(), result.gpr());
2857                 m_jit.sub32(Imm32(imm2), result.gpr());
2858             } else {
2859                 GPRTemporary scratch(this);
2860                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2861             }
2862
2863             int32Result(result.gpr(), node);
2864             return;
2865         }
2866             
2867         if (node->child1()->isNumberConstant()) {
2868             int32_t imm1 = node->child1()->asInt32();
2869             SpeculateInt32Operand op2(this, node->child2());
2870             GPRTemporary result(this);
2871                 
2872             m_jit.move(Imm32(imm1), result.gpr());
2873             if (!shouldCheckOverflow(node->arithMode()))
2874                 m_jit.sub32(op2.gpr(), result.gpr());
2875             else
2876                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2877                 
2878             int32Result(result.gpr(), node);
2879             return;
2880         }
2881             
2882         SpeculateInt32Operand op1(this, node->child1());
2883         SpeculateInt32Operand op2(this, node->child2());
2884         GPRTemporary result(this);
2885
2886         if (!shouldCheckOverflow(node->arithMode())) {
2887             m_jit.move(op1.gpr(), result.gpr());
2888             m_jit.sub32(op2.gpr(), result.gpr());
2889         } else
2890             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2891
2892         int32Result(result.gpr(), node);
2893         return;
2894     }
2895         
2896 #if USE(JSVALUE64)
2897     case Int52RepUse: {
2898         ASSERT(shouldCheckOverflow(node->arithMode()));
2899         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2900
2901         // Will we need an overflow check? If we can prove that neither input can be
2902         // Int52 then the overflow check will not be necessary.
2903         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2904             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2905             SpeculateWhicheverInt52Operand op1(this, node->child1());
2906             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2907             GPRTemporary result(this, Reuse, op1);
2908             m_jit.move(op1.gpr(), result.gpr());
2909             m_jit.sub64(op2.gpr(), result.gpr());
2910             int52Result(result.gpr(), node, op1.format());
2911             return;
2912         }
2913         
2914         SpeculateInt52Operand op1(this, node->child1());
2915         SpeculateInt52Operand op2(this, node->child2());
2916         GPRTemporary result(this);
2917         m_jit.move(op1.gpr(), result.gpr());
2918         speculationCheck(
2919             Int52Overflow, JSValueRegs(), 0,
2920             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2921         int52Result(result.gpr(), node);
2922         return;
2923     }
2924 #endif // USE(JSVALUE64)
2925
2926     case DoubleRepUse: {
2927         SpeculateDoubleOperand op1(this, node->child1());
2928         SpeculateDoubleOperand op2(this, node->child2());
2929         FPRTemporary result(this, op1);
2930
2931         FPRReg reg1 = op1.fpr();
2932         FPRReg reg2 = op2.fpr();
2933         m_jit.subDouble(reg1, reg2, result.fpr());
2934
2935         doubleResult(result.fpr(), node);
2936         return;
2937     }
2938         
2939     default:
2940         RELEASE_ASSERT_NOT_REACHED();
2941         return;
2942     }
2943 }
2944
2945 void SpeculativeJIT::compileArithNegate(Node* node)
2946 {
2947     switch (node->child1().useKind()) {
2948     case Int32Use: {
2949         SpeculateInt32Operand op1(this, node->child1());
2950         GPRTemporary result(this);
2951
2952         m_jit.move(op1.gpr(), result.gpr());
2953
2954         // Note: there is no arith mode in which the result is not used as a
2955         // number but someone still cares about negative zero.
2956         
2957         if (!shouldCheckOverflow(node->arithMode()))
2958             m_jit.neg32(result.gpr());
2959         else if (!shouldCheckNegativeZero(node->arithMode()))
2960             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
2961         else {
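            // result & 0x7fffffff is zero only for 0 (whose negation is -0, not
            // representable as an int32) and for -2^31 (whose negation overflows),
            // so this single test covers both the negative-zero and overflow cases.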
2962             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
2963             m_jit.neg32(result.gpr());
2964         }
2965
2966         int32Result(result.gpr(), node);
2967         return;
2968     }
2969
2970 #if USE(JSVALUE64)
2971     case Int52RepUse: {
2972         ASSERT(shouldCheckOverflow(node->arithMode()));
2973         
2974         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
2975             SpeculateWhicheverInt52Operand op1(this, node->child1());
2976             GPRTemporary result(this);
2977             GPRReg op1GPR = op1.gpr();
2978             GPRReg resultGPR = result.gpr();
2979             m_jit.move(op1GPR, resultGPR);
2980             m_jit.neg64(resultGPR);
2981             if (shouldCheckNegativeZero(node->arithMode())) {
2982                 speculationCheck(
2983                     NegativeZero, JSValueRegs(), 0,
2984                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
2985             }
2986             int52Result(resultGPR, node, op1.format());
2987             return;
2988         }
2989         
2990         SpeculateInt52Operand op1(this, node->child1());
2991         GPRTemporary result(this);
2992         GPRReg op1GPR = op1.gpr();
2993         GPRReg resultGPR = result.gpr();
2994         m_jit.move(op1GPR, resultGPR);
2995         speculationCheck(
2996             Int52Overflow, JSValueRegs(), 0,
2997             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
2998         if (shouldCheckNegativeZero(node->arithMode())) {
2999             speculationCheck(
3000                 NegativeZero, JSValueRegs(), 0,
3001                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3002         }
3003         int52Result(resultGPR, node);
3004         return;
3005     }
3006 #endif // USE(JSVALUE64)
3007         
3008     case DoubleRepUse: {
3009         SpeculateDoubleOperand op1(this, node->child1());
3010         FPRTemporary result(this);
3011         
3012         m_jit.negateDouble(op1.fpr(), result.fpr());
3013         
3014         doubleResult(result.fpr(), node);
3015         return;
3016     }
3017         
3018     default:
3019         RELEASE_ASSERT_NOT_REACHED();
3020         return;
3021     }
3022 }
3023 void SpeculativeJIT::compileArithMul(Node* node)
3024 {
3025     switch (node->binaryUseKind()) {
3026     case Int32Use: {
3027         SpeculateInt32Operand op1(this, node->child1());
3028         SpeculateInt32Operand op2(this, node->child2());
3029         GPRTemporary result(this);
3030
3031         GPRReg reg1 = op1.gpr();
3032         GPRReg reg2 = op2.gpr();
3033
3034         // We can perform truncated multiplications if we get to this point, because if the
3035         // fixup phase could not prove that it would be safe, it would have turned us into
3036         // a double multiplication.
3037         if (!shouldCheckOverflow(node->arithMode())) {
3038             m_jit.move(reg1, result.gpr());
3039             m_jit.mul32(reg2, result.gpr());
3040         } else {
3041             speculationCheck(
3042                 Overflow, JSValueRegs(), 0,
3043                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3044         }
3045             
3046         // Check for negative zero, if the users of this node care about such things.
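        // (For example, (-1) * 0 evaluates to -0 in JS, which an int32 result
        // cannot represent, so a zero product with a negative operand must exit.)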
3047         if (shouldCheckNegativeZero(node->arithMode())) {
3048             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3049             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3050             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3051             resultNonZero.link(&m_jit);
3052         }
3053
3054         int32Result(result.gpr(), node);
3055         return;
3056     }
3057     
3058 #if USE(JSVALUE64)   
3059     case Int52RepUse: {
3060         ASSERT(shouldCheckOverflow(node->arithMode()));
3061         
3062         // This is super clever. We want to do an int52 multiplication and check the
3063         // int52 overflow bit. There is no direct hardware support for this, but we do
3064         // have the ability to do an int64 multiplication and check the int64 overflow
3065         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3066         // registers, with the high 12 bits being sign-extended. We can do:
3067         //
3068         //     (a * (b << 12))
3069         //
3070         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3071         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3072         // multiplication overflows is identical to whether the 'a * b' 52-bit
3073         // multiplication overflows.
3074         //
3075         // In our nomenclature, this is:
3076         //
3077         //     strictInt52(a) * int52(b) => int52
3078         //
3079         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3080         // bits.
3081         //
3082         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3083         // we just do whatever is more convenient for op1 and have op2 do the
3084         // opposite. This ensures that we do at most one shift.
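        // As a concrete illustration (values chosen purely for exposition): with
        // a = 2^50 and b = 8, the 64-bit product a * (b << 12) = 2^65 overflows
        // int64 exactly because the 52-bit product a * b = 2^53 overflows int52;
        // conversely, any product within the int52 range, shifted left by 12,
        // still fits in int64, so the hardware overflow flag is precisely the
        // int52 overflow bit we need.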
3085
3086         SpeculateWhicheverInt52Operand op1(this, node->child1());
3087         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3088         GPRTemporary result(this);
3089         
3090         GPRReg op1GPR = op1.gpr();
3091         GPRReg op2GPR = op2.gpr();
3092         GPRReg resultGPR = result.gpr();
3093         
3094         m_jit.move(op1GPR, resultGPR);
3095         speculationCheck(
3096             Int52Overflow, JSValueRegs(), 0,
3097             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3098         
3099         if (shouldCheckNegativeZero(node->arithMode())) {
3100             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3101                 MacroAssembler::NonZero, resultGPR);
3102             speculationCheck(
3103                 NegativeZero, JSValueRegs(), 0,
3104                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3105             speculationCheck(
3106                 NegativeZero, JSValueRegs(), 0,
3107                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3108             resultNonZero.link(&m_jit);
3109         }
3110         
3111         int52Result(resultGPR, node);
3112         return;
3113     }
3114 #endif // USE(JSVALUE64)
3115         
3116     case DoubleRepUse: {
3117         SpeculateDoubleOperand op1(this, node->child1());
3118         SpeculateDoubleOperand op2(this, node->child2());
3119         FPRTemporary result(this, op1, op2);
3120         
3121         FPRReg reg1 = op1.fpr();
3122         FPRReg reg2 = op2.fpr();
3123         
3124         m_jit.mulDouble(reg1, reg2, result.fpr());
3125         
3126         doubleResult(result.fpr(), node);
3127         return;
3128     }
3129         
3130     default:
3131         RELEASE_ASSERT_NOT_REACHED();
3132         return;
3133     }
3134 }
3135
3136 void SpeculativeJIT::compileArithDiv(Node* node)
3137 {
3138     switch (node->binaryUseKind()) {
3139     case Int32Use: {
3140 #if CPU(X86) || CPU(X86_64)
3141         SpeculateInt32Operand op1(this, node->child1());
3142         SpeculateInt32Operand op2(this, node->child2());
3143         GPRTemporary eax(this, X86Registers::eax);
3144         GPRTemporary edx(this, X86Registers::edx);
3145         GPRReg op1GPR = op1.gpr();
3146         GPRReg op2GPR = op2.gpr();
3147     
3148         GPRReg op2TempGPR;
3149         GPRReg temp;
3150         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3151             op2TempGPR = allocate();
3152             temp = op2TempGPR;
3153         } else {
3154             op2TempGPR = InvalidGPRReg;
3155             if (op1GPR == X86Registers::eax)
3156                 temp = X86Registers::edx;
3157             else
3158                 temp = X86Registers::eax;
3159         }
3160     
3161         ASSERT(temp != op1GPR);
3162         ASSERT(temp != op2GPR);
3163     
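        // op2 + 1 is unsigned-above 1 exactly when op2 is neither 0 nor -1, so a
        // single unsigned compare filters out both problem denominators: division
        // by zero and the overflowing -2^31 / -1 case.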
3164         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3165     
3166         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3167     
3168         JITCompiler::JumpList done;
3169         if (shouldCheckOverflow(node->arithMode())) {
3170             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3171             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3172         } else {
3173             // This is the case where we convert the result to an int after we're done, and we
3174             // already know that the denominator is either -1 or 0. So, if the denominator is
3175             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3176             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3177             // are happy to fall through to a normal division, since we're just dividing
3178             // something by negative 1.
3179         
3180             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3181             m_jit.move(TrustedImm32(0), eax.gpr());
3182             done.append(m_jit.jump());
3183         
3184             notZero.link(&m_jit);
3185             JITCompiler::Jump notNeg2ToThe31 =
3186                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3187             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3188             done.append(m_jit.jump());
3189         
3190             notNeg2ToThe31.link(&m_jit);
3191         }
3192     
3193         safeDenominator.link(&m_jit);
3194     
3195         // If the user cares about negative zero, then speculate that we're not about
3196         // to produce negative zero.
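        // (For example, 0 / -5 evaluates to -0 in JS, so a zero numerator with a
        // negative denominator has to take the exit.)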
3197         if (shouldCheckNegativeZero(node->arithMode())) {
3198             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3199             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3200             numeratorNonZero.link(&m_jit);
3201         }
3202     
3203         if (op2TempGPR != InvalidGPRReg) {
3204             m_jit.move(op2GPR, op2TempGPR);
3205             op2GPR = op2TempGPR;
3206         }
3207             
3208         m_jit.move(op1GPR, eax.gpr());
3209         m_jit.assembler().cdq();
3210         m_jit.assembler().idivl_r(op2GPR);
3211             
3212         if (op2TempGPR != InvalidGPRReg)
3213             unlock(op2TempGPR);
3214
3215         // Check that there was no remainder. If there had been, then we'd be obligated to
3216         // produce a double result instead.
3217         if (shouldCheckOverflow(node->arithMode()))
3218             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3219         
3220         done.link(&m_jit);
3221         int32Result(eax.gpr(), node);
3222 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3223         SpeculateInt32Operand op1(this, node->child1());
3224         SpeculateInt32Operand op2(this, node->child2());
3225         GPRReg op1GPR = op1.gpr();
3226         GPRReg op2GPR = op2.gpr();
3227         GPRTemporary quotient(this);
3228         GPRTemporary multiplyAnswer(this);
3229
3230         // If the user cares about negative zero, then speculate that we're not about
3231         // to produce negative zero.
3232         if (shouldCheckNegativeZero(node->arithMode())) {
3233             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3234             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3235             numeratorNonZero.link(&m_jit);
3236         }
3237
3238         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3239
3240         // Check that there was no remainder. If there had been, then we'd be obligated to
3241         // produce a double result instead.
3242         if (shouldCheckOverflow(node->arithMode())) {
3243             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3244             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3245         }
3246
3247         int32Result(quotient.gpr(), node);
3248 #else
3249         RELEASE_ASSERT_NOT_REACHED();
3250 #endif
3251         break;
3252     }
3253         
3254     case DoubleRepUse: {
3255         SpeculateDoubleOperand op1(this, node->child1());
3256         SpeculateDoubleOperand op2(this, node->child2());
3257         FPRTemporary result(this, op1);
3258         
3259         FPRReg reg1 = op1.fpr();
3260         FPRReg reg2 = op2.fpr();
3261         m_jit.divDouble(reg1, reg2, result.fpr());
3262         
3263         doubleResult(result.fpr(), node);
3264         break;
3265     }
3266         
3267     default:
3268         RELEASE_ASSERT_NOT_REACHED();
3269         break;
3270     }
3271 }
3272
3273 void SpeculativeJIT::compileArithMod(Node* node)
3274 {
3275     switch (node->binaryUseKind()) {
3276     case Int32Use: {
3277         // In the fast path, the dividend value could be the final result
3278         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3279         SpeculateStrictInt32Operand op1(this, node->child1());
3280         
3281         if (node->child2()->isInt32Constant()) {
3282             int32_t divisor = node->child2()->asInt32();
3283             if (divisor > 1 && hasOneBitSet(divisor)) {
3284                 unsigned logarithm = WTF::fastLog2(divisor);
3285                 GPRReg dividendGPR = op1.gpr();
3286                 GPRTemporary result(this);
3287                 GPRReg resultGPR = result.gpr();
3288
3289                 // This is what LLVM generates. It's pretty crazy. Here's my
3290                 // attempt at understanding it.
3291                 
3292                 // First, compute either divisor - 1, or 0, depending on whether
3293                 // the dividend is negative:
3294                 //
3295                 // If dividend < 0:  resultGPR = divisor - 1
3296                 // If dividend >= 0: resultGPR = 0
3297                 m_jit.move(dividendGPR, resultGPR);
3298                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3299                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3300                 
3301                 // Add in the dividend, so that:
3302                 //
3303                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3304                 // If dividend >= 0: resultGPR = dividend
3305                 m_jit.add32(dividendGPR, resultGPR);
3306                 
3307                 // Mask so as to only get the *high* bits. This rounds down
3308                 // (towards negative infinity) resultGPR to the nearest multiple
3309                 // of divisor, so that:
3310                 //
3311                 // If dividend < 0:  resultGPR = divisor * floor((dividend + divisor - 1) / divisor)
3312                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3313                 //
3314                 // Note that this can be simplified to:
3315                 //
3316                 // If dividend < 0:  resultGPR = divisor * ceil(dividend / divisor)
3317                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3318                 //
3319                 // Note that if the dividend is negative, resultGPR will also be negative.
3320                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3321                 // zero, because of how things are conditionalized.
3322                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3323                 
3324                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3325                 //
3326                 // resultGPR = dividendGPR - resultGPR
3327                 m_jit.neg32(resultGPR);
3328                 m_jit.add32(dividendGPR, resultGPR);
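                     // Worked example (values chosen purely for illustration):
                     // dividend = -5, divisor = 4. The shifts above produce divisor - 1 = 3,
                     // adding the dividend gives -2, masking with -4 gives -4, and the
                     // final negate-and-add yields -5 - (-4) = -1, matching -5 % 4 in JS.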
3329                 
3330                 if (shouldCheckNegativeZero(node->arithMode())) {
3331                     // Check that we're not about to create negative zero.
3332                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3333                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3334                     numeratorPositive.link(&m_jit);
3335                 }
3336
3337                 int32Result(resultGPR, node);
3338                 return;
3339             }
3340         }
3341         
3342 #if CPU(X86) || CPU(X86_64)
3343         if (node->child2()->isInt32Constant()) {
3344             int32_t divisor = node->child2()->asInt32();
3345             if (divisor && divisor != -1) {
3346                 GPRReg op1Gpr = op1.gpr();
3347
3348                 GPRTemporary eax(this, X86Registers::eax);
3349                 GPRTemporary edx(this, X86Registers::edx);
3350                 GPRTemporary scratch(this);
3351                 GPRReg scratchGPR = scratch.gpr();
3352
3353                 GPRReg op1SaveGPR;
3354                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3355                     op1SaveGPR = allocate();
3356                     ASSERT(op1Gpr != op1SaveGPR);
3357                     m_jit.move(op1Gpr, op1SaveGPR);
3358                 } else
3359                     op1SaveGPR = op1Gpr;
3360                 ASSERT(op1SaveGPR != X86Registers::eax);
3361                 ASSERT(op1SaveGPR != X86Registers::edx);
3362
3363                 m_jit.move(op1Gpr, eax.gpr());
3364                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3365                 m_jit.assembler().cdq();
3366                 m_jit.assembler().idivl_r(scratchGPR);
3367                 if (shouldCheckNegativeZero(node->arithMode())) {
3368                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3369                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3370                     numeratorPositive.link(&m_jit);
3371                 }
3372             
3373                 if (op1SaveGPR != op1Gpr)
3374                     unlock(op1SaveGPR);
3375
3376                 int32Result(edx.gpr(), node);
3377                 return;
3378             }
3379         }
3380 #endif
3381
3382         SpeculateInt32Operand op2(this, node->child2());
3383 #if CPU(X86) || CPU(X86_64)
3384         GPRTemporary eax(this, X86Registers::eax);
3385         GPRTemporary edx(this, X86Registers::edx);
3386         GPRReg op1GPR = op1.gpr();
3387         GPRReg op2GPR = op2.gpr();
3388     
3389         GPRReg op2TempGPR;
3390         GPRReg temp;
3391         GPRReg op1SaveGPR;
3392     
3393         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3394             op2TempGPR = allocate();
3395             temp = op2TempGPR;
3396         } else {
3397             op2TempGPR = InvalidGPRReg;
3398             if (op1GPR == X86Registers::eax)
3399                 temp = X86Registers::edx;
3400             else
3401                 temp = X86Registers::eax;
3402         }
3403     
3404         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3405             op1SaveGPR = allocate();
3406             ASSERT(op1GPR != op1SaveGPR);
3407             m_jit.move(op1GPR, op1SaveGPR);
3408         } else
3409             op1SaveGPR = op1GPR;
3410     
3411         ASSERT(temp != op1GPR);
3412         ASSERT(temp != op2GPR);
3413         ASSERT(op1SaveGPR != X86Registers::eax);
3414         ASSERT(op1SaveGPR != X86Registers::edx);
3415     
3416         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3417     
3418         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3419     
3420         JITCompiler::JumpList done;
3421         
3422         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3423         // separate case for that. But it probably doesn't matter so much.
3424         if (shouldCheckOverflow(node->arithMode())) {
3425             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3426             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3427         } else {
3428             // This is the case where we convert the result to an int after we're done, and we
3429             // already know that the denominator is either -1 or 0. So, if the denominator is
3430             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3431             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3432             // happy to fall through to a normal division, since we're just dividing something
3433             // by negative 1.
3434         
3435             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3436             m_jit.move(TrustedImm32(0), edx.gpr());
3437             done.append(m_jit.jump());
3438         
3439             notZero.link(&m_jit);
3440             JITCompiler::Jump notNeg2ToThe31 =
3441                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3442             m_jit.move(TrustedImm32(0), edx.gpr());
3443             done.append(m_jit.jump());
3444         
3445             notNeg2ToThe31.link(&m_jit);
3446         }
3447         
3448         safeDenominator.link(&m_jit);
3449             
3450         if (op2TempGPR != InvalidGPRReg) {
3451             m_jit.move(op2GPR, op2TempGPR);
3452             op2GPR = op2TempGPR;
3453         }
3454             
3455         m_jit.move(op1GPR, eax.gpr());
3456         m_jit.assembler().cdq();
3457         m_jit.assembler().idivl_r(op2GPR);
3458             
3459         if (op2TempGPR != InvalidGPRReg)
3460             unlock(op2TempGPR);
3461
3462         // Check that we're not about to create negative zero.
3463         if (shouldCheckNegativeZero(node->arithMode())) {
3464             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3465             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3466             numeratorPositive.link(&m_jit);
3467         }
3468     
3469         if (op1SaveGPR != op1GPR)
3470             unlock(op1SaveGPR);
3471             
3472         done.link(&m_jit);
3473         int32Result(edx.gpr(), node);
3474
3475 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3476         GPRTemporary temp(this);
3477         GPRTemporary quotientThenRemainder(this);
3478         GPRTemporary multiplyAnswer(this);
3479         GPRReg dividendGPR = op1.gpr();
3480         GPRReg divisorGPR = op2.gpr();
3481         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3482         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3483
3484         JITCompiler::JumpList done;
3485     
3486         if (shouldCheckOverflow(node->arithMode()))
3487             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3488         else {
3489             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3490             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3491             done.append(m_jit.jump());
3492             denominatorNotZero.link(&m_jit);
3493         }
3494
3495         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3496         // FIXME: It seems like there are cases where we don't need this? What if we have
3497         // arithMode() == Arith::Unchecked?
3498         // https://bugs.webkit.org/show_bug.cgi?id=126444
3499         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3500 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3501         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3502 #else
3503         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3504 #endif
3505
3506         // If the user cares about negative zero, then speculate that we're not about
3507         // to produce negative zero.
3508         if (shouldCheckNegativeZero(node->arithMode())) {
3509             // Check that we're not about to create negative zero.
3510             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3511             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3512             numeratorPositive.link(&m_jit);
3513         }
3514
3515         done.link(&m_jit);
3516
3517         int32Result(quotientThenRemainderGPR, node);
3518 #else // not an architecture that can do integer division
3519         RELEASE_ASSERT_NOT_REACHED();
3520 #endif
3521         return;
3522     }
3523         
3524     case DoubleRepUse: {
3525         SpeculateDoubleOperand op1(this, node->child1());
3526         SpeculateDoubleOperand op2(this, node->child2());
3527         
3528         FPRReg op1FPR = op1.fpr();
3529         FPRReg op2FPR = op2.fpr();
3530         
3531         flushRegisters();
3532         
3533         FPRResult result(this);
3534         
3535         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3536         
3537         doubleResult(result.fpr(), node);
3538         return;
3539     }
3540         
3541     default:
3542         RELEASE_ASSERT_NOT_REACHED();
3543         return;
3544     }
3545 }
3546
3547 void SpeculativeJIT::compileArithRound(Node* node)
3548 {
3549     ASSERT(node->child1().useKind() == DoubleRepUse);
3550
3551     SpeculateDoubleOperand value(this, node->child1());
3552     FPRReg valueFPR = value.fpr();
3553
3554     if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
3555         FPRTemporary oneHalf(this);
3556         GPRTemporary roundedResultAsInt32(this);
3557         FPRReg oneHalfFPR = oneHalf.fpr();
3558         GPRReg resultGPR = roundedResultAsInt32.gpr();
3559
3560         static const double halfConstant = 0.5;
3561         m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
3562         m_jit.addDouble(valueFPR, oneHalfFPR);
3563
3564         JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
3565         speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
3566         int32Result(resultGPR, node);
3567         return;
3568     }
3569
3570     flushRegisters();
3571     FPRResult roundedResultAsDouble(this);
3572     FPRReg resultFPR = roundedResultAsDouble.fpr();
3573     callOperation(jsRound, resultFPR, valueFPR);
3574     if (producesInteger(node->arithRoundingMode())) {
3575         GPRTemporary roundedResultAsInt32(this);
3576         FPRTemporary scratch(this);
3577         FPRReg scratchFPR = scratch.fpr();
3578         GPRReg resultGPR = roundedResultAsInt32.gpr();
3579         JITCompiler::JumpList failureCases;
3580         m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
3581         speculationCheck(Overflow, JSValueRegs(), node, failureCases);
3582
3583         int32Result(resultGPR, node);
3584     } else
3585         doubleResult(resultFPR, node);
3586 }
3587
3588 void SpeculativeJIT::compileArithSqrt(Node* node)
3589 {
3590     SpeculateDoubleOperand op1(this, node->child1());
3591     FPRReg op1FPR = op1.fpr();
3592
3593     if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3594         flushRegisters();
3595         FPRResult result(this);
3596         callOperation(sqrt, result.fpr(), op1FPR);
3597         doubleResult(result.fpr(), node);
3598     } else {
3599         FPRTemporary result(this, op1);
3600         m_jit.sqrtDouble(op1.fpr(), result.fpr());
3601         doubleResult(result.fpr(), node);
3602     }
3603 }
3604
3605 // For small positive integers, it is worth doing a tiny inline loop to exponentiate the base.
3606 // Every register is clobbered by this helper.
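     // The loop below is exponentiation by squaring: on each pass, if the low bit
     // of yOperand is set, the accumulated result is multiplied by the current
     // power of xOperand; xOperand is then squared and yOperand shifted right.
     // For example, with y = 5 (binary 101) the result picks up x^1 and x^4 over
     // three iterations, giving x^5.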
3607 static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
3608 {
3609     MacroAssembler::JumpList skipFastPath;
3610     skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
3611     skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
3612
3613     static const double oneConstant = 1.0;
3614     assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
3615
3616     MacroAssembler::Label startLoop(assembler.label());
3617     MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
3618     assembler.mulDouble(xOperand, result);
3619     exponentIsEven.link(&assembler);
3620     assembler.mulDouble(xOperand, xOperand);
3621     assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
3622     assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
3623
3624     MacroAssembler::Jump skipSlowPath = assembler.jump();
3625     skipFastPath.link(&assembler);
3626
3627     return skipSlowPath;
3628 }
3629
3630 void SpeculativeJIT::compileArithPow(Node* node)
3631 {
3632     if (node->child2().useKind() == Int32Use) {
3633         SpeculateDoubleOperand xOperand(this, node->child1());
3634         SpeculateInt32Operand yOperand(this, node->child2());
3635         FPRReg xOperandfpr = xOperand.fpr();
3636         GPRReg yOperandGpr = yOperand.gpr();
3637         FPRTemporary yOperandfpr(this);
3638
3639         flushRegisters();
3640
3641         FPRResult result(this);
3642         FPRReg resultFpr = result.fpr();
3643
3644         FPRTemporary xOperandCopy(this);
3645         FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3646         m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3647
3648         GPRTemporary counter(this);
3649         GPRReg counterGpr = counter.gpr();
3650         m_jit.move(yOperandGpr, counterGpr);
3651
3652         MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
3653         m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr());
3654         callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr());
3655
3656         skipFallback.link(&m_jit);
3657         doubleResult(resultFpr, node);
3658         return;
3659     }
3660
3661     SpeculateDoubleOperand xOperand(this, node->child1());
3662     SpeculateDoubleOperand yOperand(this, node->child2());
3663     FPRReg xOperandfpr = xOperand.fpr();
3664     FPRReg yOperandfpr = yOperand.fpr();
3665
3666     flushRegisters();
3667
3668     FPRResult result(this);
3669     FPRReg resultFpr = result.fpr();
3670
3671     FPRTemporary xOperandCopy(this);
3672     FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3673
3674     FPRTemporary scratch(this);
3675     FPRReg scratchFpr = scratch.fpr();
3676
3677     GPRTemporary yOperandInteger(this);
3678     GPRReg yOperandIntegerGpr = yOperandInteger.gpr();
3679     MacroAssembler::JumpList failedExponentConversionToInteger;
3680     m_jit.branchConvertDoubleToInt32(yOperandfpr, yOperandIntegerGpr, failedExponentConversionToInteger, scratchFpr, false);
3681
3682     m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3683     MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, yOperandInteger.gpr(), resultFpr);
3684     failedExponentConversionToInteger.link(&m_jit);
3685
3686     callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr);
3687     skipFallback.link(&m_jit);
3688     doubleResult(resultFpr, node);
3689 }
3690
3691 void SpeculativeJIT::compileArithLog(Node* node)
3692 {
3693     SpeculateDoubleOperand op1(this, node->child1());
3694     FPRReg op1FPR = op1.fpr();
3695     flushRegisters();
3696     FPRResult result(this);
3697     callOperation(log, result.fpr(), op1FPR);
3698     doubleResult(result.fpr(), node);
3699 }
3700
3701 // Returns true if the compare is fused with a subsequent branch.
3702 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3703 {
3704     if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3705         return true;
3706
3707     if (node->isBinaryUseKind(Int32Use)) {
3708         compileInt32Compare(node, condition);
3709         return false;
3710     }
3711     
3712 #if USE(JSVALUE64)
3713     if (node->isBinaryUseKind(Int52RepUse)) {
3714         compileInt52Compare(node, condition);
3715         return false;
3716     }
3717 #endif // USE(JSVALUE64)
3718     
3719     if (node->isBinaryUseKind(DoubleRepUse)) {
3720         compileDoubleCompare(node, doubleCondition);
3721         return false;
3722     }
3723     
3724     if (node->op() == CompareEq) {
3725         if (node->isBinaryUseKind(StringUse)) {
3726             compileStringEquality(node);
3727             return false;
3728         }
3729         
3730         if (node->isBinaryUseKind(BooleanUse)) {
3731             compileBooleanCompare(node, condition);
3732             return false;
3733         }
3734
3735         if (node->isBinaryUseKind(StringIdentUse)) {
3736             compileStringIdentEquality(node);
3737             return false;
3738         }
3739         
3740         if (node->isBinaryUseKind(ObjectUse)) {
3741             compileObjectEquality(node);
3742             return false;
3743         }
3744         
3745         if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
3746             compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
3747             return false;
3748         }
3749         
3750         if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
3751             compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
3752             return false;
3753         }
3754     }
3755     
3756     nonSpeculativeNonPeepholeCompare(node, condition, operation);
3757     return false;
3758 }
3759
3760 bool SpeculativeJIT::compileStrictEq(Node* node)
3761 {
3762     if (node->isBinaryUseKind(BooleanUse)) {
3763         unsigned branchIndexInBlock = detectPeepHoleBranch();
3764         if (branchIndexInBlock != UINT_MAX) {
3765             Node* branchNode = m_block->at(branchIndexInBlock);
3766             compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
3767             use(node->child1());
3768             use(node->child2());
3769             m_indexInBlock = branchIndexInBlock;
3770             m_currentNode = branchNode;
3771             return true;
3772         }
3773         compileBooleanCompare(node, MacroAssembler::Equal);
3774         return false;
3775     }
3776
3777     if (node->isBinaryUseKind(Int32Use)) {
3778         unsigned branchIndexInBlock = detectPeepHoleBranch();
3779         if (branchIndexInBlock != UINT_MAX) {
3780             Node* branchNode = m_block->at(branchIndexInBlock);
3781             compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
3782             use(node->child1());
3783             use(node->child2());
3784             m_indexInBlock = branchIndexInBlock;
3785             m_currentNode = branchNode;
3786             return true;
3787         }
3788         compileInt32Compare(node, MacroAssembler::Equal);
3789         return false;
3790     }
3791     
3792 #if USE(JSVALUE64)   
3793     if (node->isBinaryUseKind(Int52RepUse)) {
3794         unsigned branchIndexInBlock = detectPeepHoleBranch();
3795         if (branchIndexInBlock != UINT_MAX) {
3796             Node* branchNode = m_block->at(branchIndexInBlock);
3797             compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
3798             use(node->child1());
3799             use(node->child2());
3800             m_indexInBlock = branchIndexInBlock;
3801             m_currentNode = branchNode;
3802             return true;
3803         }