r190735 Caused us to maybe trample the base's tag-GPR on 32-bit inline cache when...
[WebKit-https.git] Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITSubGenerator.h"
42 #include "JSArrowFunction.h"
43 #include "JSCInlines.h"
44 #include "JSEnvironmentRecord.h"
45 #include "JSLexicalEnvironment.h"
46 #include "LinkBuffer.h"
47 #include "ScopedArguments.h"
48 #include "ScratchRegisterAllocator.h"
49 #include "WriteBarrierBuffer.h"
50 #include <wtf/MathExtras.h>
51
52 namespace JSC { namespace DFG {
53
54 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
55     : m_compileOkay(true)
56     , m_jit(jit)
57     , m_currentNode(0)
58     , m_lastGeneratedNode(LastNodeType)
59     , m_indexInBlock(0)
60     , m_generationInfo(m_jit.graph().frameRegisterCount())
61     , m_state(m_jit.graph())
62     , m_interpreter(m_jit.graph(), m_state)
63     , m_stream(&jit.jitCode()->variableEventStream)
64     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
65 {
66 }
67
68 SpeculativeJIT::~SpeculativeJIT()
69 {
70 }
71
72 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
73 {
74     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
75     
76     GPRTemporary scratch(this);
77     GPRTemporary scratch2(this);
78     GPRReg scratchGPR = scratch.gpr();
79     GPRReg scratch2GPR = scratch2.gpr();
80     
81     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
82     
83     JITCompiler::JumpList slowCases;
84     
85     slowCases.append(
86         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
87     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
88     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
89     
90     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
91     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
92     
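    // The tail of the vector past numElements is filled with PNaN below; for double
    // arrays that NaN bit pattern is what the engine treats as a hole, so the
    // uninitialized slots never read back as real values.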
93     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
94 #if USE(JSVALUE64)
95         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
96         for (unsigned i = numElements; i < vectorLength; ++i)
97             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
98 #else
99         EncodedValueDescriptor value;
100         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
101         for (unsigned i = numElements; i < vectorLength; ++i) {
102             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
103             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
104         }
105 #endif
106     }
107     
108     // I want a slow path that also loads out the storage pointer, and that's
109     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
110     // of work for a very small piece of functionality. :-/
111     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
112         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
113         structure, numElements));
114 }
115
116 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
117 {
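    // For an inline call frame with a fixed argument count the length is a
    // compile-time constant; otherwise it has to be loaded from the frame's
    // ArgumentCount slot (and decremented if |this| should be excluded).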
118     if (inlineCallFrame && !inlineCallFrame->isVarargs())
119         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
120     else {
121         VirtualRegister argumentCountRegister;
122         if (!inlineCallFrame)
123             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
124         else
125             argumentCountRegister = inlineCallFrame->argumentCountRegister;
126         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
127         if (!includeThis)
128             m_jit.sub32(TrustedImm32(1), lengthGPR);
129     }
130 }
131
132 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
133 {
134     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
135 }
136
137 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
138 {
139     if (origin.inlineCallFrame) {
140         if (origin.inlineCallFrame->isClosureCall) {
141             m_jit.loadPtr(
142                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
143                 calleeGPR);
144         } else {
145             m_jit.move(
146                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
147                 calleeGPR);
148         }
149     } else
150         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
151 }
152
153 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
154 {
155     m_jit.addPtr(
156         TrustedImm32(
157             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
158         GPRInfo::callFrameRegister, startGPR);
159 }
160
161 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
162 {
163     if (!doOSRExitFuzzing())
164         return MacroAssembler::Jump();
165     
166     MacroAssembler::Jump result;
167     
168     m_jit.pushToSave(GPRInfo::regT0);
169     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
170     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
171     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
172     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
173     unsigned at = Options::fireOSRExitFuzzAt();
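    // If OSR exit fuzzing is configured, compare the running check count against the
    // configured point: with fireOSRExitFuzzAtOrAfter the forced exit fires once the
    // count reaches the threshold (Below means "still ok"); with fireOSRExitFuzzAt it
    // fires only when the count equals it exactly.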
174     if (at || atOrAfter) {
175         unsigned threshold;
176         MacroAssembler::RelationalCondition condition;
177         if (atOrAfter) {
178             threshold = atOrAfter;
179             condition = MacroAssembler::Below;
180         } else {
181             threshold = at;
182             condition = MacroAssembler::NotEqual;
183         }
184         MacroAssembler::Jump ok = m_jit.branch32(
185             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
186         m_jit.popToRestore(GPRInfo::regT0);
187         result = m_jit.jump();
188         ok.link(&m_jit);
189     }
190     m_jit.popToRestore(GPRInfo::regT0);
191     
192     return result;
193 }
194
195 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
196 {
197     if (!m_compileOkay)
198         return;
199     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
200     if (fuzzJump.isSet()) {
201         JITCompiler::JumpList jumpsToFail;
202         jumpsToFail.append(fuzzJump);
203         jumpsToFail.append(jumpToFail);
204         m_jit.appendExitInfo(jumpsToFail);
205     } else
206         m_jit.appendExitInfo(jumpToFail);
207     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
208 }
209
210 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
211 {
212     if (!m_compileOkay)
213         return;
214     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
215     if (fuzzJump.isSet()) {
216         JITCompiler::JumpList myJumpsToFail;
217         myJumpsToFail.append(jumpsToFail);
218         myJumpsToFail.append(fuzzJump);
219         m_jit.appendExitInfo(myJumpsToFail);
220     } else
221         m_jit.appendExitInfo(jumpsToFail);
222     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
223 }
224
225 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
226 {
227     if (!m_compileOkay)
228         return OSRExitJumpPlaceholder();
229     unsigned index = m_jit.jitCode()->osrExit.size();
230     m_jit.appendExitInfo();
231     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
232     return OSRExitJumpPlaceholder(index);
233 }
234
235 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
236 {
237     return speculationCheck(kind, jsValueSource, nodeUse.node());
238 }
239
240 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
241 {
242     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
243 }
244
245 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
246 {
247     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
248 }
249
250 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
251 {
252     if (!m_compileOkay)
253         return;
254     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
255     m_jit.appendExitInfo(jumpToFail);
256     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
257 }
258
259 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
260 {
261     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
262 }
263
264 void SpeculativeJIT::emitInvalidationPoint(Node* node)
265 {
266     if (!m_compileOkay)
267         return;
268     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
269     m_jit.jitCode()->appendOSRExit(OSRExit(
270         UncountableInvalidation, JSValueSource(),
271         m_jit.graph().methodOfGettingAValueProfileFor(node),
272         this, m_stream->size()));
273     info.m_replacementSource = m_jit.watchpointLabel();
274     ASSERT(info.m_replacementSource.isSet());
275     noResult(node);
276 }
277
278 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
279 {
280     if (!m_compileOkay)
281         return;
282     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
283     m_compileOkay = false;
284     if (verboseCompilationEnabled())
285         dataLog("Bailing compilation.\n");
286 }
287
288 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
289 {
290     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
291 }
292
293 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
294 {
295     ASSERT(needsTypeCheck(edge, typesPassedThrough));
296     m_interpreter.filter(edge, typesPassedThrough);
297     speculationCheck(BadType, source, edge.node(), jumpToFail);
298 }
299
300 RegisterSet SpeculativeJIT::usedRegisters()
301 {
302     RegisterSet result;
303     
304     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
305         GPRReg gpr = GPRInfo::toRegister(i);
306         if (m_gprs.isInUse(gpr))
307             result.set(gpr);
308     }
309     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
310         FPRReg fpr = FPRInfo::toRegister(i);
311         if (m_fprs.isInUse(fpr))
312             result.set(fpr);
313     }
314     
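    // The registers that stub code may never use are also reported as in use,
    // presumably so that inline-cache code consuming this set never picks them
    // as scratch registers.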
315     result.merge(RegisterSet::stubUnavailableRegisters());
316     
317     return result;
318 }
319
320 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
321 {
322     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
323 }
324
325 void SpeculativeJIT::runSlowPathGenerators()
326 {
327     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
328         m_slowPathGenerators[i]->generate(this);
329 }
330
331 // On Windows we need to wrap fmod; on other platforms we can call it directly.
332 // On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
333 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
334 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
335 {
336     return fmod(x, y);
337 }
338 #else
339 #define fmodAsDFGOperation fmod
340 #endif
341
342 void SpeculativeJIT::clearGenerationInfo()
343 {
344     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
345         m_generationInfo[i] = GenerationInfo();
346     m_gprs = RegisterBank<GPRInfo>();
347     m_fprs = RegisterBank<FPRInfo>();
348 }
349
350 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
351 {
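    // Work out how to temporarily save ("spill") the value that 'source' holds for this
    // virtual register around a call, and how to restore ("fill") it afterwards, based on
    // the value's current data format; constants are simply rematerialized rather than
    // reloaded from the stack.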
352     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
353     Node* node = info.node();
354     DataFormat registerFormat = info.registerFormat();
355     ASSERT(registerFormat != DataFormatNone);
356     ASSERT(registerFormat != DataFormatDouble);
357         
358     SilentSpillAction spillAction;
359     SilentFillAction fillAction;
360         
361     if (!info.needsSpill())
362         spillAction = DoNothingForSpill;
363     else {
364 #if USE(JSVALUE64)
365         ASSERT(info.gpr() == source);
366         if (registerFormat == DataFormatInt32)
367             spillAction = Store32Payload;
368         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
369             spillAction = StorePtr;
370         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
371             spillAction = Store64;
372         else {
373             ASSERT(registerFormat & DataFormatJS);
374             spillAction = Store64;
375         }
376 #elif USE(JSVALUE32_64)
377         if (registerFormat & DataFormatJS) {
378             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
379             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
380         } else {
381             ASSERT(info.gpr() == source);
382             spillAction = Store32Payload;
383         }
384 #endif
385     }
386         
387     if (registerFormat == DataFormatInt32) {
388         ASSERT(info.gpr() == source);
389         ASSERT(isJSInt32(info.registerFormat()));
390         if (node->hasConstant()) {
391             ASSERT(node->isInt32Constant());
392             fillAction = SetInt32Constant;
393         } else
394             fillAction = Load32Payload;
395     } else if (registerFormat == DataFormatBoolean) {
396 #if USE(JSVALUE64)
397         RELEASE_ASSERT_NOT_REACHED();
398 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
399         fillAction = DoNothingForFill;
400 #endif
401 #elif USE(JSVALUE32_64)
402         ASSERT(info.gpr() == source);
403         if (node->hasConstant()) {
404             ASSERT(node->isBooleanConstant());
405             fillAction = SetBooleanConstant;
406         } else
407             fillAction = Load32Payload;
408 #endif
409     } else if (registerFormat == DataFormatCell) {
410         ASSERT(info.gpr() == source);
411         if (node->hasConstant()) {
412             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
413             node->asCell(); // To get the assertion.
414             fillAction = SetCellConstant;
415         } else {
416 #if USE(JSVALUE64)
417             fillAction = LoadPtr;
418 #else
419             fillAction = Load32Payload;
420 #endif
421         }
422     } else if (registerFormat == DataFormatStorage) {
423         ASSERT(info.gpr() == source);
424         fillAction = LoadPtr;
425     } else if (registerFormat == DataFormatInt52) {
426         if (node->hasConstant())
427             fillAction = SetInt52Constant;
428         else if (info.spillFormat() == DataFormatInt52)
429             fillAction = Load64;
430         else if (info.spillFormat() == DataFormatStrictInt52)
431             fillAction = Load64ShiftInt52Left;
432         else if (info.spillFormat() == DataFormatNone)
433             fillAction = Load64;
434         else {
435             RELEASE_ASSERT_NOT_REACHED();
436 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
437             fillAction = Load64; // Make GCC happy.
438 #endif
439         }
440     } else if (registerFormat == DataFormatStrictInt52) {
441         if (node->hasConstant())
442             fillAction = SetStrictInt52Constant;
443         else if (info.spillFormat() == DataFormatInt52)
444             fillAction = Load64ShiftInt52Right;
445         else if (info.spillFormat() == DataFormatStrictInt52)
446             fillAction = Load64;
447         else if (info.spillFormat() == DataFormatNone)
448             fillAction = Load64;
449         else {
450             RELEASE_ASSERT_NOT_REACHED();
451 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
452             fillAction = Load64; // Make GCC happy.
453 #endif
454         }
455     } else {
456         ASSERT(registerFormat & DataFormatJS);
457 #if USE(JSVALUE64)
458         ASSERT(info.gpr() == source);
459         if (node->hasConstant()) {
460             if (node->isCellConstant())
461                 fillAction = SetTrustedJSConstant;
462             else
463                 fillAction = SetJSConstant;
464         } else if (info.spillFormat() == DataFormatInt32) {
465             ASSERT(registerFormat == DataFormatJSInt32);
466             fillAction = Load32PayloadBoxInt;
467         } else
468             fillAction = Load64;
469 #else
470         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
471         if (node->hasConstant())
472             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
473         else if (info.payloadGPR() == source)
474             fillAction = Load32Payload;
475         else { // Fill the Tag
476             switch (info.spillFormat()) {
477             case DataFormatInt32:
478                 ASSERT(registerFormat == DataFormatJSInt32);
479                 fillAction = SetInt32Tag;
480                 break;
481             case DataFormatCell:
482                 ASSERT(registerFormat == DataFormatJSCell);
483                 fillAction = SetCellTag;
484                 break;
485             case DataFormatBoolean:
486                 ASSERT(registerFormat == DataFormatJSBoolean);
487                 fillAction = SetBooleanTag;
488                 break;
489             default:
490                 fillAction = Load32Tag;
491                 break;
492             }
493         }
494 #endif
495     }
496         
497     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
498 }
499     
500 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
501 {
502     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
503     Node* node = info.node();
504     ASSERT(info.registerFormat() == DataFormatDouble);
505
506     SilentSpillAction spillAction;
507     SilentFillAction fillAction;
508         
509     if (!info.needsSpill())
510         spillAction = DoNothingForSpill;
511     else {
512         ASSERT(!node->hasConstant());
513         ASSERT(info.spillFormat() == DataFormatNone);
514         ASSERT(info.fpr() == source);
515         spillAction = StoreDouble;
516     }
517         
518 #if USE(JSVALUE64)
519     if (node->hasConstant()) {
520         node->asNumber(); // To get the assertion.
521         fillAction = SetDoubleConstant;
522     } else {
523         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
524         fillAction = LoadDouble;
525     }
526 #elif USE(JSVALUE32_64)
527     ASSERT(info.registerFormat() == DataFormatDouble);
528     if (node->hasConstant()) {
529         node->asNumber(); // To get the assertion.
530         fillAction = SetDoubleConstant;
531     } else
532         fillAction = LoadDouble;
533 #endif
534
535     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
536 }
537     
538 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
539 {
540     switch (plan.spillAction()) {
541     case DoNothingForSpill:
542         break;
543     case Store32Tag:
544         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
545         break;
546     case Store32Payload:
547         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
548         break;
549     case StorePtr:
550         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
551         break;
552 #if USE(JSVALUE64)
553     case Store64:
554         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
555         break;
556 #endif
557     case StoreDouble:
558         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
559         break;
560     default:
561         RELEASE_ASSERT_NOT_REACHED();
562     }
563 }
564     
565 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
566 {
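    // canTrample is a scratch GPR the caller allows us to clobber; on 64-bit it is only
    // needed to materialize a double constant (see SetDoubleConstant below), which is
    // why the 32-bit build ignores it.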
567 #if USE(JSVALUE32_64)
568     UNUSED_PARAM(canTrample);
569 #endif
570     switch (plan.fillAction()) {
571     case DoNothingForFill:
572         break;
573     case SetInt32Constant:
574         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
575         break;
576 #if USE(JSVALUE64)
577     case SetInt52Constant:
578         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
579         break;
580     case SetStrictInt52Constant:
581         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
582         break;
583 #endif // USE(JSVALUE64)
584     case SetBooleanConstant:
585         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
586         break;
587     case SetCellConstant:
588         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
589         break;
590 #if USE(JSVALUE64)
591     case SetTrustedJSConstant:
592         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
593         break;
594     case SetJSConstant:
595         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
596         break;
597     case SetDoubleConstant:
598         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
599         m_jit.move64ToDouble(canTrample, plan.fpr());
600         break;
601     case Load32PayloadBoxInt:
602         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
603         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
604         break;
605     case Load32PayloadConvertToInt52:
606         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
607         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
608         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
609         break;
610     case Load32PayloadSignExtend:
611         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
612         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
613         break;
614 #else
615     case SetJSConstantTag:
616         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
617         break;
618     case SetJSConstantPayload:
619         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
620         break;
621     case SetInt32Tag:
622         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
623         break;
624     case SetCellTag:
625         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
626         break;
627     case SetBooleanTag:
628         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
629         break;
630     case SetDoubleConstant:
631         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
632         break;
633 #endif
634     case Load32Tag:
635         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
636         break;
637     case Load32Payload:
638         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
639         break;
640     case LoadPtr:
641         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
642         break;
643 #if USE(JSVALUE64)
644     case Load64:
645         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
646         break;
647     case Load64ShiftInt52Right:
648         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
649         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
650         break;
651     case Load64ShiftInt52Left:
652         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
653         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
654         break;
655 #endif
656     case LoadDouble:
657         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
658         break;
659     default:
660         RELEASE_ASSERT_NOT_REACHED();
661     }
662 }
663     
664 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
665 {
666     switch (arrayMode.arrayClass()) {
667     case Array::OriginalArray: {
668         CRASH();
669 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
670         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
671         return result;
672 #endif
673     }
674         
675     case Array::Array:
676         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
677         return m_jit.branch32(
678             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
679         
680     case Array::NonArray:
681     case Array::OriginalNonArray:
682         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
683         return m_jit.branch32(
684             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
685         
686     case Array::PossiblyArray:
687         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
688         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
689     }
690     
691     RELEASE_ASSERT_NOT_REACHED();
692     return JITCompiler::Jump();
693 }
694
695 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
696 {
697     JITCompiler::JumpList result;
698     
699     switch (arrayMode.type()) {
700     case Array::Int32:
701         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
702
703     case Array::Double:
704         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
705
706     case Array::Contiguous:
707         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
708
709     case Array::Undecided:
710         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
711
712     case Array::ArrayStorage:
713     case Array::SlowPutArrayStorage: {
714         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
715         
716         if (arrayMode.isJSArray()) {
717             if (arrayMode.isSlowPut()) {
718                 result.append(
719                     m_jit.branchTest32(
720                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
721                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
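                // Subtracting ArrayStorageShape and branching on an unsigned "Above"
                // compare rejects, in a single branch, every shape outside the
                // ArrayStorageShape..SlowPutArrayStorageShape range.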
722                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
723                 result.append(
724                     m_jit.branch32(
725                         MacroAssembler::Above, tempGPR,
726                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
727                 break;
728             }
729             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
730             result.append(
731                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
732             break;
733         }
734         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
735         if (arrayMode.isSlowPut()) {
736             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
737             result.append(
738                 m_jit.branch32(
739                     MacroAssembler::Above, tempGPR,
740                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
741             break;
742         }
743         result.append(
744             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
745         break;
746     }
747     default:
748         CRASH();
749         break;
750     }
751     
752     return result;
753 }
754
755 void SpeculativeJIT::checkArray(Node* node)
756 {
757     ASSERT(node->arrayMode().isSpecific());
758     ASSERT(!node->arrayMode().doesConversion());
759     
760     SpeculateCellOperand base(this, node->child1());
761     GPRReg baseReg = base.gpr();
762     
763     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
764         noResult(m_currentNode);
765         return;
766     }
767     
768     const ClassInfo* expectedClassInfo = 0;
769     
770     switch (node->arrayMode().type()) {
771     case Array::AnyTypedArray:
772     case Array::String:
773         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
774         break;
775     case Array::Int32:
776     case Array::Double:
777     case Array::Contiguous:
778     case Array::Undecided:
779     case Array::ArrayStorage:
780     case Array::SlowPutArrayStorage: {
781         GPRTemporary temp(this);
782         GPRReg tempGPR = temp.gpr();
783         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
784         speculationCheck(
785             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
786             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
787         
788         noResult(m_currentNode);
789         return;
790     }
791     case Array::DirectArguments:
792         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
793         noResult(m_currentNode);
794         return;
795     case Array::ScopedArguments:
796         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
797         noResult(m_currentNode);
798         return;
799     default:
800         speculateCellTypeWithoutTypeFiltering(
801             node->child1(), baseReg,
802             typeForTypedArrayType(node->arrayMode().typedArrayType()));
803         noResult(m_currentNode);
804         return;
805     }
806     
807     RELEASE_ASSERT(expectedClassInfo);
808     
809     GPRTemporary temp(this);
810     GPRTemporary temp2(this);
811     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
812     speculationCheck(
813         BadType, JSValueSource::unboxedCell(baseReg), node,
814         m_jit.branchPtr(
815             MacroAssembler::NotEqual,
816             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
817             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
818     
819     noResult(m_currentNode);
820 }
821
822 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
823 {
824     ASSERT(node->arrayMode().doesConversion());
825     
826     GPRTemporary temp(this);
827     GPRTemporary structure;
828     GPRReg tempGPR = temp.gpr();
829     GPRReg structureGPR = InvalidGPRReg;
830     
831     if (node->op() != ArrayifyToStructure) {
832         GPRTemporary realStructure(this);
833         structure.adopt(realStructure);
834         structureGPR = structure.gpr();
835     }
836         
837     // We can skip all that comes next if we already have array storage.
838     MacroAssembler::JumpList slowPath;
839     
840     if (node->op() == ArrayifyToStructure) {
841         slowPath.append(m_jit.branchWeakStructure(
842             JITCompiler::NotEqual,
843             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
844             node->structure()));
845     } else {
846         m_jit.load8(
847             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
848         
849         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
850     }
851     
852     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
853         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
854     
855     noResult(m_currentNode);
856 }
857
858 void SpeculativeJIT::arrayify(Node* node)
859 {
860     ASSERT(node->arrayMode().isSpecific());
861     
862     SpeculateCellOperand base(this, node->child1());
863     
864     if (!node->child2()) {
865         arrayify(node, base.gpr(), InvalidGPRReg);
866         return;
867     }
868     
869     SpeculateInt32Operand property(this, node->child2());
870     
871     arrayify(node, base.gpr(), property.gpr());
872 }
873
874 GPRReg SpeculativeJIT::fillStorage(Edge edge)
875 {
876     VirtualRegister virtualRegister = edge->virtualRegister();
877     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
878     
879     switch (info.registerFormat()) {
880     case DataFormatNone: {
881         if (info.spillFormat() == DataFormatStorage) {
882             GPRReg gpr = allocate();
883             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
884             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
885             info.fillStorage(*m_stream, gpr);
886             return gpr;
887         }
888         
889         // Must be a cell; fill it as a cell and then return the pointer.
890         return fillSpeculateCell(edge);
891     }
892         
893     case DataFormatStorage: {
894         GPRReg gpr = info.gpr();
895         m_gprs.lock(gpr);
896         return gpr;
897     }
898         
899     default:
900         return fillSpeculateCell(edge);
901     }
902 }
903
904 void SpeculativeJIT::useChildren(Node* node)
905 {
906     if (node->flags() & NodeHasVarArgs) {
907         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
908             if (!!m_jit.graph().m_varArgChildren[childIdx])
909                 use(m_jit.graph().m_varArgChildren[childIdx]);
910         }
911     } else {
912         Edge child1 = node->child1();
913         if (!child1) {
914             ASSERT(!node->child2() && !node->child3());
915             return;
916         }
917         use(child1);
918         
919         Edge child2 = node->child2();
920         if (!child2) {
921             ASSERT(!node->child3());
922             return;
923         }
924         use(child2);
925         
926         Edge child3 = node->child3();
927         if (!child3)
928             return;
929         use(child3);
930     }
931 }
932
933 void SpeculativeJIT::compileIn(Node* node)
934 {
935     SpeculateCellOperand base(this, node->child2());
936     GPRReg baseGPR = base.gpr();
937     
938     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
939         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
940             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
941             
942             GPRTemporary result(this);
943             GPRReg resultGPR = result.gpr();
944
945             use(node->child1());
946             
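            // Roughly: the patchable jump initially sends every execution to the
            // slow-path call built below; when the inline cache for 'in' is later
            // generated, the jump is repatched to the generated fast path, which
            // rejoins at the 'done' label.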
947             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
948             MacroAssembler::Label done = m_jit.label();
949             
950             // Since this block is executed only when string->tryGetValueImpl() returns an atomic string,
951             // we can safely cast it to const AtomicStringImpl*.
952             auto slowPath = slowPathCall(
953                 jump.m_jump, this, operationInOptimize,
954                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
955                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
956             
957             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
958             stubInfo->codeOrigin = node->origin.semantic;
959             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
960             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
961 #if USE(JSVALUE32_64)
962             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
963             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
964 #endif
965             stubInfo->patch.usedRegisters = usedRegisters();
966
967             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
968             addSlowPathGenerator(WTF::move(slowPath));
969
970             base.use();
971
972             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
973             return;
974         }
975     }
976
977     JSValueOperand key(this, node->child1());
978     JSValueRegs regs = key.jsValueRegs();
979         
980     GPRFlushedCallResult result(this);
981     GPRReg resultGPR = result.gpr();
982         
983     base.use();
984     key.use();
985         
986     flushRegisters();
987     callOperation(
988         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
989         baseGPR, regs);
990     m_jit.exceptionCheck();
991     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
992 }
993
994 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
995 {
996     unsigned branchIndexInBlock = detectPeepHoleBranch();
997     if (branchIndexInBlock != UINT_MAX) {
998         Node* branchNode = m_block->at(branchIndexInBlock);
999
1000         ASSERT(node->adjustedRefCount() == 1);
1001         
1002         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1003     
1004         m_indexInBlock = branchIndexInBlock;
1005         m_currentNode = branchNode;
1006         
1007         return true;
1008     }
1009     
1010     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1011     
1012     return false;
1013 }
1014
1015 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1016 {
1017     unsigned branchIndexInBlock = detectPeepHoleBranch();
1018     if (branchIndexInBlock != UINT_MAX) {
1019         Node* branchNode = m_block->at(branchIndexInBlock);
1020
1021         ASSERT(node->adjustedRefCount() == 1);
1022         
1023         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1024     
1025         m_indexInBlock = branchIndexInBlock;
1026         m_currentNode = branchNode;
1027         
1028         return true;
1029     }
1030     
1031     nonSpeculativeNonPeepholeStrictEq(node, invert);
1032     
1033     return false;
1034 }
1035
1036 static const char* dataFormatString(DataFormat format)
1037 {
1038     // These values correspond to the DataFormat enum.
1039     const char* strings[] = {
1040         "[  ]",
1041         "[ i]",
1042         "[ d]",
1043         "[ c]",
1044         "Err!",
1045         "Err!",
1046         "Err!",
1047         "Err!",
1048         "[J ]",
1049         "[Ji]",
1050         "[Jd]",
1051         "[Jc]",
1052         "Err!",
1053         "Err!",
1054         "Err!",
1055         "Err!",
1056     };
1057     return strings[format];
1058 }
1059
1060 void SpeculativeJIT::dump(const char* label)
1061 {
1062     if (label)
1063         dataLogF("<%s>\n", label);
1064
1065     dataLogF("  gprs:\n");
1066     m_gprs.dump();
1067     dataLogF("  fprs:\n");
1068     m_fprs.dump();
1069     dataLogF("  VirtualRegisters:\n");
1070     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1071         GenerationInfo& info = m_generationInfo[i];
1072         if (info.alive())
1073             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1074         else
1075             dataLogF("    % 3d:[__][__]", i);
1076         if (info.registerFormat() == DataFormatDouble)
1077             dataLogF(":fpr%d\n", info.fpr());
1078         else if (info.registerFormat() != DataFormatNone
1079 #if USE(JSVALUE32_64)
1080             && !(info.registerFormat() & DataFormatJS)
1081 #endif
1082             ) {
1083             ASSERT(info.gpr() != InvalidGPRReg);
1084             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1085         } else
1086             dataLogF("\n");
1087     }
1088     if (label)
1089         dataLogF("</%s>\n", label);
1090 }
1091
1092 GPRTemporary::GPRTemporary()
1093     : m_jit(0)
1094     , m_gpr(InvalidGPRReg)
1095 {
1096 }
1097
1098 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1099     : m_jit(jit)
1100     , m_gpr(InvalidGPRReg)
1101 {
1102     m_gpr = m_jit->allocate();
1103 }
1104
1105 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1106     : m_jit(jit)
1107     , m_gpr(InvalidGPRReg)
1108 {
1109     m_gpr = m_jit->allocate(specific);
1110 }
1111
1112 #if USE(JSVALUE32_64)
1113 GPRTemporary::GPRTemporary(
1114     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1115     : m_jit(jit)
1116     , m_gpr(InvalidGPRReg)
1117 {
1118     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1119         m_gpr = m_jit->reuse(op1.gpr(which));
1120     else
1121         m_gpr = m_jit->allocate();
1122 }
1123 #endif // USE(JSVALUE32_64)
1124
1125 JSValueRegsTemporary::JSValueRegsTemporary() { }
1126
1127 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1128 #if USE(JSVALUE64)
1129     : m_gpr(jit)
1130 #else
1131     : m_payloadGPR(jit)
1132     , m_tagGPR(jit)
1133 #endif
1134 {
1135 }
1136
1137 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1138
1139 JSValueRegs JSValueRegsTemporary::regs()
1140 {
1141 #if USE(JSVALUE64)
1142     return JSValueRegs(m_gpr.gpr());
1143 #else
1144     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1145 #endif
1146 }
1147
1148 void GPRTemporary::adopt(GPRTemporary& other)
1149 {
1150     ASSERT(!m_jit);
1151     ASSERT(m_gpr == InvalidGPRReg);
1152     ASSERT(other.m_jit);
1153     ASSERT(other.m_gpr != InvalidGPRReg);
1154     m_jit = other.m_jit;
1155     m_gpr = other.m_gpr;
1156     other.m_jit = 0;
1157     other.m_gpr = InvalidGPRReg;
1158 }
1159
1160 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1161     : m_jit(jit)
1162     , m_fpr(InvalidFPRReg)
1163 {
1164     m_fpr = m_jit->fprAllocate();
1165 }
1166
1167 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1168     : m_jit(jit)
1169     , m_fpr(InvalidFPRReg)
1170 {
1171     if (m_jit->canReuse(op1.node()))
1172         m_fpr = m_jit->reuse(op1.fpr());
1173     else
1174         m_fpr = m_jit->fprAllocate();
1175 }
1176
1177 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1178     : m_jit(jit)
1179     , m_fpr(InvalidFPRReg)
1180 {
1181     if (m_jit->canReuse(op1.node()))
1182         m_fpr = m_jit->reuse(op1.fpr());
1183     else if (m_jit->canReuse(op2.node()))
1184         m_fpr = m_jit->reuse(op2.fpr());
1185     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1186         m_fpr = m_jit->reuse(op1.fpr());
1187     else
1188         m_fpr = m_jit->fprAllocate();
1189 }
1190
1191 #if USE(JSVALUE32_64)
1192 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1193     : m_jit(jit)
1194     , m_fpr(InvalidFPRReg)
1195 {
1196     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1197         m_fpr = m_jit->reuse(op1.fpr());
1198     else
1199         m_fpr = m_jit->fprAllocate();
1200 }
1201 #endif
1202
1203 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1204 {
1205     BasicBlock* taken = branchNode->branchData()->taken.block;
1206     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1207     
1208     SpeculateDoubleOperand op1(this, node->child1());
1209     SpeculateDoubleOperand op2(this, node->child2());
1210     
1211     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1212     jump(notTaken);
1213 }
1214
1215 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1216 {
1217     BasicBlock* taken = branchNode->branchData()->taken.block;
1218     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1219
1220     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1221     
1222     if (taken == nextBlock()) {
1223         condition = MacroAssembler::NotEqual;
1224         BasicBlock* tmp = taken;
1225         taken = notTaken;
1226         notTaken = tmp;
1227     }
1228
1229     SpeculateCellOperand op1(this, node->child1());
1230     SpeculateCellOperand op2(this, node->child2());
1231     
1232     GPRReg op1GPR = op1.gpr();
1233     GPRReg op2GPR = op2.gpr();
1234     
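    // If the masquerades-as-undefined watchpoint is still valid, it is enough to
    // speculate that both operands are objects. Otherwise we also OSR-exit if either
    // object has the MasqueradesAsUndefined type-info flag set before comparing the
    // two pointers.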
1235     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1236         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1237             speculationCheck(
1238                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1239         }
1240         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1241             speculationCheck(
1242                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1243         }
1244     } else {
1245         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1246             speculationCheck(
1247                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1248                 m_jit.branchIfNotObject(op1GPR));
1249         }
1250         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1251             m_jit.branchTest8(
1252                 MacroAssembler::NonZero, 
1253                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1254                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1255
1256         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1257             speculationCheck(
1258                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1259                 m_jit.branchIfNotObject(op2GPR));
1260         }
1261         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1262             m_jit.branchTest8(
1263                 MacroAssembler::NonZero, 
1264                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1265                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1266     }
1267
1268     branchPtr(condition, op1GPR, op2GPR, taken);
1269     jump(notTaken);
1270 }
1271
1272 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1273 {
1274     BasicBlock* taken = branchNode->branchData()->taken.block;
1275     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1276
1277     // The branch instruction will branch to the taken block.
1278     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1279     if (taken == nextBlock()) {
1280         condition = JITCompiler::invert(condition);
1281         BasicBlock* tmp = taken;
1282         taken = notTaken;
1283         notTaken = tmp;
1284     }
1285
1286     if (node->child1()->isBooleanConstant()) {
1287         bool imm = node->child1()->asBoolean();
1288         SpeculateBooleanOperand op2(this, node->child2());
1289         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1290     } else if (node->child2()->isBooleanConstant()) {
1291         SpeculateBooleanOperand op1(this, node->child1());
1292         bool imm = node->child2()->asBoolean();
1293         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1294     } else {
1295         SpeculateBooleanOperand op1(this, node->child1());
1296         SpeculateBooleanOperand op2(this, node->child2());
1297         branch32(condition, op1.gpr(), op2.gpr(), taken);
1298     }
1299
1300     jump(notTaken);
1301 }
1302
1303 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1304 {
1305     BasicBlock* taken = branchNode->branchData()->taken.block;
1306     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1307
1308     // The branch instruction will branch to the taken block.
1309     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1310     if (taken == nextBlock()) {
1311         condition = JITCompiler::invert(condition);
1312         BasicBlock* tmp = taken;
1313         taken = notTaken;
1314         notTaken = tmp;
1315     }
1316
1317     if (node->child1()->isInt32Constant()) {
1318         int32_t imm = node->child1()->asInt32();
1319         SpeculateInt32Operand op2(this, node->child2());
1320         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1321     } else if (node->child2()->isInt32Constant()) {
1322         SpeculateInt32Operand op1(this, node->child1());
1323         int32_t imm = node->child2()->asInt32();
1324         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1325     } else {
1326         SpeculateInt32Operand op1(this, node->child1());
1327         SpeculateInt32Operand op2(this, node->child2());
1328         branch32(condition, op1.gpr(), op2.gpr(), taken);
1329     }
1330
1331     jump(notTaken);
1332 }
1333
1334 // Returns true if the compare is fused with a subsequent branch.
1335 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1336 {
1337     // Fused compare & branch.
1338     unsigned branchIndexInBlock = detectPeepHoleBranch();
1339     if (branchIndexInBlock != UINT_MAX) {
1340         Node* branchNode = m_block->at(branchIndexInBlock);
1341
1342         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1343         // so there can be no intervening nodes that also reference the compare.
1344         ASSERT(node->adjustedRefCount() == 1);
1345
1346         if (node->isBinaryUseKind(Int32Use))
1347             compilePeepHoleInt32Branch(node, branchNode, condition);
1348 #if USE(JSVALUE64)
1349         else if (node->isBinaryUseKind(Int52RepUse))
1350             compilePeepHoleInt52Branch(node, branchNode, condition);
1351 #endif // USE(JSVALUE64)
1352         else if (node->isBinaryUseKind(DoubleRepUse))
1353             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1354         else if (node->op() == CompareEq) {
1355             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1356                 // Use non-peephole comparison, for now.
1357                 return false;
1358             }
1359             if (node->isBinaryUseKind(BooleanUse))
1360                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1361             else if (node->isBinaryUseKind(SymbolUse))
1362                 compilePeepHoleSymbolEquality(node, branchNode);
1363             else if (node->isBinaryUseKind(ObjectUse))
1364                 compilePeepHoleObjectEquality(node, branchNode);
1365             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1366                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1367             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1368                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1369             else if (!needsTypeCheck(node->child1(), SpecOther))
1370                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1371             else if (!needsTypeCheck(node->child2(), SpecOther))
1372                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1373             else {
1374                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1375                 return true;
1376             }
1377         } else {
1378             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1379             return true;
1380         }
1381
1382         use(node->child1());
1383         use(node->child2());
1384         m_indexInBlock = branchIndexInBlock;
1385         m_currentNode = branchNode;
1386         return true;
1387     }
1388     return false;
1389 }
1390
1391 void SpeculativeJIT::noticeOSRBirth(Node* node)
1392 {
1393     if (!node->hasVirtualRegister())
1394         return;
1395     
1396     VirtualRegister virtualRegister = node->virtualRegister();
1397     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1398     
1399     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1400 }
1401
1402 void SpeculativeJIT::compileMovHint(Node* node)
1403 {
1404     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1405     
1406     Node* child = node->child1().node();
1407     noticeOSRBirth(child);
1408     
1409     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1410 }
1411
1412 void SpeculativeJIT::bail(AbortReason reason)
1413 {
1414     if (verboseCompilationEnabled())
1415         dataLog("Bailing compilation.\n");
1416     m_compileOkay = true;
1417     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1418     clearGenerationInfo();
1419 }
1420
1421 void SpeculativeJIT::compileCurrentBlock()
1422 {
1423     ASSERT(m_compileOkay);
1424     
1425     if (!m_block)
1426         return;
1427     
1428     ASSERT(m_block->isReachable);
1429     
1430     m_jit.blockHeads()[m_block->index] = m_jit.label();
1431
1432     if (!m_block->intersectionOfCFAHasVisited) {
1433         // Don't generate code for basic blocks that are unreachable according to CFA.
1434         // But to be sure that nobody has generated a jump to this block, drop in a
1435         // breakpoint here.
1436         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1437         return;
1438     }
1439
1440     m_stream->appendAndLog(VariableEvent::reset());
1441     
1442     m_jit.jitAssertHasValidCallFrame();
1443     m_jit.jitAssertTagsInPlace();
1444     m_jit.jitAssertArgumentCountSane();
1445
1446     m_state.reset();
1447     m_state.beginBasicBlock(m_block);
1448     
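         // For every variable live at the head of this block, record which machine slot
         // it was flushed to and in what format, so that OSR exits inside the block know
         // how to reconstruct the bytecode state.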
1449     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1450         int operand = m_block->variablesAtHead.operandForIndex(i);
1451         Node* node = m_block->variablesAtHead[i];
1452         if (!node)
1453             continue; // No need to record dead SetLocals.
1454         
1455         VariableAccessData* variable = node->variableAccessData();
1456         DataFormat format;
1457         if (!node->refCount())
1458             continue; // No need to record dead SetLocals.
1459         format = dataFormatFor(variable->flushFormat());
1460         m_stream->appendAndLog(
1461             VariableEvent::setLocal(
1462                 VirtualRegister(operand),
1463                 variable->machineLocal(),
1464                 format));
1465     }
1466
1467     m_origin = NodeOrigin();
1468     
1469     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1470         m_currentNode = m_block->at(m_indexInBlock);
1471         
1472         // We may have hit a contradiction that the CFA was aware of but that the JIT
1473         // didn't cause directly.
1474         if (!m_state.isValid()) {
1475             bail(DFGBailedAtTopOfBlock);
1476             return;
1477         }
1478
1479         m_interpreter.startExecuting();
1480         m_jit.setForNode(m_currentNode);
1481         m_origin = m_currentNode->origin;
1482         if (validationEnabled())
1483             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1484         m_lastGeneratedNode = m_currentNode->op();
1485         
1486         ASSERT(m_currentNode->shouldGenerate());
1487         
1488         if (verboseCompilationEnabled()) {
1489             dataLogF(
1490                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1491                 (int)m_currentNode->index(),
1492                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1493             dataLog("\n");
1494         }
1495         
1496         m_jit.jitAssertNoException();
1497
1498         compile(m_currentNode);
1499         
1500         if (belongsInMinifiedGraph(m_currentNode->op()))
1501             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1502         
1503 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1504         m_jit.clearRegisterAllocationOffsets();
1505 #endif
1506         
1507         if (!m_compileOkay) {
1508             bail(DFGBailedAtEndOfNode);
1509             return;
1510         }
1511         
1512         // Make sure that the abstract state is rematerialized for the next node.
1513         m_interpreter.executeEffects(m_indexInBlock);
1514     }
1515     
1516     // Perform the most basic verification that children have been used correctly.
1517     if (!ASSERT_DISABLED) {
1518         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1519             GenerationInfo& info = m_generationInfo[index];
1520             RELEASE_ASSERT(!info.alive());
1521         }
1522     }
1523 }
1524
1525 // If we are making type predictions about our arguments, then we need to
1526 // check that they are correct on function entry.
1527 void SpeculativeJIT::checkArgumentTypes()
1528 {
1529     ASSERT(!m_currentNode);
1530     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1531
1532     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1533         Node* node = m_jit.graph().m_arguments[i];
1534         if (!node) {
1535             // The argument is dead. We don't do any checks for such arguments.
1536             continue;
1537         }
1538         
1539         ASSERT(node->op() == SetArgument);
1540         ASSERT(node->shouldGenerate());
1541
1542         VariableAccessData* variableAccessData = node->variableAccessData();
1543         FlushFormat format = variableAccessData->flushFormat();
1544         
1545         if (format == FlushedJSValue)
1546             continue;
1547         
1548         VirtualRegister virtualRegister = variableAccessData->local();
1549
1550         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1551         
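             // The checks below lean on the JSValue encodings: on 64-bit, int32s compare
             // unsigned at-or-above the tag-type-number mask, cells have no tag bits set,
             // and booleans differ from ValueFalse only in their low bit (hence the xor
             // followed by the test against ~1); on 32-bit, the tag word is compared directly.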
1552 #if USE(JSVALUE64)
1553         switch (format) {
1554         case FlushedInt32: {
1555             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1556             break;
1557         }
1558         case FlushedBoolean: {
1559             GPRTemporary temp(this);
1560             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1561             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1562             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1563             break;
1564         }
1565         case FlushedCell: {
1566             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1567             break;
1568         }
1569         default:
1570             RELEASE_ASSERT_NOT_REACHED();
1571             break;
1572         }
1573 #else
1574         switch (format) {
1575         case FlushedInt32: {
1576             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1577             break;
1578         }
1579         case FlushedBoolean: {
1580             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1581             break;
1582         }
1583         case FlushedCell: {
1584             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1585             break;
1586         }
1587         default:
1588             RELEASE_ASSERT_NOT_REACHED();
1589             break;
1590         }
1591 #endif
1592     }
1593
1594     m_origin = NodeOrigin();
1595 }
1596
1597 bool SpeculativeJIT::compile()
1598 {
1599     checkArgumentTypes();
1600     
1601     ASSERT(!m_currentNode);
1602     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1603         m_jit.setForBlockIndex(blockIndex);
1604         m_block = m_jit.graph().block(blockIndex);
1605         compileCurrentBlock();
1606     }
1607     linkBranches();
1608     return true;
1609 }
1610
1611 void SpeculativeJIT::createOSREntries()
1612 {
1613     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1614         BasicBlock* block = m_jit.graph().block(blockIndex);
1615         if (!block)
1616             continue;
1617         if (!block->isOSRTarget)
1618             continue;
1619         
1620         // Currently we don't have OSR entry trampolines. We could add them
1621         // here if need be.
1622         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1623     }
1624 }
1625
1626 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1627 {
1628     unsigned osrEntryIndex = 0;
1629     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1630         BasicBlock* block = m_jit.graph().block(blockIndex);
1631         if (!block)
1632             continue;
1633         if (!block->isOSRTarget)
1634             continue;
1635         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1636     }
1637     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1638     
1639     if (verboseCompilationEnabled()) {
1640         DumpContext dumpContext;
1641         dataLog("OSR Entries:\n");
1642         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1643             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1644         if (!dumpContext.isEmpty())
1645             dumpContext.dump(WTF::dataFile());
1646     }
1647 }
1648
1649 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1650 {
1651     Edge child3 = m_jit.graph().varArgChild(node, 2);
1652     Edge child4 = m_jit.graph().varArgChild(node, 3);
1653
1654     ArrayMode arrayMode = node->arrayMode();
1655     
1656     GPRReg baseReg = base.gpr();
1657     GPRReg propertyReg = property.gpr();
1658     
1659     SpeculateDoubleOperand value(this, child3);
1660
1661     FPRReg valueReg = value.fpr();
1662     
1663     DFG_TYPE_CHECK(
1664         JSValueRegs(), child3, SpecFullRealNumber,
1665         m_jit.branchDouble(
1666             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1667     
1668     if (!m_compileOkay)
1669         return;
1670     
1671     StorageOperand storage(this, child4);
1672     GPRReg storageReg = storage.gpr();
1673
1674     if (node->op() == PutByValAlias) {
1675         // Store the value to the array.
1676         GPRReg propertyReg = property.gpr();
1677         FPRReg valueReg = value.fpr();
1678         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1679         
1680         noResult(m_currentNode);
1681         return;
1682     }
1683     
1684     GPRTemporary temporary;
1685     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1686
1687     MacroAssembler::Jump slowCase;
1688     
1689     if (arrayMode.isInBounds()) {
1690         speculationCheck(
1691             OutOfBounds, JSValueRegs(), 0,
1692             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1693     } else {
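             // Fast case: the index is below the public length. Otherwise, an index that is
             // still inside the vector length grows the public length to index + 1 before the
             // store, while an index at or past the vector length either fails the speculation
             // or is handled by the slow path below, depending on the array mode.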
1694         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1695         
1696         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1697         
1698         if (!arrayMode.isOutOfBounds())
1699             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1700         
1701         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1702         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1703         
1704         inBounds.link(&m_jit);
1705     }
1706     
1707     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1708
1709     base.use();
1710     property.use();
1711     value.use();
1712     storage.use();
1713     
1714     if (arrayMode.isOutOfBounds()) {
1715         addSlowPathGenerator(
1716             slowPathCall(
1717                 slowCase, this,
1718                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1719                 NoResult, baseReg, propertyReg, valueReg));
1720     }
1721
1722     noResult(m_currentNode, UseChildrenCalledExplicitly);
1723 }
1724
1725 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1726 {
1727     SpeculateCellOperand string(this, node->child1());
1728     SpeculateStrictInt32Operand index(this, node->child2());
1729     StorageOperand storage(this, node->child3());
1730
1731     GPRReg stringReg = string.gpr();
1732     GPRReg indexReg = index.gpr();
1733     GPRReg storageReg = storage.gpr();
1734     
1735     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1736
1737     // Unsigned comparison, so we can filter out negative indices and indices that are too large.
1738     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1739
1740     GPRTemporary scratch(this);
1741     GPRReg scratchReg = scratch.gpr();
1742
1743     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1744
1745     // Load the character into scratchReg
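         // (StringImpl carries an is-8-bit flag: 8-bit strings are indexed as bytes and
         // 16-bit strings as two-byte code units, hence the TimesOne / TimesTwo scales.)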
1746     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1747
1748     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1749     JITCompiler::Jump cont8Bit = m_jit.jump();
1750
1751     is16Bit.link(&m_jit);
1752
1753     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1754
1755     cont8Bit.link(&m_jit);
1756
1757     int32Result(scratchReg, m_currentNode);
1758 }
1759
1760 void SpeculativeJIT::compileGetByValOnString(Node* node)
1761 {
1762     SpeculateCellOperand base(this, node->child1());
1763     SpeculateStrictInt32Operand property(this, node->child2());
1764     StorageOperand storage(this, node->child3());
1765     GPRReg baseReg = base.gpr();
1766     GPRReg propertyReg = property.gpr();
1767     GPRReg storageReg = storage.gpr();
1768
1769     GPRTemporary scratch(this);
1770     GPRReg scratchReg = scratch.gpr();
1771 #if USE(JSVALUE32_64)
1772     GPRTemporary resultTag;
1773     GPRReg resultTagReg = InvalidGPRReg;
1774     if (node->arrayMode().isOutOfBounds()) {
1775         GPRTemporary realResultTag(this);
1776         resultTag.adopt(realResultTag);
1777         resultTagReg = resultTag.gpr();
1778     }
1779 #endif
1780
1781     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1782
1783     // Unsigned comparison, so we can filter out negative indices and indices that are too large.
1784     JITCompiler::Jump outOfBounds = m_jit.branch32(
1785         MacroAssembler::AboveOrEqual, propertyReg,
1786         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1787     if (node->arrayMode().isInBounds())
1788         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1789
1790     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1791
1792     // Load the character into scratchReg
1793     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1794
1795     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1796     JITCompiler::Jump cont8Bit = m_jit.jump();
1797
1798     is16Bit.link(&m_jit);
1799
1800     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1801
1802     JITCompiler::Jump bigCharacter =
1803         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1804
1805     // 8-bit string values don't need the isASCII check.
1806     cont8Bit.link(&m_jit);
1807
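         // Characters below 0x100 map into the VM's single-character string table: scale
         // the code unit by the pointer size, then load the cached JSString*. Larger
         // characters were routed to the slow path above.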
1808     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1809     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1810     m_jit.loadPtr(scratchReg, scratchReg);
1811
1812     addSlowPathGenerator(
1813         slowPathCall(
1814             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1815
1816     if (node->arrayMode().isOutOfBounds()) {
1817 #if USE(JSVALUE32_64)
1818         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1819 #endif
1820
1821         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1822         if (globalObject->stringPrototypeChainIsSane()) {
1823             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1824             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1825             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1826             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1827             // indexed properties either.
1828             // https://bugs.webkit.org/show_bug.cgi?id=144668
1829             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1830             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1831             
1832 #if USE(JSVALUE64)
1833             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1834                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1835 #else
1836             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1837                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1838                 baseReg, propertyReg));
1839 #endif
1840         } else {
1841 #if USE(JSVALUE64)
1842             addSlowPathGenerator(
1843                 slowPathCall(
1844                     outOfBounds, this, operationGetByValStringInt,
1845                     scratchReg, baseReg, propertyReg));
1846 #else
1847             addSlowPathGenerator(
1848                 slowPathCall(
1849                     outOfBounds, this, operationGetByValStringInt,
1850                     resultTagReg, scratchReg, baseReg, propertyReg));
1851 #endif
1852         }
1853         
1854 #if USE(JSVALUE64)
1855         jsValueResult(scratchReg, m_currentNode);
1856 #else
1857         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1858 #endif
1859     } else
1860         cellResult(scratchReg, m_currentNode);
1861 }
1862
1863 void SpeculativeJIT::compileFromCharCode(Node* node)
1864 {
1865     SpeculateStrictInt32Operand property(this, node->child1());
1866     GPRReg propertyReg = property.gpr();
1867     GPRTemporary smallStrings(this);
1868     GPRTemporary scratch(this);
1869     GPRReg scratchReg = scratch.gpr();
1870     GPRReg smallStringsReg = smallStrings.gpr();
1871
1872     JITCompiler::JumpList slowCases;
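         // Char codes of 0xff and above, and codes whose slot in the single-character
         // string table is still null, fall back to operationStringFromCharCode.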
1873     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1874     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1875     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1876
1877     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1878     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1879     cellResult(scratchReg, m_currentNode);
1880 }
1881
1882 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1883 {
1884     VirtualRegister virtualRegister = node->virtualRegister();
1885     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1886
1887     switch (info.registerFormat()) {
1888     case DataFormatStorage:
1889         RELEASE_ASSERT_NOT_REACHED();
1890
1891     case DataFormatBoolean:
1892     case DataFormatCell:
1893         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1894         return GeneratedOperandTypeUnknown;
1895
1896     case DataFormatNone:
1897     case DataFormatJSCell:
1898     case DataFormatJS:
1899     case DataFormatJSBoolean:
1900     case DataFormatJSDouble:
1901         return GeneratedOperandJSValue;
1902
1903     case DataFormatJSInt32:
1904     case DataFormatInt32:
1905         return GeneratedOperandInteger;
1906
1907     default:
1908         RELEASE_ASSERT_NOT_REACHED();
1909         return GeneratedOperandTypeUnknown;
1910     }
1911 }
1912
1913 void SpeculativeJIT::compileValueToInt32(Node* node)
1914 {
1915     switch (node->child1().useKind()) {
1916 #if USE(JSVALUE64)
1917     case Int52RepUse: {
1918         SpeculateStrictInt52Operand op1(this, node->child1());
1919         GPRTemporary result(this, Reuse, op1);
1920         GPRReg op1GPR = op1.gpr();
1921         GPRReg resultGPR = result.gpr();
1922         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1923         int32Result(resultGPR, node, DataFormatInt32);
1924         return;
1925     }
1926 #endif // USE(JSVALUE64)
1927         
1928     case DoubleRepUse: {
1929         GPRTemporary result(this);
1930         SpeculateDoubleOperand op1(this, node->child1());
1931         FPRReg fpr = op1.fpr();
1932         GPRReg gpr = result.gpr();
1933         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1934         
1935         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1936         
1937         int32Result(gpr, node);
1938         return;
1939     }
1940     
1941     case NumberUse:
1942     case NotCellUse: {
1943         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1944         case GeneratedOperandInteger: {
1945             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1946             GPRTemporary result(this, Reuse, op1);
1947             m_jit.move(op1.gpr(), result.gpr());
1948             int32Result(result.gpr(), node, op1.format());
1949             return;
1950         }
1951         case GeneratedOperandJSValue: {
1952             GPRTemporary result(this);
1953 #if USE(JSVALUE64)
1954             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1955
1956             GPRReg gpr = op1.gpr();
1957             GPRReg resultGpr = result.gpr();
1958             FPRTemporary tempFpr(this);
1959             FPRReg fpr = tempFpr.fpr();
1960
1961             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1962             JITCompiler::JumpList converted;
1963
1964             if (node->child1().useKind() == NumberUse) {
1965                 DFG_TYPE_CHECK(
1966                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1967                     m_jit.branchTest64(
1968                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1969             } else {
1970                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1971                 
1972                 DFG_TYPE_CHECK(
1973                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1974                 
1975                 // It's not a cell, so true turns into 1 and everything else turns into 0.
1976                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1977                 converted.append(m_jit.jump());
1978                 
1979                 isNumber.link(&m_jit);
1980             }
1981
1982             // First, if we get here we have a double encoded as a JSValue
1983             m_jit.move(gpr, resultGpr);
1984             unboxDouble(resultGpr, fpr);
1985
1986             silentSpillAllRegisters(resultGpr);
1987             callOperation(toInt32, resultGpr, fpr);
1988             silentFillAllRegisters(resultGpr);
1989
1990             converted.append(m_jit.jump());
1991
1992             isInteger.link(&m_jit);
1993             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1994
1995             converted.link(&m_jit);
1996 #else
1997             Node* childNode = node->child1().node();
1998             VirtualRegister virtualRegister = childNode->virtualRegister();
1999             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2000
2001             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2002
2003             GPRReg payloadGPR = op1.payloadGPR();
2004             GPRReg resultGpr = result.gpr();
2005         
2006             JITCompiler::JumpList converted;
2007
2008             if (info.registerFormat() == DataFormatJSInt32)
2009                 m_jit.move(payloadGPR, resultGpr);
2010             else {
2011                 GPRReg tagGPR = op1.tagGPR();
2012                 FPRTemporary tempFpr(this);
2013                 FPRReg fpr = tempFpr.fpr();
2014                 FPRTemporary scratch(this);
2015
2016                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2017
2018                 if (node->child1().useKind() == NumberUse) {
2019                     DFG_TYPE_CHECK(
2020                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2021                         m_jit.branch32(
2022                             MacroAssembler::AboveOrEqual, tagGPR,
2023                             TrustedImm32(JSValue::LowestTag)));
2024                 } else {
2025                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2026                     
2027                     DFG_TYPE_CHECK(
2028                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2029                         m_jit.branchIfCell(op1.jsValueRegs()));
2030                     
2031                     // It's not a cell, so true turns into 1 and everything else turns into 0.
2032                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2033                     m_jit.move(TrustedImm32(0), resultGpr);
2034                     converted.append(m_jit.jump());
2035                     
2036                     isBoolean.link(&m_jit);
2037                     m_jit.move(payloadGPR, resultGpr);
2038                     converted.append(m_jit.jump());
2039                     
2040                     isNumber.link(&m_jit);
2041                 }
2042
2043                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2044
2045                 silentSpillAllRegisters(resultGpr);
2046                 callOperation(toInt32, resultGpr, fpr);
2047                 silentFillAllRegisters(resultGpr);
2048
2049                 converted.append(m_jit.jump());
2050
2051                 isInteger.link(&m_jit);
2052                 m_jit.move(payloadGPR, resultGpr);
2053
2054                 converted.link(&m_jit);
2055             }
2056 #endif
2057             int32Result(resultGpr, node);
2058             return;
2059         }
2060         case GeneratedOperandTypeUnknown:
2061             RELEASE_ASSERT(!m_compileOkay);
2062             return;
2063         }
2064         RELEASE_ASSERT_NOT_REACHED();
2065         return;
2066     }
2067     
2068     default:
2069         ASSERT(!m_compileOkay);
2070         return;
2071     }
2072 }
2073
2074 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2075 {
2076     if (doesOverflow(node->arithMode())) {
2077         // We know that this sometimes produces doubles. So produce a double every
2078         // time. This at least allows subsequent code to not have weird conditionals.
2079             
2080         SpeculateInt32Operand op1(this, node->child1());
2081         FPRTemporary result(this);
2082             
2083         GPRReg inputGPR = op1.gpr();
2084         FPRReg outputFPR = result.fpr();
2085             
2086         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2087             
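             // If the int32 bit pattern was negative, the true uint32 value is 2^32 higher,
             // so add 2^32 after the conversion: e.g. the bit pattern 0xFFFFFFFF converts to
             // -1.0 and is fixed up to 4294967295.0.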
2088         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2089         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2090         positive.link(&m_jit);
2091             
2092         doubleResult(outputFPR, node);
2093         return;
2094     }
2095     
2096     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2097
2098     SpeculateInt32Operand op1(this, node->child1());
2099     GPRTemporary result(this);
2100
2101     m_jit.move(op1.gpr(), result.gpr());
2102
2103     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2104
2105     int32Result(result.gpr(), node, op1.format());
2106 }
2107
2108 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2109 {
2110     SpeculateDoubleOperand op1(this, node->child1());
2111     FPRTemporary scratch(this);
2112     GPRTemporary result(this);
2113     
2114     FPRReg valueFPR = op1.fpr();
2115     FPRReg scratchFPR = scratch.fpr();
2116     GPRReg resultGPR = result.gpr();
2117
2118     JITCompiler::JumpList failureCases;
2119     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2120     m_jit.branchConvertDoubleToInt32(
2121         valueFPR, resultGPR, failureCases, scratchFPR,
2122         shouldCheckNegativeZero(node->arithMode()));
2123     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2124
2125     int32Result(resultGPR, node);
2126 }
2127
2128 void SpeculativeJIT::compileDoubleRep(Node* node)
2129 {
2130     switch (node->child1().useKind()) {
2131     case RealNumberUse: {
2132         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2133         FPRTemporary result(this);
2134         
2135         JSValueRegs op1Regs = op1.jsValueRegs();
2136         FPRReg resultFPR = result.fpr();
2137         
2138 #if USE(JSVALUE64)
2139         GPRTemporary temp(this);
2140         GPRReg tempGPR = temp.gpr();
2141         m_jit.move(op1Regs.gpr(), tempGPR);
2142         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2143 #else
2144         FPRTemporary temp(this);
2145         FPRReg tempFPR = temp.fpr();
2146         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2147 #endif
2148         
2149         JITCompiler::Jump done = m_jit.branchDouble(
2150             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2151         
2152         DFG_TYPE_CHECK(
2153             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2154         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2155         
2156         done.link(&m_jit);
2157         
2158         doubleResult(resultFPR, node);
2159         return;
2160     }
2161     
2162     case NotCellUse:
2163     case NumberUse: {
2164         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2165
2166         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2167         if (isInt32Speculation(possibleTypes)) {
2168             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2169             FPRTemporary result(this);
2170             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2171             doubleResult(result.fpr(), node);
2172             return;
2173         }
2174
2175         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2176         FPRTemporary result(this);
2177
2178 #if USE(JSVALUE64)
2179         GPRTemporary temp(this);
2180
2181         GPRReg op1GPR = op1.gpr();
2182         GPRReg tempGPR = temp.gpr();
2183         FPRReg resultFPR = result.fpr();
2184         JITCompiler::JumpList done;
2185
2186         JITCompiler::Jump isInteger = m_jit.branch64(
2187             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2188
2189         if (node->child1().useKind() == NotCellUse) {
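                 // Inline ToNumber for the non-cell, non-number cases: null and false
                 // produce 0, true produces 1, and undefined produces NaN; anything else
                 // here would be a cell and is rejected by the type check below.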
2190             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2191             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2192
2193             static const double zero = 0;
2194             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2195
2196             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2197             done.append(isNull);
2198
2199             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2200                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2201
2202             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2203             static const double one = 1;
2204             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2205             done.append(m_jit.jump());
2206             done.append(isFalse);
2207
2208             isUndefined.link(&m_jit);
2209             static const double NaN = PNaN;
2210             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2211             done.append(m_jit.jump());
2212
2213             isNumber.link(&m_jit);
2214         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2215             typeCheck(
2216                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2217                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2218         }
2219     
2220         m_jit.move(op1GPR, tempGPR);
2221         unboxDouble(tempGPR, resultFPR);
2222         done.append(m_jit.jump());
2223     
2224         isInteger.link(&m_jit);
2225         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2226         done.link(&m_jit);
2227 #else // USE(JSVALUE64) -> this is the 32_64 case
2228         FPRTemporary temp(this);
2229     
2230         GPRReg op1TagGPR = op1.tagGPR();
2231         GPRReg op1PayloadGPR = op1.payloadGPR();
2232         FPRReg tempFPR = temp.fpr();
2233         FPRReg resultFPR = result.fpr();
2234         JITCompiler::JumpList done;
2235     
2236         JITCompiler::Jump isInteger = m_jit.branch32(
2237             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2238
2239         if (node->child1().useKind() == NotCellUse) {
2240             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2241             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2242
2243             static const double zero = 0;
2244             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2245
2246             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2247             done.append(isNull);
2248
2249             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2250
2251             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2252             static const double one = 1;
2253             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2254             done.append(m_jit.jump());
2255             done.append(isFalse);
2256
2257             isUndefined.link(&m_jit);
2258             static const double NaN = PNaN;
2259             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2260             done.append(m_jit.jump());
2261
2262             isNumber.link(&m_jit);
2263         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2264             typeCheck(
2265                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2266                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2267         }
2268
2269         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2270         done.append(m_jit.jump());
2271     
2272         isInteger.link(&m_jit);
2273         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2274         done.link(&m_jit);
2275 #endif // USE(JSVALUE64)
2276     
2277         doubleResult(resultFPR, node);
2278         return;
2279     }
2280         
2281 #if USE(JSVALUE64)
2282     case Int52RepUse: {
2283         SpeculateStrictInt52Operand value(this, node->child1());
2284         FPRTemporary result(this);
2285         
2286         GPRReg valueGPR = value.gpr();
2287         FPRReg resultFPR = result.fpr();
2288
2289         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2290         
2291         doubleResult(resultFPR, node);
2292         return;
2293     }
2294 #endif // USE(JSVALUE64)
2295         
2296     default:
2297         RELEASE_ASSERT_NOT_REACHED();
2298         return;
2299     }
2300 }
2301
2302 void SpeculativeJIT::compileValueRep(Node* node)
2303 {
2304     switch (node->child1().useKind()) {
2305     case DoubleRepUse: {
2306         SpeculateDoubleOperand value(this, node->child1());
2307         JSValueRegsTemporary result(this);
2308         
2309         FPRReg valueFPR = value.fpr();
2310         JSValueRegs resultRegs = result.regs();
2311         
2312         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2313         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2314         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2315         // local was purified.
2316         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2317             m_jit.purifyNaN(valueFPR);
2318
2319         boxDouble(valueFPR, resultRegs);
2320         
2321         jsValueResult(resultRegs, node);
2322         return;
2323     }
2324         
2325 #if USE(JSVALUE64)
2326     case Int52RepUse: {
2327         SpeculateStrictInt52Operand value(this, node->child1());
2328         GPRTemporary result(this);
2329         
2330         GPRReg valueGPR = value.gpr();
2331         GPRReg resultGPR = result.gpr();
2332         
2333         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2334         
2335         jsValueResult(resultGPR, node);
2336         return;
2337     }
2338 #endif // USE(JSVALUE64)
2339         
2340     default:
2341         RELEASE_ASSERT_NOT_REACHED();
2342         return;
2343     }
2344 }
2345
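     // Round by adding 0.5 and clamp to [0, 255]; the !(d > 0) test also catches NaN,
     // which clamps to 0.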
2346 static double clampDoubleToByte(double d)
2347 {
2348     d += 0.5;
2349     if (!(d > 0))
2350         d = 0;
2351     else if (d > 255)
2352         d = 255;
2353     return d;
2354 }
2355
2356 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2357 {
2358     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2359     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2360     jit.xorPtr(result, result);
2361     MacroAssembler::Jump clamped = jit.jump();
2362     tooBig.link(&jit);
2363     jit.move(JITCompiler::TrustedImm32(255), result);
2364     clamped.link(&jit);
2365     inBounds.link(&jit);
2366 }
2367
2368 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2369 {
2370     // Unordered compare so we pick up NaN
2371     static const double zero = 0;
2372     static const double byteMax = 255;
2373     static const double half = 0.5;
2374     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2375     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2376     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2377     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2378     
2379     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2380     // FIXME: This should probably just use a floating point round!
2381     // https://bugs.webkit.org/show_bug.cgi?id=72054
2382     jit.addDouble(source, scratch);
2383     jit.truncateDoubleToInt32(scratch, result);   
2384     MacroAssembler::Jump truncatedInt = jit.jump();
2385     
2386     tooSmall.link(&jit);
2387     jit.xorPtr(result, result);
2388     MacroAssembler::Jump zeroed = jit.jump();
2389     
2390     tooBig.link(&jit);
2391     jit.move(JITCompiler::TrustedImm32(255), result);
2392     
2393     truncatedInt.link(&jit);
2394     zeroed.link(&jit);
2395
2396 }
2397
2398 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2399 {
2400     if (node->op() == PutByValAlias)
2401         return JITCompiler::Jump();
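         // If the abstract interpreter pinned down the exact typed-array view, compare the
         // index against that view's constant length; a constant index already proven in
         // range needs no check at all, which is signalled by returning an unset Jump.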
2402     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2403         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2404     if (view) {
2405         uint32_t length = view->length();
2406         Node* indexNode = m_jit.graph().child(node, 1).node();
2407         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2408             return JITCompiler::Jump();
2409         return m_jit.branch32(
2410             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2411     }
2412     return m_jit.branch32(
2413         MacroAssembler::AboveOrEqual, indexGPR,
2414         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2415 }
2416
2417 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2418 {
2419     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2420     if (!jump.isSet())
2421         return;
2422     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2423 }
2424
2425 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2426 {
2427     ASSERT(isInt(type));
2428     
2429     SpeculateCellOperand base(this, node->child1());
2430     SpeculateStrictInt32Operand property(this, node->child2());
2431     StorageOperand storage(this, node->child3());
2432
2433     GPRReg baseReg = base.gpr();
2434     GPRReg propertyReg = property.gpr();
2435     GPRReg storageReg = storage.gpr();
2436
2437     GPRTemporary result(this);
2438     GPRReg resultReg = result.gpr();
2439
2440     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2441
2442     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2443     switch (elementSize(type)) {
2444     case 1:
2445         if (isSigned(type))
2446             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2447         else
2448             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2449         break;
2450     case 2:
2451         if (isSigned(type))
2452             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2453         else
2454             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2455         break;
2456     case 4:
2457         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2458         break;
2459     default:
2460         CRASH();
2461     }
2462     if (elementSize(type) < 4 || isSigned(type)) {
2463         int32Result(resultReg, node);
2464         return;
2465     }
2466     
2467     ASSERT(elementSize(type) == 4 && !isSigned(type));
2468     if (node->shouldSpeculateInt32()) {
2469         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2470         int32Result(resultReg, node);
2471         return;
2472     }
2473     
2474 #if USE(JSVALUE64)
2475     if (node->shouldSpeculateMachineInt()) {
2476         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2477         strictInt52Result(resultReg, node);
2478         return;
2479     }
2480 #endif
2481     
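         // Remaining case: a uint32 that may not fit in int32 (or Int52). Convert it to a
         // double and, if the sign bit of the 32-bit pattern was set, add 2^32 to recover
         // the unsigned value.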
2482     FPRTemporary fresult(this);
2483     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2484     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2485     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2486     positive.link(&m_jit);
2487     doubleResult(fresult.fpr(), node);
2488 }
2489
2490 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2491 {
2492     ASSERT(isInt(type));
2493     
2494     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2495     GPRReg storageReg = storage.gpr();
2496     
2497     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2498     
2499     GPRTemporary value;
2500     GPRReg valueGPR = InvalidGPRReg;
2501     
2502     if (valueUse->isConstant()) {
2503         JSValue jsValue = valueUse->asJSValue();
2504         if (!jsValue.isNumber()) {
2505             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2506             noResult(node);
2507             return;
2508         }
2509         double d = jsValue.asNumber();
2510         if (isClamped(type)) {
2511             ASSERT(elementSize(type) == 1);
2512             d = clampDoubleToByte(d);
2513         }
2514         GPRTemporary scratch(this);
2515         GPRReg scratchReg = scratch.gpr();
2516         m_jit.move(Imm32(toInt32(d)), scratchReg);
2517         value.adopt(scratch);
2518         valueGPR = scratchReg;
2519     } else {
2520         switch (valueUse.useKind()) {
2521         case Int32Use: {
2522             SpeculateInt32Operand valueOp(this, valueUse);
2523             GPRTemporary scratch(this);
2524             GPRReg scratchReg = scratch.gpr();
2525             m_jit.move(valueOp.gpr(), scratchReg);
2526             if (isClamped(type)) {
2527                 ASSERT(elementSize(type) == 1);
2528                 compileClampIntegerToByte(m_jit, scratchReg);
2529             }
2530             value.adopt(scratch);
2531             valueGPR = scratchReg;
2532             break;
2533         }
2534             
2535 #if USE(JSVALUE64)
2536         case Int52RepUse: {
2537             SpeculateStrictInt52Operand valueOp(this, valueUse);
2538             GPRTemporary scratch(this);
2539             GPRReg scratchReg = scratch.gpr();
2540             m_jit.move(valueOp.gpr(), scratchReg);
2541             if (isClamped(type)) {
2542                 ASSERT(elementSize(type) == 1);
2543                 MacroAssembler::Jump inBounds = m_jit.branch64(
2544                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2545                 MacroAssembler::Jump tooBig = m_jit.branch64(
2546                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2547                 m_jit.move(TrustedImm32(0), scratchReg);
2548                 MacroAssembler::Jump clamped = m_jit.jump();
2549                 tooBig.link(&m_jit);
2550                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2551                 clamped.link(&m_jit);
2552                 inBounds.link(&m_jit);
2553             }
2554             value.adopt(scratch);
2555             valueGPR = scratchReg;
2556             break;
2557         }
2558 #endif // USE(JSVALUE64)
2559             
2560         case DoubleRepUse: {
2561             if (isClamped(type)) {
2562                 ASSERT(elementSize(type) == 1);
2563                 SpeculateDoubleOperand valueOp(this, valueUse);
2564                 GPRTemporary result(this);
2565                 FPRTemporary floatScratch(this);
2566                 FPRReg fpr = valueOp.fpr();
2567                 GPRReg gpr = result.gpr();
2568                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2569                 value.adopt(result);
2570                 valueGPR = gpr;
2571             } else {
2572                 SpeculateDoubleOperand valueOp(this, valueUse);
2573                 GPRTemporary result(this);
2574                 FPRReg fpr = valueOp.fpr();
2575                 GPRReg gpr = result.gpr();
2576                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2577                 m_jit.xorPtr(gpr, gpr);
2578                 MacroAssembler::Jump fixed = m_jit.jump();
2579                 notNaN.link(&m_jit);
2580                 
2581                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2582                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2583                 
2584                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2585                 
2586                 fixed.link(&m_jit);
2587                 value.adopt(result);
2588                 valueGPR = gpr;
2589             }
2590             break;
2591         }
2592             
2593         default:
2594             RELEASE_ASSERT_NOT_REACHED();
2595             break;
2596         }
2597     }
2598     
2599     ASSERT_UNUSED(valueGPR, valueGPR != property);
2600     ASSERT(valueGPR != base);
2601     ASSERT(valueGPR != storageReg);
2602     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2603     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2604         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2605         outOfBounds = MacroAssembler::Jump();
2606     }
2607
2608     switch (elementSize(type)) {
2609     case 1:
2610         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2611         break;
2612     case 2:
2613         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2614         break;
2615     case 4:
2616         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2617         break;
2618     default:
2619         CRASH();
2620     }
2621     if (outOfBounds.isSet())
2622         outOfBounds.link(&m_jit);
2623     noResult(node);
2624 }
2625
2626 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2627 {
2628     ASSERT(isFloat(type));
2629     
2630     SpeculateCellOperand base(this, node->child1());
2631     SpeculateStrictInt32Operand property(this, node->child2());
2632     StorageOperand storage(this, node->child3());
2633
2634     GPRReg baseReg = base.gpr();
2635     GPRReg propertyReg = property.gpr();
2636     GPRReg storageReg = storage.gpr();
2637
2638     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2639
2640     FPRTemporary result(this);
2641     FPRReg resultReg = result.fpr();
2642     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2643     switch (elementSize(type)) {
2644     case 4:
2645         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2646         m_jit.convertFloatToDouble(resultReg, resultReg);
2647         break;
2648     case 8: {
2649         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2650         break;
2651     }
2652     default:
2653         RELEASE_ASSERT_NOT_REACHED();
2654     }
2655     
2656     doubleResult(resultReg, node);
2657 }
2658
2659 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2660 {
2661     ASSERT(isFloat(type));
2662     
2663     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2664     GPRReg storageReg = storage.gpr();
2665     
2666     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2667     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2668
2669     SpeculateDoubleOperand valueOp(this, valueUse);
2670     FPRTemporary scratch(this);
2671     FPRReg valueFPR = valueOp.fpr();
2672     FPRReg scratchFPR = scratch.fpr();
2673
2674     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2675     
2676     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2677     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2678         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2679         outOfBounds = MacroAssembler::Jump();
2680     }
2681     
2682     switch (elementSize(type)) {
2683     case 4: {
2684         m_jit.moveDouble(valueFPR, scratchFPR);
2685         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2686         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2687         break;
2688     }
2689     case 8:
2690         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2691         break;
2692     default:
2693         RELEASE_ASSERT_NOT_REACHED();
2694     }
2695     if (outOfBounds.isSet())
2696         outOfBounds.link(&m_jit);
2697     noResult(node);
2698 }
2699
2700 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2701 {
2702     // Check that prototype is an object.
2703     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2704     
2705     // Initialize scratchReg with the value being checked.
2706     m_jit.move(valueReg, scratchReg);
2707     
2708     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2709     MacroAssembler::Label loop(&m_jit);
2710     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2711     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2712     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
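         // Keep walking while the loaded prototype is still a cell; falling off the end of
         // the chain (a non-cell prototype, i.e. null) means the value is not an instance.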
2713 #if USE(JSVALUE64)
2714     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2715 #else
2716     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2717 #endif
2718     
2719     // No match - result is false.
2720 #if USE(JSVALUE64)
2721     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2722 #else
2723     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2724 #endif
2725     MacroAssembler::Jump putResult = m_jit.jump();
2726     
2727     isInstance.link(&m_jit);
2728 #if USE(JSVALUE64)
2729     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2730 #else
2731     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2732 #endif
2733     
2734     putResult.link(&m_jit);
2735 }
2736
2737 void SpeculativeJIT::compileInstanceOf(Node* node)
2738 {
2739     if (node->child1().useKind() == UntypedUse) {
2740         // It might not be a cell. Speculate less aggressively.
2741         // Or: it might only be used once (i.e. by us), so we get zero benefit
2742         // from speculating any more aggressively than we absolutely need to.
2743         
2744         JSValueOperand value(this, node->child1());
2745         SpeculateCellOperand prototype(this, node->child2());
2746         GPRTemporary scratch(this);
2747         GPRTemporary scratch2(this);
2748         
2749         GPRReg prototypeReg = prototype.gpr();
2750         GPRReg scratchReg = scratch.gpr();
2751         GPRReg scratch2Reg = scratch2.gpr();
2752         
2753         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2754         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2755         moveFalseTo(scratchReg);
2756
2757         MacroAssembler::Jump done = m_jit.jump();
2758         
2759         isCell.link(&m_jit);
2760         
2761         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2762         
2763         done.link(&m_jit);
2764
2765         blessedBooleanResult(scratchReg, node);
2766         return;
2767     }
2768     
2769     SpeculateCellOperand value(this, node->child1());
2770     SpeculateCellOperand prototype(this, node->child2());
2771     
2772     GPRTemporary scratch(this);
2773     GPRTemporary scratch2(this);
2774     
2775     GPRReg valueReg = value.gpr();
2776     GPRReg prototypeReg = prototype.gpr();
2777     GPRReg scratchReg = scratch.gpr();
2778     GPRReg scratch2Reg = scratch2.gpr();
2779     
2780     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2781
2782     blessedBooleanResult(scratchReg, node);
2783 }
2784
2785 void SpeculativeJIT::compileAdd(Node* node)
2786 {
2787     switch (node->binaryUseKind()) {
2788     case Int32Use: {
2789         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2790         
2791         if (node->child1()->isInt32Constant()) {
2792             int32_t imm1 = node->child1()->asInt32();
2793             SpeculateInt32Operand op2(this, node->child2());
2794             GPRTemporary result(this);
2795
2796             if (!shouldCheckOverflow(node->arithMode())) {
2797                 m_jit.move(op2.gpr(), result.gpr());
2798                 m_jit.add32(Imm32(imm1), result.gpr());
2799             } else
2800                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2801
2802             int32Result(result.gpr(), node);
2803             return;
2804         }
2805         
2806         if (node->child2()->isInt32Constant()) {
2807             SpeculateInt32Operand op1(this, node->child1());
2808             int32_t imm2 = node->child2()->asInt32();
2809             GPRTemporary result(this);
2810                 
2811             if (!shouldCheckOverflow(node->arithMode())) {
2812                 m_jit.move(op1.gpr(), result.gpr());
2813                 m_jit.add32(Imm32(imm2), result.gpr());
2814             } else
2815                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2816
2817             int32Result(result.gpr(), node);
2818             return;
2819         }
2820                 
2821         SpeculateInt32Operand op1(this, node->child1());
2822         SpeculateInt32Operand op2(this, node->child2());
2823         GPRTemporary result(this, Reuse, op1, op2);
2824
2825         GPRReg gpr1 = op1.gpr();
2826         GPRReg gpr2 = op2.gpr();
2827         GPRReg gprResult = result.gpr();
2828
2829         if (!shouldCheckOverflow(node->arithMode())) {
2830             if (gpr1 == gprResult)
2831                 m_jit.add32(gpr2, gprResult);
2832             else {
2833                 m_jit.move(gpr2, gprResult);
2834                 m_jit.add32(gpr1, gprResult);
2835             }
2836         } else {
2837             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2838                 
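                 // The branchAdd32 above may have clobbered one of its operands when
                 // the result register aliases gpr1 or gpr2. Recording a
                 // SpeculationRecovery lets OSR exit reconstruct the original operand
                 // value by undoing the add before exiting.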
2839             if (gpr1 == gprResult)
2840                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2841             else if (gpr2 == gprResult)
2842                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2843             else
2844                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2845         }
2846
2847         int32Result(gprResult, node);
2848         return;
2849     }
2850         
2851 #if USE(JSVALUE64)
2852     case Int52RepUse: {
2853         ASSERT(shouldCheckOverflow(node->arithMode()));
2854         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2855
2856         // Will we need an overflow check? If we can prove that neither input can be
2857         // Int52 then the overflow check will not be necessary.
2858         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2859             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2860             SpeculateWhicheverInt52Operand op1(this, node->child1());
2861             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2862             GPRTemporary result(this, Reuse, op1);
2863             m_jit.move(op1.gpr(), result.gpr());
2864             m_jit.add64(op2.gpr(), result.gpr());
2865             int52Result(result.gpr(), node, op1.format());
2866             return;
2867         }
2868         
2869         SpeculateInt52Operand op1(this, node->child1());
2870         SpeculateInt52Operand op2(this, node->child2());
2871         GPRTemporary result(this);
2872         m_jit.move(op1.gpr(), result.gpr());
2873         speculationCheck(
2874             Int52Overflow, JSValueRegs(), 0,
2875             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2876         int52Result(result.gpr(), node);
2877         return;
2878     }
2879 #endif // USE(JSVALUE64)
2880     
2881     case DoubleRepUse: {
2882         SpeculateDoubleOperand op1(this, node->child1());
2883         SpeculateDoubleOperand op2(this, node->child2());
2884         FPRTemporary result(this, op1, op2);
2885
2886         FPRReg reg1 = op1.fpr();
2887         FPRReg reg2 = op2.fpr();
2888         m_jit.addDouble(reg1, reg2, result.fpr());
2889
2890         doubleResult(result.fpr(), node);
2891         return;
2892     }
2893         
2894     default:
2895         RELEASE_ASSERT_NOT_REACHED();
2896         break;
2897     }
2898 }
2899
2900 void SpeculativeJIT::compileMakeRope(Node* node)
2901 {
2902     ASSERT(node->child1().useKind() == KnownStringUse);
2903     ASSERT(node->child2().useKind() == KnownStringUse);
2904     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2905     
2906     SpeculateCellOperand op1(this, node->child1());
2907     SpeculateCellOperand op2(this, node->child2());
2908     SpeculateCellOperand op3(this, node->child3());
2909     GPRTemporary result(this);
2910     GPRTemporary allocator(this);
2911     GPRTemporary scratch(this);
2912     
2913     GPRReg opGPRs[3];
2914     unsigned numOpGPRs;
2915     opGPRs[0] = op1.gpr();
2916     opGPRs[1] = op2.gpr();
2917     if (node->child3()) {
2918         opGPRs[2] = op3.gpr();
2919         numOpGPRs = 3;
2920     } else {
2921         opGPRs[2] = InvalidGPRReg;
2922         numOpGPRs = 2;
2923     }
2924     GPRReg resultGPR = result.gpr();
2925     GPRReg allocatorGPR = allocator.gpr();
2926     GPRReg scratchGPR = scratch.gpr();
2927     
2928     JITCompiler::JumpList slowPath;
2929     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2930     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2931     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2932         
2933     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2934     for (unsigned i = 0; i < numOpGPRs; ++i)
2935         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2936     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2937         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
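     // The rope's flags and length are derived from its fibers: it is 8-bit only
     // if every fiber is 8-bit (hence the running and32 of the flags below), and
     // its length is the sum of the fiber lengths, with each addition checked for
     // overflow.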
2938     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2939     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2940     if (!ASSERT_DISABLED) {
2941         JITCompiler::Jump ok = m_jit.branch32(
2942             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2943         m_jit.abortWithReason(DFGNegativeStringLength);
2944         ok.link(&m_jit);
2945     }
2946     for (unsigned i = 1; i < numOpGPRs; ++i) {
2947         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2948         speculationCheck(
2949             Uncountable, JSValueSource(), nullptr,
2950             m_jit.branchAdd32(
2951                 JITCompiler::Overflow,
2952                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2953     }
2954     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2955     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2956     if (!ASSERT_DISABLED) {
2957         JITCompiler::Jump ok = m_jit.branch32(
2958             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2959         m_jit.abortWithReason(DFGNegativeStringLength);
2960         ok.link(&m_jit);
2961     }
2962     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2963     
2964     switch (numOpGPRs) {
2965     case 2:
2966         addSlowPathGenerator(slowPathCall(
2967             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2968         break;
2969     case 3:
2970         addSlowPathGenerator(slowPathCall(
2971             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2972         break;
2973     default:
2974         RELEASE_ASSERT_NOT_REACHED();
2975         break;
2976     }
2977         
2978     cellResult(resultGPR, node);
2979 }
2980
2981 void SpeculativeJIT::compileArithClz32(Node* node)
2982 {
2983     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
2984     SpeculateInt32Operand value(this, node->child1());
2985     GPRTemporary result(this, Reuse, value);
2986     GPRReg valueReg = value.gpr();
2987     GPRReg resultReg = result.gpr();
2988     m_jit.countLeadingZeros32(valueReg, resultReg);
2989     int32Result(resultReg, node);
2990 }
2991
2992 void SpeculativeJIT::compileArithSub(Node* node)
2993 {
2994     switch (node->binaryUseKind()) {
2995     case Int32Use: {
2996         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2997         
2998         if (node->child2()->isInt32Constant()) {
2999             SpeculateInt32Operand op1(this, node->child1());
3000             int32_t imm2 = node->child2()->asInt32();
3001             GPRTemporary result(this);
3002
3003             if (!shouldCheckOverflow(node->arithMode())) {
3004                 m_jit.move(op1.gpr(), result.gpr());
3005                 m_jit.sub32(Imm32(imm2), result.gpr());
3006             } else {
3007                 GPRTemporary scratch(this);
3008                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3009             }
3010
3011             int32Result(result.gpr(), node);
3012             return;
3013         }
3014             
3015         if (node->child1()->isInt32Constant()) {
3016             int32_t imm1 = node->child1()->asInt32();
3017             SpeculateInt32Operand op2(this, node->child2());
3018             GPRTemporary result(this);
3019                 
3020             m_jit.move(Imm32(imm1), result.gpr());
3021             if (!shouldCheckOverflow(node->arithMode()))
3022                 m_jit.sub32(op2.gpr(), result.gpr());
3023             else
3024                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3025                 
3026             int32Result(result.gpr(), node);
3027             return;
3028         }
3029             
3030         SpeculateInt32Operand op1(this, node->child1());
3031         SpeculateInt32Operand op2(this, node->child2());
3032         GPRTemporary result(this);
3033
3034         if (!shouldCheckOverflow(node->arithMode())) {
3035             m_jit.move(op1.gpr(), result.gpr());
3036             m_jit.sub32(op2.gpr(), result.gpr());
3037         } else
3038             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3039
3040         int32Result(result.gpr(), node);
3041         return;
3042     }
3043         
3044 #if USE(JSVALUE64)
3045     case Int52RepUse: {
3046         ASSERT(shouldCheckOverflow(node->arithMode()));
3047         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3048
3049         // Will we need an overflow check? If we can prove that neither input can be
3050         // Int52 then the overflow check will not be necessary.
3051         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3052             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3053             SpeculateWhicheverInt52Operand op1(this, node->child1());
3054             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3055             GPRTemporary result(this, Reuse, op1);
3056             m_jit.move(op1.gpr(), result.gpr());
3057             m_jit.sub64(op2.gpr(), result.gpr());
3058             int52Result(result.gpr(), node, op1.format());
3059             return;
3060         }
3061         
3062         SpeculateInt52Operand op1(this, node->child1());
3063         SpeculateInt52Operand op2(this, node->child2());
3064         GPRTemporary result(this);
3065         m_jit.move(op1.gpr(), result.gpr());
3066         speculationCheck(
3067             Int52Overflow, JSValueRegs(), 0,
3068             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3069         int52Result(result.gpr(), node);
3070         return;
3071     }
3072 #endif // USE(JSVALUE64)
3073
3074     case DoubleRepUse: {
3075         SpeculateDoubleOperand op1(this, node->child1());
3076         SpeculateDoubleOperand op2(this, node->child2());
3077         FPRTemporary result(this, op1);
3078
3079         FPRReg reg1 = op1.fpr();
3080         FPRReg reg2 = op2.fpr();
3081         m_jit.subDouble(reg1, reg2, result.fpr());
3082
3083         doubleResult(result.fpr(), node);
3084         return;
3085     }
3086
3087     case UntypedUse: {
3088         JSValueOperand left(this, node->child1());
3089         JSValueOperand right(this, node->child2());
3090
3091         JSValueRegs leftRegs = left.jsValueRegs();
3092         JSValueRegs rightRegs = right.jsValueRegs();
3093
3094         ResultType leftType = m_state.forNode(node->child1()).resultType();
3095         ResultType rightType = m_state.forNode(node->child2()).resultType();
3096
3097         FPRTemporary leftNumber(this);
3098         FPRTemporary rightNumber(this);
3099         FPRReg leftFPR = leftNumber.fpr();
3100         FPRReg rightFPR = rightNumber.fpr();
3101
3102 #if USE(JSVALUE64)
3103         GPRTemporary result(this);
3104         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3105         GPRTemporary scratch(this);
3106         GPRReg scratchGPR = scratch.gpr();
3107         FPRReg scratchFPR = InvalidFPRReg;
3108 #else
3109         GPRTemporary resultTag(this);
3110         GPRTemporary resultPayload(this);
3111         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3112         GPRReg scratchGPR = resultTag.gpr();
3113         FPRTemporary fprScratch(this);
3114         FPRReg scratchFPR = fprScratch.fpr();
3115 #endif
3116
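             // Emit the inline fast path for the speculated operand types; cases the
             // generator cannot handle jump to its slow path list, which spills
             // registers, calls out to operationValueSub, and rejoins at the
             // generator's end jump list.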
3117         JITSubGenerator gen(resultRegs, leftRegs, rightRegs, leftType, rightType,
3118             leftFPR, rightFPR, scratchGPR, scratchFPR);
3119         gen.generateFastPath(m_jit);
3120
3121         gen.slowPathJumpList().link(&m_jit);
3122         silentSpillAllRegisters(resultRegs);
3123         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3124         silentFillAllRegisters(resultRegs);
3125         m_jit.exceptionCheck();
3126
3127         gen.endJumpList().link(&m_jit);
3128         jsValueResult(resultRegs, node);
3129         return;
3130     }
3131
3132     default:
3133         RELEASE_ASSERT_NOT_REACHED();
3134         return;
3135     }
3136 }
3137
3138 void SpeculativeJIT::compileArithNegate(Node* node)
3139 {
3140     switch (node->child1().useKind()) {
3141     case Int32Use: {
3142         SpeculateInt32Operand op1(this, node->child1());
3143         GPRTemporary result(this);
3144
3145         m_jit.move(op1.gpr(), result.gpr());
3146
3147         // Note: there is no arith mode in which the result is not used as a
3148         // number and yet someone still cares about negative zero.
3149         
3150         if (!shouldCheckOverflow(node->arithMode()))
3151             m_jit.neg32(result.gpr());
3152         else if (!shouldCheckNegativeZero(node->arithMode()))
3153             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3154         else {
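                 // (value & 0x7fffffff) == 0 means the value is either 0 or INT32_MIN:
                 // negating 0 would produce -0 (not representable as an int32) and
                 // negating INT32_MIN would overflow, so both cases must exit.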
3155             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3156             m_jit.neg32(result.gpr());
3157         }
3158
3159         int32Result(result.gpr(), node);
3160         return;
3161     }
3162
3163 #if USE(JSVALUE64)
3164     case Int52RepUse: {
3165         ASSERT(shouldCheckOverflow(node->arithMode()));
3166         
3167         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3168             SpeculateWhicheverInt52Operand op1(this, node->child1());
3169             GPRTemporary result(this);
3170             GPRReg op1GPR = op1.gpr();
3171             GPRReg resultGPR = result.gpr();
3172             m_jit.move(op1GPR, resultGPR);
3173             m_jit.neg64(resultGPR);
3174             if (shouldCheckNegativeZero(node->arithMode())) {
3175                 speculationCheck(
3176                     NegativeZero, JSValueRegs(), 0,
3177                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3178             }
3179             int52Result(resultGPR, node, op1.format());
3180             return;
3181         }
3182         
3183         SpeculateInt52Operand op1(this, node->child1());
3184         GPRTemporary result(this);
3185         GPRReg op1GPR = op1.gpr();
3186         GPRReg resultGPR = result.gpr();
3187         m_jit.move(op1GPR, resultGPR);
3188         speculationCheck(
3189             Int52Overflow, JSValueRegs(), 0,
3190             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3191         if (shouldCheckNegativeZero(node->arithMode())) {
3192             speculationCheck(
3193                 NegativeZero, JSValueRegs(), 0,
3194                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3195         }
3196         int52Result(resultGPR, node);
3197         return;
3198     }
3199 #endif // USE(JSVALUE64)
3200         
3201     case DoubleRepUse: {
3202         SpeculateDoubleOperand op1(this, node->child1());
3203         FPRTemporary result(this);
3204         
3205         m_jit.negateDouble(op1.fpr(), result.fpr());
3206         
3207         doubleResult(result.fpr(), node);
3208         return;
3209     }
3210         
3211     default:
3212         RELEASE_ASSERT_NOT_REACHED();
3213         return;
3214     }
3215 }
3216 void SpeculativeJIT::compileArithMul(Node* node)
3217 {
3218     switch (node->binaryUseKind()) {
3219     case Int32Use: {
3220         SpeculateInt32Operand op1(this, node->child1());
3221         SpeculateInt32Operand op2(this, node->child2());
3222         GPRTemporary result(this);
3223
3224         GPRReg reg1 = op1.gpr();
3225         GPRReg reg2 = op2.gpr();
3226
3227         // We can perform truncated multiplications if we get to this point, because if the
3228         // fixup phase could not prove that it would be safe, it would have turned us into
3229         // a double multiplication.
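             // For example, 0x10000 * 0x10000 truncates to 0 here; Arith::Unchecked
             // means every use of the result tolerates that (typically because the
             // result is truncated to 32 bits anyway, as in bitwise contexts).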
3230         if (!shouldCheckOverflow(node->arithMode())) {
3231             m_jit.move(reg1, result.gpr());
3232             m_jit.mul32(reg2, result.gpr());
3233         } else {
3234             speculationCheck(
3235                 Overflow, JSValueRegs(), 0,
3236                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3237         }
3238             
3239         // Check for negative zero, if the users of this node care about such things.
3240         if (shouldCheckNegativeZero(node->arithMode())) {
3241             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3242             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3243             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3244             resultNonZero.link(&m_jit);
3245         }
3246
3247         int32Result(result.gpr(), node);
3248         return;
3249     }
3250     
3251 #if USE(JSVALUE64)   
3252     case Int52RepUse: {
3253         ASSERT(shouldCheckOverflow(node->arithMode()));
3254         
3255         // This is super clever. We want to do an int52 multiplication and check the
3256         // int52 overflow bit. There is no direct hardware support for this, but we do
3257         // have the ability to do an int64 multiplication and check the int64 overflow
3258         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3259         // registers, with the high 12 bits being sign-extended. We can do:
3260         //
3261         //     (a * (b << 12))
3262         //
3263         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3264         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3265         // multiplication overflows is identical to whether the 'a * b' 52-bit
3266         // multiplication overflows.
3267         //
3268         // In our nomenclature, this is:
3269         //
3270         //     strictInt52(a) * int52(b) => int52
3271         //
3272         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3273         // bits.
3274         //
3275         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3276         // we just do whatever is more convenient for op1 and have op2 do the
3277         // opposite. This ensures that we do at most one shift.
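             // A quick sanity check of the trick, with the shift amount of 12:
             //
             //     a = b = 1 << 25: a * b = 2^50 fits in int52; the 64-bit multiply
             //         computes a * (b << 12) = 2^62, which fits in int64, so the
             //         overflow flag stays clear.
             //     a = b = 1 << 26: a * b = 2^52 is outside int52's range
             //         [-2^51, 2^51 - 1]; the 64-bit multiply computes 2^64, which
             //         overflows int64, so the overflow flag fires.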
3278
3279         SpeculateWhicheverInt52Operand op1(this, node->child1());
3280         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3281         GPRTemporary result(this);
3282         
3283         GPRReg op1GPR = op1.gpr();
3284         GPRReg op2GPR = op2.gpr();
3285         GPRReg resultGPR = result.gpr();
3286         
3287         m_jit.move(op1GPR, resultGPR);
3288         speculationCheck(
3289             Int52Overflow, JSValueRegs(), 0,
3290             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3291         
3292         if (shouldCheckNegativeZero(node->arithMode())) {
3293             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3294                 MacroAssembler::NonZero, resultGPR);
3295             speculationCheck(
3296                 NegativeZero, JSValueRegs(), 0,
3297                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3298             speculationCheck(
3299                 NegativeZero, JSValueRegs(), 0,
3300                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3301             resultNonZero.link(&m_jit);
3302         }
3303         
3304         int52Result(resultGPR, node);
3305         return;
3306     }
3307 #endif // USE(JSVALUE64)
3308         
3309     case DoubleRepUse: {
3310         SpeculateDoubleOperand op1(this, node->child1());
3311         SpeculateDoubleOperand op2(this, node->child2());
3312         FPRTemporary result(this, op1, op2);
3313         
3314         FPRReg reg1 = op1.fpr();
3315         FPRReg reg2 = op2.fpr();
3316         
3317         m_jit.mulDouble(reg1, reg2, result.fpr());
3318         
3319         doubleResult(result.fpr(), node);
3320         return;
3321     }
3322         
3323     default:
3324         RELEASE_ASSERT_NOT_REACHED();
3325         return;
3326     }
3327 }
3328
3329 void SpeculativeJIT::compileArithDiv(Node* node)
3330 {
3331     switch (node->binaryUseKind()) {
3332     case Int32Use: {
3333 #if CPU(X86) || CPU(X86_64)
3334         SpeculateInt32Operand op1(this, node->child1());
3335         SpeculateInt32Operand op2(this, node->child2());
3336         GPRTemporary eax(this, X86Registers::eax);
3337         GPRTemporary edx(this, X86Registers::edx);
3338         GPRReg op1GPR = op1.gpr();
3339         GPRReg op2GPR = op2.gpr();
3340     
3341         GPRReg op2TempGPR;
3342         GPRReg temp;
3343         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3344             op2TempGPR = allocate();
3345             temp = op2TempGPR;
3346         } else {
3347             op2TempGPR = InvalidGPRReg;
3348             if (op1GPR == X86Registers::eax)
3349                 temp = X86Registers::edx;
3350             else
3351                 temp = X86Registers::eax;
3352         }
3353     
3354         ASSERT(temp != op1GPR);
3355         ASSERT(temp != op2GPR);
3356     
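             // Unsigned-compare trick: (op2 + 1) > 1 as an unsigned comparison holds
             // exactly when op2 is neither 0 nor -1, i.e. when the denominator needs
             // no special-casing before we can use idiv directly.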
3357         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3358     
3359         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3360     
3361         JITCompiler::JumpList done;
3362         if (shouldCheckOverflow(node->arithMode())) {
3363             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3364             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3365         } else {
3366             // This is the case where we convert the result to an int after we're done, and we
3367             // already know that the denominator is either -1 or 0. So, if the denominator is
3368             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3369             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3370             // are happy to fall through to a normal division, since we're just dividing
3371             // something by negative 1.
3372         
3373             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3374             m_jit.move(TrustedImm32(0), eax.gpr());
3375             done.append(m_jit.jump());
3376         
3377             notZero.link(&m_jit);
3378             JITCompiler::Jump notNeg2ToThe31 =
3379                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3380             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3381             done.append(m_jit.jump());
3382         
3383             notNeg2ToThe31.link(&m_jit);
3384         }
3385     
3386         safeDenominator.link(&m_jit);
3387     
3388         // If the user cares about negative zero, then speculate that we're not about
3389         // to produce negative zero.
3390         if (shouldCheckNegativeZero(node->arithMode())) {
3391             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3392             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3393             numeratorNonZero.link(&m_jit);
3394         }
3395     
3396         if (op2TempGPR != InvalidGPRReg) {
3397             m_jit.move(op2GPR, op2TempGPR);
3398             op2GPR = op2TempGPR;
3399         }
3400             
3401         m_jit.move(op1GPR, eax.gpr());
3402         m_jit.assembler().cdq();
3403         m_jit.assembler().idivl_r(op2GPR);
3404             
3405         if (op2TempGPR != InvalidGPRReg)
3406             unlock(op2TempGPR);
3407
3408         // Check that there was no remainder. If there had been, then we'd be obligated to
3409         // produce a double result instead.
3410         if (shouldCheckOverflow(node->arithMode()))
3411             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3412         
3413         done.link(&m_jit);
3414         int32Result(eax.gpr(), node);
3415 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3416         SpeculateInt32Operand op1(this, node->child1());
3417         SpeculateInt32Operand op2(this, node->child2());
3418         GPRReg op1GPR = op1.gpr();
3419         GPRReg op2GPR = op2.gpr();
3420         GPRTemporary quotient(this);
3421         GPRTemporary multiplyAnswer(this);
3422
3423         // If the user cares about negative zero, then speculate that we're not about
3424         // to produce negative zero.
3425         if (shouldCheckNegativeZero(node->arithMode())) {
3426             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3427             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3428             numeratorNonZero.link(&m_jit);
3429         }
3430
3431         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3432
3433         // Check that there was no remainder. If there had been, then we'd be obligated to
3434         // produce a double result instead.
3435         if (shouldCheckOverflow(node->arithMode())) {
3436             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3437             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3438         }
3439
3440         int32Result(quotient.gpr(), node);
3441 #else
3442         RELEASE_ASSERT_NOT_REACHED();
3443 #endif
3444         break;
3445     }
3446         
3447     case DoubleRepUse: {
3448         SpeculateDoubleOperand op1(this, node->child1());
3449         SpeculateDoubleOperand op2(this, node->child2());
3450         FPRTemporary result(this, op1);
3451         
3452         FPRReg reg1 = op1.fpr();
3453         FPRReg reg2 = op2.fpr();
3454         m_jit.divDouble(reg1, reg2, result.fpr());
3455         
3456         doubleResult(result.fpr(), node);
3457         break;
3458     }
3459         
3460     default:
3461         RELEASE_ASSERT_NOT_REACHED();
3462         break;
3463     }
3464 }
3465
3466 void SpeculativeJIT::compileArithMod(Node* node)
3467 {
3468     switch (node->binaryUseKind()) {
3469     case Int32Use: {
3470         // In the fast path, the dividend value could be the final result
3471         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3472         SpeculateStrictInt32Operand op1(this, node->child1());
3473         
3474         if (node->child2()->isInt32Constant()) {
3475             int32_t divisor = node->child2()->asInt32();
3476             if (divisor > 1 && hasOneBitSet(divisor)) {
3477                 unsigned logarithm = WTF::fastLog2(divisor);
3478                 GPRReg dividendGPR = op1.gpr();
3479                 GPRTemporary result(this);
3480                 GPRReg resultGPR = result.gpr();
3481
3482                 // This is what LLVM generates. It's pretty crazy. Here's my
3483                 // attempt at understanding it.
3484                 
3485                 // First, compute either divisor - 1, or 0, depending on whether
3486                 // the dividend is negative:
3487                 //
3488                 // If dividend < 0:  resultGPR = divisor - 1
3489                 // If dividend >= 0: resultGPR = 0
3490                 m_jit.move(dividendGPR, resultGPR);
3491                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3492                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3493                 
3494                 // Add in the dividend, so that:
3495                 //
3496                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3497                 // If dividend >= 0: resultGPR = dividend
3498                 m_jit.add32(dividendGPR, resultGPR);
3499                 
3500                 // Mask so as to only get the *high* bits. This rounds down
3501                 // (towards negative infinity) resultGPR to the nearest multiple
3502                 // of divisor, so that:
3503                 //
3504                 // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
3505                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3506                 //
3507                 // Note that this can be simplified to:
3508                 //
3509                 // If dividend < 0:  resultGPR = ceil(dividend / divisor)
3510                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3511                 //
3512                 // Note that if the dividend is negative, resultGPR will also be negative.
3513                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3514                 // zero, because of how things are conditionalized.
3515                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3516                 
3517                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3518                 //
3519                 // resultGPR = dividendGPR - resultGPR
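                     // For example, with divisor = 4 (logarithm = 2):
                     //
                     //     dividend = 7:   7 >> 31 = 0;  0 >>> 30 = 0;  0 + 7 = 7;
                     //                     7 & -4 = 4;   7 - 4 = 3        (7 % 4 == 3)
                     //     dividend = -7: -7 >> 31 = -1; -1 >>> 30 = 3;  3 + (-7) = -4;
                     //                    -4 & -4 = -4; -7 - (-4) = -3    (-7 % 4 == -3)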
3520                 m_jit.neg32(resultGPR);
3521                 m_jit.add32(dividendGPR, resultGPR);
3522                 
3523                 if (shouldCheckNegativeZero(node->arithMode())) {
3524                     // Check that we're not about to create negative zero.
3525                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3526                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3527                     numeratorPositive.link(&m_jit);
3528                 }
3529
3530                 int32Result(resultGPR, node);
3531                 return;
3532             }
3533         }
3534         
3535 #if CPU(X86) || CPU(X86_64)
3536         if (node->child2()->isInt32Constant()) {
3537             int32_t divisor = node->child2()->asInt32();
3538             if (divisor && divisor != -1) {
3539                 GPRReg op1Gpr = op1.gpr();
3540
3541                 GPRTemporary eax(this, X86Registers::eax);
3542                 GPRTemporary edx(this, X86Registers::edx);
3543                 GPRTemporary scratch(this);
3544                 GPRReg scratchGPR = scratch.gpr();
3545
3546                 GPRReg op1SaveGPR;
3547                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3548                     op1SaveGPR = allocate();
3549                     ASSERT(op1Gpr != op1SaveGPR);
3550                     m_jit.move(op1Gpr, op1SaveGPR);
3551                 } else
3552                     op1SaveGPR = op1Gpr;
3553                 ASSERT(op1SaveGPR != X86Registers::eax);
3554                 ASSERT(op1SaveGPR != X86Registers::edx);
3555
3556                 m_jit.move(op1Gpr, eax.gpr());
3557                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3558                 m_jit.assembler().cdq();
3559                 m_jit.assembler().idivl_r(scratchGPR);
3560                 if (shouldCheckNegativeZero(node->arithMode())) {
3561                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3562                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3563                     numeratorPositive.link(&m_jit);
3564                 }
3565             
3566                 if (op1SaveGPR != op1Gpr)
3567                     unlock(op1SaveGPR);
3568
3569                 int32Result(edx.gpr(), node);
3570                 return;
3571             }
3572         }
3573 #endif
3574
3575         SpeculateInt32Operand op2(this, node->child2());
3576 #if CPU(X86) || CPU(X86_64)
3577         GPRTemporary eax(this, X86Registers::eax);
3578         GPRTemporary edx(this, X86Registers::edx);
3579         GPRReg op1GPR = op1.gpr();
3580         GPRReg op2GPR = op2.gpr();
3581     
3582         GPRReg op2TempGPR;
3583         GPRReg temp;
3584         GPRReg op1SaveGPR;
3585     
3586         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3587             op2TempGPR = allocate();
3588             temp = op2TempGPR;
3589         } else {
3590             op2TempGPR = InvalidGPRReg;
3591             if (op1GPR == X86Registers::eax)
3592                 temp = X86Registers::edx;
3593             else
3594                 temp = X86Registers::eax;
3595         }
3596     
3597         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3598             op1SaveGPR = allocate();
3599             ASSERT(op1GPR != op1SaveGPR);
3600             m_jit.move(op1GPR, op1SaveGPR);
3601         } else
3602             op1SaveGPR = op1GPR;
3603     
3604         ASSERT(temp != op1GPR);
3605         ASSERT(temp != op2GPR);
3606         ASSERT(op1SaveGPR != X86Registers::eax);
3607         ASSERT(op1SaveGPR != X86Registers::edx);
3608     
3609         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3610     
3611         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3612     
3613         JITCompiler::JumpList done;
3614         
3615         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3616         // separate case for that. But it probably doesn't matter so much.
3617         if (shouldCheckOverflow(node->arithMode())) {
3618             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3619             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3620         } else {
3621             // This is the case where we convert the result to an int after we're done, and we
3622             // already know that the denominator is either -1 or 0. So, if the denominator is
3623             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3624             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3625             // happy to fall through to a normal division, since we're just dividing something
3626             // by negative 1.
3627         
3628             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3629             m_jit.move(TrustedImm32(0), edx.gpr());
3630             done.append(m_jit.jump());
3631         
3632             notZero.link(&m_jit);
3633             JITCompiler::Jump notNeg2ToThe31 =
3634                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3635             m_jit.move(TrustedImm32(0), edx.gpr());
3636             done.append(m_jit.jump());
3637         
3638             notNeg2ToThe31.link(&m_jit);
3639         }
3640         
3641         safeDenominator.link(&m_jit);
3642             
3643         if (op2TempGPR != InvalidGPRReg) {
3644             m_jit.move(op2GPR, op2TempGPR);
3645             op2GPR = op2TempGPR;
3646         }
3647             
3648         m_jit.move(op1GPR, eax.gpr());
3649         m_jit.assembler().cdq();
3650         m_jit.assembler().idivl_r(op2GPR);
3651             
3652         if (op2TempGPR != InvalidGPRReg)
3653             unlock(op2TempGPR);
3654
3655         // Check that we're not about to create negative zero.
3656         if (shouldCheckNegativeZero(node->arithMode())) {
3657             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3658             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3659             numeratorPositive.link(&m_jit);
3660         }
3661     
3662         if (op1SaveGPR != op1GPR)
3663             unlock(op1SaveGPR);
3664             
3665         done.link(&m_jit);
3666         int32Result(edx.gpr(), node);
3667
3668 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3669         GPRTemporary temp(this);
3670         GPRTemporary quotientThenRemainder(this);
3671         GPRTemporary multiplyAnswer(this);
3672         GPRReg dividendGPR = op1.gpr();
3673         GPRReg divisorGPR = op2.gpr();
3674         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3675         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3676
3677         JITCompiler::JumpList done;
3678     
3679         if (shouldCheckOverflow(node->arithMode()))
3680             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3681         else {
3682             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3683             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3684             done.append(m_jit.jump());
3685             denominatorNotZero.link(&m_jit);
3686         }
3687
3688         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3689         // FIXME: It seems like there are cases where we don't need this? What if we have
3690         // arithMode() == Arith::Unchecked?
3691         // https://bugs.webkit.org/show_bug.cgi?id=126444
3692         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3693 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3694         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3695 #else
3696         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3697 #endif
3698
3699         // If the user cares about negative zero, then speculate that we're not about
3700         // to produce negative zero.
3701         if (shouldCheckNegativeZero(node->arithMode())) {
3702             // Check that we're not about to create negative zero.
3703             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3704             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3705             numeratorPositive.link(&m_jit);
3706         }
3707
3708         done.link(&m_jit);
3709
3710         int32Result(quotientThenRemainderGPR, node);
3711 #else // not architecture that can do integer division
3712         RELEASE_ASSERT_NOT_REACHED();
3713 #endif
3714         return;
3715     }
3716         
3717     case DoubleRepUse: {
3718         SpeculateDoubleOperand op1(this, node->child1());
3719         SpeculateDoubleOperand op2(this, node->child2());
3720         
3721         FPRReg op1FPR = op1.fpr();
3722         FPRReg op2FPR = op2.fpr();
3723         
3724         flushRegisters();
3725         
3726         FPRResult result(this);
3727         
3728         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3729         
3730         doubleResult(result.fpr(), node);
3731         return;
3732     }
3733         
3734     default:
3735         RELEASE_ASSERT_NOT_REACHED();
3736         return;
3737     }
3738 }
3739
3740 void SpeculativeJIT::compileArithRound(Node* node)
3741 {
3742     ASSERT(node->child1().useKind() == DoubleRepUse);
3743
3744     SpeculateDoubleOperand value(this, node->child1());
3745     FPRReg valueFPR = value.fpr();
3746
3747     if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
3748         FPRTemporary oneHalf(this);
3749         GPRTemporary roundedResultAsInt32(this);
3750         FPRReg oneHalfFPR = oneHalf.fpr();
3751         GPRReg resultGPR = roundedResultAsInt32.gpr();
3752
3753         static const double halfConstant = 0.5;
3754         m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
3755         m_jit.addDouble(valueFPR, oneHalfFPR);
3756
3757         JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
3758         speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
3759         int32Result(resultGPR, node);
3760         return;
3761     }
3762
3763     flushRegisters();
3764     FPRResult roundedResultAsDouble(this);
3765     FPRReg resultFPR = roundedResultAsDouble.fpr();
3766     callOperation(jsRound, resultFPR, valueFPR);
3767     m_jit.exceptionCheck();
3768     if (producesInteger(node->arithRoundingMode())) {
3769         GPRTemporary roundedResultAsInt32(this);
3770         FPRTemporary scratch(this);
3771         FPRReg scratchFPR = scratch.fpr();
3772         GPRReg resultGPR = roundedResultAsInt32.gpr();
3773         JITCompiler::JumpList failureCases;
3774         m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
3775         speculationCheck(Overflow, JSValueRegs(), node, failureCases);
3776
3777         int32Result(resultGPR, node);
3778     } else
3779         doubleResult(resultFPR, node);
3780 }
3781
3782 void SpeculativeJIT::compileArithSqrt(Node* node)
3783 {
3784     SpeculateDoubleOperand op1(this, node->child1());
3785     FPRReg op1FPR = op1.fpr();
3786
3787     if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::useArchitectureSpecificOptimizations()) {
3788         flushRegisters();
3789         FPRResult result(this);
3790         callOperation(sqrt, result.fpr(), op1FPR);
3791         doubleResult(result.fpr(), node);
3792     } else {
3793         FPRTemporary result(this, op1);
3794         m_jit.sqrtDouble(op1.fpr(), result.fpr());