// Source/JavaScriptCore/jit/JITPropertyAccess.cpp
/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "CodeBlock.h"
#include "DirectArguments.h"
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "InterpreterInlines.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "OpcodeInlines.h"
#include "ResultType.h"
#include "ScopedArguments.h"
#include "ScopedArgumentsTable.h"
#include "SlowPathCall.h"
#include "StructureStubInfo.h"
#include <wtf/ScopedLambda.h>
#include <wtf/StringPrintStream.h>


namespace JSC {
#if USE(JSVALUE64)

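// op_get_by_val fast path: check that the base is a cell and the property is an
// int32, profile the base's indexing type, then emit a fast path specialized for
// the one array shape the profile predicts. Everything else bails to the slow
// path, which can later repatch this site through the ByValInfo.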
void JIT::emit_op_get_by_val(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetByVal>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ByValInfo* byValInfo = m_codeBlock->addByValInfo();

    emitGetVirtualRegister(base, regT0);
    bool propertyNameIsIntegerConstant = isOperandConstantInt(property);
    if (propertyNameIsIntegerConstant)
        move(Imm32(getOperandConstantInt(property)), regT1);
    else
        emitGetVirtualRegister(property, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);

    PatchableJump notIndex;
    if (!propertyNameIsIntegerConstant) {
        notIndex = emitPatchableJumpIfNotInt(regT1);
        addSlowCase(notIndex);

        // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
        // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
        // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation
        // size is always less than 4GB). As such, zero-extending will have been correct (and extending the value
        // to 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
        // sign-extend because it makes it easier to re-tag the value in the slow case.
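        // For example, an int32 of -1 zero-extends to 0xFFFFFFFF, which always compares
        // AboveOrEqual against m_vectorLength, so negative indices reliably take the slow path.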
        zeroExtend32ToPtr(regT1, regT1);
    }

    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

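    // chooseArrayMode picks the indexing shape the ArrayProfile has observed most;
    // only that shape is compiled inline, and badType catches everything else.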
    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    if (!ASSERT_DISABLED) {
        Jump resultOK = branchIfNotEmpty(regT0);
        abortWithReason(JITGetByValResultIsNotEmpty);
        resultOK.link(this);
    }

    emitValueProfilingSite(metadata);
    emitPutVirtualRegister(dst);

    Label nextHotPath = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
}

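// When the profile instead sees a repeated string or symbol property, the by-val
// site is recompiled as a get_by_id with a cached identifier; this emits that
// identifier-checked fast path.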
JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, OpGetByVal bytecode, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
{
    // base: regT0
    // property: regT1
    // scratch: regT3

    int dst = bytecode.m_dst.offset();

    slowCases.append(branchIfNotCell(regT1));
    emitByValIdentifierCheck(byValInfo, regT1, regT3, propertyName, slowCases);

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        propertyName.impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::Get);
    gen.generateFastPath(*this);

    fastDoneCase = jump();

    Label coldPathBegin = label();
    gen.slowPathJump().link(this);

    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl());
    gen.reportSlowPathCall(coldPathBegin, call);
    slowDoneCase = jump();

    return gen;
}

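// Note: the linkSlowCase calls below must mirror, in order, the addSlowCase calls
// made on the fast path above; each linkSlowCase consumes the next pending entry.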
void JIT::emitSlow_op_get_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    auto bytecode = currentInstruction->as<OpGetByVal>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;

    linkSlowCaseIfNotJSCell(iter, base); // base cell check

    if (!isOperandConstantInt(property))
        linkSlowCase(iter); // property int32 check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchIfNotString(regT0);
    emitNakedCall(CodeLocationLabel<NoPtrTag>(m_vm->getCTIStub(stringGetByValGenerator).retaggedCode<NoPtrTag>()));
    Jump failed = branchTest64(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), currentInstruction->size());
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    Label slowPath = label();

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    Call call = callOperation(operationGetByValOptimize, dst, regT0, regT1, byValInfo);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
}

void JIT::emit_op_put_by_val_direct(const Instruction* currentInstruction)
{
    emit_op_put_by_val<OpPutByValDirect>(currentInstruction);
}

template<typename Op>
void JIT::emit_op_put_by_val(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<Op>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ByValInfo* byValInfo = m_codeBlock->addByValInfo();

    emitGetVirtualRegister(base, regT0);
    bool propertyNameIsIntegerConstant = isOperandConstantInt(property);
    if (propertyNameIsIntegerConstant)
        move(Imm32(getOperandConstantInt(property)), regT1);
    else
        emitGetVirtualRegister(property, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    PatchableJump notIndex;
    if (!propertyNameIsIntegerConstant) {
        notIndex = emitPatchableJumpIfNotInt(regT1);
        addSlowCase(notIndex);
        // See comment in op_get_by_val.
        zeroExtend32ToPtr(regT1, regT1);
    }
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);

    PatchableJump badType;
    JumpList slowCases;

    // FIXME: Maybe we should do this inline?
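    // Stores to a copy-on-write butterfly must first materialize a writable copy,
    // so any base whose indexing type has the CopyOnWrite bit set takes the slow
    // path, where that copy can be made.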
    addSlowCase(branchTest32(NonZero, regT2, TrustedImm32(CopyOnWrite)));
    and32(TrustedImm32(IndexingShapeMask), regT2);

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(bytecode, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(bytecode, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(bytecode, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(bytecode, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
}

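// Shared put-by-val fast path for the Int32, Double, and Contiguous shapes.
// In-bounds stores write straight into the butterfly; stores just past the public
// length are handled below as appends, and everything else bails to the slow path.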
template<typename Op>
JIT::JumpList JIT::emitGenericContiguousPutByVal(Op bytecode, PatchableJump& badType, IndexingType indexingShape)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    int value = bytecode.m_value.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));

    Label storeResult = label();
    emitGetVirtualRegister(value, regT3);
    switch (indexingShape) {
    case Int32Shape:
        slowCases.append(branchIfNotInt32(regT3));
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        break;
    case DoubleShape: {
        Jump notInt = branchIfNotInt32(regT3);
        convertInt32ToDouble(regT3, fpRegT0);
        Jump ready = jump();
        notInt.link(this);
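        // Unbox the double: JSValue64 stores doubles offset by 2^48, and adding
        // TagTypeNumber (0xffff000000000000) undoes that offset. A NaN at this point
        // may be an impure NaN that DoubleShape storage must not hold, so bail.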
        add64(tagTypeNumberRegister, regT3);
        move64ToDouble(regT3, fpRegT0);
        slowCases.append(branchIfNaN(fpRegT0));
        ready.link(this);
        storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
        break;
    }
    case ContiguousShape:
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        emitWriteBarrier(bytecode.m_base.offset(), value, ShouldFilterValue);
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();
    outOfBounds.link(this);

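    // Out-of-bounds store: if the index is beyond the allocated vector, take the
    // slow path; otherwise this is a store to a hole, so record it in the profile,
    // grow the public length to index + 1, and redo the store.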
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));

    emitArrayProfileStoreToHoleSpecialCase(profile);

    add32(TrustedImm32(1), regT1, regT3);
    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
    jump().linkTo(storeResult, this);

    done.link(this);

    return slowCases;
}

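// ArrayStorage put-by-val: an empty (zero) slot within the vector is a hole, so
// filling it bumps numValuesInVector and, when the index reaches the current
// length, extends the length as well before redoing the store.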
template<typename Op>
JIT::JumpList JIT::emitArrayStoragePutByVal(Op bytecode, PatchableJump& badType)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    int value = bytecode.m_value.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    store64(regT3, BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()));
    emitWriteBarrier(bytecode.m_base.offset(), value, ShouldFilterValue);
    Jump end = jump();

    empty.link(this);
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);

    return slowCases;
}

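// Analogue of emitGetByValWithCachedId for stores: once the profile sees a repeated
// identifier, the by-val site is recompiled as a put_by_id with the identifier
// checked inline.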
template<typename Op>
JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Op bytecode, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
{
    // base: regT0
    // property: regT1
    // scratch: regT2

    int base = bytecode.m_base.offset();
    int value = bytecode.m_value.offset();

    slowCases.append(branchIfNotCell(regT1));
    emitByValIdentifierCheck(byValInfo, regT1, regT1, propertyName, slowCases);

    // The write barrier clobbers the registers, so after issuing the write barrier,
    // reload the registers.
    emitGetVirtualRegisters(base, regT0, value, regT1);

    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind);
    gen.generateFastPath(*this);
    emitWriteBarrier(base, value, ShouldFilterBase);
    doneCases.append(jump());

    Label coldPathBegin = label();
    gen.slowPathJump().link(this);

    Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, propertyName.impl());
    gen.reportSlowPathCall(coldPathBegin, call);
    doneCases.append(jump());

    return gen;
}

void JIT::emitSlow_op_put_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct;
    int base;
    int property;
    int value;

    auto load = [&](auto bytecode) {
        base = bytecode.m_base.offset();
        property = bytecode.m_property.offset();
        value = bytecode.m_value.offset();
    };

    if (isDirect)
        load(currentInstruction->as<OpPutByValDirect>());
    else
        load(currentInstruction->as<OpPutByVal>());

    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;

    linkAllSlowCases(iter);
    Label slowPath = label();

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    emitGetVirtualRegister(value, regT2);
    Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT0, regT1, regT2, byValInfo);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}

void JIT::emit_op_put_getter_by_id(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutGetterById>();
    emitGetVirtualRegister(bytecode.m_base.offset(), regT0);
    int32_t options = bytecode.m_attributes;
    emitGetVirtualRegister(bytecode.m_accessor.offset(), regT1);
    callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(bytecode.m_property).impl(), options, regT1);
}

void JIT::emit_op_put_setter_by_id(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutSetterById>();
    emitGetVirtualRegister(bytecode.m_base.offset(), regT0);
    int32_t options = bytecode.m_attributes;
    emitGetVirtualRegister(bytecode.m_accessor.offset(), regT1);
    callOperation(operationPutSetterById, regT0, m_codeBlock->identifier(bytecode.m_property).impl(), options, regT1);
}

void JIT::emit_op_put_getter_setter_by_id(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutGetterSetterById>();
    emitGetVirtualRegister(bytecode.m_base.offset(), regT0);
    int32_t attribute = bytecode.m_attributes;
    emitGetVirtualRegister(bytecode.m_getter.offset(), regT1);
    emitGetVirtualRegister(bytecode.m_setter.offset(), regT2);
    callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(bytecode.m_property).impl(), attribute, regT1, regT2);
}

void JIT::emit_op_put_getter_by_val(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutGetterByVal>();
    emitGetVirtualRegister(bytecode.m_base.offset(), regT0);
    emitGetVirtualRegister(bytecode.m_property.offset(), regT1);
    int32_t attributes = bytecode.m_attributes;
    emitGetVirtualRegister(bytecode.m_accessor.offset(), regT2);
    callOperation(operationPutGetterByVal, regT0, regT1, attributes, regT2);
}

void JIT::emit_op_put_setter_by_val(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutSetterByVal>();
    emitGetVirtualRegister(bytecode.m_base.offset(), regT0);
    emitGetVirtualRegister(bytecode.m_property.offset(), regT1);
    int32_t attributes = bytecode.m_attributes;
    emitGetVirtualRegister(bytecode.m_accessor.offset(), regT2);
    callOperation(operationPutSetterByVal, regT0, regT1, attributes, regT2);
}

void JIT::emit_op_del_by_id(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDelById>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property;
    emitGetVirtualRegister(base, regT0);
    callOperation(operationDeleteByIdJSResult, dst, regT0, m_codeBlock->identifier(property).impl());
}

void JIT::emit_op_del_by_val(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDelByVal>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    callOperation(operationDeleteByValJSResult, dst, regT0, regT1);
}

void JIT::emit_op_try_get_by_id(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpTryGetById>();
    int resultVReg = bytecode.m_dst.offset();
    int baseVReg = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::TryGet);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitPutVirtualRegister(resultVReg);
}

void JIT::emitSlow_op_try_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpTryGetById>();
    int resultVReg = bytecode.m_dst.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

    Label coldPathBegin = label();

    Call call = callOperation(operationTryGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emit_op_get_by_id_direct(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetByIdDirect>();
    int resultVReg = bytecode.m_dst.offset();
    int baseVReg = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::GetDirect);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitPutVirtualRegister(resultVReg);
}

void JIT::emitSlow_op_get_by_id_direct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpGetByIdDirect>();
    int resultVReg = bytecode.m_dst.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

    Label coldPathBegin = label();

    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

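// get_by_id uses a structure-based inline cache: JITGetByIdGenerator emits a
// patchable fast path, and the slow path's operationGetByIdOptimize repatches the
// stub as new structures are observed.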
void JIT::emit_op_get_by_id(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetById>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int resultVReg = bytecode.m_dst.offset();
    int baseVReg = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

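    // Special-case "length": while this site is in ArrayLength mode, also feed the
    // base's indexing type into the array profile so later tiers can keep compiling
    // a direct array-length load.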
    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
        Jump notArrayLengthMode = branch8(NotEqual, AbsoluteAddress(&metadata.m_mode), TrustedImm32(static_cast<uint8_t>(GetByIdMode::ArrayLength)));
        emitArrayProfilingSiteWithCell(regT0, regT1, &metadata.m_modeMetadata.arrayLengthMode.arrayProfile);
        notArrayLengthMode.link(this);
    }

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::Get);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitPutVirtualRegister(resultVReg);
}

void JIT::emit_op_get_by_id_with_this(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
    int resultVReg = bytecode.m_dst.offset();
    int baseVReg = bytecode.m_base.offset();
    int thisVReg = bytecode.m_thisValue.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitGetVirtualRegister(baseVReg, regT0);
    emitGetVirtualRegister(thisVReg, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
    emitJumpSlowCaseIfNotJSCell(regT1, thisVReg);

    JITGetByIdWithThisGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), JSValueRegs(regT1), AccessType::GetWithThis);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIdsWithThis.append(gen);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitPutVirtualRegister(resultVReg);
}

void JIT::emitSlow_op_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpGetById>();
    int resultVReg = bytecode.m_dst.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

    Label coldPathBegin = label();

    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emitSlow_op_get_by_id_with_this(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
    int resultVReg = bytecode.m_dst.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++];

    Label coldPathBegin = label();

    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), regT0, regT1, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emit_op_put_by_id(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutById>();
    int baseVReg = bytecode.m_base.offset();
    int valueVReg = bytecode.m_value.offset();
    bool direct = !!(bytecode.m_flags & PutByIdIsDirect);

    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', just after the arguments have been loaded into registers, and we generate code
    // such that the Structure and offset are always at the same distance from it.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(),
        direct ? Direct : NotDirect);

    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());

    emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);

    m_putByIds.append(gen);
}

void JIT::emitSlow_op_put_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpPutById>();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    Label coldPathBegin(this);

    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];

    Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emit_op_in_by_id(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpInById>();
    int resultVReg = bytecode.m_dst.offset();
    int baseVReg = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    JITInByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0));
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_inByIds.append(gen);

    emitPutVirtualRegister(resultVReg);
}

void JIT::emitSlow_op_in_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpInById>();
    int resultVReg = bytecode.m_dst.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    JITInByIdGenerator& gen = m_inByIds[m_inByIdIndex++];

    Label coldPathBegin = label();

    Call call = callOperation(operationInByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
    if (!needsVarInjectionChecks)
        return;
    addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
{
    emitVarInjectionCheck(needsVarInjectionChecks);
    emitGetVirtualRegister(scope, regT0);
    for (unsigned i = 0; i < depth; ++i)
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
    emitPutVirtualRegister(dst);
}

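// resolve_scope materializes the scope object a variable lives in. Constant scopes
// (globals and global lexical environments) are baked in as immediates; closure
// variables walk the scope chain; Unresolved types read the resolve type from the
// metadata at runtime and dispatch over the cases it may have settled on.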
void JIT::emit_op_resolve_scope(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpResolveScope>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int scope = bytecode.m_scope.offset();
    ResolveType resolveType = metadata.m_resolveType;
    unsigned depth = metadata.m_localScopeDepth;

    auto emitCode = [&] (ResolveType resolveType) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks:
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks: {
            JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
            RELEASE_ASSERT(constantScope);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            move(TrustedImmPtr(constantScope), regT0);
            emitPutVirtualRegister(dst);
            break;
        }
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
            break;
        case ModuleVar:
            move(TrustedImmPtr(metadata.m_lexicalEnvironment.get()), regT0);
            emitPutVirtualRegister(dst);
            break;
        case Dynamic:
            addSlowCase(jump());
            break;
        case LocalClosureVar:
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
        }
    };

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks: {
        // Since a GlobalProperty resolution can later be changed to GlobalLexicalVar, load the scope from the metadata rather than baking in a constant.
        JSScope** constantScopeSlot = metadata.m_constantScope.slot();
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        loadPtr(constantScopeSlot, regT0);
        emitPutVirtualRegister(dst);
        break;
    }
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
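        // The resolve type is unknown until runtime, so read the current type from
        // the metadata and branch over the states this site may have settled on.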
        JumpList skipToEnd;
        load32(&metadata.m_resolveType, regT0);

        Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
        emitCode(GlobalProperty);
        skipToEnd.append(jump());
        notGlobalProperty.link(this);

        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        emitCode(GlobalPropertyWithVarInjectionChecks);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());
        skipToEnd.link(this);
        break;
    }

    default:
        emitCode(resolveType);
        break;
    }
}

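// Guard for a cached GlobalProperty access: bail if no Structure has been cached
// yet, or if the scope's current Structure no longer matches the cached one.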
void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
    loadPtr(structureSlot, regT1);
    emitGetVirtualRegister(scope, regT0);
    addSlowCase(branchTestPtr(Zero, regT1));
    load32(Address(regT1, Structure::structureIDOffset()), regT1);
    addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
}

void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg reg)
{
    loadPtr(operand, reg);
}

void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg reg)
{
    loadPtr(operand, reg);
    loadPtr(reg, reg);
}

void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register)), regT0);
}

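// get_from_scope loads a variable out of the resolved scope. GlobalProperty reads
// go through the cached Structure and butterfly; the Var/LexicalVar cases load
// straight through a pointer baked into the metadata.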
void JIT::emit_op_get_from_scope(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetFromScope>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int scope = bytecode.m_scope.offset();
    ResolveType resolveType = metadata.m_getPutInfo.resolveType();
    Structure** structureSlot = metadata.m_structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.m_operand);

    auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            emitLoadWithStructureCheck(scope, structureSlot); // The Structure check covers var injection since we don't cache Structures for anything but the GlobalObject. Additionally, resolve_scope handles the var injection check.
            GPRReg base = regT0;
            GPRReg result = regT0;
            GPRReg offset = regT1;
            GPRReg scratch = regT2;

            jitAssert(scopedLambda<Jump(void)>([&] () -> Jump {
                return branchPtr(Equal, base, TrustedImmPtr(m_codeBlock->globalObject()));
            }));

            load32(operandSlot, offset);
            if (!ASSERT_DISABLED) {
                Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
                abortWithReason(JITOffsetIsNotOutOfLine);
                isOutOfLine.link(this);
            }
            loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
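            // Out-of-line properties live at negative indices from the butterfly, so
            // negate the offset and index backwards; the (firstOutOfLineOffset - 2)
            // constant accounts for the PropertyOffset-to-butterfly-index mapping.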
            neg32(offset);
            signExtend32ToPtr(offset, offset);
            load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
            break;
        }
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks:
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            if (indirectLoadForOperand)
                emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
            else
                emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
            if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
                addSlowCase(branchIfEmpty(regT0));
            break;
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            emitGetClosureVar(scope, *operandSlot);
            break;
        case Dynamic:
            addSlowCase(jump());
            break;
        case LocalClosureVar:
        case ModuleVar:
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
        }
    };

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_getPutInfo, regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        Jump isNotGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(resolveType));
        emitCode(resolveType, false);
        skipToEnd.append(jump());

        isNotGlobalProperty.link(this);
        emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar, true);

        skipToEnd.link(this);
        break;
    }
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_getPutInfo, regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        isGlobalProperty.link(this);
        emitCode(GlobalProperty, false);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar, true);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());

        skipToEnd.link(this);
        break;
    }

    default:
        emitCode(resolveType, false);
        break;
    }
    emitPutVirtualRegister(dst);
    emitValueProfilingSite(metadata);
}

void JIT::emitSlow_op_get_from_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpGetFromScope>();
    int dst = bytecode.m_dst.offset();
    callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, currentInstruction);
}

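// Stores to watched variables must notify the variable's WatchpointSet first;
// emitNotifyWrite bails to the slow path while the set is still being watched so
// the watchpoints can be fired before the write becomes observable.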
void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
{
    emitGetVirtualRegister(value, regT0);
    emitNotifyWrite(set);
    storePtr(regT0, operand);
}

void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
{
    emitGetVirtualRegister(value, regT0);
    loadPtr(indirectWatchpointSet, regT1);
    emitNotifyWrite(regT1);
    loadPtr(addressOfOperand, regT1);
    storePtr(regT0, regT1);
}

void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
{
    emitGetVirtualRegister(value, regT1);
    emitGetVirtualRegister(scope, regT0);
    emitNotifyWrite(set);
    storePtr(regT1, Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register)));
}

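// put_to_scope mirrors get_from_scope: GlobalProperty stores go through the cached
// Structure and butterfly, Var/LexicalVar stores go through pointers in the
// metadata (with a TDZ check for non-initializing lexical writes), and closure
// stores index into the environment. Each store also fires the write barrier.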
void JIT::emit_op_put_to_scope(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutToScope>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int scope = bytecode.m_scope.offset();
    int value = bytecode.m_value.offset();
    GetPutInfo getPutInfo = copiedGetPutInfo(bytecode);
    ResolveType resolveType = getPutInfo.resolveType();
    Structure** structureSlot = metadata.m_structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.m_operand);

    auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            emitLoadWithStructureCheck(scope, structureSlot); // The Structure check covers var injection since we don't cache Structures for anything but the GlobalObject. Additionally, resolve_scope handles the var injection check.
            emitGetVirtualRegister(value, regT2);

            jitAssert(scopedLambda<Jump(void)>([&] () -> Jump {
                return branchPtr(Equal, regT0, TrustedImmPtr(m_codeBlock->globalObject()));
            }));

            loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
            loadPtr(operandSlot, regT1);
            negPtr(regT1);
            storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
            emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
            break;
        }
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks: {
            JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
            RELEASE_ASSERT(constantScope);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
                // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
                if (indirectLoadForOperand)
                    emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
                else
                    emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
                addSlowCase(branchIfEmpty(regT0));
            }
            if (indirectLoadForOperand)
                emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, &metadata.m_watchpointSet);
            else
                emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, metadata.m_watchpointSet);
            emitWriteBarrier(constantScope, value, ShouldFilterValue);
            break;
        }
        case LocalClosureVar:
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            emitPutClosureVar(scope, *operandSlot, value, metadata.m_watchpointSet);
            emitWriteBarrier(scope, value, ShouldFilterValue);
            break;
        case ModuleVar:
        case Dynamic:
            addSlowCase(jump());
            break;
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    };

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_getPutInfo, regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(resolveType));
        Jump isGlobalLexicalVar = branch32(Equal, regT0, TrustedImm32(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar));
        addSlowCase(jump()); // Dynamic resolution can happen if we attempt to put a value to an already-initialized const binding.

        isGlobalLexicalVar.link(this);
        emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar, true);
        skipToEnd.append(jump());

        isGlobalProperty.link(this);
        emitCode(resolveType, false);
        skipToEnd.link(this);
        break;
    }
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_getPutInfo, regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        isGlobalProperty.link(this);
        emitCode(GlobalProperty, false);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar, true);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());

        skipToEnd.link(this);
        break;
    }

    default:
        emitCode(resolveType, false);
        break;
    }
}

void JIT::emitSlow_op_put_to_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpPutToScope>();
    ResolveType resolveType = copiedGetPutInfo(bytecode).resolveType();
    if (resolveType == ModuleVar) {
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
        slowPathCall.call();
    } else
        callOperation(operationPutToScope, currentInstruction);
}

void JIT::emit_op_get_from_arguments(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetFromArguments>();
    int dst = bytecode.m_dst.offset();
    int arguments = bytecode.m_arguments.offset();
    int index = bytecode.m_index;

    emitGetVirtualRegister(arguments, regT0);
    load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0);
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_put_to_arguments(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutToArguments>();
    int arguments = bytecode.m_arguments.offset();
    int index = bytecode.m_index;
    int value = bytecode.m_value.offset();

    emitGetVirtualRegister(arguments, regT0);
    emitGetVirtualRegister(value, regT1);
    store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)));

    emitWriteBarrier(arguments, value, ShouldFilterValue);
}

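// Generational write barrier: if the stored value is not a cell there is nothing
// for the GC to track, and if the owner is not a cell, is already remembered, or
// lives in Eden, no barrier is needed; otherwise call out to add the owner to the
// GC's remembered set.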
void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitGetVirtualRegister(value, regT0);
        valueNotCell = branchIfNotCell(regT0);
    }

    emitGetVirtualRegister(owner, regT0);
    Jump ownerNotCell;
    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell = branchIfNotCell(regT0);

    Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), regT0, regT1);
    callOperation(operationWriteBarrierSlowPath, regT0);
    ownerIsRememberedOrInEden.link(this);

    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
}

void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
    emitGetVirtualRegister(value, regT0);
    Jump valueNotCell;
    if (mode == ShouldFilterValue)
        valueNotCell = branchIfNotCell(regT0);

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
}

#else // USE(JSVALUE64)

void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branchIfNotCell(regT0);
    }

    emitLoad(owner, regT0, regT1);
    Jump ownerNotCell;
    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell = branchIfNotCell(regT0);

    Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), regT1, regT2);
    callOperation(operationWriteBarrierSlowPath, regT1);
    ownerIsRememberedOrInEden.link(this);

    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
}

void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
    Jump valueNotCell;
    if (mode == ShouldFilterValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branchIfNotCell(regT0);
    }

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
}

#endif // USE(JSVALUE64)

void JIT::emitWriteBarrier(JSCell* owner)
{
    Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), owner, regT0);
    callOperation(operationWriteBarrierSlowPath, owner);
    ownerIsRememberedOrInEden.link(this);
}

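// Guard used by the cached-identifier by-val stubs: verify at runtime that the
// property register still holds the cached name. Symbols are compared by cell
// identity; strings are compared by their StringImpl.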
void JIT::emitByValIdentifierCheck(ByValInfo* byValInfo, RegisterID cell, RegisterID scratch, const Identifier& propertyName, JumpList& slowCases)
{
    if (propertyName.isSymbol())
        slowCases.append(branchPtr(NotEqual, cell, TrustedImmPtr(byValInfo->cachedSymbol.get())));
    else {
        slowCases.append(branchIfNotString(cell));
        loadPtr(Address(cell, JSString::offsetOfValue()), scratch);
        slowCases.append(branchPtr(NotEqual, scratch, TrustedImmPtr(propertyName.impl())));
    }
}

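// Called from the slow path once a get_by_val site has settled on a specific
// JITArrayMode: compile a stub specialized for that mode, then repatch the site's
// badType jump to enter the stub and its slow-path call to the generic operation.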
void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    case JITDirectArguments:
        slowCases = emitDirectArgumentsGetByVal(currentInstruction, badType);
        break;
    case JITScopedArguments:
        slowCases = emitScopedArgumentsGetByVal(currentInstruction, badType);
        break;
    default:
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, type);
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*this, m_codeBlock);

    patchBuffer.link(badType, byValInfo->slowPathTarget);
    patchBuffer.link(slowCases, byValInfo->slowPathTarget);

    patchBuffer.link(done, byValInfo->badTypeDoneTarget);

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
        "Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());

    MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationGetByValGeneric));
}

void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
{
    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
    auto bytecode = currentInstruction->as<OpGetByVal>();

    Jump fastDoneCase;
    Jump slowDoneCase;
    JumpList slowCases;

    JITGetByIdGenerator gen = emitGetByValWithCachedId(byValInfo, bytecode, propertyName, fastDoneCase, slowDoneCase, slowCases);

    ConcurrentJSLocker locker(m_codeBlock->m_lock);
    LinkBuffer patchBuffer(*this, m_codeBlock);
    patchBuffer.link(slowCases, byValInfo->slowPathTarget);
    patchBuffer.link(fastDoneCase, byValInfo->badTypeDoneTarget);
    patchBuffer.link(slowDoneCase, byValInfo->badTypeNextHotPathTarget);
    if (!m_exceptionChecks.empty())
        patchBuffer.link(m_exceptionChecks, byValInfo->exceptionHandler);

    for (const auto& callSite : m_calls) {
        if (callSite.callee)
            patchBuffer.link(callSite.from, callSite.callee);
    }
    gen.finalize(patchBuffer, patchBuffer);

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
        "Baseline get_by_val with cached property name '%s' stub for %s, return point %p", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value());
    byValInfo->stubInfo = gen.stubInfo();

    MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationGetByValGeneric));
}

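// put_by_val counterpart of privateCompileGetByVal. Contiguous and ArrayStorage stores
// emit a write-barrier call, which then needs explicit linking below.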
template<typename Op>
void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
    auto bytecode = currentInstruction->as<Op>();

    PatchableJump badType;
    JumpList slowCases;

    bool needsLinkForWriteBarrier = false;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(bytecode, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(bytecode, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(bytecode, badType);
        needsLinkForWriteBarrier = true;
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(bytecode, badType);
        needsLinkForWriteBarrier = true;
        break;
    default:
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayPutByVal(bytecode, badType, type);
        else
            slowCases = emitFloatTypedArrayPutByVal(bytecode, badType, type);
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*this, m_codeBlock);
    patchBuffer.link(badType, byValInfo->slowPathTarget);
    patchBuffer.link(slowCases, byValInfo->slowPathTarget);
    patchBuffer.link(done, byValInfo->badTypeDoneTarget);
    if (needsLinkForWriteBarrier) {
        ASSERT(removeCodePtrTag(m_calls.last().callee.executableAddress()) == removeCodePtrTag(operationWriteBarrierSlowPath));
        patchBuffer.link(m_calls.last().from, m_calls.last().callee);
    }

    bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct;
    if (!isDirect) {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
            "Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
    } else {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
            "Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
    }
    MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
}

template<typename Op>
void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
{
    ASSERT((putKind == Direct && Op::opcodeID == op_put_by_val_direct) || (putKind == NotDirect && Op::opcodeID == op_put_by_val));
    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
    auto bytecode = currentInstruction->as<Op>();

    JumpList doneCases;
    JumpList slowCases;

    JITPutByIdGenerator gen = emitPutByValWithCachedId(byValInfo, bytecode, putKind, propertyName, doneCases, slowCases);

    ConcurrentJSLocker locker(m_codeBlock->m_lock);
    LinkBuffer patchBuffer(*this, m_codeBlock);
    patchBuffer.link(slowCases, byValInfo->slowPathTarget);
    patchBuffer.link(doneCases, byValInfo->badTypeDoneTarget);
    if (!m_exceptionChecks.empty())
        patchBuffer.link(m_exceptionChecks, byValInfo->exceptionHandler);

    for (const auto& callSite : m_calls) {
        if (callSite.callee)
            patchBuffer.link(callSite.from, callSite.callee);
    }
    gen.finalize(patchBuffer, patchBuffer);

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
        "Baseline put_by_val%s with cached property name '%s' stub for %s, return point %p", (putKind == Direct) ? "_direct" : "", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value());
    byValInfo->stubInfo = gen.stubInfo();

    MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
}

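// Double arrays represent holes as NaN, so a NaN loaded from the butterfly means the
// slot is empty and the access has to take the slow path.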
JIT::JumpList JIT::emitDoubleLoad(const Instruction*, PatchableJump& badType)
{
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID indexing = regT2;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID indexing = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, indexing, TrustedImm32(DoubleShape));
    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength())));
    loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
    slowCases.append(branchIfNaN(fpRegT0));

    return slowCases;
}

JIT::JumpList JIT::emitContiguousLoad(const Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID indexing = regT2;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID indexing = regT1;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, indexing, TrustedImm32(expectedShape));
    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength())));
    loadValue(BaseIndex(scratch, property, TimesEight), result);
    slowCases.append(branchIfEmpty(result));

    return slowCases;
}

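// Accept both ArrayStorageShape and SlowPutArrayStorageShape with one unsigned compare:
// subtract the lower shape value, then a single Above branch covers the whole range.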
JIT::JumpList JIT::emitArrayStorageLoad(const Instruction*, PatchableJump& badType)
{
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID indexing = regT2;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID indexing = regT1;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    add32(TrustedImm32(-ArrayStorageShape), indexing, scratch);
    badType = patchableBranch32(Above, scratch, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, ArrayStorage::vectorLengthOffset())));

    loadValue(BaseIndex(scratch, property, TimesEight, ArrayStorage::vectorOffset()), result);
    slowCases.append(branchIfEmpty(result));

    return slowCases;
}

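// DirectArguments fast path. This is only valid while the index is within the length
// and no argument has been overridden (mappedArguments is null); otherwise bail.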
JIT::JumpList JIT::emitDirectArgumentsGetByVal(const Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#endif

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType));

    load32(Address(base, DirectArguments::offsetOfLength()), scratch2);
    slowCases.append(branch32(AboveOrEqual, property, scratch2));
    slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfMappedArguments())));

    loadValue(BaseIndex(base, property, TimesEight, DirectArguments::storageOffset()), result);

    return slowCases;
}

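// ScopedArguments keep named arguments in the JSLexicalEnvironment (reached through the
// table's ScopeOffsets) and overflow arguments in a separate storage vector, so the
// lookup forks on the table length.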
JIT::JumpList JIT::emitScopedArgumentsGetByVal(const Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
    RegisterID scratch3 = regT5;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
    RegisterID scratch3 = regT5;
#endif

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(ScopedArgumentsType));
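    // The storage, table, and scope pointers are stored poisoned as a Spectre-style
    // hardening measure; XOR with the poison key to decode each one before use.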
    loadPtr(Address(base, ScopedArguments::offsetOfStorage()), scratch3);
    xorPtr(TrustedImmPtr(ScopedArgumentsPoison::key()), scratch3);
    slowCases.append(branch32(AboveOrEqual, property, Address(scratch3, ScopedArguments::offsetOfTotalLengthInStorage())));

    loadPtr(Address(base, ScopedArguments::offsetOfTable()), scratch);
    xorPtr(TrustedImmPtr(ScopedArgumentsPoison::key()), scratch);
    load32(Address(scratch, ScopedArgumentsTable::offsetOfLength()), scratch2);
    Jump overflowCase = branch32(AboveOrEqual, property, scratch2);
    loadPtr(Address(base, ScopedArguments::offsetOfScope()), scratch2);
    xorPtr(TrustedImmPtr(ScopedArgumentsPoison::key()), scratch2);
    loadPtr(Address(scratch, ScopedArgumentsTable::offsetOfArguments()), scratch);
    load32(BaseIndex(scratch, property, TimesFour), scratch);
    slowCases.append(branch32(Equal, scratch, TrustedImm32(ScopeOffset::invalidOffset)));
    loadValue(BaseIndex(scratch2, scratch, TimesEight, JSLexicalEnvironment::offsetOfVariables()), result);
    Jump done = jump();
    overflowCase.link(this);
    sub32(property, scratch2);
    neg32(scratch2);
    loadValue(BaseIndex(scratch3, scratch2, TimesEight), result);
    slowCases.append(branchIfEmpty(result));
    done.link(this);

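    // Index-masking mitigation: AND the loaded payload with a precise index mask so a
    // bounds-check-bypassing speculative read cannot leak its result.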
    load32(Address(scratch3, ScopedArguments::offsetOfTotalLengthInStorage()), scratch);
    emitPreparePreciseIndexMask32(property, scratch, scratch2);
    andPtr(scratch2, result.payloadGPR());

    return slowCases;
}

JIT::JumpList JIT::emitIntTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isInt(type));

    // Test the array type via the cell's typeInfoType. This has to be done without
    // clobbering the registers that hold the base and the property.

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#endif
    RegisterID resultPayload = result.payloadGPR();

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch);
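    // Gigacage hardening: conditionally confine the vector pointer to the primitive
    // cage so a corrupted pointer cannot address memory outside it.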
    cageConditionally(Gigacage::Primitive, scratch, scratch2);

    switch (elementSize(type)) {
    case 1:
        if (JSC::isSigned(type))
            load8SignedExtendTo32(BaseIndex(scratch, property, TimesOne), resultPayload);
        else
            load8(BaseIndex(scratch, property, TimesOne), resultPayload);
        break;
    case 2:
        if (JSC::isSigned(type))
            load16SignedExtendTo32(BaseIndex(scratch, property, TimesTwo), resultPayload);
        else
            load16(BaseIndex(scratch, property, TimesTwo), resultPayload);
        break;
    case 4:
        load32(BaseIndex(scratch, property, TimesFour), resultPayload);
        break;
    default:
        CRASH();
    }

    Jump done;
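    // A Uint32 value with the sign bit set does not fit in an int32 JSValue; recover
    // the unsigned value by adding 2^32 after the int32-to-double conversion, and box
    // the result as a double.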
    if (type == TypeUint32) {
        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));

        convertInt32ToDouble(resultPayload, fpRegT0);
        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
        boxDouble(fpRegT0, result);
        done = jump();
        canBeInt.link(this);
    }

    boxInt32(resultPayload, result);
    if (done.isSet())
        done.link(this);
    return slowCases;
}

JIT::JumpList JIT::emitFloatTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isFloat(type));

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch);
    cageConditionally(Gigacage::Primitive, scratch, scratch2);

    switch (elementSize(type)) {
    case 4:
        loadFloat(BaseIndex(scratch, property, TimesFour), fpRegT0);
        convertFloatToDouble(fpRegT0, fpRegT0);
        break;
    case 8: {
        loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
        break;
    }
    default:
        CRASH();
    }

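    // Canonicalize any NaN read from the array: arbitrary NaN bit patterns could
    // otherwise collide with the boxed-JSValue encoding.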
    purifyNaN(fpRegT0);

    boxDouble(fpRegT0, result);
    return slowCases;
}

template<typename Op>
JIT::JumpList JIT::emitIntTypedArrayPutByVal(Op bytecode, PatchableJump& badType, TypedArrayType type)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ASSERT(isInt(type));

    int value = bytecode.m_value.offset();

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
    RegisterID lateScratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
    RegisterID lateScratch2 = regT4;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    slowCases.append(jump());
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    slowCases.append(branchIfNotInt32(earlyScratch));
#else
    emitLoad(value, lateScratch, earlyScratch);
    slowCases.append(branchIfNotInt32(lateScratch));
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
    cageConditionally(Gigacage::Primitive, lateScratch, lateScratch2);

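    // Uint8ClampedArray clamps to [0, 255] instead of truncating: in-range values skip
    // ahead, negative values become 0 (the xor), and values above 0xff become 0xff.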
    if (isClamped(type)) {
        ASSERT(elementSize(type) == 1);
        ASSERT(!JSC::isSigned(type));
        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
        xor32(earlyScratch, earlyScratch);
        Jump clamped = jump();
        tooBig.link(this);
        move(TrustedImm32(0xff), earlyScratch);
        clamped.link(this);
        inBounds.link(this);
    }

    switch (elementSize(type)) {
    case 1:
        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
        break;
    case 2:
        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
        break;
    case 4:
        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
        break;
    default:
        CRASH();
    }

    return slowCases;
}

template<typename Op>
JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Op bytecode, PatchableJump& badType, TypedArrayType type)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ASSERT(isFloat(type));

    int value = bytecode.m_value.offset();

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
    RegisterID lateScratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
    RegisterID lateScratch2 = regT4;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    slowCases.append(jump());
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    Jump doubleCase = branchIfNotInt32(earlyScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branchIfNotNumber(earlyScratch));
    add64(tagTypeNumberRegister, earlyScratch);
    move64ToDouble(earlyScratch, fpRegT0);
    ready.link(this);
#else
    emitLoad(value, lateScratch, earlyScratch);
    Jump doubleCase = branchIfNotInt32(lateScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
    ready.link(this);
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
    cageConditionally(Gigacage::Primitive, lateScratch, lateScratch2);

    switch (elementSize(type)) {
    case 4:
        convertDoubleToFloat(fpRegT0, fpRegT0);
        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
        break;
    case 8:
        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
        break;
    default:
        CRASH();
    }

    return slowCases;
}

template void JIT::emit_op_put_by_val<OpPutByVal>(const Instruction*);

} // namespace JSC

#endif // ENABLE(JIT)