/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AssemblyHelpers_h
#define AssemblyHelpers_h

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "JITCode.h"
#include "MacroAssembler.h"
#include "VM.h"

namespace JSC {

typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);

class AssemblyHelpers : public MacroAssembler {
public:
    AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
        : m_vm(vm)
        , m_codeBlock(codeBlock)
        , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
    {
        if (m_codeBlock) {
            ASSERT(m_baselineCodeBlock);
            ASSERT(!m_baselineCodeBlock->alternative());
            ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
        }
    }

    CodeBlock* codeBlock() { return m_codeBlock; }
    VM* vm() { return m_vm; }
    AssemblerType_T& assembler() { return m_assembler; }

    void checkStackPointerAlignment()
    {
        // This check is both unneeded and harder to write correctly for ARM64.
#if !defined(NDEBUG) && !CPU(ARM64)
        Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf));
        breakpoint();
        stackPointerAligned.link(this);
#endif
    }
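    // Note: the 0xf test reflects the common ABI requirement (e.g. x86-64 System V
    // and ARM AAPCS64) that the stack pointer be 16-byte aligned at call sites; a
    // misaligned stack pointer trips the breakpoint in debug builds.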

#if CPU(X86_64) || CPU(X86)
    size_t prologueStackPointerDelta()
    {
        // Prologue only saves the framePointerRegister.
        return sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        push(framePointerRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        pop(framePointerRegister);
    }

    void preserveReturnAddressAfterCall(GPRReg reg)
    {
        pop(reg);
    }

    void restoreReturnAddressBeforeReturn(GPRReg reg)
    {
        push(reg);
    }

    void restoreReturnAddressBeforeReturn(Address address)
    {
        push(address);
    }
#endif // CPU(X86_64) || CPU(X86)

#if CPU(ARM) || CPU(ARM64)
    size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and linkRegister.
        return 2 * sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        pushPair(framePointerRegister, linkRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        popPair(framePointerRegister, linkRegister);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(linkRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, linkRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, linkRegister);
    }
#endif

#if CPU(MIPS)
    size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and returnAddressRegister.
        return 2 * sizeof(void*);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(returnAddressRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, returnAddressRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, returnAddressRegister);
    }
#endif

#if CPU(SH4)
    size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and link register.
        return 2 * sizeof(void*);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        m_assembler.stspr(reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        m_assembler.ldspr(reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtrLinkReg(address);
    }
#endif

    void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
    }
    void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to);
    }
    void emitPutCallerFrameToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()));
    }

    void emitPutReturnPCToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }
    void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }

    // emitPutToCallFrameHeaderBeforePrologue() and related functions access the
    // callee's frame header fields before the code from emitFunctionPrologue() has
    // executed. First, because the frame pointer is not yet set up, the access must
    // go through the stack pointer. Second, the address calculation must take into
    // account that the stack pointer may not yet have been adjusted down for the
    // return PC and/or the caller's frame pointer: on some platforms, the callee is
    // responsible for pushing the "link register" containing the return address in
    // the function prologue.
#if USE(JSVALUE64)
    void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
    }
#else
    void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    }

    void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    }
#endif
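    // Illustrative use (a sketch, not a call site from this file): a thunk that
    // fills in the callee slot before emitFunctionPrologue() runs could emit, on
    // a JSVALUE64 platform:
    //     jit.emitPutToCallFrameHeaderBeforePrologue(calleeGPR, JSStack::Callee);
    // where calleeGPR is a hypothetical register holding the callee.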

    Jump branchIfNotCell(GPRReg reg)
    {
#if USE(JSVALUE64)
        return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
#else
        return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
    }

    static Address addressForByteOffset(ptrdiff_t byteOffset)
    {
        return Address(GPRInfo::callFrameRegister, byteOffset);
    }
    static Address addressFor(VirtualRegister virtualRegister, GPRReg baseReg)
    {
        ASSERT(virtualRegister.isValid());
        return Address(baseReg, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(int operand)
    {
        return addressFor(static_cast<VirtualRegister>(operand));
    }

    static Address tagFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset);
    }
    static Address tagFor(int operand)
    {
        return tagFor(static_cast<VirtualRegister>(operand));
    }

    static Address payloadFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
    }
    static Address payloadFor(int operand)
    {
        return payloadFor(static_cast<VirtualRegister>(operand));
    }

    Jump branchIfCellNotObject(GPRReg cellReg)
    {
        return branch8(Below, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
    }

    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
    {
        if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
            return GPRInfo::regT0;

        if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
            return GPRInfo::regT1;

        if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
            return GPRInfo::regT2;

        if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
            return GPRInfo::regT3;

        return GPRInfo::regT4;
    }
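    // For example: there are five candidate registers (regT0-regT4) and at most
    // four to preserve, so a free temporary always exists. A typical call:
    //     GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);
    // returns a register guaranteed to be distinct from both arguments (this
    // mirrors the use inside debugCall() below).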

    // Add a debug call. This call has no effect on JIT code execution state.
    void debugCall(V_DebugOperation_EPP function, void* argument)
    {
        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
        ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            store64(GPRInfo::toRegister(i), buffer + i);
#else
            store32(GPRInfo::toRegister(i), buffer + i);
#endif
        }

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
        }

        // Tell the GC mark phase how much of the scratch buffer is active during the call.
        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);

#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
        move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
        move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
#elif CPU(X86)
        poke(GPRInfo::callFrameRegister, 0);
        poke(TrustedImmPtr(argument), 1);
        poke(TrustedImmPtr(buffer), 2);
        GPRReg scratch = GPRInfo::regT0;
#else
#error "JIT not supported on this platform."
#endif
        move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
        call(scratch);

        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(0), GPRInfo::regT0);

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
        }
        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            load64(buffer + i, GPRInfo::toRegister(i));
#else
            load32(buffer + i, GPRInfo::toRegister(i));
#endif
        }
    }
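    // Illustrative use (a sketch; the helper below is hypothetical, not part of
    // this file): to trace every pass through a point in generated code, define
    // a C function matching V_DebugOperation_EPP,
    //     static void debugPrintHit(ExecState*, void* argument, void*) { dataLogF("hit %p\n", argument); }
    // and emit:
    //     jit.debugCall(debugPrintHit, someContextPointer);
    // All GPRs and FPRs are saved before the call and restored afterwards, so the
    // call does not perturb the surrounding register state.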

    // These methods JIT-generate dynamic, debug-only checks, akin to ASSERTs.
#if !ASSERT_DISABLED
    void jitAssertIsInt32(GPRReg);
    void jitAssertIsJSInt32(GPRReg);
    void jitAssertIsJSNumber(GPRReg);
    void jitAssertIsJSDouble(GPRReg);
    void jitAssertIsCell(GPRReg);
    void jitAssertHasValidCallFrame();
    void jitAssertIsNull(GPRReg);
    void jitAssertTagsInPlace();
    void jitAssertArgumentCountSane();
#else
    void jitAssertIsInt32(GPRReg) { }
    void jitAssertIsJSInt32(GPRReg) { }
    void jitAssertIsJSNumber(GPRReg) { }
    void jitAssertIsJSDouble(GPRReg) { }
    void jitAssertIsCell(GPRReg) { }
    void jitAssertHasValidCallFrame() { }
    void jitAssertIsNull(GPRReg) { }
    void jitAssertTagsInPlace() { }
    void jitAssertArgumentCountSane() { }
#endif

    Jump genericWriteBarrier(GPRReg owner)
    {
        return branchTest8(Zero, Address(owner, JSCell::gcDataOffset()));
    }
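    // The jump above is taken when the owner's gc-data byte is zero. A sketch of
    // the intended use, assuming (not verified in this file) that zero means the
    // barrier slow path must run:
    //     Jump needsBarrier = jit.genericWriteBarrier(ownerGPR);
    //     // ... emit the slow path for needsBarrier out of line ...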

    // These methods convert between raw doubles and doubles boxed as JSValues.
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        moveDoubleTo64(fpr, gpr);
        sub64(GPRInfo::tagTypeNumberRegister, gpr);
        jitAssertIsJSDouble(gpr);
        return gpr;
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        jitAssertIsJSDouble(gpr);
        add64(GPRInfo::tagTypeNumberRegister, gpr);
        move64ToDouble(gpr, fpr);
        return fpr;
    }
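    // For reference (a summary of the JSVALUE64 number encoding, inferred from the
    // code above): boxing subtracts TagTypeNumber from the raw double bits, which
    // modulo 2^64 is the same as adding 2^48. That offsets every double bit
    // pattern away from the encodings used for cell pointers and int32s, so a
    // single 64-bit add or subtract suffices to box or unbox a double.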

    // Here are possible arrangements of source, target, scratch:
    // - source, target, scratch can all be separate registers.
    // - source and target can be the same but scratch is separate.
    // - target and scratch can be the same but source is separate.
    void boxInt52(GPRReg source, GPRReg target, GPRReg scratch, FPRReg fpScratch)
    {
        // Is it an int32?
        signExtend32ToPtr(source, scratch);
        Jump isInt32 = branch64(Equal, source, scratch);

        // It's not an int32, but source contains the full int64 value.
        convertInt64ToDouble(source, fpScratch);
        boxDouble(fpScratch, target);
        Jump done = jump();

        isInt32.link(this);
        zeroExtend32ToPtr(source, target);
        or64(GPRInfo::tagTypeNumberRegister, target);

        done.link(this);
    }
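    // For example (illustrative only), the "source and target the same" arrangement
    // above boxes in place using one extra GPR and FPR:
    //     jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);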
#endif

#if USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        moveDoubleToInts(fpr, payloadGPR, tagGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
    }
#endif

    enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
    Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck)
    {
#if USE(JSVALUE64)
        return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException()));
#elif USE(JSVALUE32_64)
        return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(vm()->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
#endif
    }

#if ENABLE(SAMPLING_COUNTERS)
    static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
    void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
#endif

#if ENABLE(SAMPLING_FLAGS)
    void setSamplingFlag(int32_t);
    void clearSamplingFlag(int32_t flag);
#endif

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return codeBlock()->globalObjectFor(codeOrigin);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return codeBlock()->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    ExecutableBase* executableFor(const CodeOrigin& codeOrigin);

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return baselineCodeBlock();
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlock()
    {
        return m_baselineCodeBlock;
    }

    VirtualRegister baselineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return baselineCodeBlock()->argumentsRegister();

        return VirtualRegister(baselineCodeBlockForInlineCallFrame(
            inlineCallFrame)->argumentsRegister().offset() + inlineCallFrame->stackOffset);
    }

    VirtualRegister baselineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return baselineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockFor(codeOrigin)->symbolTable();
    }

    int offsetOfLocals(const CodeOrigin& codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return 0;
        return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register);
    }

    int offsetOfArgumentsIncludingThis(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register);
        if (inlineCallFrame->arguments.size() <= 1)
            return 0;
        ValueRecovery recovery = inlineCallFrame->arguments[1];
        RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
        return (recovery.virtualRegister().offset() - 1) * sizeof(Register);
    }

    int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin)
    {
        return offsetOfArgumentsIncludingThis(codeOrigin.inlineCallFrame);
    }

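    // On 64-bit platforms, a JSCell stores a 32-bit StructureID instead of a full
    // Structure pointer. The ID indexes (scaled by pointer size, hence TimesEight)
    // into the VM's StructureIDTable, which yields the Structure*. On 32-bit
    // platforms the cell still stores the Structure* directly, so no table lookup
    // is needed.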
    void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
        loadPtr(vm()->heap.structureIDTable().base(), scratch);
        loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
#else
        UNUSED_PARAM(scratch);
        loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
#endif
    }

    static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
        jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch);
        jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
#else
        UNUSED_PARAM(scratch);
        jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
#endif
    }

    void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
    {
        emitStoreStructureWithTypeInfo(*this, structure, dest);
    }

    void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch);
        store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#else
        // Store all the info flags using a single 32-bit wide load and store.
        load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch);
        store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));

        // Store the StructureID.
        storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#endif
    }

    static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest)
    {
        const Structure* structurePtr = static_cast<const Structure*>(structure.m_value);
#if USE(JSVALUE64)
        jit.store64(TrustedImm64(structurePtr->idBlob()), MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#ifndef NDEBUG
        Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id()));
        jit.breakpoint();
        correctStructure.link(&jit);

        Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()), TrustedImm32(structurePtr->indexingType()));
        jit.breakpoint();
        correctIndexingType.link(&jit);

        Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type()));
        jit.breakpoint();
        correctType.link(&jit);

        Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags()));
        jit.breakpoint();
        correctFlags.link(&jit);
#endif
#else
        // Do a 32-bit wide store to initialize the cell's fields.
        jit.store32(TrustedImm32(structurePtr->objectInitializationBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));
        jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#endif
    }
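    // Note on the 64-bit path above: Structure::idBlob() packs the 32-bit
    // StructureID together with the indexing type, cell type, and inline type
    // flags into a single 64-bit word, so one store64 initializes the whole
    // JSCell header. The !NDEBUG checks then verify that each field landed at
    // its expected offset.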

    void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
    {
        UNUSED_PARAM(owner);
        UNUSED_PARAM(scratch1);
        UNUSED_PARAM(scratch2);
        UNUSED_PARAM(useKind);
        ASSERT(owner != scratch1);
        ASSERT(owner != scratch2);
        ASSERT(scratch1 != scratch2);

#if ENABLE(WRITE_BARRIER_PROFILING)
        emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif
    }

    Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);

protected:
    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_baselineCodeBlock;

    HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset>> m_decodedCodeMaps;
};

} // namespace JSC

#endif // ENABLE(JIT)

#endif // AssemblyHelpers_h