AssemblyHelpers should not have a VM field
Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
index 3cf8d2d..a117b20 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -196,11 +196,15 @@ public:
         m_proc.addFastConstant(m_tagTypeNumber->key());
         m_proc.addFastConstant(m_tagMask->key());
         
-        m_out.storePtr(m_out.constIntPtr(codeBlock()), addressFor(CallFrameSlot::codeBlock));
+        // We don't want the CodeBlock to have a weak pointer to itself because
+        // that would cause it to always get collected.
+        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));
+
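+        // AssemblyHelpers no longer carries a VM pointer, so fetch the VM once here
+        // and pass it explicitly to the helper calls and generator lambdas below.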
+        VM* vm = &this->vm();
 
         // Stack Overflow Check.
         unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
-        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm().addressOfSoftStackLimit());
+        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
         PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
         CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
         stackOverflowHandler->appendSomeRegister(m_callFrame);
@@ -224,16 +228,16 @@ public:
                     jit.store32(
                         MacroAssembler::TrustedImm32(callSiteIndex.bits()),
                         CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
-                    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+                    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm);
 
                     jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
                     jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
                     CCallHelpers::Call throwCall = jit.call();
 
-                    jit.move(CCallHelpers::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
+                    jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
                     jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
                     CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
-                    jit.jumpToExceptionHandler();
+                    jit.jumpToExceptionHandler(*vm);
 
                     jit.addLinkTask(
                         [=] (LinkBuffer& linkBuffer) {
@@ -255,6 +259,7 @@ public:
         m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
         for (unsigned i = codeBlock()->numParameters(); i--;) {
             Node* node = m_graph.m_arguments[i];
+            m_out.setOrigin(node);
             VirtualRegister operand = virtualRegisterForArgument(i);
             
             LValue jsValue = m_out.load64(addressFor(operand));
@@ -624,7 +629,7 @@ private:
             compilePutStructure();
             break;
         case TryGetById:
-            compileGetById(AccessType::GetPure);
+            compileGetById(AccessType::TryGet);
             break;
         case GetById:
         case GetByIdFlush:
@@ -706,6 +711,9 @@ private:
         case ArrayPop:
             compileArrayPop();
             break;
+        case ArraySlice:
+            compileArraySlice();
+            break;
         case CreateActivation:
             compileCreateActivation();
             break;
@@ -955,6 +963,9 @@ private:
         case IsTypedArrayView:
             compileIsTypedArrayView();
             break;
+        case ParseInt:
+            compileParseInt();
+            break;
         case TypeOf:
             compileTypeOf();
             break;
@@ -1013,8 +1024,9 @@ private:
         case MaterializeCreateActivation:
             compileMaterializeCreateActivation();
             break;
-        case CheckWatchdogTimer:
-            compileCheckWatchdogTimer();
+        case CheckTraps:
+            if (Options::usePollingTraps())
+                compileCheckTraps();
             break;
         case CreateRest:
             compileCreateRest();
@@ -1068,6 +1080,9 @@ private:
         case ToLowerCase:
             compileToLowerCase();
             break;
+        case NumberToStringWithRadix:
+            compileNumberToStringWithRadix();
+            break;
         case CheckDOM:
             compileCheckDOM();
             break;
@@ -1557,7 +1572,7 @@ private:
         m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
 
         m_out.appendTo(slowCase, continuation);
-        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationObjectConstructor), m_callFrame, m_out.constIntPtr(globalObject), value));
+        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationObjectConstructor), m_callFrame, weakPointer(globalObject), value));
         m_out.jump(continuation);
 
         m_out.appendTo(continuation, lastNext);
@@ -2551,7 +2566,8 @@ private:
             LValue value = lowInt52(m_node->child1());
             CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
             blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
-            speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
+            if (shouldCheckNegativeZero(m_node->arithMode()))
+                speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
             setInt52(result);
             break;
         }
@@ -2661,7 +2677,7 @@ private:
             checkStructure(
                 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
                 exitKind, m_node->structureSet(),
-                [&] (Structure* structure) {
+                [&] (RegisteredStructure structure) {
                     return weakStructureID(structure);
                 });
             return;
@@ -2681,7 +2697,7 @@ private:
             checkStructure(
                 m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
                 exitKind, m_node->structureSet(),
-                [&] (Structure* structure) {
+                [&] (RegisteredStructure structure) {
                     return weakStructureID(structure);
                 });
             m_out.jump(continuation);
@@ -2795,8 +2811,8 @@ private:
     {
         m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
         
-        Structure* oldStructure = m_node->transition()->previous;
-        Structure* newStructure = m_node->transition()->next;
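+        // RegisteredStructure wraps a Structure* that has been registered with the
+        // DFG graph, which tracks it as a weak reference for the compilation.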
+        RegisteredStructure oldStructure = m_node->transition()->previous;
+        RegisteredStructure newStructure = m_node->transition()->next;
         ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
         ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
         ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
@@ -2809,7 +2825,7 @@ private:
     
     void compileGetById(AccessType type)
     {
-        ASSERT(type == AccessType::Get || type == AccessType::GetPure);
+        ASSERT(type == AccessType::Get || type == AccessType::TryGet);
         switch (m_node->child1().useKind()) {
         case CellUse: {
             setJSValue(getById(lowCell(m_node->child1()), type));
@@ -2859,10 +2875,40 @@ private:
 
     void compileGetByIdWithThis()
     {
-        LValue base = lowJSValue(m_node->child1());
-        LValue thisValue = lowJSValue(m_node->child2());
-        LValue result = vmCall(Int64, m_out.operation(operationGetByIdWithThis), m_callFrame, base, thisValue, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
-        setJSValue(result);
+        if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
+            setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
+        else {
+            LValue base = lowJSValue(m_node->child1());
+            LValue thisValue = lowJSValue(m_node->child2());
+            
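+            // The inline-cache path (getByIdWithThis) requires both base and this to
+            // be cells; when either might not be, branch to a generic slow-path call.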
+            LBasicBlock baseCellCase = m_out.newBlock();
+            LBasicBlock notCellCase = m_out.newBlock();
+            LBasicBlock thisValueCellCase = m_out.newBlock();
+            LBasicBlock continuation = m_out.newBlock();
+            
+            m_out.branch(
+                isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));
+            
+            LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);
+            
+            m_out.branch(
+                isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));
+            
+            m_out.appendTo(thisValueCellCase, notCellCase);
+            ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
+            m_out.jump(continuation);
+
+            m_out.appendTo(notCellCase, continuation);
+            ValueFromBlock notCellResult = m_out.anchor(vmCall(
+                Int64, m_out.operation(operationGetByIdWithThis),
+                m_callFrame, base, thisValue,
+                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
+            m_out.jump(continuation);
+            
+            m_out.appendTo(continuation, lastNext);
+            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
+        }
     }
 
     void compileGetByValWithThis()
@@ -3140,7 +3186,7 @@ private:
             LValue arguments = lowCell(m_node->child1());
             speculate(
                 ExoticObjectMode, noValue(), nullptr,
-                m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_overrides)));
+                m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_mappedArguments)));
             setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
             return;
         }
@@ -3292,7 +3338,7 @@ private:
             
             speculate(
                 ExoticObjectMode, noValue(), nullptr,
-                m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_overrides)));
+                m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_mappedArguments)));
             speculate(
                 ExoticObjectMode, noValue(), nullptr,
                 m_out.aboveOrEqual(
@@ -3860,6 +3906,75 @@ private:
             return;
         }
     }
+
+    void compileArraySlice()
+    {
+        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+        LValue sourceStorage = lowStorage(m_node->numChildren() == 3 ? m_graph.varArgChild(m_node, 2) : m_graph.varArgChild(m_node, 3));
+        LValue inputLength = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);
+
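+        // Normalize start/end the way Array.prototype.slice does. In scalar form,
+        // the selects emitted below compute:
+        //   clamp(i, len) = i >= 0 ? min(i, len) : max(len + i, 0)
+        // e.g. slice(-2) on a length-5 array starts copying at index 3.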
+        LValue endBoundary;
+        if (m_node->numChildren() == 3)
+            endBoundary = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);
+        else {
+            endBoundary = lowInt32(m_graph.varArgChild(m_node, 2));
+            endBoundary = m_out.select(m_out.greaterThanOrEqual(endBoundary, m_out.constInt32(0)),
+                m_out.select(m_out.above(endBoundary, inputLength), inputLength, endBoundary),
+                m_out.select(m_out.lessThan(m_out.add(inputLength, endBoundary), m_out.constInt32(0)), m_out.constInt32(0), m_out.add(inputLength, endBoundary)));
+        }
+
+        LValue startIndex = lowInt32(m_graph.varArgChild(m_node, 1));
+        startIndex = m_out.select(m_out.greaterThanOrEqual(startIndex, m_out.constInt32(0)),
+            m_out.select(m_out.above(startIndex, inputLength), inputLength, startIndex),
+            m_out.select(m_out.lessThan(m_out.add(inputLength, startIndex), m_out.constInt32(0)), m_out.constInt32(0), m_out.add(inputLength, startIndex)));
+
+        LValue resultLength = m_out.select(m_out.below(startIndex, endBoundary),
+            m_out.sub(endBoundary, startIndex),
+            m_out.constInt32(0));
+
+        ArrayValues arrayResult;
+        {
+            LValue indexingType = m_out.load8ZeroExt32(lowCell(m_graph.varArgChild(m_node, 0)), m_heaps.JSCell_indexingTypeAndMisc);
+            indexingType = m_out.bitAnd(indexingType, m_out.constInt32(AllArrayTypesAndHistory));
+            // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
+            // to ensure the incoming array is one of the original array structures
+            // with one of the following indexing shapes: Int32, Contiguous, Double.
+            LValue structure = m_out.select(
+                m_out.equal(indexingType, m_out.constInt32(ArrayWithInt32)),
+                weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithInt32))),
+                m_out.select(m_out.equal(indexingType, m_out.constInt32(ArrayWithContiguous)),
+                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous))),
+                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithDouble)))));
+            arrayResult = allocateJSArray(resultLength, structure, indexingType, false, false);
+        }
+
+        LBasicBlock loop = m_out.newBlock();
+        LBasicBlock continuation = m_out.newBlock();
+
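+        // Copy loop: loadIndex walks the source butterfly starting at startIndex,
+        // storeIndex walks the freshly allocated butterfly from zero. Skip the loop
+        // entirely when resultLength is zero.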
+        resultLength = m_out.zeroExtPtr(resultLength);
+        ValueFromBlock startLoadIndex = m_out.anchor(m_out.zeroExtPtr(startIndex));
+        ValueFromBlock startStoreIndex = m_out.anchor(m_out.constIntPtr(0));
+
+        m_out.branch(
+            m_out.below(m_out.constIntPtr(0), resultLength), unsure(loop), unsure(continuation));
+
+        LBasicBlock lastNext = m_out.appendTo(loop, continuation);
+        LValue storeIndex = m_out.phi(pointerType(), startStoreIndex);
+        LValue loadIndex = m_out.phi(pointerType(), startLoadIndex);
+        LValue value = m_out.load64(m_out.baseIndex(m_heaps.root, sourceStorage, loadIndex, ScaleEight));
+        m_out.store64(value, m_out.baseIndex(m_heaps.root, arrayResult.butterfly, storeIndex, ScaleEight));
+        LValue nextStoreIndex = m_out.add(storeIndex, m_out.constIntPtr(1));
+        m_out.addIncomingToPhi(storeIndex, m_out.anchor(nextStoreIndex));
+        m_out.addIncomingToPhi(loadIndex, m_out.anchor(m_out.add(loadIndex, m_out.constIntPtr(1))));
+        m_out.branch(
+            m_out.below(nextStoreIndex, resultLength), unsure(loop), unsure(continuation));
+
+        m_out.appendTo(continuation, lastNext);
+
+        mutatorFence();
+        setJSValue(arrayResult.array);
+    }
     
     void compileArrayPop()
     {
@@ -3922,13 +4037,13 @@ private:
     {
         LValue scope = lowCell(m_node->child1());
         SymbolTable* table = m_node->castOperand<SymbolTable*>();
-        Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure();
+        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
         JSValue initializationValue = m_node->initializationValueForActivation();
         ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue());
         if (table->singletonScope()->isStillValid()) {
             LValue callResult = vmCall(
                 Int64,
-                m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure),
+                m_out.operation(operationCreateActivationDirect), m_callFrame, weakStructure(structure),
                 scope, weakPointer(table), m_out.constInt64(JSValue::encode(initializationValue)));
             setJSValue(callResult);
             return;
@@ -3959,11 +4074,12 @@ private:
         m_out.jump(continuation);
         
         m_out.appendTo(slowPath, continuation);
+        VM& vm = this->vm();
         LValue callResult = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationCreateActivationDirect, locations[0].directGPR(),
-                    CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR(),
+                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                     CCallHelpers::TrustedImmPtr(table),
                     CCallHelpers::TrustedImm64(JSValue::encode(initializationValue)));
             },
@@ -3993,10 +4109,10 @@ private:
             return;
         }
         
-        Structure* structure =
+        RegisteredStructure structure = m_graph.registerStructure(
             isGeneratorFunction ? m_graph.globalObjectFor(m_node->origin.semantic)->generatorFunctionStructure() :
             isAsyncFunction ? m_graph.globalObjectFor(m_node->origin.semantic)->asyncFunctionStructure() :
-            m_graph.globalObjectFor(m_node->origin.semantic)->functionStructure();
+            m_graph.globalObjectFor(m_node->origin.semantic)->functionStructure());
         
         LBasicBlock slowPath = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
@@ -4023,21 +4139,22 @@ private:
 
         Vector<LValue> slowPathArguments;
         slowPathArguments.append(scope);
+        VM& vm = this->vm();
         LValue callResult = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                 if (isGeneratorFunction) {
-                    return createLazyCallGenerator(
+                    return createLazyCallGenerator(vm,
                         operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint,
                         locations[0].directGPR(), locations[1].directGPR(),
                         CCallHelpers::TrustedImmPtr(executable));
                 }
                 if (isAsyncFunction) {
-                    return createLazyCallGenerator(
+                    return createLazyCallGenerator(vm,
                         operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint,
                         locations[0].directGPR(), locations[1].directGPR(),
                         CCallHelpers::TrustedImmPtr(executable));
                 }
-                return createLazyCallGenerator(
+                return createLazyCallGenerator(vm,
                     operationNewFunctionWithInvalidatedReallocationWatchpoint,
                     locations[0].directGPR(), locations[1].directGPR(),
                     CCallHelpers::TrustedImmPtr(executable));
@@ -4056,8 +4173,8 @@ private:
         // them be explicit arguments to this node.
         // https://bugs.webkit.org/show_bug.cgi?id=142207
         
-        Structure* structure =
-            m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure();
+        RegisteredStructure structure =
+            m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure());
         
         unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;
         
@@ -4088,17 +4205,19 @@ private:
         
         m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
         m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
-        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_overrides);
+        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_mappedArguments);
+        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_modifiedArgumentsDescriptor);
         
         ValueFromBlock fastResult = m_out.anchor(fastObject);
         m_out.jump(continuation);
         
         m_out.appendTo(slowPath, continuation);
+        VM& vm = this->vm();
         LValue callResult = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationCreateDirectArguments, locations[0].directGPR(),
-                    CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR(),
+                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                     CCallHelpers::TrustedImm32(minCapacity));
             }, length.value);
         ValueFromBlock slowResult = m_out.anchor(callResult);
@@ -4186,7 +4305,7 @@ private:
             LValue arrayLength = lowInt32(m_node->child1());
             LBasicBlock loopStart = m_out.newBlock();
             JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
-            Structure* structure = globalObject->restParameterStructure();
+            RegisteredStructure structure = m_graph.registerStructure(globalObject->restParameterStructure());
             ArrayValues arrayValues = allocateUninitializedContiguousJSArray(arrayLength, structure);
             LValue array = arrayValues.array;
             LValue butterfly = arrayValues.butterfly;
@@ -4254,8 +4373,8 @@ private:
             speculate(m_graph.varArgChild(m_node, operandIndex));
         
         JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
-        Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(
-            m_node->indexingType());
+        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
+            m_node->indexingType()));
 
         if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
             unsigned numElements = m_node->numChildren();
@@ -4300,7 +4419,7 @@ private:
         if (!m_node->numChildren()) {
             setJSValue(vmCall(
                 Int64, m_out.operation(operationNewEmptyArray), m_callFrame,
-                m_out.constIntPtr(structure)));
+                weakStructure(structure)));
             return;
         }
         
@@ -4321,7 +4440,7 @@ private:
         
         LValue result = vmCall(
             Int64, m_out.operation(operationNewArray), m_callFrame,
-            m_out.constIntPtr(structure), m_out.constIntPtr(buffer),
+            weakStructure(structure), m_out.constIntPtr(buffer),
             m_out.constIntPtr(m_node->numChildren()));
         
         m_out.storePtr(m_out.intPtrZero, m_out.absolute(scratchBuffer->activeLengthPtr()));
@@ -4346,6 +4465,7 @@ private:
             for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                 if (bitVector->get(i)) {
                     Edge use = m_graph.varArgChild(m_node, i);
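+                    // Sum the spread lengths with checked adds: an Int32 overflow must
+                    // deopt rather than allocate an array with a wrapped-around length.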
+                    CheckValue* lengthCheck = nullptr;
                     if (use->op() == PhantomSpread) {
                         RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
                         InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
@@ -4353,15 +4473,17 @@ private:
                         LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                             return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
                         }).iterator->value;
-                        length = m_out.add(length, spreadLength);
+                        lengthCheck = m_out.speculateAdd(length, spreadLength);
                     } else {
                         LValue fixedArray = lowCell(use);
-                        length = m_out.add(length, m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
+                        lengthCheck = m_out.speculateAdd(length, m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
                     }
+                    blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
+                    length = lengthCheck;
                 }
             }
 
-            Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous);
+            RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous));
             ArrayValues arrayValues = allocateUninitializedContiguousJSArray(length, structure);
             LValue result = arrayValues.array;
             LValue storage = arrayValues.butterfly;
@@ -4583,8 +4705,8 @@ private:
     void compileNewArrayBuffer()
     {
         JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
-        Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(
-            m_node->indexingType());
+        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
+            m_node->indexingType()));
         
         if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
             unsigned numElements = m_node->numConstants();
@@ -4613,7 +4735,7 @@ private:
         
         setJSValue(vmCall(
             Int64, m_out.operation(operationNewArrayBuffer), m_callFrame,
-            m_out.constIntPtr(structure), m_out.constIntPtr(m_node->startConstant()),
+            weakStructure(structure), m_out.constIntPtr(m_node->startConstant()),
             m_out.constIntPtr(m_node->numConstants())));
     }
 
@@ -4622,24 +4744,22 @@ private:
         LValue publicLength = lowInt32(m_node->child1());
         
         JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
-        Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(
-            m_node->indexingType());
+        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
+            m_node->indexingType()));
         
         if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
+            IndexingType indexingType = m_node->indexingType();
             setJSValue(
                 allocateJSArray(
-                    publicLength,
-                    globalObject->arrayStructureForIndexingTypeDuringAllocation(
-                        m_node->indexingType())).array);
+                    publicLength, weakPointer(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType)), m_out.constInt32(indexingType)).array);
             mutatorFence();
             return;
         }
         
         LValue structureValue = m_out.select(
             m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
-            m_out.constIntPtr(
-                globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)),
-            m_out.constIntPtr(structure));
+            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))),
+            weakStructure(structure));
         setJSValue(vmCall(Int64, m_out.operation(operationNewArrayWithSize), m_callFrame, structureValue, publicLength, m_out.intPtrZero));
     }
 
@@ -4650,7 +4770,7 @@ private:
         
         switch (m_node->child1().useKind()) {
         case Int32Use: {
-            Structure* structure = globalObject->typedArrayStructureConcurrently(type);
+            RegisteredStructure structure = m_graph.registerStructure(globalObject->typedArrayStructureConcurrently(type));
 
             LValue size = lowInt32(m_node->child1());
 
@@ -4679,8 +4799,7 @@ private:
                     m_out.constIntPtr(~static_cast<intptr_t>(7)));
             }
         
-            LValue allocator = allocatorForSize(
-                vm().heap.subspaceForAuxiliaryData(), byteSize, slowCase);
+            LValue allocator = allocatorForSize(vm().auxiliarySpace, byteSize, slowCase);
             LValue storage = allocateHeapCell(allocator, slowCase);
             
             splatWords(
@@ -4706,11 +4825,12 @@ private:
             m_out.appendTo(slowCase, continuation);
             LValue storageValue = m_out.phi(pointerType(), noStorage, haveStorage);
 
+            VM& vm = this->vm();
             LValue slowResultValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationNewTypedArrayWithSizeForType(type), locations[0].directGPR(),
-                        CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR(),
+                        CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                         locations[2].directGPR());
                 },
                 size, storageValue);
@@ -4742,7 +4862,7 @@ private:
     void compileAllocatePropertyStorage()
     {
         LValue object = lowCell(m_node->child1());
-        setStorage(allocatePropertyStorage(object, m_node->transition()->previous));
+        setStorage(allocatePropertyStorage(object, m_node->transition()->previous.get()));
     }
 
     void compileReallocatePropertyStorage()
@@ -4753,7 +4873,7 @@ private:
         
         setStorage(
             reallocatePropertyStorage(
-                object, oldStorage, transition->previous, transition->next));
+                object, oldStorage, transition->previous.get(), transition->next.get()));
     }
     
     void compileNukeStructureAndSetButterfly()
@@ -4826,10 +4946,13 @@ private:
         }
             
         case CellUse:
+        case NotCellUse:
         case UntypedUse: {
             LValue value;
             if (m_node->child1().useKind() == CellUse)
                 value = lowCell(m_node->child1());
+            else if (m_node->child1().useKind() == NotCellUse)
+                value = lowNotCell(m_node->child1());
             else
                 value = lowJSValue(m_node->child1());
             
@@ -4840,6 +4963,8 @@ private:
             LValue isCellPredicate;
             if (m_node->child1().useKind() == CellUse)
                 isCellPredicate = m_out.booleanTrue;
+            else if (m_node->child1().useKind() == NotCellUse)
+                isCellPredicate = m_out.booleanFalse;
             else
                 isCellPredicate = this->isCell(value, provenType(m_node->child1()));
             m_out.branch(isCellPredicate, unsure(isCell), unsure(notString));
@@ -4866,6 +4991,18 @@ private:
             setJSValue(m_out.phi(Int64, simpleResult, convertedResult));
             return;
         }
+
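+        // Number-typed inputs stringify in base 10; the ...WithValidRadix operations
+        // skip the runtime radix range check since 10 is always a valid radix.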
+        case Int32Use:
+            setJSValue(vmCall(Int64, m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(10)));
+            return;
+
+        case Int52RepUse:
+            setJSValue(vmCall(Int64, m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(10)));
+            return;
+
+        case DoubleRepUse:
+            setJSValue(vmCall(Int64, m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(10)));
+            return;
             
         default:
             DFG_CRASH(m_graph, m_node, "Bad use kind");
@@ -4921,8 +5058,7 @@ private:
         
         LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
         
-        MarkedAllocator* allocator =
-            vm().heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
+        MarkedAllocator* allocator = subspaceFor<JSRopeString>(vm())->allocatorFor(sizeof(JSRopeString));
         DFG_ASSERT(m_graph, m_node, allocator);
         
         LValue result = allocateCell(
@@ -4953,19 +5089,20 @@ private:
         
         m_out.appendTo(slowPath, continuation);
         LValue slowResultValue;
+        VM& vm = this->vm();
         switch (numKids) {
         case 2:
             slowResultValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationMakeRope2, locations[0].directGPR(), locations[1].directGPR(),
                         locations[2].directGPR());
                 }, kids[0], kids[1]);
             break;
         case 3:
             slowResultValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationMakeRope3, locations[0].directGPR(), locations[1].directGPR(),
                         locations[2].directGPR(), locations[3].directGPR());
                 }, kids[0], kids[1], kids[2]);
@@ -5216,11 +5353,11 @@ private:
         LBasicBlock continuation = m_out.newBlock();
         
         Vector<SwitchCase, 2> cases;
-        StructureSet baseSet;
+        RegisteredStructureSet baseSet;
         for (unsigned i = data.cases.size(); i--;) {
             MultiGetByOffsetCase getCase = data.cases[i];
             for (unsigned j = getCase.set().size(); j--;) {
-                Structure* structure = getCase.set()[j];
+                RegisteredStructure structure = getCase.set()[j];
                 baseSet.add(structure);
                 cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
             }
@@ -5298,11 +5435,11 @@ private:
         LBasicBlock continuation = m_out.newBlock();
         
         Vector<SwitchCase, 2> cases;
-        StructureSet baseSet;
+        RegisteredStructureSet baseSet;
         for (unsigned i = data.variants.size(); i--;) {
             PutByIdVariant variant = data.variants[i];
             for (unsigned j = variant.oldStructure().size(); j--;) {
-                Structure* structure = variant.oldStructure()[j];
+                RegisteredStructure structure = m_graph.registerStructure(variant.oldStructure()[j]);
                 baseSet.add(structure);
                 cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
             }
@@ -5343,7 +5480,7 @@ private:
                 ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
                 ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type());
                 m_out.store32(
-                    weakStructureID(variant.newStructure()), base, m_heaps.JSCell_structureID);
+                    weakStructureID(m_graph.registerStructure(variant.newStructure())), base, m_heaps.JSCell_structureID);
             }
             
             m_out.jump(continuation);
@@ -5382,9 +5519,10 @@ private:
         
         LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);
 
+        VM& vm = this->vm();
         lazySlowPath(
-            [=] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationNotifyWrite, InvalidGPRReg, CCallHelpers::TrustedImmPtr(set));
             });
         m_out.jump(continuation);
@@ -5901,7 +6039,7 @@ private:
         bool isConstruct = node->op() == DirectConstruct;
         
         ExecutableBase* executable = node->castOperand<ExecutableBase*>();
-        FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(executable);
+        FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(vm(), executable);
         
         unsigned numPassedArgs = node->numChildren() - 1;
         unsigned numAllocatedArgs = numPassedArgs;
@@ -5992,6 +6130,7 @@ private:
                     }
                     for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                         shuffleData.args.append(ValueRecovery::constant(jsUndefined()));
+                    shuffleData.numPassedArgs = numPassedArgs;
                     shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
                     
                     CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
@@ -6158,6 +6297,8 @@ private:
                 for (unsigned i = 0; i < numArgs; ++i)
                     shuffleData.args.append(params[1 + i].recoveryForJSValue());
 
+                shuffleData.numPassedArgs = numArgs;
+                
                 shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
 
                 CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
@@ -6315,11 +6456,7 @@ private:
                 auto callWithExceptionCheck = [&] (void* callee) {
                     jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
                     jit.call(GPRInfo::nonPreservedNonArgumentGPR);
-                    exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
-                };
-
-                auto adjustStack = [&] (GPRReg amount) {
-                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), amount, CCallHelpers::stackPointerRegister);
+                    exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                 };
 
                 CCallHelpers::JumpList slowCase;
@@ -6341,6 +6478,9 @@ private:
                     jit.lshiftPtr(CCallHelpers::Imm32(3), scratchGPR1);
                     jit.addPtr(GPRInfo::callFrameRegister, scratchGPR1);
 
+                    // Before touching stack values, update the stack pointer so that the
+                    // values we are about to store cannot be clobbered by a signal handler
+                    // running on this stack.
+                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR1, CCallHelpers::stackPointerRegister);
+
                     jit.store32(scratchGPR2, CCallHelpers::Address(scratchGPR1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));
 
                     int storeOffset = CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register));
@@ -6387,8 +6527,6 @@ private:
                     
                     dontThrow.link(&jit);
                 }
-
-                adjustStack(scratchGPR1);
                 
                 ASSERT(calleeGPR == GPRInfo::regT0);
                 jit.store64(calleeGPR, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
@@ -6625,11 +6763,7 @@ private:
                 auto callWithExceptionCheck = [&] (void* callee) {
                     jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
                     jit.call(GPRInfo::nonPreservedNonArgumentGPR);
-                    exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
-                };
-
-                auto adjustStack = [&] (GPRReg amount) {
-                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), amount, CCallHelpers::stackPointerRegister);
+                    exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                 };
 
                 unsigned originalStackHeight = params.proc().frameSize();
@@ -6643,7 +6777,9 @@ private:
                         inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame;
                     else
                         inlineCallFrame = node->origin.semantic.inlineCallFrame;
-                    emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
+
+                    // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
+                    emitSetupVarargsFrameFastCase(state->vm(), jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
 
                     CCallHelpers::Jump done = jit.jump();
                     slowCase.link(&jit);
@@ -6652,8 +6788,6 @@ private:
                     jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
                     
                     done.link(&jit);
-
-                    adjustStack(scratchGPR2);
                 } else {
                     jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1);
                     jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
@@ -6667,7 +6801,7 @@ private:
                     jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
                     callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
                     
-                    adjustStack(GPRInfo::returnValueGPR);
+                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, CCallHelpers::stackPointerRegister);
 
                     calleeLateRep.emitRestore(jit, GPRInfo::regT0);
 
@@ -6816,7 +6950,7 @@ private:
                 jit.setupArgumentsWithExecState(GPRInfo::regT1);
                 jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR);
                 jit.call(GPRInfo::nonPreservedNonArgumentGPR);
-                exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
+                exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                 
                 CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);
                 
@@ -7673,11 +7807,11 @@ private:
         LValue index = m_out.bitAnd(mask, unmaskedIndex);
         LValue hashMapBucket = m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), buffer, m_out.zeroExt(index, Int64), ScaleEight));
         ValueFromBlock bucketResult = m_out.anchor(hashMapBucket);
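+        // emptyValue() and deletedValue() are sentinel bucket pointers; bitwise_cast
+        // them to intptr_t so they can be emitted as pointer-sized constants.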
-        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue())),
+        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue()))),
             unsure(notPresentInTable), unsure(notEmptyValue));
 
         m_out.appendTo(notEmptyValue, notDeletedValue);
-        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue())),
+        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue()))),
             unsure(loopAround), unsure(notDeletedValue));
 
         m_out.appendTo(notDeletedValue, loopAround);
@@ -7859,9 +7993,10 @@ private:
             rarely(slowPath), usually(continuation));
         
         m_out.appendTo(slowPath, notCellCase);
+        VM& vm = this->vm();
         LValue slowResultValue = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationObjectIsObject, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
             }, value);
@@ -7909,9 +8044,10 @@ private:
             rarely(slowPath), usually(continuation));
         
         m_out.appendTo(slowPath, continuation);
+        VM& vm = this->vm();
         LValue slowResultValue = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationObjectIsFunction, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
             }, value);
@@ -7965,12 +8101,12 @@ private:
     
     void compileIn()
     {
-        DFG_ASSERT(m_graph, m_node, m_node->child2().useKind() == CellUse);
+        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse);
 
         Node* node = m_node;
-        Edge base = node->child2();
+        Edge base = node->child1();
         LValue cell = lowCell(base);
-        if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
+        if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(vm())) {
             if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
                 UniquedStringImpl* str = bitwise_cast<UniquedStringImpl*>(string->tryGetValueImpl());
                 B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64);
@@ -8000,6 +8136,7 @@ private:
                         stubInfo->codeOrigin = node->origin.semantic;
                         stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
                         stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
+                        stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
                         stubInfo->patch.usedRegisters = params.unavailableRegisters();
 
                         CCallHelpers::PatchableJump jump = jit.patchableJump();
@@ -8042,7 +8179,7 @@ private:
             }
         } 
 
-        setJSValue(vmCall(Int64, m_out.operation(operationGenericIn), m_callFrame, cell, lowJSValue(m_node->child1())));
+        setJSValue(vmCall(Int64, m_out.operation(operationGenericIn), m_callFrame, cell, lowJSValue(m_node->child2())));
     }
 
     void compileHasOwnProperty()
@@ -8147,9 +8284,29 @@ private:
         setBoolean(m_out.phi(Int32, fastResult, slowResult));
     }
 
+    void compileParseInt()
+    {
+        RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse || m_node->child1().useKind() == StringUse);
+        LValue result;
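+        // Four runtime operations cover the (string vs. untyped input) x (explicit
+        // radix vs. none) matrix; each returns the parsed result as a JSValue.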
+        if (m_node->child2()) {
+            LValue radix = lowInt32(m_node->child2());
+            if (m_node->child1().useKind() == UntypedUse)
+                result = vmCall(Int64, m_out.operation(operationParseIntGeneric), m_callFrame, lowJSValue(m_node->child1()), radix);
+            else
+                result = vmCall(Int64, m_out.operation(operationParseIntString), m_callFrame, lowString(m_node->child1()), radix);
+        } else {
+            if (m_node->child1().useKind() == UntypedUse)
+                result = vmCall(Int64, m_out.operation(operationParseIntNoRadixGeneric), m_callFrame, lowJSValue(m_node->child1()));
+            else
+                result = vmCall(Int64, m_out.operation(operationParseIntStringNoRadix), m_callFrame, lowString(m_node->child1()));
+        }
+        setJSValue(result);
+    }
+
     void compileOverridesHasInstance()
     {
-        JSFunction* defaultHasInstanceFunction = jsCast<JSFunction*>(m_node->cellOperand()->value());
+        FrozenValue* defaultHasInstanceFunction = m_node->cellOperand();
+        ASSERT(defaultHasInstanceFunction->cell()->inherits(vm(), JSFunction::info()));
 
         LValue constructor = lowCell(m_node->child1());
         LValue hasInstance = lowJSValue(m_node->child2());
@@ -8160,7 +8317,7 @@ private:
         // Unlike in the DFG, we don't worry about cleaning this code up for the case where we have proven the hasInstanceValue is a constant, since B3 should do that for us.
 
         ValueFromBlock notDefaultHasInstanceResult = m_out.anchor(m_out.booleanTrue);
-        m_out.branch(m_out.notEqual(hasInstance, m_out.constIntPtr(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance));
+        m_out.branch(m_out.notEqual(hasInstance, frozenPointer(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance));
 
         LBasicBlock lastNext = m_out.appendTo(defaultHasInstance, continuation);
         ValueFromBlock implementsDefaultHasInstanceResult = m_out.anchor(m_out.testIsZero32(
@@ -8276,6 +8433,7 @@ private:
             LValue base = lowCell(m_node->child1());
             LValue index = lowInt32(m_node->child2());
             LValue storage = lowStorage(m_node->child3());
+            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
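+            // The slow path needs the node's InternalMethodType to query the property
+            // with the right semantics (e.g. HasProperty vs. GetOwnProperty).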
 
             IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
                 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
@@ -8301,7 +8459,7 @@ private:
             m_out.appendTo(slowCase, continuation);
             ValueFromBlock slowResult = m_out.anchor(m_out.equal(
                 m_out.constInt64(JSValue::encode(jsBoolean(true))), 
-                vmCall(Int64, m_out.operation(operationHasIndexedProperty), m_callFrame, base, index)));
+                vmCall(Int64, m_out.operation(operationHasIndexedProperty), m_callFrame, base, index, internalMethodType)));
             m_out.jump(continuation);
 
             m_out.appendTo(continuation, lastNext);
@@ -8312,6 +8470,7 @@ private:
             LValue base = lowCell(m_node->child1());
             LValue index = lowInt32(m_node->child2());
             LValue storage = lowStorage(m_node->child3());
+            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
             
             IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
             
@@ -8336,7 +8495,7 @@ private:
             m_out.appendTo(slowCase, continuation);
             ValueFromBlock slowResult = m_out.anchor(m_out.equal(
                 m_out.constInt64(JSValue::encode(jsBoolean(true))), 
-                vmCall(Int64, m_out.operation(operationHasIndexedProperty), m_callFrame, base, index)));
+                vmCall(Int64, m_out.operation(operationHasIndexedProperty), m_callFrame, base, index, internalMethodType)));
             m_out.jump(continuation);
             
             m_out.appendTo(continuation, lastNext);
@@ -8508,7 +8667,7 @@ private:
         LValue structure = lowCell(m_node->child1());
         checkStructure(
             structure, noValue(), BadCache, m_node->structureSet(),
-            [this] (Structure* structure) {
+            [this] (RegisteredStructure structure) {
                 return weakStructure(structure);
             });
     }
@@ -8533,7 +8692,7 @@ private:
             }
         }
         
-        const StructureSet& set = m_node->structureSet();
+        RegisteredStructureSet set = m_node->structureSet();
 
         Vector<LBasicBlock, 1> blocks(set.size());
         for (unsigned i = set.size(); i--;)
@@ -8543,7 +8702,7 @@ private:
         
         Vector<SwitchCase, 1> cases(set.size());
         for (unsigned i = set.size(); i--;)
-            cases[i] = SwitchCase(weakStructure(set[i]), blocks[i], Weight(1));
+            cases[i] = SwitchCase(weakStructure(set.at(i)), blocks[i], Weight(1));
         m_out.switchInstruction(
             lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0));
         
@@ -8554,14 +8713,14 @@ private:
         for (unsigned i = set.size(); i--;) {
             m_out.appendTo(blocks[i], i + 1 < set.size() ? blocks[i + 1] : dummyDefault);
             
-            Structure* structure = set[i];
+            RegisteredStructure structure = set.at(i);
             
             LValue object;
             LValue butterfly;
             
             if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
                 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
-                MarkedAllocator* cellAllocator = vm().heap.allocatorForObjectWithoutDestructor(allocationSize);
+                MarkedAllocator* cellAllocator = subspaceFor<JSFinalObject>(vm())->allocatorFor(allocationSize);
                 DFG_ASSERT(m_graph, m_node, cellAllocator);
 
                 bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
@@ -8601,7 +8760,7 @@ private:
                 ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
                 
                 LValue startOfStorage = allocateHeapCell(
-                    allocatorForSize(vm().heap.subspaceForAuxiliaryData(), butterflySize, slowPath),
+                    allocatorForSize(vm().auxiliarySpace, butterflySize, slowPath),
                     slowPath);
 
                 LValue fastButterflyValue = m_out.add(
@@ -8630,22 +8789,23 @@ private:
                 
                 LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
 
+                VM& vm = this->vm();
                 LValue slowObjectValue;
                 if (hasIndexingHeader) {
                     slowObjectValue = lazySlowPath(
-                        [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                            return createLazyCallGenerator(
+                        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                            return createLazyCallGenerator(vm,
                                 operationNewObjectWithButterflyWithIndexingHeaderAndVectorLength,
-                                locations[0].directGPR(), CCallHelpers::TrustedImmPtr(structure),
+                                locations[0].directGPR(), CCallHelpers::TrustedImmPtr(structure.get()),
                                 locations[1].directGPR(), locations[2].directGPR());
                         },
                         vectorLength, butterflyValue);
                 } else {
                     slowObjectValue = lazySlowPath(
-                        [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                            return createLazyCallGenerator(
+                        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                            return createLazyCallGenerator(vm,
                                 operationNewObjectWithButterfly, locations[0].directGPR(),
-                                CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR());
+                                CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR());
                         },
                         butterflyValue);
                 }
@@ -8662,7 +8822,7 @@ private:
 
                 m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
 
-                initializeArrayElements(structure->indexingType(), m_out.int32Zero, vectorLength, butterfly);
+                initializeArrayElements(m_out.constInt32(structure->indexingType()), m_out.int32Zero, vectorLength, butterfly);
 
                 HashMap<int32_t, LValue, DefaultHash<int32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<int32_t>> indexMap;
                 Vector<int32_t> indices;
@@ -8817,8 +8977,8 @@ private:
 
         LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
         SymbolTable* table = m_node->castOperand<SymbolTable*>();
-        ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>());
-        Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure();
+        ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>(vm()));
+        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
 
         LBasicBlock slowPath = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
@@ -8841,11 +9001,12 @@ private:
         // because all fields will be overwritten.
         // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that 
         // doesn't initialize every slot because we are guaranteed to do that here.
+        VM& vm = this->vm();
         LValue callResult = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationCreateActivationDirect, locations[0].directGPR(),
-                    CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR(),
+                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                     CCallHelpers::TrustedImmPtr(table),
                     CCallHelpers::TrustedImm64(JSValue::encode(jsUndefined())));
             }, scope);
@@ -8884,20 +9045,22 @@ private:
         setJSValue(activation);
     }
 
-    void compileCheckWatchdogTimer()
+    void compileCheckTraps()
     {
-        LBasicBlock timerDidFire = m_out.newBlock();
+        ASSERT(Options::usePollingTraps());
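+        // Traps are detected by polling: load the VM's "need trap handling"
+        // byte and, in the rare case it is set, call operationHandleTraps on
+        // a lazy slow path.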
+        LBasicBlock needTrapHandling = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
         
-        LValue state = m_out.load8ZeroExt32(m_out.absolute(vm().watchdog()->timerDidFireAddress()));
+        LValue state = m_out.load8ZeroExt32(m_out.absolute(vm().needTrapHandlingAddress()));
         m_out.branch(m_out.isZero32(state),
-            usually(continuation), rarely(timerDidFire));
+            usually(continuation), rarely(needTrapHandling));
 
-        LBasicBlock lastNext = m_out.appendTo(timerDidFire, continuation);
+        LBasicBlock lastNext = m_out.appendTo(needTrapHandling, continuation);
 
+        VM& vm = this->vm();
         lazySlowPath(
-            [=] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(operationHandleWatchdogTimer, InvalidGPRReg);
+            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm, operationHandleTraps, InvalidGPRReg);
             });
         m_out.jump(continuation);
         
@@ -8970,11 +9133,12 @@ private:
 
     void compileNewRegexp()
     {
-        RegExp* regexp = m_node->castOperand<RegExp*>();
+        FrozenValue* regexp = m_node->cellOperand();
+        ASSERT(regexp->cell()->inherits(vm(), RegExp::info()));
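+        // Tracking the RegExp as a FrozenValue (rather than a raw pointer)
+        // lets the compilation record keep the cell alive for as long as this
+        // code is.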
         LValue result = vmCall(
             pointerType(),
             m_out.operation(operationNewRegexp), m_callFrame,
-            m_out.constIntPtr(regexp));
+            frozenPointer(regexp));
         
         setJSValue(result);
     }
@@ -8991,7 +9155,7 @@ private:
             && m_node->child2().useKind() == RegExpObjectUse
             && m_node->child3().useKind() == StringUse) {
 
-            if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>()) {
+            if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>(vm())) {
                 if (!replace->length()) {
                     LValue string = lowString(m_node->child1());
                     LValue regExp = lowRegExpObject(m_node->child2());
@@ -9067,10 +9231,12 @@ private:
         CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(m_node->origin.semantic);
         
         m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
-        m_out.storePtr(m_out.constIntPtr(ShadowChicken::Packet::tailMarker()), packet, m_heaps.ShadowChicken_Packet_callee);
+        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(ShadowChicken::Packet::tailMarker())), packet, m_heaps.ShadowChicken_Packet_callee);
         m_out.store64(thisValue, packet, m_heaps.ShadowChicken_Packet_thisValue);
         m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
-        m_out.storePtr(m_out.constIntPtr(codeBlock()), packet, m_heaps.ShadowChicken_Packet_codeBlock);
+        // We don't want the CodeBlock to have a weak pointer to itself because
+        // that would cause it to always get collected.
+        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), packet, m_heaps.ShadowChicken_Packet_codeBlock);
         m_out.store32(m_out.constInt32(callSiteIndex.bits()), packet, m_heaps.ShadowChicken_Packet_callSiteIndex);
     }
 
@@ -9161,7 +9327,7 @@ private:
     template<typename Functor>
     void checkStructure(
         LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind,
-        const StructureSet& set, const Functor& weakStructureDiscriminant)
+        RegisteredStructureSet set, const Functor& weakStructureDiscriminant)
     {
         if (set.isEmpty()) {
             terminate(exitKind);
@@ -9296,16 +9462,18 @@ private:
             speculate(BadType, jsValueValue(value), edge.node(), isNotSymbol(value, provenType(edge)));
             return;
 
-        case InferredType::ObjectWithStructure:
+        case InferredType::ObjectWithStructure: {
+            RegisteredStructure structure = m_graph.registerStructure(type.structure());
             speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge)));
-            if (!abstractValue(edge).m_structure.isSubsetOf(StructureSet(type.structure()))) {
+            if (!abstractValue(edge).m_structure.isSubsetOf(RegisteredStructureSet(structure))) {
                 speculate(
                     BadType, jsValueValue(value), edge.node(),
                     m_out.notEqual(
                         m_out.load32(value, m_heaps.JSCell_structureID),
-                        weakStructureID(type.structure())));
+                        weakStructureID(structure)));
             }
             return;
+        }
 
         case InferredType::ObjectWithStructureOrOther: {
             LBasicBlock cellCase = m_out.newBlock();
@@ -9316,12 +9484,13 @@ private:
 
             LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
 
-            if (!abstractValue(edge).m_structure.isSubsetOf(StructureSet(type.structure()))) {
+            RegisteredStructure structure = m_graph.registerStructure(type.structure());
+            if (!abstractValue(edge).m_structure.isSubsetOf(RegisteredStructureSet(structure))) {
                 speculate(
                     BadType, jsValueValue(value), edge.node(),
                     m_out.notEqual(
                         m_out.load32(value, m_heaps.JSCell_structureID),
-                        weakStructureID(type.structure())));
+                        weakStructureID(structure)));
             }
 
             m_out.jump(continuation);
@@ -9418,26 +9587,35 @@ private:
         return result;
     }
 
-    void initializeArrayElements(IndexingType indexingType, LValue begin, LValue end, LValue butterfly)
+    void initializeArrayElements(LValue indexingType, LValue begin, LValue end, LValue butterfly)
     {
-        if (hasUndecided(indexingType))
-            return;
         
         if (begin == end)
             return;
         
-        IndexedAbstractHeap* heap = m_heaps.forIndexingType(indexingType);
-        DFG_ASSERT(m_graph, m_node, heap);
-        
-        LValue hole;
-        if (hasDouble(indexingType))
-            hole = m_out.constInt64(bitwise_cast<int64_t>(PNaN));
-        else
-            hole = m_out.constInt64(JSValue::encode(JSValue()));
-        
-        splatWords(butterfly, begin, end, hole, heap->atAnyIndex());
+        if (indexingType->hasInt32()) {
+            IndexingType rawIndexingType = static_cast<IndexingType>(indexingType->asInt32());
+            if (hasUndecided(rawIndexingType))
+                return;
+            IndexedAbstractHeap* heap = m_heaps.forIndexingType(rawIndexingType);
+            DFG_ASSERT(m_graph, m_node, heap);
+            
+            LValue hole;
+            if (hasDouble(rawIndexingType))
+                hole = m_out.constInt64(bitwise_cast<int64_t>(PNaN));
+            else
+                hole = m_out.constInt64(JSValue::encode(JSValue()));
+            
+            splatWords(butterfly, begin, end, hole, heap->atAnyIndex());
+        } else {
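+            // The indexing type is only known at run time here, so pick the
+            // hole value dynamically: DoubleShape butterflies use PNaN as the
+            // hole, everything else uses the empty JSValue.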
+            LValue hole = m_out.select(
+                m_out.equal(m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)), m_out.constInt32(DoubleShape)),
+                m_out.constInt64(bitwise_cast<int64_t>(PNaN)),
+                m_out.constInt64(JSValue::encode(JSValue())));
+            splatWords(butterfly, begin, end, hole, m_heaps.root);
+        }
     }
-    
+
     void splatWords(LValue base, LValue begin, LValue end, LValue value, const AbstractHeap& heap)
     {
         const uint64_t unrollingLimit = 10;
@@ -9538,7 +9716,7 @@ private:
         LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
 
         size_t sizeInBytes = sizeInValues * sizeof(JSValue);
-        MarkedAllocator* allocator = vm().heap.allocatorForAuxiliaryData(sizeInBytes);
+        MarkedAllocator* allocator = vm().auxiliarySpace.allocatorFor(sizeInBytes);
         LValue startOfStorage = allocateHeapCell(m_out.constIntPtr(allocator), slowPath);
         ValueFromBlock fastButterfly = m_out.anchor(
             m_out.add(m_out.constIntPtr(sizeInBytes + sizeof(IndexingHeader)), startOfStorage));
@@ -9547,17 +9725,18 @@ private:
         m_out.appendTo(slowPath, continuation);
         
         LValue slowButterflyValue;
+        VM& vm = this->vm();
         if (sizeInValues == initialOutOfLineCapacity) {
             slowButterflyValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationAllocateSimplePropertyStorageWithInitialCapacity,
                         locations[0].directGPR());
                 });
         } else {
             slowButterflyValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationAllocateSimplePropertyStorage, locations[0].directGPR(),
                         CCallHelpers::TrustedImmPtr(sizeInValues));
                 });
@@ -9646,6 +9825,74 @@ private:
 
         return patchpoint;
     }
+    
+    LValue getByIdWithThis(LValue base, LValue thisValue)
+    {
+        Node* node = m_node;
+        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];
+
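+        // This follows the usual FTL inline cache pattern: a patchpoint whose
+        // fast path is emitted by JITGetByIdWithThisGenerator, with a late path
+        // that calls operationGetByIdWithThisOptimize so the IC can repatch
+        // itself after the first miss.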
+        B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+        patchpoint->appendSomeRegister(base);
+        patchpoint->appendSomeRegister(thisValue);
+        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+
+        patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+        RefPtr<PatchpointExceptionHandle> exceptionHandle =
+            preparePatchpointForExceptions(patchpoint);
+
+        State* state = &m_ftlState;
+        patchpoint->setGenerator(
+            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                AllowMacroScratchRegisterUsage allowScratch(jit);
+
+                CallSiteIndex callSiteIndex =
+                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);
+
+                // This is the direct exit target for operation calls.
+                Box<CCallHelpers::JumpList> exceptions =
+                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+                // This is the exit for call ICs created by the getById for getters. We don't have
+                // to do anything weird other than call this, since it will associate the exit with
+                // the callsite index.
+                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+                auto generator = Box<JITGetByIdWithThisGenerator>::create(
+                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
+                    params.unavailableRegisters(), uid, JSValueRegs(params[0].gpr()),
+                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), AccessType::GetWithThis);
+
+                generator->generateFastPath(jit);
+                CCallHelpers::Label done = jit.label();
+
+                params.addLatePath(
+                    [=] (CCallHelpers& jit) {
+                        AllowMacroScratchRegisterUsage allowScratch(jit);
+
+                        J_JITOperation_ESsiJJI optimizationFunction = operationGetByIdWithThisOptimize;
+
+                        generator->slowPathJump().link(&jit);
+                        CCallHelpers::Label slowPathBegin = jit.label();
+                        CCallHelpers::Call slowPathCall = callOperation(
+                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
+                            exceptions.get(), optimizationFunction, params[0].gpr(),
+                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
+                            params[2].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
+                        jit.jump().linkTo(done, &jit);
+
+                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);
+
+                        jit.addLinkTask(
+                            [=] (LinkBuffer& linkBuffer) {
+                                generator->finalize(linkBuffer);
+                            });
+                    });
+            });
+
+        return patchpoint;
+    }
 
     LValue isFastTypedArray(LValue object)
     {
@@ -9767,6 +10014,30 @@ private:
         setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
     }
 
+    void compileNumberToStringWithRadix()
+    {
+        bool validRadixIsGuaranteed = false;
+        if (m_node->child2()->isInt32Constant()) {
+            int32_t radix = m_node->child2()->asInt32();
+            if (radix >= 2 && radix <= 36)
+                validRadixIsGuaranteed = true;
+        }
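+
+        // Number.prototype.toString() only accepts radices in [2, 36], so a
+        // constant radix in that range lets us call the variant that skips the
+        // range check.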
+
+        switch (m_node->child1().useKind()) {
+        case Int32Use:
+            setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt32ToStringWithValidRadix : operationInt32ToString), m_callFrame, lowInt32(m_node->child1()), lowInt32(m_node->child2())));
+            break;
+        case Int52RepUse:
+            setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt52ToStringWithValidRadix : operationInt52ToString), m_callFrame, lowStrictInt52(m_node->child1()), lowInt32(m_node->child2())));
+            break;
+        case DoubleRepUse:
+            setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationDoubleToStringWithValidRadix : operationDoubleToString), m_callFrame, lowDouble(m_node->child1()), lowInt32(m_node->child2())));
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
     void compileResolveScope()
     {
         UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
@@ -10389,37 +10660,69 @@ private:
             object, m_heaps.JSCell_usefulBytes);
     }
 
-    LValue allocateCell(LValue allocator, Structure* structure, LBasicBlock slowPath)
+    void storeStructure(LValue object, LValue structure)
+    {
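+        // If B3 already knows the structure pointer as a compile-time
+        // constant, fold to the Structure* overload, which can emit the
+        // structure ID and indexing type blob as immediates.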
+        if (structure->hasIntPtr()) {
+            storeStructure(object, bitwise_cast<Structure*>(structure->asIntPtr()));
+            return;
+        }
+
+        LValue id = m_out.load32(structure, m_heaps.Structure_structureID);
+        m_out.store32(id, object, m_heaps.JSCell_structureID);
+
+        LValue blob = m_out.load32(structure, m_heaps.Structure_indexingTypeIncludingHistory);
+        m_out.store32(blob, object, m_heaps.JSCell_usefulBytes);
+    }
+
+    template <typename StructureType>
+    LValue allocateCell(LValue allocator, StructureType structure, LBasicBlock slowPath)
     {
         LValue result = allocateHeapCell(allocator, slowPath);
         storeStructure(result, structure);
         return result;
     }
 
-    LValue allocateObject(
-        LValue allocator, Structure* structure, LValue butterfly, LBasicBlock slowPath)
+    LValue allocateObject(LValue allocator, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
+    {
+        return allocateObject(allocator, weakStructure(structure), butterfly, slowPath);
+    }
+
+    LValue allocateObject(LValue allocator, LValue structure, LValue butterfly, LBasicBlock slowPath)
     {
         LValue result = allocateCell(allocator, structure, slowPath);
-        splatWords(
-            result,
-            m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
-            m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8 + structure->inlineCapacity()),
-            m_out.int64Zero,
-            m_heaps.properties.atAnyNumber());
+        if (structure->hasIntPtr()) {
+            splatWords(
+                result,
+                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
+                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8 + bitwise_cast<Structure*>(structure->asIntPtr())->inlineCapacity()),
+                m_out.int64Zero,
+                m_heaps.properties.atAnyNumber());
+        } else {
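+            // Structure known only at run time: read the inline capacity off
+            // the Structure itself to find where the inline storage we need to
+            // zero ends.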
+            LValue end = m_out.add(
+                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
+                m_out.load8ZeroExt32(structure, m_heaps.Structure_inlineCapacity));
+            splatWords(
+                result,
+                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
+                end,
+                m_out.int64Zero,
+                m_heaps.properties.atAnyNumber());
+        }
+        
         m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly);
         return result;
     }
     
-    template<typename ClassType>
+    template<typename ClassType, typename StructureType>
     LValue allocateObject(
-        size_t size, Structure* structure, LValue butterfly, LBasicBlock slowPath)
+        size_t size, StructureType structure, LValue butterfly, LBasicBlock slowPath)
     {
-        MarkedAllocator* allocator = vm().heap.allocatorForObjectOfType<ClassType>(size);
+        MarkedAllocator* allocator = subspaceFor<ClassType>(vm())->allocatorFor(size);
         return allocateObject(m_out.constIntPtr(allocator), structure, butterfly, slowPath);
     }
     
-    template<typename ClassType>
-    LValue allocateObject(Structure* structure, LValue butterfly, LBasicBlock slowPath)
+    template<typename ClassType, typename StructureType>
+    LValue allocateObject(StructureType structure, LValue butterfly, LBasicBlock slowPath)
     {
         return allocateObject<ClassType>(
             ClassType::allocationSize(0), structure, butterfly, slowPath);
@@ -10431,10 +10734,10 @@ private:
         
         // Try to do some constant-folding here.
         if (subspace->hasIntPtr() && size->hasIntPtr()) {
-            MarkedSpace::Subspace* actualSubspace = bitwise_cast<MarkedSpace::Subspace*>(subspace->asIntPtr());
+            Subspace* actualSubspace = bitwise_cast<Subspace*>(subspace->asIntPtr());
             size_t actualSize = size->asIntPtr();
             
-            MarkedAllocator* actualAllocator = MarkedSpace::allocatorFor(*actualSubspace, actualSize);
+            MarkedAllocator* actualAllocator = actualSubspace->allocatorFor(actualSize);
             if (!actualAllocator) {
                 LBasicBlock continuation = m_out.newBlock();
                 LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
@@ -10464,21 +10767,21 @@ private:
         
         return m_out.loadPtr(
             m_out.baseIndex(
-                m_heaps.MarkedSpace_Subspace_allocatorForSizeStep,
+                m_heaps.Subspace_allocatorForSizeStep,
                 subspace, m_out.sub(sizeClassIndex, m_out.intPtrOne)));
     }
     
-    LValue allocatorForSize(MarkedSpace::Subspace& subspace, LValue size, LBasicBlock slowPath)
+    LValue allocatorForSize(Subspace& subspace, LValue size, LBasicBlock slowPath)
     {
         return allocatorForSize(m_out.constIntPtr(&subspace), size, slowPath);
     }
     
     template<typename ClassType>
     LValue allocateVariableSizedObject(
-        LValue size, Structure* structure, LValue butterfly, LBasicBlock slowPath)
+        LValue size, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
     {
         LValue allocator = allocatorForSize(
-            vm().heap.subspaceForObjectOfType<ClassType>(), size, slowPath);
+            *subspaceFor<ClassType>(vm()), size, slowPath);
         return allocateObject(allocator, structure, butterfly, slowPath);
     }
 
@@ -10487,14 +10790,14 @@ private:
         LValue size, Structure* structure, LBasicBlock slowPath)
     {
         LValue allocator = allocatorForSize(
-            vm().heap.subspaceForObjectOfType<ClassType>(), size, slowPath);
+            *subspaceFor<ClassType>(vm()), size, slowPath);
         return allocateCell(allocator, structure, slowPath);
     }
     
-    LValue allocateObject(Structure* structure)
+    LValue allocateObject(RegisteredStructure structure)
     {
-        size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
-        MarkedAllocator* allocator = vm().heap.allocatorForObjectWithoutDestructor(allocationSize);
+        size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
+        MarkedAllocator* allocator = subspaceFor<JSFinalObject>(vm())->allocatorFor(allocationSize);
         
         // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
         // instead of putting it on the slow path.
@@ -10512,11 +10815,12 @@ private:
         
         m_out.appendTo(slowPath, continuation);
 
+        VM& vm = this->vm();
         LValue slowResultValue = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationNewObject, locations[0].directGPR(),
-                    CCallHelpers::TrustedImmPtr(structure));
+                    CCallHelpers::TrustedImmPtr(structure.get()));
             });
         ValueFromBlock slowResult = m_out.anchor(slowResultValue);
         m_out.jump(continuation);
@@ -10542,15 +10846,17 @@ private:
         LValue butterfly;
     };
 
-    ArrayValues allocateJSArray(LValue publicLength, Structure* structure, bool shouldInitializeElements = true, bool shouldLargeArraySizeCreateArrayStorage = true)
+    ArrayValues allocateJSArray(LValue publicLength, LValue structure, LValue indexingType, bool shouldInitializeElements = true, bool shouldLargeArraySizeCreateArrayStorage = true)
     {
         JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
-        IndexingType indexingType = structure->indexingType();
-        ASSERT(
-            hasUndecided(indexingType)
-            || hasInt32(indexingType)
-            || hasDouble(indexingType)
-            || hasContiguous(indexingType));
+        if (indexingType->hasInt32()) {
+            IndexingType type = static_cast<IndexingType>(indexingType->asInt32());
+            ASSERT_UNUSED(type,
+                hasUndecided(type)
+                || hasInt32(type)
+                || hasDouble(type)
+                || hasContiguous(type));
+        }
 
         LBasicBlock fastCase = m_out.newBlock();
         LBasicBlock largeCase = m_out.newBlock();
@@ -10573,12 +10879,12 @@ private:
         m_out.appendTo(fastCase, largeCase);
 
         LValue vectorLength = nullptr;
-        if (publicLength->hasInt32()) {
+        if (publicLength->hasInt32() && structure->hasIntPtr()) {
             unsigned publicLengthConst = static_cast<unsigned>(publicLength->asInt32());
             if (publicLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                 vectorLength = m_out.constInt32(
                     Butterfly::optimalContiguousVectorLength(
-                        structure->outOfLineCapacity(), publicLengthConst));
+                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), publicLengthConst));
             }
         }
         
@@ -10594,8 +10900,7 @@ private:
         LValue butterflySize = m_out.add(
             payloadSize, m_out.constIntPtr(sizeof(IndexingHeader)));
             
-        LValue allocator = allocatorForSize(
-            vm().heap.subspaceForAuxiliaryData(), butterflySize, failCase);
+        LValue allocator = allocatorForSize(vm().auxiliarySpace, butterflySize, failCase);
         LValue startOfStorage = allocateHeapCell(allocator, failCase);
             
         LValue butterfly = m_out.add(startOfStorage, m_out.constIntPtr(sizeof(IndexingHeader)));
@@ -10611,28 +10916,28 @@ private:
         ValueFromBlock haveButterfly = m_out.anchor(butterfly);
         
         LValue object = allocateObject<JSArray>(structure, butterfly, failCase);
-            
+
         ValueFromBlock fastResult = m_out.anchor(object);
         ValueFromBlock fastButterfly = m_out.anchor(butterfly);
         m_out.jump(continuation);
         
         m_out.appendTo(largeCase, failCase);
         ValueFromBlock largeStructure = m_out.anchor(
-            m_out.constIntPtr(
-                globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)));
+            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))));
         m_out.jump(slowCase);
         
         m_out.appendTo(failCase, slowCase);
-        ValueFromBlock failStructure = m_out.anchor(m_out.constIntPtr(structure));
+        ValueFromBlock failStructure = m_out.anchor(structure);
         m_out.jump(slowCase);
         
         m_out.appendTo(slowCase, continuation);
         LValue structureValue = m_out.phi(pointerType(), largeStructure, failStructure);
         LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
 
+        VM& vm = this->vm();
         LValue slowResultValue = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationNewArrayWithSize, locations[0].directGPR(),
                     locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR());
             },
@@ -10648,12 +10953,12 @@ private:
             m_out.phi(pointerType(), fastButterfly, slowButterfly));
     }
     
-    ArrayValues allocateUninitializedContiguousJSArray(LValue publicLength, Structure* structure)
+    ArrayValues allocateUninitializedContiguousJSArray(LValue publicLength, RegisteredStructure structure)
     {
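+        // Callers still pass a compile-time RegisteredStructure here; handing
+        // allocateJSArray the structure and indexing type as constants keeps
+        // its hasIntPtr()/hasInt32() fast paths in play.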
         bool shouldInitializeElements = false;
         bool shouldLargeArraySizeCreateArrayStorage = false;
         return allocateJSArray(
-            publicLength, structure, shouldInitializeElements,
+            publicLength, weakStructure(structure), m_out.constInt32(structure->indexingType()), shouldInitializeElements,
             shouldLargeArraySizeCreateArrayStorage);
     }
     
@@ -10795,7 +11100,7 @@ private:
                 m_out.appendTo(masqueradesCase);
                 
                 isTruthyObject = m_out.notEqual(
-                    m_out.constIntPtr(m_graph.globalObjectFor(m_node->origin.semantic)),
+                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                     m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject));
             }
             results.append(m_out.anchor(isTruthyObject));
@@ -10894,7 +11199,7 @@ private:
             
             results.append(m_out.anchor(
                 m_out.equal(
-                    m_out.constIntPtr(m_graph.globalObjectFor(m_node->origin.semantic)),
+                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                     m_out.loadPtr(structure, m_heaps.Structure_globalObject))));
             m_out.jump(continuation);
         }
@@ -11378,9 +11683,10 @@ private:
         functor(TypeofType::Object);
         
         m_out.appendTo(slowPath, unreachable);
+        VM& vm = this->vm();
         LValue result = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationTypeOfObjectAsTypeofType, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
             }, value);
@@ -11469,7 +11775,7 @@ private:
     
     LValue doubleToInt32(LValue doubleValue)
     {
-        if (Output::hasSensibleDoubleToInt())
+        if (hasSensibleDoubleToInt())
             return sensibleDoubleToInt32(doubleValue);
         
         double limit = pow(2, 31) - 1;
@@ -11489,7 +11795,7 @@ private:
         
         LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
         ValueFromBlock slowResult = m_out.anchor(
-            m_out.call(Int32, m_out.operation(operationToInt32), doubleValue));
+            m_out.call(Int32, m_out.operation(operationToInt32SensibleSlow), doubleValue));
         m_out.jump(continuation);
         
         m_out.appendTo(continuation, lastNext);
@@ -11706,7 +12012,9 @@ private:
                 terminate(Uncountable);
                 return m_out.int32Zero;
             }
-            return m_out.constInt32(value.asInt32());
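+            // Tag the constant with an origin so B3 can attribute it back to
+            // the DFG node that produced it.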
+            LValue result = m_out.constInt32(value.asInt32());
+            result->setOrigin(B3::Origin(edge.node()));
+            return result;
         }
         
         LoweredNodeValue value = m_int32Values.get(edge.node());
@@ -11816,12 +12124,14 @@ private:
         DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()));
         
         if (edge->op() == JSConstant) {
-            JSValue value = edge->asJSValue();
-            if (!value.isCell()) {
+            FrozenValue* value = edge->constant();
+            if (!value->value().isCell()) {
                 terminate(Uncountable);
                 return m_out.intPtrZero;
             }
-            return m_out.constIntPtr(value.asCell());
+            LValue result = frozenPointer(value);
+            result->setOrigin(B3::Origin(edge.node()));
+            return result;
         }
         
         LoweredNodeValue value = m_jsValueValues.get(edge.node());
@@ -11914,7 +12224,9 @@ private:
                 terminate(Uncountable);
                 return m_out.booleanFalse;
             }
-            return m_out.constBool(value.asBoolean());
+            LValue result = m_out.constBool(value.asBoolean());
+            result->setOrigin(B3::Origin(edge.node()));
+            return result;
         }
         
         LoweredNodeValue value = m_booleanValues.get(edge.node());
@@ -11954,8 +12266,11 @@ private:
         DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind()));
         DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse);
         
-        if (edge->hasConstant())
-            return m_out.constInt64(JSValue::encode(edge->asJSValue()));
+        if (edge->hasConstant()) {
+            LValue result = m_out.constInt64(JSValue::encode(edge->asJSValue()));
+            result->setOrigin(B3::Origin(edge.node()));
+            return result;
+        }
 
         LoweredNodeValue value = m_jsValueValues.get(edge.node());
         if (isValid(value))
@@ -11978,6 +12293,13 @@ private:
         DFG_CRASH(m_graph, m_node, "Value not defined");
         return 0;
     }
+
+    LValue lowNotCell(Edge edge)
+    {
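+        // Like the old inline speculateNotCell() logic, but returns the loaded
+        // value so callers can consume it after the ~SpecCell check.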
+        LValue result = lowJSValue(edge, ManualOperandSpeculation);
+        FTL_TYPE_CHECK(jsValueValue(result), edge, ~SpecCell, isCell(result));
+        return result;
+    }
     
     LValue lowStorage(Edge edge)
     {
@@ -12382,6 +12704,13 @@ private:
     {
         lowCell(edge);
     }
+
+    void speculateNotCell(Edge edge)
+    {
+        if (!m_interpreter.needsTypeCheck(edge))
+            return;
+        lowNotCell(edge);
+    }
     
     void speculateCellOrOther(Edge edge)
     {
@@ -12786,10 +13115,10 @@ private:
     
     void speculateStringObjectForStructureID(Edge edge, LValue structureID)
     {
-        Structure* stringObjectStructure =
-            m_graph.globalObjectFor(m_node->origin.semantic)->stringObjectStructure();
-        
-        if (abstractStructure(edge).isSubsetOf(StructureSet(stringObjectStructure)))
+        RegisteredStructure stringObjectStructure =
+            m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->stringObjectStructure());
+
+        if (abstractStructure(edge).isSubsetOf(RegisteredStructureSet(stringObjectStructure)))
             return;
         
         speculate(
@@ -12900,15 +13229,6 @@ private:
         m_out.appendTo(continuation, lastNext);
     }
     
-    void speculateNotCell(Edge edge)
-    {
-        if (!m_interpreter.needsTypeCheck(edge))
-            return;
-        
-        LValue value = lowJSValue(edge, ManualOperandSpeculation);
-        typeCheck(jsValueValue(value), edge, ~SpecCell, isCell(value));
-    }
-    
     void speculateOther(Edge edge)
     {
         if (!m_interpreter.needsTypeCheck(edge))
@@ -12956,8 +13276,13 @@ private:
 
     void emitStoreBarrier(LValue base, bool isFenced)
     {
+        LBasicBlock recheckPath = nullptr;
+        if (isFenced)
+            recheckPath = m_out.newBlock();
         LBasicBlock slowPath = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
+        
+        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isFenced ? recheckPath : slowPath);
 
         LValue threshold;
         if (isFenced)
@@ -12967,76 +13292,23 @@ private:
         
         m_out.branch(
             m_out.above(loadCellState(base), threshold),
-            usually(continuation), rarely(slowPath));
-
-        LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
+            usually(continuation), rarely(isFenced ? recheckPath : slowPath));
         
-        // We emit the store barrier slow path lazily. In a lot of cases, this will never fire. And
-        // when it does fire, it makes sense for us to generate this code using our JIT rather than
-        // wasting B3's time optimizing it.
-        PatchpointValue* patchpoint = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                GPRReg baseGPR = locations[1].directGPR();
-
-                return LazySlowPath::createGenerator(
-                    [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
-                        if (isFenced) {
-                            CCallHelpers::Jump noFence = jit.jumpIfMutatorFenceNotNeeded();
-                            jit.memoryFence();
-                            params.doneJumps.append(jit.barrierBranchWithoutFence(baseGPR));
-                            noFence.link(&jit);
-                        }
-                        
-                        RegisterSet usedRegisters = params.lazySlowPath->usedRegisters();
-                        ScratchRegisterAllocator scratchRegisterAllocator(usedRegisters);
-                        scratchRegisterAllocator.lock(baseGPR);
-
-                        GPRReg scratch1 = scratchRegisterAllocator.allocateScratchGPR();
-                        GPRReg scratch2 = scratchRegisterAllocator.allocateScratchGPR();
-
-                        ScratchRegisterAllocator::PreservedState preservedState =
-                            scratchRegisterAllocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
-
-                        // We've already saved these, so when we make a slow path call, we don't have
-                        // to save them again.
-                        usedRegisters.exclude(RegisterSet(scratch1, scratch2));
-
-                        WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer();
-                        jit.load32(writeBarrierBuffer.currentIndexAddress(), scratch2);
-                        CCallHelpers::Jump needToFlush = jit.branch32(
-                            CCallHelpers::AboveOrEqual, scratch2,
-                            CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity()));
-
-                        jit.add32(CCallHelpers::TrustedImm32(1), scratch2);
-                        jit.store32(scratch2, writeBarrierBuffer.currentIndexAddress());
-
-                        jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratch1);
-                        jit.storePtr(
-                            baseGPR,
-                            CCallHelpers::BaseIndex(
-                                scratch1, scratch2, CCallHelpers::ScalePtr,
-                                static_cast<int32_t>(-sizeof(void*))));
-
-                        scratchRegisterAllocator.restoreReusedRegistersByPopping(jit, preservedState);
-
-                        params.doneJumps.append(jit.jump());
+        if (isFenced) {
+            m_out.appendTo(recheckPath, slowPath);
+            
+            m_out.fence(&m_heaps.root, &m_heaps.JSCell_cellState);
+            
+            m_out.branch(
+                m_out.above(loadCellState(base), m_out.constInt32(blackThreshold)),
+                usually(continuation), rarely(slowPath));
+        }
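+        // With a concurrent collector the cellState check above can race with
+        // marking, so after the fence we re-check against blackThreshold; most
+        // cells pass and skip the operationWriteBarrierSlowPath call entirely.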
 
-                        needToFlush.link(&jit);
-                        callOperation(
-                            usedRegisters, jit, params.lazySlowPath->callSiteIndex(),
-                            params.exceptionJumps, operationFlushWriteBarrierBuffer, InvalidGPRReg,
-                            baseGPR);
-                        scratchRegisterAllocator.restoreReusedRegistersByPopping(jit, preservedState);
-                        params.doneJumps.append(jit.jump());
-                    });
-            },
-            base);
+        m_out.appendTo(slowPath, continuation);
         
-        if (isFenced)
-            m_heaps.decoratePatchpointRead(&m_heaps.root, patchpoint);
-        else
-            m_heaps.decoratePatchpointRead(&m_heaps.JSCell_cellState, patchpoint);
-        m_heaps.decoratePatchpointWrite(&m_heaps.JSCell_cellState, patchpoint);
+        LValue call = vmCall(Void, m_out.operation(operationWriteBarrierSlowPath), m_callFrame, base);
+        m_heaps.decorateCCallRead(&m_heaps.root, call);
+        m_heaps.decorateCCallWrite(&m_heaps.JSCell_cellState, call);
         
         m_out.jump(continuation);
 
@@ -13622,19 +13894,30 @@ private:
 
     LValue weakPointer(JSCell* pointer)
     {
+        // There are weird relationships in how optimized CodeBlocks
+        // point to other CodeBlocks. We don't want them to be part of
+        // the weak pointer set. For example, an optimized CodeBlock
+        // having a weak pointer to itself will cause it to get collected.
+        RELEASE_ASSERT(!jsDynamicCast<CodeBlock*>(vm(), pointer));
+
         addWeakReference(pointer);
-        return m_out.constIntPtr(pointer);
+        return m_out.weakPointer(m_graph, pointer);
+    }
+
+    LValue frozenPointer(FrozenValue* value)
+    {
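+        // Frozen values are kept alive by the compilation record itself, so
+        // unlike weakPointer() there is no addWeakReference() here.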
+        return m_out.weakPointer(value);
     }
 
-    LValue weakStructureID(Structure* structure)
+    LValue weakStructureID(RegisteredStructure structure)
     {
-        addWeakReference(structure);
         return m_out.constInt32(structure->id());
     }
     
-    LValue weakStructure(Structure* structure)
+    LValue weakStructure(RegisteredStructure structure)
     {
-        return weakPointer(structure);
+        ASSERT(!!structure.get());
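+        // A RegisteredStructure was added to the graph's weak references when
+        // it was registered, which is why neither this nor weakStructureID()
+        // needs an explicit addWeakReference() anymore.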
+        return m_out.weakPointer(m_graph, structure.get());
     }
     
     TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
@@ -13742,7 +14025,9 @@ private:
         m_out.call(
             Void,
             m_out.constIntPtr(ftlUnreachable),
-            m_out.constIntPtr(codeBlock()), m_out.constInt32(blockIndex),
+            // We don't want the CodeBlock to have a weak pointer to itself because
+            // that would cause it to always get collected.
+            m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), m_out.constInt32(blockIndex),
             m_out.constInt32(nodeIndex));
 #endif
         m_out.unreachable();