[BigInt] Add ValueBitRShift into DFG
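
This patch (summarizing the diff below) splits the DFG bit-shift nodes into Arith* variants that only handle int32 operands (ArithBitRShift, ArithBitLShift) and Value* variants that also cover BigInt and untyped operands (ValueBitRShift, ValueBitLShift, ValueBitNot, ValueMod, ValuePow), and adds the corresponding FTL lowerings. It also picks up related changes visible in the diff: the TagTypeNumber/TagMask constants become JSValue::NumberTag/JSValue::NotCellMask, the math ICs no longer take an Instruction*, and an opt-in abstract-interpreter state validation mode (Options::validateAbstractInterpreterState()) is wired into FTL lowering.

The new Value* lowerings all follow the same shape; a condensed sketch of the ValueBitRShift case added below (BigIntUse calls straight into the BigInt runtime operation, everything else falls back to the existing untyped snippet):

    void compileValueBitRShift()
    {
        if (m_node->isBinaryUseKind(BigIntUse)) {
            // Both operands are proven BigInts: call the runtime shift operation.
            LValue left = lowBigInt(m_node->child1());
            LValue right = lowBigInt(m_node->child2());
            setJSValue(vmCall(pointerType(), m_out.operation(operationBitRShiftBigInt), m_callFrame, left, right));
            return;
        }
        // UntypedUse: reuse the generic right-shift snippet.
        emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
    }
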
diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
index 2a941af..867bd77 100644
--- a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
+++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #include "CodeBlockWithJITType.h"
 #include "DFGAbstractInterpreterInlines.h"
 #include "DFGCapabilities.h"
+#include "DFGDoesGC.h"
 #include "DFGDominators.h"
 #include "DFGInPlaceAbstractState.h"
+#include "DFGLivenessAnalysisPhase.h"
 #include "DFGMayExit.h"
 #include "DFGOSRAvailabilityAnalysisPhase.h"
 #include "DFGOSRExitFuzz.h"
 #include "JSAsyncFunction.h"
 #include "JSAsyncGeneratorFunction.h"
 #include "JSCInlines.h"
+#include "JSGenerator.h"
 #include "JSGeneratorFunction.h"
 #include "JSImmutableButterfly.h"
 #include "JSLexicalEnvironment.h"
 #include "JSMap.h"
 #include "OperandsInlines.h"
+#include "ProbeContext.h"
 #include "RegExpObject.h"
 #include "ScopedArguments.h"
 #include "ScopedArgumentsTable.h"
@@ -154,6 +158,29 @@ public:
         , m_interpreter(state.graph, m_state)
         , m_indexMaskingMode(Options::enableSpectreMitigations() ?  IndexMaskingEnabled : IndexMaskingDisabled)
     {
+        if (Options::validateAbstractInterpreterState()) {
+            performLivenessAnalysis(m_graph);
+
+            // We only use node liveness here, not combined liveness, as we only track
+            // AI state for live nodes.
+            for (DFG::BasicBlock* block : m_graph.blocksInNaturalOrder()) {
+                NodeSet live;
+
+                for (NodeFlowProjection node : block->ssa->liveAtTail) {
+                    if (node.kind() == NodeFlowProjection::Primary)
+                        live.addVoid(node.node());
+                }
+
+                for (unsigned i = block->size(); i--; ) {
+                    Node* node = block->at(i);
+                    live.remove(node);
+                    m_graph.doToChildren(node, [&] (Edge child) {
+                        live.addVoid(child.node());
+                    });
+                    m_liveInToNode.add(node, live);
+                }
+            }
+        }
     }
     
     void lower()
@@ -233,13 +260,13 @@ public:
         auto preOrder = m_graph.blocksInPreOrder();
 
         m_callFrame = m_out.framePointer();
-        m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
-        m_tagMask = m_out.constInt64(TagMask);
+        m_numberTag = m_out.constInt64(JSValue::NumberTag);
+        m_notCellMask = m_out.constInt64(JSValue::NotCellMask);
 
         // Make sure that B3 knows that we really care about the mask registers. This forces the
         // constants to be materialized in registers.
-        m_proc.addFastConstant(m_tagTypeNumber->key());
-        m_proc.addFastConstant(m_tagMask->key());
+        m_proc.addFastConstant(m_numberTag->key());
+        m_proc.addFastConstant(m_notCellMask->key());
         
         // We don't want the CodeBlock to have a weak pointer to itself because
         // that would cause it to always get collected.
@@ -472,6 +499,8 @@ private:
             crash(m_highBlock, nullptr);
             return;
         }
+
+        m_aiCheckedNodes.clear();
         
         m_availabilityCalculator.beginBlock(m_highBlock);
         
@@ -507,6 +536,133 @@ private:
         }
     }
 
+    void validateAIState(Node* node)
+    {
+        if (!m_graphDump) {
+            StringPrintStream out;
+            m_graph.dump(out);
+            m_graphDump = out.toString();
+        }
+
+        switch (node->op()) {
+        case MovHint:
+        case ZombieHint:
+        case JSConstant:
+        case LazyJSConstant:
+        case DoubleConstant:
+        case Int52Constant:
+        case GetStack:
+        case PutStack:
+        case KillStack:
+        case ExitOK:
+            return;
+        default:
+            break;
+        }
+
+        // Before we execute the node.
+        NodeSet& live = m_liveInToNode.find(node)->value;
+        unsigned highParentIndex = node->index();
+        {
+            // Only validate a pseudo-randomly chosen subset of nodes: hash the node
+            // index and check it against the configured validation probability.
+            uint64_t hash = WTF::intHash(highParentIndex);
+            if (hash >= static_cast<uint64_t>((static_cast<double>(std::numeric_limits<unsigned>::max()) + 1) * Options::validateAbstractInterpreterStateProbability()))
+                return;
+        }
+
+        for (Node* node : live) {
+            if (node->isPhantomAllocation())
+                continue;
+
+            if (node->op() == CheckInBounds)
+                continue;
+
+            AbstractValue value = m_interpreter.forNode(node);
+            {
+                auto iter = m_aiCheckedNodes.find(node);
+                if (iter != m_aiCheckedNodes.end()) {
+                    AbstractValue checkedValue = iter->value;
+                    if (checkedValue == value) {
+                        if (!(value.m_type & SpecCell))
+                            continue;
+                    }
+                }
+                m_aiCheckedNodes.set(node, value);
+            }
+
+            FlushFormat flushFormat;
+            LValue input;
+            if (node->hasJSResult()) {
+                input = lowJSValue(Edge(node, UntypedUse));
+                flushFormat = FlushedJSValue;
+            } else if (node->hasDoubleResult()) {
+                input = lowDouble(Edge(node, DoubleRepUse));
+                flushFormat = FlushedDouble;
+            } else if (node->hasInt52Result()) {
+                input = strictInt52ToJSValue(lowStrictInt52(Edge(node, Int52RepUse)));
+                flushFormat = FlushedInt52;
+            } else
+                continue;
+
+            unsigned highChildIndex = node->index();
+
+            String graphDump = m_graphDump;
+
+            PatchpointValue* patchpoint = m_out.patchpoint(Void);
+            patchpoint->effects = Effects::none();
+            patchpoint->effects.writesLocalState = true;
+            patchpoint->appendSomeRegister(input);
+            patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                GPRReg reg = InvalidGPRReg;
+                FPRReg fpReg = InvalidFPRReg;
+                if (flushFormat == FlushedDouble)
+                    fpReg = params[0].fpr();
+                else
+                    reg = params[0].gpr();
+                jit.probe([=] (Probe::Context& context) {
+                    JSValue input;
+                    double doubleInput;
+
+                    auto dumpAndCrash = [&] {
+                        dataLogLn("Validation failed at node: @", highParentIndex);
+                        dataLogLn("Failed validating live value: @", highChildIndex);
+                        dataLogLn();
+                        dataLogLn("Expected AI value = ", value);
+                        if (flushFormat != FlushedDouble)
+                            dataLogLn("Unexpected value = ", input);
+                        else
+                            dataLogLn("Unexpected double value = ", doubleInput);
+                        dataLogLn();
+                        dataLogLn(graphDump);
+                        CRASH();
+                    };
+
+                    if (flushFormat == FlushedDouble) {
+                        doubleInput = context.fpr(fpReg);
+                        SpeculatedType type;
+                        if (!std::isnan(doubleInput))
+                            type = speculationFromValue(jsDoubleNumber(doubleInput));
+                        else if (isImpureNaN(doubleInput))
+                            type = SpecDoubleImpureNaN;
+                        else
+                            type = SpecDoublePureNaN;
+
+                        if (!value.couldBeType(type))
+                            dumpAndCrash();
+                    } else {
+                        input = JSValue::decode(context.gpr(reg)); 
+                        if (flushFormat == FlushedInt52) {
+                            RELEASE_ASSERT(input.isAnyInt());
+                            input = jsDoubleNumber(input.asAnyInt());
+                        }
+                        if (!value.validateOSREntryValue(input, flushFormat))
+                            dumpAndCrash();
+                    }
+
+                });
+            });
+        }
+    }
+
     bool compileNode(unsigned nodeIndex)
     {
         if (!m_state.isValid()) {
@@ -521,11 +677,17 @@ private:
         if (verboseCompilationEnabled())
             dataLog("Lowering ", m_node, "\n");
         
-        m_availableRecoveries.shrink(0);
-        
         m_interpreter.startExecuting();
         m_interpreter.executeKnownEdgeTypes(m_node);
-        
+
+        if (Options::validateAbstractInterpreterState())
+            validateAIState(m_node);
+
+        if (validateDFGDoesGC) {
+            bool expectDoesGC = doesGC(m_graph, m_node);
+            m_out.store(m_out.constBool(expectDoesGC), m_out.absolute(vm().heap.addressOfExpectDoesGC()));
+        }
+
         switch (m_node->op()) {
         case DFG::Upsilon:
             compileUpsilon();
@@ -619,6 +781,9 @@ private:
         case ArithDiv:
             compileArithDiv();
             break;
+        case ValueMod:
+            compileValueMod();
+            break;
         case ArithMod:
             compileArithMod();
             break;
@@ -629,6 +794,9 @@ private:
         case ArithAbs:
             compileArithAbs();
             break;
+        case ValuePow:
+            compileValuePow();
+            break;
         case ArithPow:
             compileArithPow();
             break;
@@ -659,6 +827,9 @@ private:
         case ArithUnary:
             compileArithUnary();
             break;
+        case ValueBitNot:
+            compileValueBitNot();
+            break;
         case ArithBitNot:
             compileArithBitNot();
             break;
@@ -680,11 +851,17 @@ private:
         case ValueBitXor:
             compileValueBitXor();
             break;
-        case BitRShift:
-            compileBitRShift();
+        case ValueBitRShift:
+            compileValueBitRShift();
+            break;
+        case ArithBitRShift:
+            compileArithBitRShift();
             break;
-        case BitLShift:
-            compileBitLShift();
+        case ArithBitLShift:
+            compileArithBitLShift();
+            break;
+        case ValueBitLShift:
+            compileValueBitLShift();
             break;
         case BitURShift:
             compileBitURShift();
@@ -872,6 +1049,12 @@ private:
         case NewObject:
             compileNewObject();
             break;
+        case NewPromise:
+            compileNewPromise();
+            break;
+        case NewGenerator:
+            compileNewGenerator();
+            break;
         case NewStringObject:
             compileNewStringObject();
             break;
@@ -887,6 +1070,12 @@ private:
         case CreateThis:
             compileCreateThis();
             break;
+        case CreatePromise:
+            compileCreatePromise();
+            break;
+        case CreateGenerator:
+            compileCreateGenerator();
+            break;
         case Spread:
             compileSpread();
             break;
@@ -934,12 +1123,12 @@ private:
         case StringCharCodeAt:
             compileStringCharCodeAt();
             break;
+        case StringCodePointAt:
+            compileStringCodePointAt();
+            break;
         case StringFromCharCode:
             compileStringFromCharCode();
             break;
-        case ObjectToString:
-            compileObjectToString();
-            break;
         case GetByOffset:
         case GetGetterSetterByOffset:
             compileGetByOffset();
@@ -1002,6 +1191,12 @@ private:
         case PutClosureVar:
             compilePutClosureVar();
             break;
+        case GetInternalField:
+            compileGetInternalField();
+            break;
+        case PutInternalField:
+            compilePutInternalField();
+            break;
         case GetFromArguments:
             compileGetFromArguments();
             break;
@@ -1546,7 +1741,7 @@ private:
                 LBasicBlock convertBooleanFalseCase = m_out.newBlock();
 
                 m_out.appendTo(nonDoubleCase, undefinedCase);
-                LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
+                LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(JSValue::ValueUndefined));
                 m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
 
                 m_out.appendTo(undefinedCase, testNullCase);
@@ -1554,7 +1749,7 @@ private:
                 m_out.jump(continuation);
 
                 m_out.appendTo(testNullCase, nullCase);
-                LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
+                LValue valueIsNull = m_out.equal(value, m_out.constInt64(JSValue::ValueNull));
                 m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
 
                 m_out.appendTo(nullCase, testBooleanTrueCase);
@@ -1562,7 +1757,7 @@ private:
                 m_out.jump(continuation);
 
                 m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
-                LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
+                LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(JSValue::ValueTrue));
                 m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
 
                 m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
@@ -1571,7 +1766,7 @@ private:
 
                 m_out.appendTo(convertBooleanFalseCase, continuation);
 
-                LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
+                LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(JSValue::ValueFalse));
                 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
                 ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
                 m_out.jump(continuation);
@@ -1721,7 +1916,7 @@ private:
             
             LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
             ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
-                m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
+                m_out.zeroExt(unboxBoolean(value), Int64), m_numberTag));
             m_out.jump(continuation);
             
             m_out.appendTo(continuation, lastNext);
@@ -1762,12 +1957,20 @@ private:
         
         DFG_ASSERT(m_graph, m_node, isConcrete(data->format), data->format);
         
-        if (data->format == FlushedDouble)
+        switch (data->format) {
+        case FlushedDouble:
             setDouble(m_out.loadDouble(addressFor(data->machineLocal)));
-        else if (isInt32Speculation(value.m_type))
-            setInt32(m_out.load32(payloadFor(data->machineLocal)));
-        else
-            setJSValue(m_out.load64(addressFor(data->machineLocal)));
+            break;
+        case FlushedInt52:
+            setInt52(m_out.load64(addressFor(data->machineLocal)));
+            break;
+        default:
+            if (isInt32Speculation(value.m_type))
+                setInt32(m_out.load32(payloadFor(data->machineLocal)));
+            else
+                setJSValue(m_out.load64(addressFor(data->machineLocal)));
+            break;
+        }
     }
     
     void compilePutStack()
@@ -1896,11 +2099,11 @@ private:
         }
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
-        const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr();
+        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
         auto repatchingFunction = operationValueAddOptimize;
         auto nonRepatchingFunction = operationValueAdd;
-        compileBinaryMathIC<JITAddGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
+        compileBinaryMathIC<JITAddGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
     }
 
     void compileValueSub()
@@ -1915,11 +2118,11 @@ private:
         }
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
-        const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr();
+        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
         auto repatchingFunction = operationValueSubOptimize;
         auto nonRepatchingFunction = operationValueSub;
-        compileBinaryMathIC<JITSubGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
+        compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
     }
 
     void compileValueMul()
@@ -1934,16 +2137,16 @@ private:
         }
 
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
-        const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr();
+        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
         auto repatchingFunction = operationValueMulOptimize;
         auto nonRepatchingFunction = operationValueMul;
-        compileBinaryMathIC<JITMulGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
+        compileBinaryMathIC<JITMulGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
     }
 
     template <typename Generator, typename Func1, typename Func2,
         typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
-    void compileUnaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction)
+    void compileUnaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
     {
         Node* node = m_node;
 
@@ -1951,8 +2154,8 @@ private:
 
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(operand);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
         RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
         patchpoint->numGPScratchRegisters = 1;
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
@@ -1969,7 +2172,7 @@ private:
 #endif
 
                 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
-                JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile, instruction);
+                JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                 mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
 
                 bool shouldEmitProfiling = false;
@@ -2029,7 +2232,7 @@ private:
 
     template <typename Generator, typename Func1, typename Func2,
         typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
-    void compileBinaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction)
+    void compileBinaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
     {
         Node* node = m_node;
         
@@ -2042,8 +2245,8 @@ private:
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(left);
         patchpoint->appendSomeRegister(right);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
         RefPtr<PatchpointExceptionHandle> exceptionHandle =
             preparePatchpointForExceptions(patchpoint);
         patchpoint->numGPScratchRegisters = 1;
@@ -2063,7 +2266,7 @@ private:
 #endif
 
                 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
-                JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile, instruction);
+                JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                 mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                     JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
                     params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);
@@ -2162,8 +2365,8 @@ private:
         }
             
         case Int52RepUse: {
-            if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)
-                && !abstractValue(m_node->child2()).couldBeType(SpecInt52Only)) {
+            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)
+                && !abstractValue(m_node->child2()).couldBeType(SpecNonInt32AsInt52)) {
                 Int52Kind kind;
                 LValue left = lowWhicheverInt52(m_node->child1(), kind);
                 LValue right = lowInt52(m_node->child2(), kind);
@@ -2195,11 +2398,11 @@ private:
             }
 
             CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-            ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
-            const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr();
+            unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+            ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
             auto repatchingFunction = operationValueSubOptimize;
             auto nonRepatchingFunction = operationValueSub;
-            compileBinaryMathIC<JITSubGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
+            compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
             break;
         }
 
@@ -2373,6 +2576,24 @@ private:
         }
     }
     
+    void compileValueMod()
+    {
+        if (m_node->binaryUseKind() == BigIntUse) {
+            LValue left = lowBigInt(m_node->child1());
+            LValue right = lowBigInt(m_node->child2());
+
+            LValue result = vmCall(pointerType(), m_out.operation(operationModBigInt), m_callFrame, left, right);
+            setJSValue(result);
+            return;
+        }
+
+        DFG_ASSERT(m_graph, m_node, m_node->binaryUseKind() == UntypedUse, m_node->binaryUseKind());
+        LValue left = lowJSValue(m_node->child1());
+        LValue right = lowJSValue(m_node->child2());
+        LValue result = vmCall(Int64, m_out.operation(operationValueMod), m_callFrame, left, right);
+        setJSValue(result);
+    }
+
     void compileArithMod()
     {
         switch (m_node->binaryUseKind()) {
@@ -2528,6 +2749,23 @@ private:
         setDouble(result);
     }
 
+    void compileValuePow()
+    {
+        if (m_node->isBinaryUseKind(BigIntUse)) {
+            LValue base = lowBigInt(m_node->child1());
+            LValue exponent = lowBigInt(m_node->child2());
+            
+            LValue result = vmCall(pointerType(), m_out.operation(operationPowBigInt), m_callFrame, base, exponent);
+            setJSValue(result);
+            return;
+        }
+
+        LValue base = lowJSValue(m_node->child1());
+        LValue exponent = lowJSValue(m_node->child2());
+        LValue result = vmCall(Int64, m_out.operation(operationValuePow), m_callFrame, base, exponent);
+        setJSValue(result);
+    }
+
     void compileArithPow()
     {
         if (m_node->child2().useKind() == Int32Use)
@@ -2701,7 +2939,7 @@ private:
 
         // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
         // In the latter case, `1.0 / (1 << 53)` is a double with mantissa = 0 and biased exponent = 970, i.e. the value 2^-53.
-        static const double scale = 1.0 / (1ULL << 53);
+        static constexpr double scale = 1.0 / (1ULL << 53);
 
         // Multiplying the 53-bit double integer by 2^-53 does not change anything in its mantissa part.
         // It just reduces the exponent of the given 53-bit double integer.
@@ -2720,18 +2958,17 @@ private:
                 LValue value = lowDouble(m_node->child1());
                 result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
             } else {
-                LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
+                LBasicBlock shouldRoundDown = m_out.newBlock();
                 LBasicBlock continuation = m_out.newBlock();
 
                 LValue value = lowDouble(m_node->child1());
                 LValue integerValue = m_out.doubleCeil(value);
                 ValueFromBlock integerValueResult = m_out.anchor(integerValue);
 
-                LValue realPart = m_out.doubleSub(integerValue, value);
-
-                m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
+                LValue ceilMinusHalf = m_out.doubleSub(integerValue, m_out.constDouble(0.5));
+                m_out.branch(m_out.doubleGreaterThanOrUnordered(ceilMinusHalf, value), unsure(shouldRoundDown), unsure(continuation));
 
-                LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
+                LBasicBlock lastNext = m_out.appendTo(shouldRoundDown, continuation);
                 LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
                 ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
                 m_out.jump(continuation);
@@ -2827,11 +3064,11 @@ private:
     {
         DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
         CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
-        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
-        const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr();
+        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
+        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
         auto repatchingFunction = operationArithNegateOptimize;
         auto nonRepatchingFunction = operationArithNegate;
-        compileUnaryMathIC<JITNegGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction);
+        compileUnaryMathIC<JITNegGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
     }
 
     void compileArithNegate()
@@ -2857,7 +3094,7 @@ private:
         }
             
         case Int52RepUse: {
-            if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)) {
+            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)) {
                 Int52Kind kind;
                 LValue value = lowWhicheverInt52(m_node->child1(), kind);
                 LValue result = m_out.neg(value);
@@ -2887,15 +3124,22 @@ private:
         }
     }
     
-    void compileArithBitNot()
+    void compileValueBitNot()
     {
-        if (m_node->child1().useKind() == UntypedUse) {
-            LValue operand = lowJSValue(m_node->child1());
-            LValue result = vmCall(Int64, m_out.operation(operationValueBitNot), m_callFrame, operand);
+        if (m_node->child1().useKind() == BigIntUse) {
+            LValue operand = lowBigInt(m_node->child1());
+            LValue result = vmCall(pointerType(), m_out.operation(operationBitNotBigInt), m_callFrame, operand);
             setJSValue(result);
             return;
         }
 
+        LValue operand = lowJSValue(m_node->child1());
+        LValue result = vmCall(Int64, m_out.operation(operationValueBitNot), m_callFrame, operand);
+        setJSValue(result);
+    }
+
+    void compileArithBitNot()
+    {
         setInt32(m_out.bitNot(lowInt32(m_node->child1())));
     }
 
@@ -2956,28 +3200,49 @@ private:
         setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
     }
     
-    void compileBitRShift()
+    void compileValueBitRShift()
     {
-        if (m_node->isBinaryUseKind(UntypedUse)) {
-            emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
+        if (m_node->isBinaryUseKind(BigIntUse)) {
+            LValue left = lowBigInt(m_node->child1());
+            LValue right = lowBigInt(m_node->child2());
+
+            LValue result = vmCall(pointerType(), m_out.operation(operationBitRShiftBigInt), m_callFrame, left, right);
+            setJSValue(result);
             return;
         }
+
+        emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
+    }
+
+    void compileArithBitRShift()
+    {
         setInt32(m_out.aShr(
             lowInt32(m_node->child1()),
             m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
     }
     
-    void compileBitLShift()
+    void compileArithBitLShift()
     {
-        if (m_node->isBinaryUseKind(UntypedUse)) {
-            emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
-            return;
-        }
         setInt32(m_out.shl(
             lowInt32(m_node->child1()),
             m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
     }
     
+    void compileValueBitLShift()
+    {
+        if (m_node->isBinaryUseKind(BigIntUse)) {
+            LValue left = lowBigInt(m_node->child1());
+            LValue right = lowBigInt(m_node->child2());
+            
+            LValue result = vmCall(pointerType(), m_out.operation(operationBitLShiftBigInt), m_callFrame, left, right);
+            setJSValue(result);
+            return;
+        }
+
+        ASSERT(m_node->isBinaryUseKind(UntypedUse));
+        emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
+    }
+
     void compileBitURShift()
     {
         if (m_node->isBinaryUseKind(UntypedUse)) {
@@ -3114,8 +3379,9 @@ private:
         if (!validationEnabled())
             return;
 
+        LValue val = lowJSValue(m_node->child1());
         PatchpointValue* patchpoint = m_out.patchpoint(Void);
-        patchpoint->appendSomeRegister(lowJSValue(m_node->child1()));
+        patchpoint->appendSomeRegister(val);
         patchpoint->setGenerator(
             [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                 AllowMacroScratchRegisterUsage allowScratch(jit);
@@ -3137,10 +3403,7 @@ private:
     {
         LValue cell = lowCell(m_node->child1());
         speculateFunction(m_node->child1(), cell);
-        setJSValue(
-            m_out.bitXor(
-                m_out.loadPtr(cell, m_heaps.JSFunction_executable),
-                m_out.constIntPtr(JSFunctionPoison::key())));
+        setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
     }
     
     void compileArrayify()
@@ -3571,8 +3834,8 @@ private:
         PatchpointValue* patchpoint = m_out.patchpoint(Void);
         patchpoint->appendSomeRegister(base);
         patchpoint->appendSomeRegister(value);
-        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
 
         // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
@@ -3651,8 +3914,7 @@ private:
             LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
             ValueFromBlock fastResult = m_out.anchor(fastResultValue);
             
-            m_out.branch(
-                m_out.notNull(fastResultValue), usually(continuation), rarely(slowPath));
+            m_out.branch(isRopeString(cell, m_node->child1()), rarely(slowPath), usually(continuation));
             
             LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
             
@@ -3669,7 +3931,7 @@ private:
 
         DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()), m_node->arrayMode().typedArrayType());
         LValue vector = m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector);
-        setStorage(caged(Gigacage::Primitive, vector));
+        setStorage(caged(Gigacage::Primitive, vector, cell));
     }
     
     void compileCheckArray()
@@ -3713,14 +3975,15 @@ private:
 
         m_out.appendTo(notNull, continuation);
 
-        LValue butterflyPtr = caged(Gigacage::JSValue, m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly));
+        LValue butterflyPtr = caged(Gigacage::JSValue, m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly), basePtr);
         LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);
 
-        LValue vectorPtr = caged(Gigacage::Primitive, vector);
+        LValue vectorPtr = caged(Gigacage::Primitive, vector, basePtr);
 
         // FIXME: This needs caging.
         // https://bugs.webkit.org/show_bug.cgi?id=175515
         LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
+        dataPtr = removeArrayPtrTag(dataPtr);
 
         ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));
 
@@ -3824,7 +4087,23 @@ private:
             
         case Array::String: {
             LValue string = lowCell(m_node->child1());
-            setInt32(m_out.load32NonNegative(string, m_heaps.JSString_length));
+
+            LBasicBlock ropePath = m_out.newBlock();
+            LBasicBlock nonRopePath = m_out.newBlock();
+            LBasicBlock continuation = m_out.newBlock();
+
+            m_out.branch(isRopeString(string, m_node->child1()), rarely(ropePath), usually(nonRopePath));
+
+            LBasicBlock lastNext = m_out.appendTo(ropePath, nonRopePath);
+            ValueFromBlock ropeLength = m_out.anchor(m_out.load32NonNegative(string, m_heaps.JSRopeString_length));
+            m_out.jump(continuation);
+
+            m_out.appendTo(nonRopePath, continuation);
+            ValueFromBlock nonRopeLength = m_out.anchor(m_out.load32NonNegative(m_out.loadPtr(string, m_heaps.JSString_value), m_heaps.StringImpl_length));
+            m_out.jump(continuation);
+
+            m_out.appendTo(continuation, lastNext);
+            setInt32(m_out.phi(Int32, ropeLength, nonRopeLength));
             return;
         }
             
@@ -3839,9 +4118,7 @@ private:
             
         case Array::ScopedArguments: {
             LValue arguments = lowCell(m_node->child1());
-            LValue storage = m_out.bitXor(
-                m_out.loadPtr(arguments, m_heaps.ScopedArguments_storage),
-                m_out.constIntPtr(ScopedArgumentsPoison::key()));
+            LValue storage = m_out.loadPtr(arguments, m_heaps.ScopedArguments_storage);
             speculate(
                 ExoticObjectMode, noValue(), nullptr,
                 m_out.notZero32(m_out.load8ZeroExt32(storage, m_heaps.ScopedArguments_Storage_overrodeThings)));
@@ -3878,6 +4155,10 @@ private:
         speculate(
             OutOfBounds, noValue(), 0,
             m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
+
+        // Even though we claim to have JSValue result, no user of us should
+        // depend on our value. Users of this node just need to maintain that
+        // we dominate them.
     }
     
     void compileGetByVal()
@@ -3988,7 +4269,7 @@ private:
             LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
 
             speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero));
-            setJSValue(m_out.constInt64(ValueUndefined));
+            setJSValue(m_out.constInt64(JSValue::ValueUndefined));
             return;
         }
             
@@ -4039,8 +4320,6 @@ private:
             LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
             
             LValue storage = m_out.loadPtr(base, m_heaps.ScopedArguments_storage);
-            storage = m_out.bitXor(storage, m_out.constIntPtr(ScopedArgumentsPoison::key()));
-            
             LValue totalLength = m_out.load32NonNegative(
                 storage, m_heaps.ScopedArguments_Storage_totalLength);
             speculate(
@@ -4048,8 +4327,6 @@ private:
                 m_out.aboveOrEqual(index, totalLength));
             
             LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
-            table = m_out.bitXor(table, m_out.constIntPtr(ScopedArgumentsPoison::key()));
-            
             LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);
             
             LBasicBlock namedCase = m_out.newBlock();
@@ -4062,8 +4339,6 @@ private:
             LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);
             
             LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
-            scope = m_out.bitXor(scope, m_out.constIntPtr(ScopedArgumentsPoison::key()));
-            
             LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);
             
             TypedPointer address = m_out.baseIndex(
@@ -4165,13 +4440,21 @@ private:
             return;
         }
             
-        default: {
+        case Array::Int8Array:
+        case Array::Int16Array:
+        case Array::Int32Array:
+        case Array::Uint8Array:
+        case Array::Uint8ClampedArray:
+        case Array::Uint16Array:
+        case Array::Uint32Array:
+        case Array::Float32Array:
+        case Array::Float64Array: {
             LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
             LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
             
             TypedArrayType type = m_node->arrayMode().typedArrayType();
-            
-            if (isTypedView(type)) {
+            ASSERT(isTypedView(type));
+            {
                 TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
                 
                 if (isInt(type)) {
@@ -4198,15 +4481,21 @@ private:
                 setDouble(result);
                 return;
             }
-            
+        }
+
+        case Array::AnyTypedArray:
+        case Array::ForceExit:
+        case Array::SelectUsingArguments:
+        case Array::SelectUsingPredictions:
+        case Array::Unprofiled:
             DFG_CRASH(m_graph, m_node, "Bad array type");
             return;
-        } }
+        }
     }
     
     void compileGetMyArgumentByVal()
     {
-        InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
+        InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
         
         LValue originalIndex = lowInt32(m_node->child2());
         
@@ -4220,13 +4509,15 @@ private:
         
         LValue numberOfArgs = m_out.sub(numberOfArgsIncludingThis, m_out.int32One);
         LValue indexToCheck = originalIndex;
+        LValue numberOfArgumentsToSkip = m_out.int32Zero;
         if (m_node->numberOfArgumentsToSkip()) {
-            CheckValue* check = m_out.speculateAdd(indexToCheck, m_out.constInt32(m_node->numberOfArgumentsToSkip()));
+            numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
+            CheckValue* check = m_out.speculateAdd(indexToCheck, numberOfArgumentsToSkip);
             blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
             indexToCheck = check;
         }
 
-        LValue isOutOfBounds = m_out.aboveOrEqual(indexToCheck, numberOfArgs);
+        // A negative original index plus the skip count can wrap back into
+        // [0, numberOfArgumentsToSkip), so treat that range as out of bounds too.
+        LValue isOutOfBounds = m_out.bitOr(m_out.aboveOrEqual(indexToCheck, numberOfArgs), m_out.below(indexToCheck, numberOfArgumentsToSkip));
         LBasicBlock continuation = nullptr;
         LBasicBlock lastNext = nullptr;
         ValueFromBlock slowResult;
@@ -4369,7 +4660,7 @@ private:
                 }
                 
                 contiguousPutByValOutOfBounds(
-                    codeBlock()->isStrictMode()
+                    m_graph.isStrictModeFor(m_node->origin.semantic)
                         ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
                         : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict),
                     base, storage, index, value, continuation);
@@ -4395,7 +4686,7 @@ private:
                 }
                 
                 contiguousPutByValOutOfBounds(
-                    codeBlock()->isStrictMode()
+                    m_graph.isStrictModeFor(m_node->origin.semantic)
                         ? (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
                         : (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
                     base, storage, index, value, continuation);
@@ -4435,7 +4726,7 @@ private:
             LValue isOutOfBounds = m_out.aboveOrEqual(
                 index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength));
 
-            auto slowPathFunction = codeBlock()->isStrictMode()
+            auto slowPathFunction = m_graph.isStrictModeFor(m_node->origin.semantic)
                 ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
                 : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict);
             if (!arrayMode.isOutOfBounds()) {
@@ -4490,10 +4781,19 @@ private:
             return;
         }
             
-        default: {
+        case Array::Int8Array:
+        case Array::Int16Array:
+        case Array::Int32Array:
+        case Array::Uint8Array:
+        case Array::Uint8ClampedArray:
+        case Array::Uint16Array:
+        case Array::Uint32Array:
+        case Array::Float32Array:
+        case Array::Float64Array: {
             TypedArrayType type = arrayMode.typedArrayType();
             
-            if (isTypedView(type)) {
+            ASSERT(isTypedView(type));
+            {
                 TypedPointer pointer = TypedPointer(
                     m_heaps.typedArrayProperties,
                     m_out.add(
@@ -4546,11 +4846,21 @@ private:
                 
                 return;
             }
+        }
 
+        case Array::AnyTypedArray:
+        case Array::String:
+        case Array::DirectArguments:
+        case Array::ForceExit:
+        case Array::Generic:
+        case Array::ScopedArguments:
+        case Array::SelectUsingArguments:
+        case Array::SelectUsingPredictions:
+        case Array::Undecided:
+        case Array::Unprofiled:
             DFG_CRASH(m_graph, m_node, "Bad array type");
             break;
         }
-        }
     }
 
     void compilePutAccessorById()
@@ -4619,14 +4929,12 @@ private:
                 Output::StoreType storeType;
                 
                 Edge& element = m_graph.varArgChild(m_node, elementOffset);
+                speculate(element);
                 if (m_node->arrayMode().type() != Array::Double) {
                     value = lowJSValue(element, ManualOperandSpeculation);
-                    if (m_node->arrayMode().type() == Array::Int32)
-                        DFG_ASSERT(m_graph, m_node, !m_interpreter.needsTypeCheck(element, SpecInt32Only));
                     storeType = Output::Store64;
                 } else {
                     value = lowDouble(element);
-                    DFG_ASSERT(m_graph, m_node, !m_interpreter.needsTypeCheck(element, SpecDoubleReal));
                     storeType = Output::StoreDouble;
                 }
 
@@ -4665,6 +4973,11 @@ private:
                 return;
             }
 
+            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
+                Edge element = m_graph.varArgChild(m_node, elementIndex + elementOffset);
+                speculate(element);
+            }
+
             LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
             LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));
 
@@ -4701,12 +5014,9 @@ private:
                 Output::StoreType storeType;
                 if (m_node->arrayMode().type() != Array::Double) {
                     value = lowJSValue(element, ManualOperandSpeculation);
-                    if (m_node->arrayMode().type() == Array::Int32)
-                        DFG_ASSERT(m_graph, m_node, !m_interpreter.needsTypeCheck(element, SpecInt32Only));
                     storeType = Output::Store64;
                 } else {
                     value = lowDouble(element);
-                    DFG_ASSERT(m_graph, m_node, !m_interpreter.needsTypeCheck(element, SpecDoubleReal));
                     storeType = Output::StoreDouble;
                 }
 
@@ -4854,6 +5164,7 @@ private:
     {
         JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
 
+        LValue sourceArray = lowCell(m_graph.varArgChild(m_node, 0));
         LValue sourceStorage = lowStorage(m_graph.varArgChild(m_node, m_node->numChildren() - 1));
         LValue inputLength = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);
 
@@ -4879,7 +5190,7 @@ private:
 
         ArrayValues arrayResult;
         {
-            LValue indexingType = m_out.load8ZeroExt32(lowCell(m_graph.varArgChild(m_node, 0)), m_heaps.JSCell_indexingTypeAndMisc);
+            LValue indexingType = m_out.load8ZeroExt32(sourceArray, m_heaps.JSCell_indexingTypeAndMisc);
             // We can ignore the writability of the cell since we won't write to the source.
             indexingType = m_out.bitAnd(indexingType, m_out.constInt32(AllWritableArrayTypesAndHistory));
             // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
@@ -4894,6 +5205,9 @@ private:
             arrayResult = allocateJSArray(resultLength, resultLength, structure, indexingType, false, false);
         }
 
+        // Keep the sourceArray alive at least until after anything that can GC.
+        keepAlive(sourceArray);
+
         LBasicBlock loop = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
 
@@ -4991,7 +5305,7 @@ private:
             ValueFromBlock foundResult = m_out.anchor(index);
             switch (searchElementEdge.useKind()) {
             case Int32Use: {
-                // Empty value is ignored because of TagTypeNumber.
+                // Empty value is ignored because of JSValue::NumberTag.
                 LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedInt32Properties, storage, index));
                 m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                 break;
@@ -5179,7 +5493,7 @@ private:
         RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
         JSValue initializationValue = m_node->initializationValueForActivation();
         ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue());
-        if (table->singletonScope()->isStillValid()) {
+        if (table->singleton().isStillValid()) {
             LValue callResult = vmCall(
                 Int64,
                 m_out.operation(operationCreateActivationDirect), m_callFrame, weakStructure(structure),
@@ -5240,7 +5554,7 @@ private:
         LValue scope = lowCell(m_node->child1());
         
         FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
-        if (executable->singletonFunction()->isStillValid()) {
+        if (executable->singleton().isStillValid()) {
             LValue callResult =
                 isGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                 isAsyncFunction ? vmCall(Int64, m_out.operation(operationNewAsyncFunction), m_callFrame, scope, weakPointer(executable)) :
@@ -5282,11 +5596,10 @@ private:
         // We don't need memory barriers since we just fast-created the function, so it
         // must be young.
         m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
-        m_out.storePtr(weakPoisonedPointer<JSFunctionPoison>(executable), fastObject, m_heaps.JSFunction_executable);
+        m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
         m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);
-        
         mutatorFence();
-        
+
         ValueFromBlock fastResult = m_out.anchor(fastObject);
         m_out.jump(continuation);
         
@@ -5593,6 +5906,57 @@ private:
         mutatorFence();
     }
 
+    void compileNewPromise()
+    {
+        LBasicBlock slowCase = m_out.newBlock();
+        LBasicBlock continuation = m_out.newBlock();
+
+        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);
+
+        LValue promise;
+        if (m_node->isInternalPromise())
+            promise = allocateObject<JSInternalPromise>(m_node->structure(), m_out.intPtrZero, slowCase);
+        else
+            promise = allocateObject<JSPromise>(m_node->structure(), m_out.intPtrZero, slowCase);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsNumber(static_cast<unsigned>(JSPromise::Status::Pending)))), promise, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSPromise::Field::Flags)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), promise, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSPromise::Field::ReactionsOrResult)]);
+        mutatorFence();
+        ValueFromBlock fastResult = m_out.anchor(promise);
+        m_out.jump(continuation);
+
+        m_out.appendTo(slowCase, continuation);
+        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(m_node->isInternalPromise() ? operationNewInternalPromise : operationNewPromise), m_callFrame, frozenPointer(m_graph.freezeStrong(m_node->structure().get()))));
+        m_out.jump(continuation);
+
+        m_out.appendTo(continuation, lastNext);
+        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
+    }
+
+    void compileNewGenerator()
+    {
+        LBasicBlock slowCase = m_out.newBlock();
+        LBasicBlock continuation = m_out.newBlock();
+
+        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);
+
+        LValue generator = allocateObject<JSGenerator>(m_node->structure(), m_out.intPtrZero, slowCase);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsNull())), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::PolyProto)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsNumber(static_cast<int32_t>(JSGenerator::GeneratorState::Init)))), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::State)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::Next)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::This)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::Frame)]);
+        mutatorFence();
+        ValueFromBlock fastResult = m_out.anchor(generator);
+        m_out.jump(continuation);
+
+        m_out.appendTo(slowCase, continuation);
+        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationNewGenerator), m_callFrame, frozenPointer(m_graph.freezeStrong(m_node->structure().get()))));
+        m_out.jump(continuation);
+
+        m_out.appendTo(continuation, lastNext);
+        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
+    }
+
     void compileNewStringObject()
     {
         RegisteredStructure structure = m_node->structure();
@@ -5604,7 +5968,7 @@ private:
         LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);
 
         LValue fastResultValue = allocateObject<StringObject>(structure, m_out.intPtrZero, slowCase);
-        m_out.storePtr(m_out.constIntPtr(PoisonedClassInfoPtr(StringObject::info()).bits()), fastResultValue, m_heaps.JSDestructibleObject_classInfo);
+        m_out.storePtr(m_out.constIntPtr(StringObject::info()), fastResultValue, m_heaps.JSDestructibleObject_classInfo);
         m_out.store64(string, fastResultValue, m_heaps.JSWrapperObject_internalValue);
         mutatorFence();
         ValueFromBlock fastResult = m_out.anchor(fastResultValue);
@@ -5761,7 +6125,7 @@ private:
                     CheckValue* lengthCheck = nullptr;
                     if (use->op() == PhantomSpread) {
                         if (use->child1()->op() == PhantomCreateRest) {
-                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
+                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                             unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
                             LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                                 return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
@@ -5780,6 +6144,9 @@ private:
                 }
             }
 
+            // The fast path below only allocates a contiguous array; speculate and OSR exit
+            // if the result would be at least MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH elements long.
+            LValue exceedsMaxAllowedLength = m_out.aboveOrEqual(length, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
+            blessSpeculation(m_out.speculate(exceedsMaxAllowedLength), Overflow, noValue(), nullptr, m_origin);
+
             RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous));
             ArrayValues arrayValues = allocateUninitializedContiguousJSArray(length, structure);
             LValue result = arrayValues.array;
@@ -5802,7 +6169,7 @@ private:
                             index = m_out.add(index, m_out.constIntPtr(array->length()));
                         } else {
                             RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
-                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
+                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                             unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
 
                             LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
@@ -5948,6 +6315,115 @@ private:
         setJSValue(result);
     }
 
+    void compileCreatePromise()
+    {
+        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+        LValue callee = lowCell(m_node->child1());
+
+        LBasicBlock derivedCase = m_out.newBlock();
+        LBasicBlock isFunctionBlock = m_out.newBlock();
+        LBasicBlock hasRareData = m_out.newBlock();
+        LBasicBlock hasStructure = m_out.newBlock();
+        LBasicBlock checkGlobalObjectCase = m_out.newBlock();
+        LBasicBlock fastAllocationCase = m_out.newBlock();
+        LBasicBlock slowCase = m_out.newBlock();
+        LBasicBlock continuation = m_out.newBlock();
+
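+        // Fast case: when the callee is exactly the (Internal)Promise constructor, the global object's promise
+        // structure is used directly. Otherwise the derived path checks the callee's FunctionRareData allocation
+        // profile and only allocates inline when that structure has the expected ClassInfo and global object.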
+        ValueFromBlock promiseStructure = m_out.anchor(weakStructure(m_graph.registerStructure(m_node->isInternalPromise() ? globalObject->internalPromiseStructure() : globalObject->promiseStructure())));
+        m_out.branch(m_out.equal(callee, weakPointer(m_node->isInternalPromise() ? globalObject->internalPromiseConstructor() : globalObject->promiseConstructor())), unsure(fastAllocationCase), unsure(derivedCase));
+
+        LBasicBlock lastNext = m_out.appendTo(derivedCase, isFunctionBlock);
+        m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowCase));
+
+        m_out.appendTo(isFunctionBlock, hasRareData);
+        LValue rareData = m_out.loadPtr(callee, m_heaps.JSFunction_rareData);
+        m_out.branch(m_out.isZero64(rareData), rarely(slowCase), usually(hasRareData));
+
+        m_out.appendTo(hasRareData, hasStructure);
+        LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_internalFunctionAllocationProfile_structure);
+        m_out.branch(m_out.isZero64(structure), rarely(slowCase), usually(hasStructure));
+
+        m_out.appendTo(hasStructure, checkGlobalObjectCase);
+        m_out.branch(m_out.equal(m_out.loadPtr(structure, m_heaps.Structure_classInfo), m_out.constIntPtr(m_node->isInternalPromise() ? JSInternalPromise::info() : JSPromise::info())), usually(checkGlobalObjectCase), rarely(slowCase));
+
+        m_out.appendTo(checkGlobalObjectCase, fastAllocationCase);
+        ValueFromBlock derivedStructure = m_out.anchor(structure);
+        m_out.branch(m_out.equal(m_out.loadPtr(structure, m_heaps.Structure_globalObject), weakPointer(globalObject)), usually(fastAllocationCase), rarely(slowCase));
+
+        m_out.appendTo(fastAllocationCase, slowCase);
+        LValue promise;
+        if (m_node->isInternalPromise())
+            promise = allocateObject<JSInternalPromise>(m_out.phi(pointerType(), promiseStructure, derivedStructure), m_out.intPtrZero, slowCase);
+        else
+            promise = allocateObject<JSPromise>(m_out.phi(pointerType(), promiseStructure, derivedStructure), m_out.intPtrZero, slowCase);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsNumber(static_cast<unsigned>(JSPromise::Status::Pending)))), promise, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSPromise::Field::Flags)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), promise, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSPromise::Field::ReactionsOrResult)]);
+        mutatorFence();
+        ValueFromBlock fastResult = m_out.anchor(promise);
+        m_out.jump(continuation);
+
+        m_out.appendTo(slowCase, continuation);
+        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(m_node->isInternalPromise() ? operationCreateInternalPromise : operationCreatePromise), m_callFrame, callee, weakPointer(globalObject)));
+        m_out.jump(continuation);
+
+        m_out.appendTo(continuation, lastNext);
+        LValue result = m_out.phi(Int64, fastResult, slowResult);
+
+        setJSValue(result);
+    }
+
+    void compileCreateGenerator()
+    {
+        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+        LValue callee = lowCell(m_node->child1());
+
+        LBasicBlock isFunctionBlock = m_out.newBlock();
+        LBasicBlock hasRareData = m_out.newBlock();
+        LBasicBlock hasStructure = m_out.newBlock();
+        LBasicBlock checkGlobalObjectCase = m_out.newBlock();
+        LBasicBlock fastAllocationCase = m_out.newBlock();
+        LBasicBlock slowCase = m_out.newBlock();
+        LBasicBlock continuation = m_out.newBlock();
+
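+        // The generator structure comes from the callee's FunctionRareData allocation profile; a non-function
+        // callee, missing rare data, a wrong ClassInfo, or a structure from another global object all fall back
+        // to the operationCreateGenerator call.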
+        m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowCase));
+
+        LBasicBlock lastNext = m_out.appendTo(isFunctionBlock, hasRareData);
+        LValue rareData = m_out.loadPtr(callee, m_heaps.JSFunction_rareData);
+        m_out.branch(m_out.isZero64(rareData), rarely(slowCase), usually(hasRareData));
+
+        m_out.appendTo(hasRareData, hasStructure);
+        LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_internalFunctionAllocationProfile_structure);
+        m_out.branch(m_out.isZero64(structure), rarely(slowCase), usually(hasStructure));
+
+        m_out.appendTo(hasStructure, checkGlobalObjectCase);
+        m_out.branch(m_out.equal(m_out.loadPtr(structure, m_heaps.Structure_classInfo), m_out.constIntPtr(JSGenerator::info())), usually(checkGlobalObjectCase), rarely(slowCase));
+
+        m_out.appendTo(checkGlobalObjectCase, fastAllocationCase);
+        m_out.branch(m_out.equal(m_out.loadPtr(structure, m_heaps.Structure_globalObject), weakPointer(globalObject)), usually(fastAllocationCase), rarely(slowCase));
+
+        m_out.appendTo(fastAllocationCase, slowCase);
+        LValue generator = allocateObject<JSGenerator>(structure, m_out.intPtrZero, slowCase);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsNull())), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::PolyProto)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsNumber(static_cast<int32_t>(JSGenerator::GeneratorState::Init)))), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::State)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::Next)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::This)]);
+        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), generator, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSGenerator::Field::Frame)]);
+        mutatorFence();
+        ValueFromBlock fastResult = m_out.anchor(generator);
+        m_out.jump(continuation);
+
+        m_out.appendTo(slowCase, continuation);
+        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCreateGenerator), m_callFrame, callee, weakPointer(globalObject)));
+        m_out.jump(continuation);
+
+        m_out.appendTo(continuation, lastNext);
+        LValue result = m_out.phi(Int64, fastResult, slowResult);
+
+        setJSValue(result);
+    }
+
     void compileSpread()
     {
         if (m_node->child1()->op() == PhantomNewArrayBuffer) {
@@ -5994,7 +6470,7 @@ private:
             LBasicBlock continuation = m_out.newBlock();
             LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopHeader);
 
-            InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
+            InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
             unsigned numberOfArgumentsToSkip = m_node->child1()->numberOfArgumentsToSkip();
             LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
             LValue length = getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
@@ -6139,7 +6615,7 @@ private:
             m_out.jump(continuation);
 
             m_out.appendTo(slowPath, continuation);
-            LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(structure), m_out.weakPointer(m_node->cellOperand()));
+            LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(structure), frozenPointer(m_node->cellOperand()));
             ValueFromBlock slowResult = m_out.anchor(slowArray);
             m_out.jump(continuation);
 
@@ -6152,7 +6628,7 @@ private:
         
         setJSValue(vmCall(
             Int64, m_out.operation(operationNewArrayBuffer), m_callFrame,
-            weakStructure(structure), m_out.weakPointer(m_node->cellOperand())));
+            weakStructure(structure), frozenPointer(m_node->cellOperand())));
     }
 
     void compileNewArrayWithSize()
@@ -6220,6 +6696,20 @@ private:
                 m_out.int64Zero,
                 m_heaps.typedArrayProperties);
 
+#if CPU(ARM64E)
+            {
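+                // On ARM64E the typed-array storage pointer is signed with the array length: this patchpoint copies
+                // the raw pointer into the result register and re-signs it with tagArrayPtr using the zero-extended size.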
+                LValue sizePtr = m_out.zeroExtPtr(size);
+                PatchpointValue* authenticate = m_out.patchpoint(pointerType());
+                authenticate->appendSomeRegister(storage);
+                authenticate->append(sizePtr, B3::ValueRep(B3::ValueRep::SomeLateRegister));
+                authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                    jit.move(params[1].gpr(), params[0].gpr());
+                    jit.tagArrayPtr(params[2].gpr(), params[0].gpr());
+                });
+                storage = authenticate;
+            }
+#endif
+
             ValueFromBlock haveStorage = m_out.anchor(storage);
 
             LValue fastResultValue =
@@ -6425,61 +6915,6 @@ private:
             break;
         }
     }
-
-    void compileObjectToString()
-    {
-        switch (m_node->child1().useKind()) {
-        case OtherUse: {
-            speculate(m_node->child1());
-            LValue source = lowJSValue(m_node->child1(), ManualOperandSpeculation);
-            LValue result = m_out.select(m_out.equal(source, m_out.constInt64(ValueUndefined)),
-                weakPointer(vm().smallStrings.undefinedObjectString()), weakPointer(vm().smallStrings.nullObjectString()));
-            setJSValue(result);
-            return;
-        }
-        case UntypedUse: {
-            LBasicBlock cellCase = m_out.newBlock();
-            LBasicBlock objectCase = m_out.newBlock();
-            LBasicBlock notNullCase = m_out.newBlock();
-            LBasicBlock rareDataCase = m_out.newBlock();
-            LBasicBlock slowCase = m_out.newBlock();
-            LBasicBlock continuation = m_out.newBlock();
-
-            LValue source = lowJSValue(m_node->child1());
-            m_out.branch(isCell(source, provenType(m_node->child1())), unsure(cellCase), unsure(slowCase));
-
-            LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase);
-            m_out.branch(isObject(source, provenType(m_node->child1()) & SpecCell), unsure(objectCase), unsure(slowCase));
-
-            m_out.appendTo(objectCase, notNullCase);
-            LValue structure = loadStructure(source);
-            LValue previousOrRareData = m_out.loadPtr(structure, m_heaps.Structure_previousOrRareData);
-            m_out.branch(m_out.notNull(previousOrRareData), unsure(notNullCase), unsure(slowCase));
-
-            m_out.appendTo(notNullCase, rareDataCase);
-            m_out.branch(
-                m_out.notEqual(m_out.load32(previousOrRareData, m_heaps.JSCell_structureID), m_out.constInt32(m_graph.m_vm.structureStructure->structureID())),
-                unsure(rareDataCase), unsure(slowCase));
-
-            m_out.appendTo(rareDataCase, slowCase);
-            LValue objectToStringValue = m_out.loadPtr(previousOrRareData, m_heaps.StructureRareData_objectToStringValue);
-            ValueFromBlock fastResult = m_out.anchor(objectToStringValue);
-            m_out.branch(m_out.isNull(objectToStringValue), unsure(slowCase), unsure(continuation));
-
-            m_out.appendTo(slowCase, continuation);
-            LValue slowResultValue = vmCall(pointerType(), m_out.operation(operationObjectToString), m_callFrame, source);
-            ValueFromBlock slowResult = m_out.anchor(slowResultValue);
-            m_out.jump(continuation);
-
-            m_out.appendTo(continuation, lastNext);
-            setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
-            return;
-        }
-        default:
-            DFG_CRASH(m_graph, m_node, "Bad use kind");
-            return;
-        }
-    }
     
     void compileToPrimitive()
     {
@@ -6512,49 +6947,106 @@ private:
     
     void compileMakeRope()
     {
+        struct FlagsAndLength {
+            LValue flags;
+            LValue length;
+        };
+
+        Edge edges[3] = {
+            m_node->child1(),
+            m_node->child2(),
+            m_node->child3(),
+        };
         LValue kids[3];
         unsigned numKids;
-        kids[0] = lowCell(m_node->child1());
-        kids[1] = lowCell(m_node->child2());
-        if (m_node->child3()) {
-            kids[2] = lowCell(m_node->child3());
+        kids[0] = lowCell(edges[0]);
+        kids[1] = lowCell(edges[1]);
+        if (edges[2]) {
+            kids[2] = lowCell(edges[2]);
             numKids = 3;
         } else {
             kids[2] = 0;
             numKids = 2;
         }
         
+        LBasicBlock emptyCase = m_out.newBlock();
         LBasicBlock slowPath = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
         
-        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
-        
-        Allocator allocator = subspaceFor<JSRopeString>(vm())->allocatorForNonVirtual(sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);
+        Allocator allocator = allocatorForNonVirtualConcurrently<JSRopeString>(vm(), sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);
         
         LValue result = allocateCell(
             m_out.constIntPtr(allocator.localAllocator()), vm().stringStructure.get(), slowPath);
         
-        m_out.storePtr(m_out.intPtrZero, result, m_heaps.JSString_value);
-        for (unsigned i = 0; i < numKids; ++i)
-            m_out.storePtr(kids[i], result, m_heaps.JSRopeString_fibers[i]);
-        for (unsigned i = numKids; i < JSRopeString::s_maxInternalRopeLength; ++i)
-            m_out.storePtr(m_out.intPtrZero, result, m_heaps.JSRopeString_fibers[i]);
-        LValue flags = m_out.load16ZeroExt32(kids[0], m_heaps.JSString_flags);
-        LValue length = m_out.load32(kids[0], m_heaps.JSString_length);
+        // Store a null first fiber (only the isRopeInPointer bit set) up front. This keeps visitChildren safe even if this JSRopeString is discarded because of a speculation failure on the path below.
+        m_out.storePtr(m_out.constIntPtr(JSString::isRopeInPointer), result, m_heaps.JSRopeString_fiber0);
+
+        auto getFlagsAndLength = [&] (Edge& edge, LValue child) {
+            if (JSString* string = edge->dynamicCastConstant<JSString*>(vm())) {
+                return FlagsAndLength {
+                    m_out.constInt32(string->is8Bit() ? StringImpl::flagIs8Bit() : 0),
+                    m_out.constInt32(string->length())
+                };
+            }
+
+            LBasicBlock continuation = m_out.newBlock();
+            LBasicBlock ropeCase = m_out.newBlock();
+            LBasicBlock notRopeCase = m_out.newBlock();
+
+            m_out.branch(isRopeString(child, edge), unsure(ropeCase), unsure(notRopeCase));
+
+            LBasicBlock lastNext = m_out.appendTo(ropeCase, notRopeCase);
+            ValueFromBlock flagsForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_flags));
+            ValueFromBlock lengthForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_length));
+            m_out.jump(continuation);
+
+            m_out.appendTo(notRopeCase, continuation);
+            LValue stringImpl = m_out.loadPtr(child, m_heaps.JSString_value);
+            ValueFromBlock flagsForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_hashAndFlags));
+            ValueFromBlock lengthForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length));
+            m_out.jump(continuation);
+
+            m_out.appendTo(continuation, lastNext);
+            return FlagsAndLength {
+                m_out.phi(Int32, flagsForRope, flagsForNonRope),
+                m_out.phi(Int32, lengthForRope, lengthForNonRope)
+            };
+        };
+
+        FlagsAndLength flagsAndLength = getFlagsAndLength(edges[0], kids[0]);
         for (unsigned i = 1; i < numKids; ++i) {
-            flags = m_out.bitAnd(flags, m_out.load16ZeroExt32(kids[i], m_heaps.JSString_flags));
-            CheckValue* lengthCheck = m_out.speculateAdd(
-                length, m_out.load32(kids[i], m_heaps.JSString_length));
-            blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin);
-            length = lengthCheck;
+            auto mergeFlagsAndLength = [&] (Edge& edge, LValue child, FlagsAndLength previousFlagsAndLength) {
+                FlagsAndLength flagsAndLength = getFlagsAndLength(edge, child);
+                LValue flags = m_out.bitAnd(previousFlagsAndLength.flags, flagsAndLength.flags);
+                CheckValue* lengthCheck = m_out.speculateAdd(previousFlagsAndLength.length, flagsAndLength.length);
+                blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin);
+                return FlagsAndLength {
+                    flags,
+                    lengthCheck
+                };
+            };
+            flagsAndLength = mergeFlagsAndLength(edges[i], kids[i], flagsAndLength);
         }
-        m_out.store32As16(
-            m_out.bitAnd(m_out.constInt32(JSString::Is8Bit), flags),
-            result, m_heaps.JSString_flags);
-        m_out.store32(length, result, m_heaps.JSString_length);
+
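+        // Fiber packing for the new rope layout: fiber0 holds the first fiber pointer plus the isRopeInPointer and
+        // is8Bit bits; fiber1 holds the rope length in its low 32 bits and the low 32 bits of the second fiber in its
+        // high 32 bits; fiber2 holds the remaining bits of the second fiber and, when present, the third fiber shifted by 16.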
+        m_out.storePtr(
+            m_out.bitOr(
+                m_out.bitOr(kids[0], m_out.constIntPtr(JSString::isRopeInPointer)),
+                m_out.bitAnd(m_out.constIntPtr(JSRopeString::is8BitInPointer), m_out.zeroExtPtr(flagsAndLength.flags))),
+            result, m_heaps.JSRopeString_fiber0);
+        m_out.storePtr(
+            m_out.bitOr(m_out.zeroExtPtr(flagsAndLength.length), m_out.shl(kids[1], m_out.constInt32(32))),
+            result, m_heaps.JSRopeString_fiber1);
+        if (numKids == 2)
+            m_out.storePtr(m_out.lShr(kids[1], m_out.constInt32(32)), result, m_heaps.JSRopeString_fiber2);
+        else
+            m_out.storePtr(m_out.bitOr(m_out.lShr(kids[1], m_out.constInt32(32)), m_out.shl(kids[2], m_out.constInt32(16))), result, m_heaps.JSRopeString_fiber2);
         
         mutatorFence();
         ValueFromBlock fastResult = m_out.anchor(result);
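+        // A zero-length result never becomes a rope; the empty case below returns the shared empty string instead.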
+        m_out.branch(m_out.isZero32(flagsAndLength.length), rarely(emptyCase), usually(continuation));
+
+        LBasicBlock lastNext = m_out.appendTo(emptyCase, slowPath);
+        ValueFromBlock emptyResult = m_out.anchor(weakPointer(jsEmptyString(&m_graph.m_vm)));
         m_out.jump(continuation);
         
         m_out.appendTo(slowPath, continuation);
@@ -6585,7 +7077,7 @@ private:
         m_out.jump(continuation);
         
         m_out.appendTo(continuation, lastNext);
-        setJSValue(m_out.phi(Int64, fastResult, slowResult));
+        setJSValue(m_out.phi(Int64, fastResult, emptyResult, slowResult));
     }
     
     void compileStringCharAt()
@@ -6598,15 +7090,14 @@ private:
         LBasicBlock slowPath = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
             
+        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
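+        // The length now lives on the StringImpl rather than the JSString, so the impl is loaded up front and the
+        // bounds check reads StringImpl_length.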
         m_out.branch(
             m_out.aboveOrEqual(
-                index, m_out.load32NonNegative(base, m_heaps.JSString_length)),
+                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)),
             rarely(slowPath), usually(fastPath));
             
         LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
             
-        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
-            
         LBasicBlock is8Bit = m_out.newBlock();
         LBasicBlock is16Bit = m_out.newBlock();
         LBasicBlock bitsContinuation = m_out.newBlock();
@@ -6636,7 +7127,7 @@ private:
                 provenValue(m_graph.child(m_node, 1))));
         ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
         m_out.branch(
-            m_out.aboveOrEqual(char16BitValue, m_out.constInt32(0x100)),
+            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
             rarely(bigCharacter), usually(bitsContinuation));
             
         m_out.appendTo(bigCharacter, bitsContinuation);
@@ -6664,20 +7155,19 @@ private:
             results.append(m_out.anchor(m_out.intPtrZero));
         } else {
             JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
-            
-            bool prototypeChainIsSane = false;
+            Structure* stringPrototypeStructure = globalObject->stringPrototype()->structure(vm());
+            Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(vm());
+            WTF::loadLoadFence();
+
             if (globalObject->stringPrototypeChainIsSane()) {
                 // FIXME: This could be captured using a Speculation mode that means
                 // "out-of-bounds loads return a trivial value", something like
                 // SaneChainOutOfBounds.
                 // https://bugs.webkit.org/show_bug.cgi?id=144668
                 
-                m_graph.registerAndWatchStructureTransition(globalObject->stringPrototype()->structure(vm()));
-                m_graph.registerAndWatchStructureTransition(globalObject->objectPrototype()->structure(vm()));
+                m_graph.registerAndWatchStructureTransition(stringPrototypeStructure);
+                m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
 
-                prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
-            }
-            if (prototypeChainIsSane) {
                 LBasicBlock negativeIndex = m_out.newBlock();
                     
                 results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
@@ -6708,12 +7198,12 @@ private:
         LValue index = lowInt32(m_node->child2());
         LValue storage = lowStorage(m_node->child3());
         
+        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
+
         speculate(
             Uncountable, noValue(), 0,
             m_out.aboveOrEqual(
-                index, m_out.load32NonNegative(base, m_heaps.JSString_length)));
-        
-        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
+                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)));
         
         m_out.branch(
             m_out.testIsZero32(
@@ -6744,6 +7234,64 @@ private:
         setInt32(m_out.phi(Int32, char8Bit, char16Bit));
     }
 
+    void compileStringCodePointAt()
+    {
+        LBasicBlock is8Bit = m_out.newBlock();
+        LBasicBlock is16Bit = m_out.newBlock();
+        LBasicBlock isLeadSurrogate = m_out.newBlock();
+        LBasicBlock mayHaveTrailSurrogate = m_out.newBlock();
+        LBasicBlock hasTrailSurrogate = m_out.newBlock();
+        LBasicBlock continuation = m_out.newBlock();
+
+        LValue base = lowString(m_node->child1());
+        LValue index = lowInt32(m_node->child2());
+        LValue storage = lowStorage(m_node->child3());
+
+        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
+        LValue length = m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length);
+
+        speculate(Uncountable, noValue(), 0, m_out.aboveOrEqual(index, length));
+
+        m_out.branch(
+            m_out.testIsZero32(
+                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
+                m_out.constInt32(StringImpl::flagIs8Bit())),
+            unsure(is16Bit), unsure(is8Bit));
+
+        LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);
+        // FIXME: Need to cage strings!
+        // https://bugs.webkit.org/show_bug.cgi?id=174924
+        ValueFromBlock char8Bit = m_out.anchor(
+            m_out.load8ZeroExt32(m_out.baseIndex(
+                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
+                provenValue(m_node->child2()))));
+        m_out.jump(continuation);
+
+        m_out.appendTo(is16Bit, isLeadSurrogate);
+        LValue leadCharacter = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(index), provenValue(m_node->child2())));
+        ValueFromBlock char16Bit = m_out.anchor(leadCharacter);
+        LValue nextIndex = m_out.add(index, m_out.int32One);
+        m_out.branch(m_out.aboveOrEqual(nextIndex, length), unsure(continuation), unsure(isLeadSurrogate));
+
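+        // If the unit at index is a lead surrogate (0xD800-0xDBFF) and the next unit is a trail surrogate
+        // (0xDC00-0xDFFF), combine them into one code point: (lead << 10) + trail - U16_SURROGATE_OFFSET.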
+        m_out.appendTo(isLeadSurrogate, mayHaveTrailSurrogate);
+        m_out.branch(m_out.notEqual(m_out.bitAnd(leadCharacter, m_out.constInt32(0xfffffc00)), m_out.constInt32(0xd800)), unsure(continuation), unsure(mayHaveTrailSurrogate));
+
+        m_out.appendTo(mayHaveTrailSurrogate, hasTrailSurrogate);
+        JSValue indexValue = provenValue(m_node->child2());
+        JSValue nextIndexValue;
+        if (indexValue && indexValue.isInt32() && indexValue.asInt32() != INT32_MAX)
+            nextIndexValue = jsNumber(indexValue.asInt32() + 1);
+        LValue trailCharacter = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(nextIndex), nextIndexValue));
+        m_out.branch(m_out.notEqual(m_out.bitAnd(trailCharacter, m_out.constInt32(0xfffffc00)), m_out.constInt32(0xdc00)), unsure(continuation), unsure(hasTrailSurrogate));
+
+        m_out.appendTo(hasTrailSurrogate, continuation);
+        ValueFromBlock charSurrogatePair = m_out.anchor(m_out.sub(m_out.add(m_out.shl(leadCharacter, m_out.constInt32(10)), trailCharacter), m_out.constInt32(U16_SURROGATE_OFFSET)));
+        m_out.jump(continuation);
+
+        m_out.appendTo(continuation, lastNext);
+        setInt32(m_out.phi(Int32, char8Bit, char16Bit, charSurrogatePair));
+    }
+
     void compileStringFromCharCode()
     {
         Edge childEdge = m_node->child1();
@@ -6765,7 +7313,7 @@ private:
         LBasicBlock continuation = m_out.newBlock();
 
         m_out.branch(
-            m_out.aboveOrEqual(value, m_out.constInt32(0xff)),
+            m_out.above(value, m_out.constInt32(maxSingleCharacterString)),
             rarely(slowCase), usually(smallIntCase));
 
         LBasicBlock lastNext = m_out.appendTo(smallIntCase, slowCase);
@@ -7099,6 +7647,22 @@ private:
             m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]);
     }
     
+    void compileGetInternalField()
+    {
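+        // Internal fields are boxed JSValues stored at fixed offsets in JSInternalFieldObjectImpl subclasses
+        // (for example the JSPromise and JSGenerator fields initialized in the allocation fast paths above).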
+        setJSValue(
+            m_out.load64(
+                lowCell(m_node->child1()),
+                m_heaps.JSInternalFieldObjectImpl_internalFields[m_node->internalFieldIndex()]));
+    }
+
+    void compilePutInternalField()
+    {
+        m_out.store64(
+            lowJSValue(m_node->child2()),
+            lowCell(m_node->child1()),
+            m_heaps.JSInternalFieldObjectImpl_internalFields[m_node->internalFieldIndex()]);
+    }
+
     void compileGetFromArguments()
     {
         setJSValue(
@@ -7131,7 +7695,7 @@ private:
         m_out.jump(continuation);
 
         m_out.appendTo(outOfBounds, continuation);
-        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueUndefined));
+        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(JSValue::ValueUndefined));
         m_out.jump(continuation);
 
         m_out.appendTo(continuation, lastNext);
@@ -7227,7 +7791,7 @@ private:
 
             speculateString(m_node->child2(), right);
             
-            ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right));
+            ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right, m_node->child1(), m_node->child2()));
             m_out.jump(continuation);
 
             m_out.appendTo(continuation, lastNext);
@@ -7401,7 +7965,7 @@ private:
 
         // Full String compare.
         m_out.appendTo(testStringEquality, continuation);
-        ValueFromBlock slowResult = m_out.anchor(stringsEqual(leftString, rightValue));
+        ValueFromBlock slowResult = m_out.anchor(stringsEqual(leftString, rightValue, stringEdge, untypedEdge));
         m_out.jump(continuation);
 
         // Continuation.
@@ -7572,11 +8136,11 @@ private:
         RefPtr<PatchpointExceptionHandle> exceptionHandle =
             preparePatchpointForExceptions(patchpoint);
         
-        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
         patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
-        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+        patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };
 
         CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
         State* state = &m_ftlState;
@@ -7688,14 +8252,14 @@ private:
         
         if (isTail) {
             // The shuffler needs tags.
-            patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-            patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+            patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+            patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
         }
         
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
         if (!isTail) {
             patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
-            patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+            patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };
         }
         
         CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
@@ -7760,7 +8324,7 @@ private:
                         CallLinkInfo::DirectTailCall, node->origin.semantic, InvalidGPRReg);
                     callLinkInfo->setExecutableDuringCompilation(executable);
                     if (numAllocatedArgs > numPassedArgs)
-                        callLinkInfo->setMaxNumArguments(numAllocatedArgs);
+                        callLinkInfo->setMaxArgumentCountIncludingThis(numAllocatedArgs);
                     
                     jit.addLinkTask(
                         [=] (LinkBuffer& linkBuffer) {
@@ -7794,7 +8358,7 @@ private:
                     node->origin.semantic, InvalidGPRReg);
                 callLinkInfo->setExecutableDuringCompilation(executable);
                 if (numAllocatedArgs > numPassedArgs)
-                    callLinkInfo->setMaxNumArguments(numAllocatedArgs);
+                    callLinkInfo->setMaxArgumentCountIncludingThis(numAllocatedArgs);
                 
                 params.addLatePath(
                     [=] (CCallHelpers& jit) {
@@ -7874,8 +8438,8 @@ private:
         PatchpointValue* patchpoint = m_out.patchpoint(Void);
         patchpoint->appendVector(arguments);
 
-        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
 
         // Prevent any of the arguments from using the scratch register.
         patchpoint->clobberEarly(RegisterSet::macroScratchRegisters());
@@ -7989,7 +8553,7 @@ private:
             }
 
             RELEASE_ASSERT(target->op() == PhantomCreateRest);
-            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame;
+            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
             unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
             LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                 return m_out.zeroExtPtr(this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
@@ -8009,14 +8573,14 @@ private:
         patchpoint->append(thisArg, ValueRep::WarmAny);
         patchpoint->append(argumentCountIncludingThis, ValueRep::WarmAny);
         patchpoint->appendVectorWithRep(patchpointArguments, ValueRep::WarmAny);
-        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
 
         RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
 
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
         patchpoint->clobber(RegisterSet::volatileRegistersForJSCall()); // No inputs will be in a volatile register.
-        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+        patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };
 
         patchpoint->numGPScratchRegisters = 0;
 
@@ -8152,7 +8716,7 @@ private:
                         }
 
                         RELEASE_ASSERT(target->op() == PhantomCreateRest);
-                        InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame;
+                        InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
 
                         unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
 
@@ -8313,12 +8877,12 @@ private:
         RefPtr<PatchpointExceptionHandle> exceptionHandle =
             preparePatchpointForExceptions(patchpoint);
         
-        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
 
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
         patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
-        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+        patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };
 
         // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
         unsigned minimumJSCallAreaSize =
@@ -8432,9 +8996,9 @@ private:
                     CCallHelpers::JumpList slowCase;
                     InlineCallFrame* inlineCallFrame;
                     if (node->child3())
-                        inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame;
+                        inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame();
                     else
-                        inlineCallFrame = node->origin.semantic.inlineCallFrame;
+                        inlineCallFrame = node->origin.semantic.inlineCallFrame();
 
                     // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
                     emitSetupVarargsFrameFastCase(*vm, jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
@@ -8571,11 +9135,11 @@ private:
         
         RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
         
-        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
         patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
-        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+        patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };
         
         CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
         State* state = &m_ftlState;
@@ -8678,9 +9242,9 @@ private:
         LoadVarargsData* data = m_node->loadVarargsData();
         InlineCallFrame* inlineCallFrame;
         if (m_node->child1())
-            inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
+            inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
         else
-            inlineCallFrame = m_node->origin.semantic.inlineCallFrame;
+            inlineCallFrame = m_node->origin.semantic.inlineCallFrame();
 
         LValue length = nullptr; 
         LValue lengthIncludingThis = nullptr;
@@ -8827,7 +9391,7 @@ private:
             }
 
             ASSERT(target->op() == PhantomCreateRest);
-            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame;
+            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
             unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
             spreadLengths.append(cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                 return this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
@@ -8878,7 +9442,7 @@ private:
             }
 
             RELEASE_ASSERT(target->op() == PhantomCreateRest);
-            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame;
+            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
 
             LValue sourceStart = this->getArgumentsStart(inlineCallFrame, target->numberOfArgumentsToSkip());
             LValue spreadLength = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
@@ -9051,25 +9615,25 @@ private:
             LBasicBlock is16Bit = m_out.newBlock();
             LBasicBlock continuation = m_out.newBlock();
             
+            ValueFromBlock fastValue = m_out.anchor(m_out.loadPtr(stringValue, m_heaps.JSString_value));
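+            // Rope strings are resolved through operationResolveRope first; both the single-character check and the
+            // character load below read from the resolved StringImpl.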
+            m_out.branch(
+                isRopeString(stringValue, m_node->child1()),
+                rarely(needResolution), usually(resolved));
+
+            LBasicBlock lastNext = m_out.appendTo(needResolution, resolved);
+            ValueFromBlock slowValue = m_out.anchor(
+                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, stringValue));
+            m_out.jump(resolved);
+
+            m_out.appendTo(resolved, lengthIs1);
+            LValue value = m_out.phi(pointerType(), fastValue, slowValue);
             m_out.branch(
                 m_out.notEqual(
-                    m_out.load32NonNegative(stringValue, m_heaps.JSString_length),
+                    m_out.load32NonNegative(value, m_heaps.StringImpl_length),
                     m_out.int32One),
                 unsure(lowBlock(data->fallThrough.block)), unsure(lengthIs1));
-            
-            LBasicBlock lastNext = m_out.appendTo(lengthIs1, needResolution);
-            Vector<ValueFromBlock, 2> values;
-            LValue fastValue = m_out.loadPtr(stringValue, m_heaps.JSString_value);
-            values.append(m_out.anchor(fastValue));
-            m_out.branch(m_out.isNull(fastValue), rarely(needResolution), usually(resolved));
-            
-            m_out.appendTo(needResolution, resolved);
-            values.append(m_out.anchor(
-                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, stringValue)));
-            m_out.jump(resolved);
-            
-            m_out.appendTo(resolved, is8Bit);
-            LValue value = m_out.phi(pointerType(), values);
+
+            m_out.appendTo(lengthIs1, is8Bit);
             LValue characterData = m_out.loadPtr(value, m_heaps.StringImpl_data);
             m_out.branch(
                 m_out.testNonZero32(
@@ -9111,7 +9675,7 @@ private:
             }
                 
             case StringUse: {
-                switchString(data, lowString(m_node->child1()));
+                switchString(data, lowString(m_node->child1()), m_node->child1());
                 return;
             }
                 
@@ -9133,7 +9697,7 @@ private:
                 
                 m_out.appendTo(isStringBlock, lastNext);
                 
-                switchString(data, value);
+                switchString(data, value, m_node->child1());
                 return;
             }
                 
@@ -9229,7 +9793,7 @@ private:
             patchpoint->effects = Effects::forCall();
             patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::edx });
             // The low 32-bits of rdtsc go into rax.
-            patchpoint->resultConstraint = ValueRep::reg(X86Registers::eax);
+            patchpoint->resultConstraints = { ValueRep::reg(X86Registers::eax) };
             patchpoint->setGenerator( [=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                 jit.rdtsc();
             });
@@ -9471,17 +10035,16 @@ private:
         return key;
     }
 
-    LValue mapHashString(LValue string)
+    LValue mapHashString(LValue string, Edge& edge)
     {
         LBasicBlock nonEmptyStringCase = m_out.newBlock();
         LBasicBlock slowCase = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
 
-        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
-        m_out.branch(
-            m_out.equal(stringImpl, m_out.constIntPtr(0)), unsure(slowCase), unsure(nonEmptyStringCase));
+        m_out.branch(isRopeString(string, edge), rarely(slowCase), usually(nonEmptyStringCase));
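+        // Ropes are detected with isRopeString (hence the extra Edge parameter for the proven type) rather than a
+        // null impl check, and they go to the slow path before the StringImpl is loaded.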
 
         LBasicBlock lastNext = m_out.appendTo(nonEmptyStringCase, slowCase);
+        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
         LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
         ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
         m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
@@ -9520,7 +10083,7 @@ private:
                 isStringValue, unsure(isString), unsure(notString));
 
             LBasicBlock lastNext = m_out.appendTo(isString, notString);
-            ValueFromBlock stringResult = m_out.anchor(mapHashString(value));
+            ValueFromBlock stringResult = m_out.anchor(mapHashString(value, m_node->child1()));
             m_out.jump(continuation);
 
             m_out.appendTo(notString, continuation);
@@ -9534,7 +10097,7 @@ private:
 
         case StringUse: {
             LValue string = lowString(m_node->child1());
-            setInt32(mapHashString(string));
+            setInt32(mapHashString(string, m_node->child1()));
             return;
         }
 
@@ -9561,11 +10124,10 @@ private:
             isString, unsure(isStringCase), unsure(straightHash));
 
         m_out.appendTo(isStringCase, nonEmptyStringCase);
-        LValue stringImpl = m_out.loadPtr(value, m_heaps.JSString_value);
-        m_out.branch(
-            m_out.equal(stringImpl, m_out.constIntPtr(0)), rarely(slowCase), usually(nonEmptyStringCase));
+        m_out.branch(isRopeString(value, m_node->child1()), rarely(slowCase), usually(nonEmptyStringCase));
 
         m_out.appendTo(nonEmptyStringCase, straightHash);
+        LValue stringImpl = m_out.loadPtr(value, m_heaps.JSString_value);
         LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
         ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
         m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
@@ -9605,19 +10167,21 @@ private:
 
         m_out.appendTo(notInt32NumberCase, notNaNCase);
         LValue doubleValue = unboxDouble(key);
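+        // NaN keys normalize to the canonical NaN encoding, and doubles holding an exact int32 value are re-boxed
+        // as int32; other doubles pass through unchanged, so equal keys always share one encoding.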
+        ValueFromBlock normalizedNaNResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsNaN())));
         m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue), unsure(continuation), unsure(notNaNCase));
 
         m_out.appendTo(notNaNCase, convertibleCase);
         LValue integerValue = m_out.doubleToInt(doubleValue);
         LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
+        ValueFromBlock doubleResult = m_out.anchor(key);
         m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, integerValueConvertedToDouble), unsure(continuation), unsure(convertibleCase));
 
         m_out.appendTo(convertibleCase, continuation);
-        ValueFromBlock slowResult = m_out.anchor(boxInt32(integerValue));
+        ValueFromBlock boxedIntResult = m_out.anchor(boxInt32(integerValue));
         m_out.jump(continuation);
 
         m_out.appendTo(continuation, lastNext);
-        setJSValue(m_out.phi(Int64, fastResult, slowResult));
+        setJSValue(m_out.phi(Int64, fastResult, normalizedNaNResult, doubleResult, boxedIntResult));
     }
 
     void compileGetMapBucket()
@@ -9758,9 +10322,9 @@ private:
         m_out.appendTo(notPresentInTable, continuation);
         ValueFromBlock notPresentResult;
         if (m_node->child1().useKind() == MapObjectUse)
-            notPresentResult = m_out.anchor(weakPointer(vm().sentinelMapBucket.get()));
+            notPresentResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
         else if (m_node->child1().useKind() == SetObjectUse)
-            notPresentResult = m_out.anchor(weakPointer(vm().sentinelSetBucket.get()));
+            notPresentResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
         else
             RELEASE_ASSERT_NOT_REACHED();
         m_out.jump(continuation);
@@ -9806,10 +10370,10 @@ private:
         m_out.appendTo(noBucket, hasBucket);
         ValueFromBlock noBucketResult;
         if (m_node->bucketOwnerType() == BucketOwnerType::Map)
-            noBucketResult = m_out.anchor(weakPointer(vm().sentinelMapBucket.get()));
+            noBucketResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
         else {
             ASSERT(m_node->bucketOwnerType() == BucketOwnerType::Set);
-            noBucketResult = m_out.anchor(weakPointer(vm().sentinelSetBucket.get()));
+            noBucketResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
         }
         m_out.jump(continuation);
 
@@ -10095,8 +10659,8 @@ private:
 
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(base);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
 
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
 
@@ -10160,17 +10724,17 @@ private:
         switch (m_node->child2().useKind()) {
         case StringUse: {
             LBasicBlock isNonEmptyString = m_out.newBlock();
-            LBasicBlock isAtomicString = m_out.newBlock();
+            LBasicBlock isAtomString = m_out.newBlock();
 
             keyAsValue = lowString(m_node->child2());
-            uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
-            m_out.branch(m_out.notNull(uniquedStringImpl), usually(isNonEmptyString), rarely(slowCase));
+            m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));
 
-            lastNext = m_out.appendTo(isNonEmptyString, isAtomicString);
-            LValue isNotAtomic = m_out.testIsZero32(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
-            m_out.branch(isNotAtomic, rarely(slowCase), usually(isAtomicString));
+            lastNext = m_out.appendTo(isNonEmptyString, isAtomString);
+            uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
+            LValue isNotAtomic = m_out.testIsZero32(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtom()));
+            m_out.branch(isNotAtomic, rarely(slowCase), usually(isAtomString));
 
-            m_out.appendTo(isAtomicString, slowCase);
+            m_out.appendTo(isAtomString, slowCase);
             break;
         }
         case SymbolUse: {
@@ -10194,12 +10758,12 @@ private:
             m_out.branch(isString(keyAsValue), unsure(isStringCase), unsure(notStringCase));
 
             m_out.appendTo(isStringCase, isNonEmptyString);
-            LValue implFromString = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
-            ValueFromBlock stringResult = m_out.anchor(implFromString);
-            m_out.branch(m_out.notNull(implFromString), usually(isNonEmptyString), rarely(slowCase));
+            m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));
 
             m_out.appendTo(isNonEmptyString, notStringCase);
-            LValue isNotAtomic = m_out.testIsZero32(m_out.load32(implFromString, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
+            LValue implFromString = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
+            ValueFromBlock stringResult = m_out.anchor(implFromString);
+            LValue isNotAtomic = m_out.testIsZero32(m_out.load32(implFromString, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtom()));
             m_out.branch(isNotAtomic, rarely(slowCase), usually(hasUniquedStringImpl));
 
             m_out.appendTo(notStringCase, isSymbolCase);
@@ -10219,7 +10783,7 @@ private:
 
         ASSERT(keyAsValue);
 
-        // Note that we don't test if the hash is zero here. AtomicStringImpl's can't have a zero
+        // Note that we don't test if the hash is zero here. AtomStringImpls can't have a zero
         // hash; a SymbolImpl, however, may. But, because this is a cache, we don't care. We only
         // ever load the result from the cache if the cache entry matches what we are querying for.
         // So we either get super lucky and use zero for the hash and somehow collide with the entity
@@ -10336,10 +10900,10 @@ private:
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(value);
         patchpoint->appendSomeRegister(prototype);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
         patchpoint->numGPScratchRegisters = 2;
-        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+        patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
         
         RefPtr<PatchpointExceptionHandle> exceptionHandle =
@@ -10446,12 +11010,13 @@ private:
     
     void compileHasIndexedProperty()
     {
+        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
+        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
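+        // HasIndexedProperty now takes var-args children: base is child 0, index is child 1, and the storage edge
+        // used by the fast paths below is child 2.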
+
         switch (m_node->arrayMode().type()) {
         case Array::Int32:
         case Array::Contiguous: {
-            LValue base = lowCell(m_node->child1());
-            LValue index = lowInt32(m_node->child2());
-            LValue storage = lowStorage(m_node->child3());
+            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
             LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
 
             IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
@@ -10472,7 +11037,7 @@ private:
                 lastNext = m_out.insertNewBlocksBefore(slowCase);
 
             LValue checkHoleResultValue =
-                m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_node->child2())));
+                m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1))));
             ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
             m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
 
@@ -10486,9 +11051,7 @@ private:
             return;
         }
         case Array::Double: {
-            LValue base = lowCell(m_node->child1());
-            LValue index = lowInt32(m_node->child2());
-            LValue storage = lowStorage(m_node->child3());
+            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
             LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
             
             IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
@@ -10507,7 +11070,7 @@ private:
             } else
                 lastNext = m_out.insertNewBlocksBefore(slowCase);
 
-            LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_node->child2()));
+            LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
             LValue checkHoleResultValue = m_out.doubleEqual(doubleValue, doubleValue);
             ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
             m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
@@ -10523,9 +11086,7 @@ private:
         }
 
         case Array::ArrayStorage: {
-            LValue base = lowCell(m_node->child1());
-            LValue index = lowInt32(m_node->child2());
-            LValue storage = lowStorage(m_node->child3());
+            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
             LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
 
             LBasicBlock slowCase = m_out.newBlock();
@@ -10543,7 +11104,7 @@ private:
                 lastNext = m_out.insertNewBlocksBefore(slowCase);
 
             LValue checkHoleResultValue =
-                m_out.notZero64(m_out.load64(baseIndex(m_heaps.ArrayStorage_vector, storage, index, m_node->child2())));
+                m_out.notZero64(m_out.load64(baseIndex(m_heaps.ArrayStorage_vector, storage, index, m_graph.varArgChild(m_node, 1))));
             ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
             m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
 
@@ -10558,8 +11119,6 @@ private:
         }
 
         default: {
-            LValue base = lowCell(m_node->child1());
-            LValue index = lowInt32(m_node->child2());
             LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
             setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
             break;
@@ -10683,7 +11242,7 @@ private:
         m_out.jump(continuation);
 
         m_out.appendTo(outOfBounds, continuation);
-        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
+        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(JSValue::ValueNull));
         m_out.jump(continuation);
         
         m_out.appendTo(continuation, lastNext);
@@ -10709,7 +11268,7 @@ private:
         m_out.jump(continuation);
 
         m_out.appendTo(outOfBounds, continuation);
-        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
+        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(JSValue::ValueNull));
         m_out.jump(continuation);
         
         m_out.appendTo(continuation, lastNext);
@@ -10780,7 +11339,7 @@ private:
             
             if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
                 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
-                Allocator cellAllocator = subspaceFor<JSFinalObject>(vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
+                Allocator cellAllocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);
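+                // Allocator lookup goes through allocatorForNonVirtualConcurrently; as the name
+                // suggests, this is the variant meant to be callable from the concurrent FTL
+                // compilation thread.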
 
                 bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
                 unsigned indexingHeaderSize = 0;
@@ -11037,7 +11596,6 @@ private:
 
         LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
         SymbolTable* table = m_node->castOperand<SymbolTable*>();
-        ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>(vm()));
         RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
 
         LBasicBlock slowPath = m_out.newBlock();
@@ -11225,7 +11783,6 @@ private:
         FrozenValue* regexp = m_node->cellOperand();
         LValue lastIndex = lowJSValue(m_node->child1());
         ASSERT(regexp->cell()->inherits<RegExp>(vm()));
-        ASSERT(m_node->castOperand<RegExp*>()->isValid());
 
         LBasicBlock slowCase = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
@@ -11234,9 +11791,8 @@ private:
 
         auto structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->regExpStructure());
         LValue fastResultValue = allocateObject<RegExpObject>(structure, m_out.intPtrZero, slowCase);
-        m_out.storePtr(frozenPointer(regexp), fastResultValue, m_heaps.RegExpObject_regExp);
+        m_out.storePtr(frozenPointer(regexp), fastResultValue, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag);
         m_out.store64(lastIndex, fastResultValue, m_heaps.RegExpObject_lastIndex);
-        m_out.store32As8(m_out.constInt32(true), m_out.address(fastResultValue, m_heaps.RegExpObject_lastIndexIsWritable));
         mutatorFence();
         ValueFromBlock fastResult = m_out.anchor(fastResultValue);
         m_out.jump(continuation);
@@ -11322,7 +11878,9 @@ private:
 
             speculate(
                 ExoticObjectMode, noValue(), nullptr,
-                m_out.isZero32(m_out.load8ZeroExt32(regExp, m_heaps.RegExpObject_lastIndexIsWritable)));
+                m_out.testNonZeroPtr(
+                    m_out.loadPtr(regExp, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag),
+                    m_out.constIntPtr(RegExpObject::lastIndexIsNotWritableFlag)));
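+            // The check above tests RegExpObject::lastIndexIsNotWritableFlag, which is now packed
+            // into the object's regExp pointer field; if the flag is set, writing lastIndex takes
+            // the ExoticObjectMode OSR exit.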
 
             m_out.store64(value, regExp, m_heaps.RegExpObject_lastIndex);
             return;
@@ -11361,25 +11919,25 @@ private:
 
     void compileRecordRegExpCachedResult()
     {
-        Edge constructorEdge = m_graph.varArgChild(m_node, 0);
+        Edge globalObjectEdge = m_graph.varArgChild(m_node, 0);
         Edge regExpEdge = m_graph.varArgChild(m_node, 1);
         Edge stringEdge = m_graph.varArgChild(m_node, 2);
         Edge startEdge = m_graph.varArgChild(m_node, 3);
         Edge endEdge = m_graph.varArgChild(m_node, 4);
         
-        LValue constructor = lowCell(constructorEdge);
+        LValue globalObject = lowCell(globalObjectEdge);
         LValue regExp = lowCell(regExpEdge);
         LValue string = lowCell(stringEdge);
         LValue start = lowInt32(startEdge);
         LValue end = lowInt32(endEdge);
 
-        m_out.storePtr(regExp, constructor, m_heaps.RegExpConstructor_cachedResult_lastRegExp);
-        m_out.storePtr(string, constructor, m_heaps.RegExpConstructor_cachedResult_lastInput);
-        m_out.store32(start, constructor, m_heaps.RegExpConstructor_cachedResult_result_start);
-        m_out.store32(end, constructor, m_heaps.RegExpConstructor_cachedResult_result_end);
+        m_out.storePtr(regExp, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastRegExp);
+        m_out.storePtr(string, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastInput);
+        m_out.store32(start, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_start);
+        m_out.store32(end, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_end);
         m_out.store32As8(
             m_out.constInt32(0),
-            m_out.address(constructor, m_heaps.RegExpConstructor_cachedResult_reified));
+            m_out.address(globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_reified));
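+        // The cached RegExp match result now lives in the JSGlobalObject's regExpGlobalData rather
+        // than on the RegExpConstructor, so every store above targets the global object.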
     }
     
     struct ArgumentsLength {
@@ -11419,12 +11977,12 @@ private:
     
     ArgumentsLength getArgumentsLength()
     {
-        return getArgumentsLength(m_node->origin.semantic.inlineCallFrame);
+        return getArgumentsLength(m_node->origin.semantic.inlineCallFrame());
     }
     
     LValue getCurrentCallee()
     {
-        if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame) {
+        if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame()) {
             if (frame->isClosureCall)
                 return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister()));
             return weakPointer(frame->calleeRecovery.constant().asCell());
@@ -11440,7 +11998,7 @@ private:
     
     LValue getArgumentsStart()
     {
-        return getArgumentsStart(m_node->origin.semantic.inlineCallFrame);
+        return getArgumentsStart(m_node->origin.semantic.inlineCallFrame());
     }
     
     template<typename Functor>
@@ -11740,8 +12298,8 @@ private:
 
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(base);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
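+        // The tag constants are appended as late uses pinned to their reserved registers, so they
+        // are guaranteed to sit in numberTagRegister / notCellMaskRegister while this patchpoint's
+        // code runs.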
 
         // FIXME: If this is a GetByIdFlush/GetByIdDirectFlush, we might get some performance boost if we claim that it
         // clobbers volatile registers late. It's not necessary for correctness, though, since the
@@ -11813,8 +12371,8 @@ private:
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(base);
         patchpoint->appendSomeRegister(thisValue);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
 
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
 
@@ -11941,45 +12499,46 @@ private:
 
     void compileStringSlice()
     {
+        LBasicBlock lengthCheckCase = m_out.newBlock();
         LBasicBlock emptyCase = m_out.newBlock();
         LBasicBlock notEmptyCase = m_out.newBlock();
         LBasicBlock oneCharCase = m_out.newBlock();
-        LBasicBlock bitCheckCase = m_out.newBlock();
         LBasicBlock is8Bit = m_out.newBlock();
         LBasicBlock is16Bit = m_out.newBlock();
         LBasicBlock bitsContinuation = m_out.newBlock();
         LBasicBlock bigCharacter = m_out.newBlock();
         LBasicBlock slowCase = m_out.newBlock();
+        LBasicBlock ropeSlowCase = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
 
         LValue string = lowString(m_node->child1());
-        LValue length = m_out.load32NonNegative(string, m_heaps.JSString_length);
         LValue start = lowInt32(m_node->child2());
         LValue end = nullptr;
         if (m_node->child3())
             end = lowInt32(m_node->child3());
+        else
+            end = m_out.constInt32(std::numeric_limits<int32_t>::max());
+        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropeSlowCase), usually(lengthCheckCase));
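+        // Rope strings have no contiguous StringImpl to index, so they branch straight to
+        // ropeSlowCase, which calls operationStringSlice with the original start/end; a missing end
+        // is encoded as INT32_MAX, i.e. "through the end of the string".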
 
+        LBasicBlock lastNext = m_out.appendTo(lengthCheckCase, emptyCase);
+        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
+        LValue length = m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length);
         auto range = populateSliceRange(start, end, length);
         LValue from = range.first;
         LValue to = range.second;
-
         LValue span = m_out.sub(to, from);
         m_out.branch(m_out.lessThanOrEqual(span, m_out.int32Zero), unsure(emptyCase), unsure(notEmptyCase));
 
-        Vector<ValueFromBlock, 4> results;
+        Vector<ValueFromBlock, 5> results;
 
-        LBasicBlock lastNext = m_out.appendTo(emptyCase, notEmptyCase);
-        results.append(m_out.anchor(weakPointer(jsEmptyString(&vm()))));
+        m_out.appendTo(emptyCase, notEmptyCase);
+        results.append(m_out.anchor(weakPointer(jsEmptyString(vm()))));
         m_out.jump(continuation);
 
         m_out.appendTo(notEmptyCase, oneCharCase);
         m_out.branch(m_out.equal(span, m_out.int32One), unsure(oneCharCase), unsure(slowCase));
 
-        m_out.appendTo(oneCharCase, bitCheckCase);
-        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
-        m_out.branch(m_out.isNull(stringImpl), unsure(slowCase), unsure(bitCheckCase));
-
-        m_out.appendTo(bitCheckCase, is8Bit);
+        m_out.appendTo(oneCharCase, is8Bit);
         LValue storage = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
         m_out.branch(
             m_out.testIsZero32(
@@ -11988,8 +12547,6 @@ private:
             unsure(is16Bit), unsure(is8Bit));
 
         m_out.appendTo(is8Bit, is16Bit);
-        // FIXME: Need to cage strings!
-        // https://bugs.webkit.org/show_bug.cgi?id=174924
         ValueFromBlock char8Bit = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, storage, m_out.zeroExtPtr(from))));
         m_out.jump(bitsContinuation);
 
@@ -11997,7 +12554,7 @@ private:
         LValue char16BitValue = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(from)));
         ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
         m_out.branch(
-            m_out.aboveOrEqual(char16BitValue, m_out.constInt32(0x100)),
+            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
             rarely(bigCharacter), usually(bitsContinuation));
 
         m_out.appendTo(bigCharacter, bitsContinuation);
@@ -12013,10 +12570,14 @@ private:
             m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
         m_out.jump(continuation);
 
-        m_out.appendTo(slowCase, continuation);
+        m_out.appendTo(slowCase, ropeSlowCase);
         results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSubstr), m_callFrame, string, from, span)));
         m_out.jump(continuation);
 
+        m_out.appendTo(ropeSlowCase, continuation);
+        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSlice), m_callFrame, string, start, end)));
+        m_out.jump(continuation);
+
         m_out.appendTo(continuation, lastNext);
         setJSValue(m_out.phi(pointerType(), results));
     }
@@ -12033,12 +12594,11 @@ private:
         LValue string = lowString(m_node->child1());
         ValueFromBlock startIndex = m_out.anchor(m_out.constInt32(0));
         ValueFromBlock startIndexForCall = m_out.anchor(m_out.constInt32(0));
-        LValue impl = m_out.loadPtr(string, m_heaps.JSString_value);
-        m_out.branch(m_out.isZero64(impl),
+        m_out.branch(isRopeString(string, m_node->child1()),
             unsure(slowPath), unsure(notRope));
 
         LBasicBlock lastNext = m_out.appendTo(notRope, is8Bit);
-
+        LValue impl = m_out.loadPtr(string, m_heaps.JSString_value);
         m_out.branch(
             m_out.testIsZero32(
                 m_out.load32(impl, m_heaps.StringImpl_hashAndFlags),
@@ -12141,7 +12701,7 @@ private:
     void compilePutDynamicVar()
     {
         UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
-        setJSValue(vmCall(Void, m_out.operation(operationPutDynamicVar),
+        setJSValue(vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutDynamicVarStrict : operationPutDynamicVarNonStrict),
             m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
     }
     
@@ -12172,8 +12732,7 @@ private:
             LBasicBlock continuation = m_out.newBlock();
 
             LValue structure = loadStructure(cell);
-            LValue poisonedClassInfo = m_out.loadPtr(structure, m_heaps.Structure_classInfo);
-            LValue classInfo = m_out.bitXor(poisonedClassInfo, m_out.constInt64(GlobalDataPoison::key()));
+            LValue classInfo = m_out.loadPtr(structure, m_heaps.Structure_classInfo);
             ValueFromBlock otherAtStart = m_out.anchor(classInfo);
             m_out.jump(loop);
 
@@ -12194,8 +12753,8 @@ private:
         RefPtr<Snippet> domJIT = classInfo->checkSubClassSnippet();
         PatchpointValue* patchpoint = m_out.patchpoint(Void);
         patchpoint->appendSomeRegister(cell);
-        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
 
         NodeOrigin origin = m_origin;
         unsigned osrExitArgumentOffset = patchpoint->numChildren();
@@ -12271,16 +12830,16 @@ private:
 
         unsigned argumentCountIncludingThis = signature->argumentCount + 1;
         LValue result;
-        assertIsTaggedWith(reinterpret_cast<void*>(signature->unsafeFunction), CFunctionPtrTag);
+        auto function = CFunctionPtr(signature->functionWithoutTypeCheck);
         switch (argumentCountIncludingThis) {
         case 1:
-            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EP>(signature->unsafeFunction)), m_callFrame, operands[0]);
+            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EP>(function.get())), m_callFrame, operands[0]);
             break;
         case 2:
-            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1]);
+            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPP>(function.get())), m_callFrame, operands[0], operands[1]);
             break;
         case 3:
-            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1], operands[2]);
+            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPPP>(function.get())), m_callFrame, operands[0], operands[1], operands[2]);
             break;
         default:
             RELEASE_ASSERT_NOT_REACHED();
@@ -12319,13 +12878,13 @@ private:
         patchpoint->appendSomeRegister(base);
         if (domJIT->requireGlobalObject)
             patchpoint->appendSomeRegister(globalObject);
-        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
         RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
         patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
         patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
-        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+        patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
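+        // B3 patchpoints now carry a vector of result constraints; a single-result patchpoint
+        // supplies a one-element list.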
 
         State* state = &m_ftlState;
         Node* node = m_node;
@@ -12441,7 +13000,7 @@ private:
             indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
         speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));
 
-        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector));
+        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);
 
         TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));
 
@@ -12600,7 +13159,7 @@ private:
             RELEASE_ASSERT_NOT_REACHED();
         }
 
-        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector));
+        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);
         TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));
 
         if (data.isFloatingPoint) {
@@ -12810,7 +13369,7 @@ private:
         setBoolean(m_out.phi(Int32, fastResult, slowResult));
     }
 
-    LValue stringsEqual(LValue leftJSString, LValue rightJSString)
+    LValue stringsEqual(LValue leftJSString, LValue rightJSString, Edge leftJSStringEdge = Edge(), Edge rightJSStringEdge = Edge())
     {
         LBasicBlock notTriviallyUnequalCase = m_out.newBlock();
         LBasicBlock notEmptyCase = m_out.newBlock();
@@ -12825,29 +13384,23 @@ private:
         LBasicBlock slowCase = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
 
-        LValue length = m_out.load32(leftJSString, m_heaps.JSString_length);
-
-        m_out.branch(
-            m_out.notEqual(length, m_out.load32(rightJSString, m_heaps.JSString_length)),
-            unsure(falseCase), unsure(notTriviallyUnequalCase));
-
-        LBasicBlock lastNext = m_out.appendTo(notTriviallyUnequalCase, notEmptyCase);
-
-        m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase));
+        m_out.branch(isRopeString(leftJSString, leftJSStringEdge), rarely(slowCase), usually(leftReadyCase));
 
-        m_out.appendTo(notEmptyCase, leftReadyCase);
+        LBasicBlock lastNext = m_out.appendTo(leftReadyCase, rightReadyCase);
+        m_out.branch(isRopeString(rightJSString, rightJSStringEdge), rarely(slowCase), usually(rightReadyCase));
 
+        m_out.appendTo(rightReadyCase, notTriviallyUnequalCase);
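+        // Both strings are known to be resolved (non-rope) here, so their StringImpls can be loaded
+        // directly; the length is read from the impls, and differing lengths are trivially unequal.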
         LValue left = m_out.loadPtr(leftJSString, m_heaps.JSString_value);
         LValue right = m_out.loadPtr(rightJSString, m_heaps.JSString_value);
+        LValue length = m_out.load32(left, m_heaps.StringImpl_length);
+        m_out.branch(
+            m_out.notEqual(length, m_out.load32(right, m_heaps.StringImpl_length)),
+            unsure(falseCase), unsure(notTriviallyUnequalCase));
 
-        m_out.branch(m_out.notNull(left), usually(leftReadyCase), rarely(slowCase));
-
-        m_out.appendTo(leftReadyCase, rightReadyCase);
-        
-        m_out.branch(m_out.notNull(right), usually(rightReadyCase), rarely(slowCase));
-
-        m_out.appendTo(rightReadyCase, left8BitCase);
+        m_out.appendTo(notTriviallyUnequalCase, notEmptyCase);
+        m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase));
 
+        m_out.appendTo(notEmptyCase, left8BitCase);
         m_out.branch(
             m_out.testIsZero32(
                 m_out.load32(left, m_heaps.StringImpl_hashAndFlags),
@@ -12855,7 +13408,6 @@ private:
             unsure(slowCase), unsure(left8BitCase));
 
         m_out.appendTo(left8BitCase, right8BitCase);
-
         m_out.branch(
             m_out.testIsZero32(
                 m_out.load32(right, m_heaps.StringImpl_hashAndFlags),
@@ -12929,8 +13481,8 @@ private:
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(left);
         patchpoint->appendSomeRegister(right);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
         RefPtr<PatchpointExceptionHandle> exceptionHandle =
             preparePatchpointForExceptions(patchpoint);
         patchpoint->numGPScratchRegisters = 1;
@@ -12938,7 +13490,7 @@ private:
         if (scratchFPRUsage == NeedScratchFPR)
             patchpoint->numFPScratchRegisters++;
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
-        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+        patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
         State* state = &m_ftlState;
         patchpoint->setGenerator(
             [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
@@ -12995,13 +13547,13 @@ private:
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(left);
         patchpoint->appendSomeRegister(right);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
         RefPtr<PatchpointExceptionHandle> exceptionHandle =
             preparePatchpointForExceptions(patchpoint);
         patchpoint->numGPScratchRegisters = 1;
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
-        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+        patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
         State* state = &m_ftlState;
         patchpoint->setGenerator(
             [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
@@ -13050,14 +13602,14 @@ private:
         PatchpointValue* patchpoint = m_out.patchpoint(Int64);
         patchpoint->appendSomeRegister(left);
         patchpoint->appendSomeRegister(right);
-        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
-        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
+        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
         RefPtr<PatchpointExceptionHandle> exceptionHandle =
             preparePatchpointForExceptions(patchpoint);
         patchpoint->numGPScratchRegisters = 1;
         patchpoint->numFPScratchRegisters = 1;
         patchpoint->clobber(RegisterSet::macroScratchRegisters());
-        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+        patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
         State* state = &m_ftlState;
         patchpoint->setGenerator(
             [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
@@ -13099,7 +13651,7 @@ private:
     LValue allocateHeapCell(LValue allocator, LBasicBlock slowPath)
     {
         JITAllocator actualAllocator;
-        if (allocator->hasInt32())
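+        // If the allocator LValue is a compile-time constant, it encodes a LocalAllocator*, letting
+        // the JIT bake a constant allocator into the inline allocation path; otherwise the
+        // allocator is chosen at run time.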
+        if (allocator->hasIntPtr())
             actualAllocator = JITAllocator::constant(Allocator(bitwise_cast<LocalAllocator*>(allocator->asIntPtr())));
         else
             actualAllocator = JITAllocator::variable();
@@ -13138,7 +13690,7 @@ private:
         else
             patchpoint->appendSomeRegisterWithClobber(allocator);
         patchpoint->numGPScratchRegisters++;
-        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+        patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
         
         m_out.appendSuccessor(usually(continuation));
         m_out.appendSuccessor(rarely(slowPath));
@@ -13247,7 +13799,7 @@ private:
     LValue allocateObject(
         size_t size, StructureType structure, LValue butterfly, LBasicBlock slowPath)
     {
-        Allocator allocator = subspaceFor<ClassType>(vm())->allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);
+        Allocator allocator = allocatorForNonVirtualConcurrently<ClassType>(vm(), size, AllocatorForMode::AllocatorIfExists);
         return allocateObject(
             m_out.constIntPtr(allocator.localAllocator()), structure, butterfly, slowPath);
     }
@@ -13311,7 +13863,9 @@ private:
     LValue allocateVariableSizedObject(
         LValue size, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
     {
-        LValue allocator = allocatorForSize(*subspaceFor<ClassType>(vm()), size, slowPath);
+        CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
+        RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
+        LValue allocator = allocatorForSize(*subspace, size, slowPath);
         return allocateObject(allocator, structure, butterfly, slowPath);
     }
 
@@ -13319,14 +13873,16 @@ private:
     LValue allocateVariableSizedCell(
         LValue size, Structure* structure, LBasicBlock slowPath)
     {
-        LValue allocator = allocatorForSize(*subspaceFor<ClassType>(vm()), size, slowPath);
+        CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
+        RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
+        LValue allocator = allocatorForSize(*subspace, size, slowPath);
         return allocateCell(allocator, structure, slowPath);
     }
     
     LValue allocateObject(RegisteredStructure structure)
     {
         size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
-        Allocator allocator = subspaceFor<JSFinalObject>(vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
+        Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);
         
         // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
         // instead of putting it on the slow path.
@@ -13422,7 +13978,7 @@ private:
         }
         
         ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
-        
+
         LValue predicate;
         if (shouldLargeArraySizeCreateArrayStorage)
             predicate = m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
@@ -13432,16 +13988,16 @@ private:
         m_out.branch(predicate, rarely(largeCase), usually(fastCase));
         
         m_out.appendTo(fastCase, largeCase);
-            
+
         LValue payloadSize =
             m_out.shl(m_out.zeroExt(vectorLength, pointerType()), m_out.constIntPtr(3));
-            
+
         LValue butterflySize = m_out.add(
             payloadSize, m_out.constIntPtr(sizeof(IndexingHeader)));
-            
+
         LValue allocator = allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, failCase);
         LValue startOfStorage = allocateHeapCell(allocator, failCase);
-            
+
         LValue butterfly = m_out.add(startOfStorage, m_out.constIntPtr(sizeof(IndexingHeader)));
         
         m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
@@ -13527,16 +14083,18 @@ private:
     
     LValue ensureShadowChickenPacket()
     {
+        ShadowChicken* shadowChicken = vm().shadowChicken();
+        RELEASE_ASSERT(shadowChicken);
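+        // vm().shadowChicken() now returns a pointer that may be null; the fast path below reads
+        // the log cursor and log end directly, so the packet buffer must exist.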
         LBasicBlock slowCase = m_out.newBlock();
         LBasicBlock continuation = m_out.newBlock();
         
-        TypedPointer addressOfLogCursor = m_out.absolute(vm().shadowChicken().addressOfLogCursor());
+        TypedPointer addressOfLogCursor = m_out.absolute(shadowChicken->addressOfLogCursor());
         LValue logCursor = m_out.loadPtr(addressOfLogCursor);
         
         ValueFromBlock fastResult = m_out.anchor(logCursor);
         
         m_out.branch(
-            m_out.below(logCursor, m_out.constIntPtr(vm().shadowChicken().logEnd())),
+            m_out.below(logCursor, m_out.constIntPtr(shadowChicken->logEnd())),
             usually(continuation), rarely(slowCase));
         
         LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
@@ -13571,11 +14129,8 @@ private:
                 equalNullOrUndefined(
                     edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
                     ManualOperandSpeculation));
-        case StringUse: {
-            LValue stringValue = lowString(edge);
-            LValue length = m_out.load32NonNegative(stringValue, m_heaps.JSString_length);
-            return m_out.notEqual(length, m_out.int32Zero);
-        }
+        case StringUse:
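+            // A string is falsy only when it is empty. This relies on the VM keeping a single
+            // shared empty JSString, so truthiness reduces to a pointer comparison.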
+            return m_out.notEqual(lowString(edge), weakPointer(jsEmptyString(m_graph.m_vm)));
         case StringOrOtherUse: {
             LValue value = lowJSValue(edge, ManualOperandSpeculation);
 
@@ -13586,20 +14141,17 @@ private:
             m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));
             
             LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
-            
             FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));
-            LValue length = m_out.load32NonNegative(value, m_heaps.JSString_length);
-            ValueFromBlock cellResult = m_out.anchor(m_out.notEqual(length, m_out.int32Zero));
+            ValueFromBlock stringResult = m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(m_graph.m_vm))));
             m_out.jump(continuation);
-            
+
             m_out.appendTo(notCellCase, continuation);
-            
             FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
             ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
             m_out.jump(continuation);
-            m_out.appendTo(continuation, lastNext);
 
-            return m_out.phi(Int32, cellResult, notCellResult);
+            m_out.appendTo(continuation, lastNext);
+            return m_out.phi(Int32, stringResult, notCellResult);
         }
         case UntypedUse: {
             LValue value = lowJSValue(edge);
@@ -13622,7 +14174,8 @@ private:
             
             LBasicBlock cellCase = m_out.newBlock();
             LBasicBlock notStringCase = m_out.newBlock();
-            LBasicBlock stringOrBigIntCase = m_out.newBlock();
+            LBasicBlock stringCase = m_out.newBlock();
+            LBasicBlock bigIntCase = m_out.newBlock();
             LBasicBlock notStringOrBigIntCase = m_out.newBlock();
             LBasicBlock notCellCase = m_out.newBlock();
             LBasicBlock int32Case = m_out.newBlock();
@@ -13638,17 +14191,21 @@ private:
             LBasicBlock lastNext = m_out.appendTo(cellCase, notStringCase);
             m_out.branch(
                 isString(value, provenType(edge) & SpecCell),
-                unsure(stringOrBigIntCase), unsure(notStringCase));
+                unsure(stringCase), unsure(notStringCase));
             
-            m_out.appendTo(notStringCase, stringOrBigIntCase);
+            m_out.appendTo(notStringCase, stringCase);
             m_out.branch(
                 isBigInt(value, provenType(edge) & (SpecCell - SpecString)),
-                unsure(stringOrBigIntCase), unsure(notStringOrBigIntCase));
+                unsure(bigIntCase), unsure(notStringOrBigIntCase));
+
+            m_out.appendTo(stringCase, bigIntCase);
+            results.append(m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(m_graph.m_vm)))));
+            m_out.jump(continuation);
 
-            m_out.appendTo(stringOrBigIntCase, notStringOrBigIntCase);
-            LValue nonZeroCell = m_out.notZero32(
-                m_out.load32NonNegative(value, m_heaps.JSBigIntOrString_length));
-            results.append(m_out.anchor(nonZeroCell));
+            m_out.appendTo(bigIntCase, notStringOrBigIntCase);
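+            // A BigInt is falsy exactly when its length is zero (the value 0n); strings no longer
+            // share this path and are handled above via the empty-string comparison.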
+            LValue nonZeroBigInt = m_out.notZero32(
+                m_out.load32NonNegative(value, m_heaps.JSBigInt_length));
+            results.append(m_out.anchor(nonZeroBigInt));
             m_out.jump(continuation);
             
             m_out.appendTo(notStringOrBigIntCase, notCellCase);
@@ -13778,10 +14335,10 @@ private:
         LValue primitiveResult;
         switch (primitiveMode) {
         case EqualNull:
-            primitiveResult = m_out.equal(value, m_out.constInt64(ValueNull));
+            primitiveResult = m_out.equal(value, m_out.constInt64(JSValue::ValueNull));
             break;
         case EqualUndefined:
-            primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined));
+            primitiveResult = m_out.equal(value, m_out.constInt64(JSValue::ValueUndefined));
             break;
         case EqualNullOrUndefined:
             primitiveResult = isOther(value, provenType(edge));
@@ -13849,17 +14406,62 @@ private:
             m_out.appendTo(performStore, lastNext);
         }
     }
-    
-    LValue caged(Gigacage::Kind kind, LValue ptr)
+
+    LValue untagArrayPtr(LValue ptr, LValue size)
     {
-        if (!Gigacage::isEnabled(kind))
+#if CPU(ARM64E)
+        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
+        authenticate->appendSomeRegister(ptr);
+        authenticate->append(size, B3::ValueRep(B3::ValueRep::SomeLateRegister));
+        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            jit.move(params[1].gpr(), params[0].gpr());
+            jit.untagArrayPtr(params[2].gpr(), params[0].gpr());
+        });
+        return authenticate;
+#else
+        UNUSED_PARAM(size);
+        return ptr;
+#endif
+    }
+
+    LValue removeArrayPtrTag(LValue ptr)
+    {
+#if CPU(ARM64E)
+        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
+        authenticate->appendSomeRegister(ptr);
+        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            jit.move(params[1].gpr(), params[0].gpr());
+            jit.removeArrayPtrTag(params[0].gpr());
+        });
+        return authenticate;
+#endif
+        return ptr;
+    }
+
+    LValue caged(Gigacage::Kind kind, LValue ptr, LValue base)
+    {
+        auto doUntagArrayPtr = [&](LValue taggedPtr) {
+#if CPU(ARM64E)
+            if (kind == Gigacage::Primitive) {
+                LValue size = m_out.load32(base, m_heaps.JSArrayBufferView_length);
+                return untagArrayPtr(taggedPtr, size);
+            }
+            return ptr;
+#else
+            UNUSED_PARAM(taggedPtr);
             return ptr;
+#endif
+        };
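+        // caged() now also receives the owning object (base): on ARM64E the Gigacage::Primitive
+        // path loads the JSArrayBufferView length from it so the vector pointer can be untagged.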
+
+#if GIGACAGE_ENABLED
+        if (!Gigacage::isEnabled(kind))
+            return doUntagArrayPtr(ptr);
         
         if (kind == Gigacage::Primitive && Gigacage::canPrimitiveGigacageBeDisabled()) {
             if (vm().primitiveGigacageEnabled().isStillValid())
                 m_graph.watchpoints().addLazily(vm().primitiveGigacageEnabled());
             else
-                return ptr;
+                return doUntagArrayPtr(ptr);
         }
         
         LValue basePtr = m_out.constIntPtr(Gigacage::basePtr(kind));
@@ -13868,6 +14470,20 @@ private:
         LValue masked = m_out.bitAnd(ptr, mask);
         LValue result = m_out.add(masked, basePtr);
 
+#if CPU(ARM64E)
+        if (kind == Gigacage::Primitive) {
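+            // Reattach the original pointer's PAC bits: copy the tagged ptr, insert the low
+            // (64 - numberOfPACBits) bits of the caged address into it, and then authenticate the
+            // result with untagArrayPtr below.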
+            PatchpointValue* merge = m_out.patchpoint(pointerType());
+            merge->append(result, B3::ValueRep(B3::ValueRep::SomeLateRegister));
+            merge->appendSomeRegister(ptr);
+            merge->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                jit.move(params[2].gpr(), params[0].gpr());
+                jit.bitFieldInsert64(params[1].gpr(), 0, 64 - MacroAssembler::numberOfPACBits, params[0].gpr());
+            });
+
+            result = doUntagArrayPtr(merge);
+        }
+#endif // CPU(ARM64E)
+
         // Make sure that B3 doesn't try to do smart reassociation of these pointer bits.
         // FIXME: In an ideal world, B3 would not do harmful reassociations, and if it did, it would be able
         // to undo them during constant hoisting and regalloc. As it stands, if you remove this then Octane
@@ -13880,6 +14496,11 @@ private:
         // and possibly other smart things if we want to be able to remove this opaque.
         // https://bugs.webkit.org/show_bug.cgi?id=175493
         return m_out.opaque(result);
+#endif
+
+        UNUSED_PARAM(kind);
+        UNUSED_PARAM(base);
+        return doUntagArrayPtr(ptr);
     }
     
     void buildSwitch(SwitchData* data, LType type, LValue switchValue)
@@ -13907,7 +14528,7 @@ private:
             lowBlock(data->fallThrough.block), Weight(data->fallThrough.count));
     }
     
-    void switchString(SwitchData* data, LValue string)
+    void switchString(SwitchData* data, LValue string, Edge& edge)
     {
         bool canDoBinarySwitch = true;
         unsigned totalLength = 0;
@@ -13930,16 +14551,16 @@ private:
             return;
         }
         
-        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
-        LValue length = m_out.load32(string, m_heaps.JSString_length);
-        
         LBasicBlock hasImplBlock = m_out.newBlock();
         LBasicBlock is8BitBlock = m_out.newBlock();
         LBasicBlock slowBlock = m_out.newBlock();
         
-        m_out.branch(m_out.isNull(stringImpl), unsure(slowBlock), unsure(hasImplBlock));
+        m_out.branch(isRopeString(string, edge), unsure(slowBlock), unsure(hasImplBlock));
         
         LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock);
+
+        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
+        LValue length = m_out.load32(stringImpl, m_heaps.StringImpl_length);
         
         m_out.branch(
             m_out.testIsZero32(
@@ -14339,7 +14960,7 @@ private:
         m_out.appendTo(notNumberCase, notNullCase);
         LValue isNull;
         if (provenType(child) & SpecOther)
-            isNull = m_out.equal(value, m_out.constInt64(ValueNull));
+            isNull = m_out.equal(value, m_out.constInt64(JSValue::ValueNull));
         else
             isNull = m_out.booleanFalse;
         m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase));
@@ -14421,7 +15042,7 @@ private:
             return;
         }
         
-        if (m_node->shouldSpeculateAnyInt()) {
+        if (m_node->shouldSpeculateInt52()) {
             setStrictInt52(m_out.zeroExt(result, Int64));
             return;
         }
@@ -14721,7 +15342,7 @@ private:
                                 linkBuffer.link(generatorJump,
                                     CodeLocationLabel<JITThunkPtrTag>(vm->getCTIStub(lazySlowPathGenerationThunkGenerator).code()));
                                 
-                                std::unique_ptr<LazySlowPath> lazySlowPath = std::make_unique<LazySlowPath>();
+                                std::unique_ptr<LazySlowPath> lazySlowPath = makeUnique<LazySlowPath>();
 
                                 auto linkedPatchableJump = CodeLocationJump<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(patchableJump));
 
@@ -15121,7 +15742,7 @@ private:
             return result;
         }
         
-        DFG_CRASH(m_graph, m_node, "Value not defined");
+        DFG_CRASH(m_graph, m_node, makeString("Value not defined: ", String::number(edge.node()->index())).ascii().data());
         return 0;
     }
 
@@ -15199,13 +15820,13 @@ private:
     {
         if (LValue proven = isProvenValue(type, SpecInt32Only))
             return proven;
-        return m_out.aboveOrEqual(jsValue, m_tagTypeNumber);
+        return m_out.aboveOrEqual(jsValue, m_numberTag);
     }
     LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
     {
         if (LValue proven = isProvenValue(type, ~SpecInt32Only))
             return proven;
-        return m_out.below(jsValue, m_tagTypeNumber);
+        return m_out.below(jsValue, m_numberTag);
     }
     LValue unboxInt32(LValue jsValue)
     {
@@ -15213,32 +15834,32 @@ private:
     }
     LValue boxInt32(LValue value)
     {
-        return m_out.add(m_out.zeroExt(value, Int64), m_tagTypeNumber);
+        return m_out.add(m_out.zeroExt(value, Int64), m_numberTag);
     }
     
     LValue isCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
     {
         if (LValue proven = isProvenValue(type, SpecCellCheck | SpecMisc))
             return proven;
-        return m_out.testIsZero64(jsValue, m_tagTypeNumber);
+        return m_out.testIsZero64(jsValue, m_numberTag);
     }
     LValue isNotCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
     {
         if (LValue proven = isProvenValue(type, ~(SpecCellCheck | SpecMisc)))
             return proven;
-        return m_out.testNonZero64(jsValue, m_tagTypeNumber);
+        return m_out.testNonZero64(jsValue, m_numberTag);
     }
 
     LValue unboxDouble(LValue jsValue, LValue* unboxedAsInt = nullptr)
     {
-        LValue asInt = m_out.add(jsValue, m_tagTypeNumber);
+        LValue asInt = m_out.add(jsValue, m_numberTag);
         if (unboxedAsInt)
             *unboxedAsInt = asInt;
         return m_out.bitCast(asInt, Double);
     }
     LValue boxDouble(LValue doubleValue)
     {
-        return m_out.sub(m_out.bitCast(doubleValue, Int64), m_tagTypeNumber);
+        return m_out.sub(m_out.bitCast(doubleValue, Int64), m_numberTag);
     }
     
     LValue jsValueToStrictInt52(Edge edge, LValue boxedValue)
@@ -15277,16 +15898,33 @@ private:
             
         return m_out.phi(Int64, intToInt52, doubleToInt52);
     }
-    
+
     LValue doubleToStrictInt52(Edge edge, LValue value)
     {
-        LValue possibleResult = m_out.call(
-            Int64, m_out.operation(operationConvertDoubleToInt52), value);
-        FTL_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow,
-            doubleValue(value), edge, SpecAnyIntAsDouble,
-            m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));
-        
-        return possibleResult;
+        LValue integerValue = m_out.doubleToInt64(value);
+        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
+        LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
+        speculate(Int52Overflow, doubleValue(value), edge.node(), valueNotConvertibleToInteger);
+
+        LBasicBlock valueIsZero = m_out.newBlock();
+        LBasicBlock valueIsNotZero = m_out.newBlock();
+        LBasicBlock continuation = m_out.newBlock();
+        m_out.branch(m_out.isZero64(integerValue), unsure(valueIsZero), unsure(valueIsNotZero));
+
+        LBasicBlock lastNext = m_out.appendTo(valueIsZero, valueIsNotZero);
+        LValue doubleBitcastToInt64 = m_out.bitCast(value, Int64);
+        LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));
+        speculate(Int52Overflow, doubleValue(value), edge.node(), signBitSet);
+        m_out.jump(continuation);
+
+        m_out.appendTo(valueIsNotZero, continuation);
+        speculate(Int52Overflow, doubleValue(value), edge.node(), m_out.greaterThanOrEqual(integerValue, m_out.constInt64(static_cast<int64_t>(1) << (JSValue::numberOfInt52Bits - 1))));
+        speculate(Int52Overflow, doubleValue(value), edge.node(), m_out.lessThan(integerValue, m_out.constInt64(-(static_cast<int64_t>(1) << (JSValue::numberOfInt52Bits - 1)))));
+        m_out.jump(continuation);
+
+        m_out.appendTo(continuation, lastNext);
+        m_interpreter.filter(edge, SpecAnyIntAsDouble);
+        return integerValue;
     }
 
     LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero)
@@ -15330,21 +15968,21 @@ private:
     {
         if (LValue proven = isProvenValue(type, ~SpecCellCheck))
             return proven;
-        return m_out.testNonZero64(jsValue, m_tagMask);
+        return m_out.testNonZero64(jsValue, m_notCellMask);
     }
     
     LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop)
     {
         if (LValue proven = isProvenValue(type, SpecCellCheck))
             return proven;
-        return m_out.testIsZero64(jsValue, m_tagMask);
+        return m_out.testIsZero64(jsValue, m_notCellMask);
     }
     
     LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop)
     {
         if (LValue proven = isProvenValue(type, ~SpecMisc))
             return proven;
-        return m_out.above(value, m_out.constInt64(TagBitTypeOther | TagBitBool | TagBitUndefined));
+        return m_out.above(value, m_out.constInt64(JSValue::MiscTag));
     }
     
     LValue isMisc(LValue value, SpeculatedType type = SpecFullTop)
@@ -15359,7 +15997,7 @@ private:
         if (LValue proven = isProvenValue(type, ~SpecBoolean))
             return proven;
         return m_out.testNonZero64(
-            m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)),
+            m_out.bitXor(jsValue, m_out.constInt64(JSValue::ValueFalse)),
             m_out.constInt64(~1));
     }
     LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
@@ -15377,7 +16015,7 @@ private:
     LValue boxBoolean(LValue value)
     {
         return m_out.select(
-            value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse));
+            value, m_out.constInt64(JSValue::ValueTrue), m_out.constInt64(JSValue::ValueFalse));
     }
     
     LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop)
@@ -15385,16 +16023,16 @@ private:
         if (LValue proven = isProvenValue(type, ~SpecOther))
             return proven;
         return m_out.notEqual(
-            m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
-            m_out.constInt64(ValueNull));
+            m_out.bitAnd(value, m_out.constInt64(~JSValue::UndefinedTag)),
+            m_out.constInt64(JSValue::ValueNull));
     }
     LValue isOther(LValue value, SpeculatedType type = SpecFullTop)
     {
         if (LValue proven = isProvenValue(type, SpecOther))
             return proven;
         return m_out.equal(
-            m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
-            m_out.constInt64(ValueNull));
+            m_out.bitAnd(value, m_out.constInt64(~JSValue::UndefinedTag)),
+            m_out.constInt64(JSValue::ValueNull));
     }
     
     LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType)
@@ -15451,6 +16089,9 @@ private:
         case RegExpObjectUse:
             speculateRegExpObject(edge);
             break;
+        case PromiseObjectUse:
+            speculatePromiseObject(edge);
+            break;
         case ProxyObjectUse:
             speculateProxyObject(edge);
             break;
@@ -15577,10 +16218,12 @@ private:
         jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation));
     }
 
-    LValue isCellWithType(LValue cell, JSType queriedType, SpeculatedType speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
+    LValue isCellWithType(LValue cell, JSType queriedType, Optional<SpeculatedType> speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
     {
-        if (LValue proven = isProvenValue(type & SpecCell, speculatedTypeForQuery))
-            return proven;
+        if (speculatedTypeForQuery) {
+            if (LValue proven = isProvenValue(type & SpecCell, speculatedTypeForQuery.value()))
+                return proven;
+        }
         return m_out.equal(
             m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
             m_out.constInt32(queriedType));
@@ -15634,6 +16277,44 @@ private:
             m_out.constInt32(vm().stringStructure->id()));
     }
 
+    LValue isRopeString(LValue string, Edge edge = Edge())
+    {
+        if (edge) {
+            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
+                return m_out.booleanFalse;
+            if (JSValue value = provenValue(edge)) {
+                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
+                    return m_out.booleanFalse;
+            }
+            String value = edge->tryGetString(m_graph);
+            if (!value.isNull()) {
+                // If this value is a LazyValue, it will be converted to a JSString, and the resulting string is never a rope.
+                return m_out.booleanFalse;
+            }
+        }
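+        // JSString keeps either a StringImpl* or rope data in its value field; rope strings set
+        // JSString::isRopeInPointer in that pointer, so testing the bit distinguishes ropes from
+        // resolved strings.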
+
+        return m_out.testNonZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
+    }
+
+    LValue isNotRopeString(LValue string, Edge edge = Edge())
+    {
+        if (edge) {
+            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
+                return m_out.booleanTrue;
+            if (JSValue value = provenValue(edge)) {
+                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
+                    return m_out.booleanTrue;
+            }
+            String value = edge->tryGetString(m_graph);
+            if (!value.isNull()) {
+                // If this value is a LazyValue, it will be converted to a JSString, and the resulting string is never a rope.
+                return m_out.booleanTrue;
+            }
+        }
+
+        return m_out.testIsZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
+    }
+
     LValue isNotSymbol(LValue cell, SpeculatedType type = SpecFullTop)
     {
         if (LValue proven = isProvenValue(type & SpecCell, ~SpecSymbol))
@@ -15932,6 +16613,17 @@ private:
         speculateDerivedArray(edge, lowCell(edge));
     }
 
+    void speculatePromiseObject(Edge edge, LValue cell)
+    {
+        FTL_TYPE_CHECK(
+            jsValueValue(cell), edge, SpecPromiseObject, isNotType(cell, JSPromiseType));
+    }
+
+    void speculatePromiseObject(Edge edge)
+    {
+        speculatePromiseObject(edge, lowCell(edge));
+    }
+
     void speculateMapObject(Edge edge, LValue cell)
     {
         FTL_TYPE_CHECK(
@@ -16031,12 +16723,12 @@ private:
         if (!m_interpreter.needsTypeCheck(edge, SpecStringIdent | ~SpecString))
             return;
         
-        speculate(BadType, jsValueValue(string), edge.node(), m_out.isNull(stringImpl));
+        speculate(BadType, jsValueValue(string), edge.node(), isRopeString(string));
         speculate(
             BadType, jsValueValue(string), edge.node(),
             m_out.testIsZero32(
                 m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
-                m_out.constInt32(StringImpl::flagIsAtomic())));
+                m_out.constInt32(StringImpl::flagIsAtom())));
         m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
     }
     
@@ -16253,6 +16945,9 @@ private:
 
         LBasicBlock lastNext = m_out.appendTo(isWasteful, continuation);
         LValue vector = m_out.loadPtr(base, m_heaps.JSArrayBufferView_vector);
+        // FIXME: We could probably make this a mask.
+        // https://bugs.webkit.org/show_bug.cgi?id=197701
+        vector = removeArrayPtrTag(vector);
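+        // Drop any ARM64E pointer-authentication bits first so the zero check below sees the raw
+        // pointer value.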
         speculate(Uncountable, jsValueValue(vector), m_node, m_out.isZero64(vector));
         m_out.jump(continuation);
 
@@ -16394,32 +17089,6 @@ private:
         return preciseIndexMask64(value, m_out.zeroExt(index, Int64), m_out.zeroExt(limit, Int64));
     }
     
-    LValue dynamicPoison(LValue value, LValue poison)
-    {
-        return m_out.add(
-            value,
-            m_out.shl(
-                m_out.zeroExt(poison, pointerType()),
-                m_out.constInt32(40)));
-    }
-    
-    LValue dynamicPoisonOnLoadedType(LValue value, LValue actualType, JSType expectedType)
-    {
-        return dynamicPoison(
-            value,
-            m_out.bitXor(
-                m_out.opaque(actualType),
-                m_out.constInt32(expectedType)));
-    }
-    
-    LValue dynamicPoisonOnType(LValue value, JSType expectedType)
-    {
-        return dynamicPoisonOnLoadedType(
-            value,
-            m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType),
-            expectedType);
-    }
-
     template<typename... Args>
     LValue vmCall(LType type, LValue function, Args&&... args)
     {
@@ -16473,7 +17142,7 @@ private:
             // to baz and baz is inlined in bar. And then baz makes a tail-call to jaz,
             // and jaz is inlined in baz. We want the callframe for jaz to appear to 
             // have caller be bar.
-            codeOrigin = *codeOrigin.inlineCallFrame->getCallerSkippingTailCalls();
+            codeOrigin = *codeOrigin.inlineCallFrame()->getCallerSkippingTailCalls();
         }
 
         return codeOrigin;
@@ -16514,11 +17183,7 @@ private:
         if (!willCatchException)
             return PatchpointExceptionHandle::defaultHandle(m_ftlState);
 
-        if (verboseCompilationEnabled()) {
-            dataLog("    Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap(), "\n");
-            if (!m_availableRecoveries.isEmpty())
-                dataLog("        Available recoveries: ", listDump(m_availableRecoveries), "\n");
-        }
+        dataLogLnIf(verboseCompilationEnabled(), "    Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());
 
         bool exitOK = true;
         NodeOrigin origin = m_origin.withForExitAndExitOK(opCatchOrigin, exitOK);
@@ -16572,11 +17237,7 @@ private:
         ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition, 
         NodeOrigin origin, bool isExceptionHandler = false)
     {
-        if (verboseCompilationEnabled()) {
-            dataLog("    OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap(), "\n");
-            if (!m_availableRecoveries.isEmpty())
-                dataLog("        Available recoveries: ", listDump(m_availableRecoveries), "\n");
-        }
+        dataLogLnIf(verboseCompilationEnabled(), "    OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());
 
         DFG_ASSERT(m_graph, m_node, origin.exitOK);
 
@@ -16770,18 +17431,6 @@ private:
             }
         }
         
-        for (unsigned i = 0; i < m_availableRecoveries.size(); ++i) {
-            AvailableRecovery recovery = m_availableRecoveries[i];
-            if (recovery.node() != node)
-                continue;
-            ExitValue result = ExitValue::recovery(
-                recovery.opcode(), arguments.size(), arguments.size() + 1,
-                recovery.format());
-            arguments.append(recovery.left());
-            arguments.append(recovery.right());
-            return result;
-        }
-        
         LoweredNodeValue value = m_int32Values.get(node);
         if (isValid(value))
             return exitArgument(arguments, DataFormatInt32, value.value());
@@ -16848,18 +17497,6 @@ private:
         DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
     }
 
-    void addAvailableRecovery(
-        Node* node, RecoveryOpcode opcode, LValue left, LValue right, DataFormat format)
-    {
-        m_availableRecoveries.append(AvailableRecovery(node, opcode, left, right, format));
-    }
-    
-    void addAvailableRecovery(
-        Edge edge, RecoveryOpcode opcode, LValue left, LValue right, DataFormat format)
-    {
-        addAvailableRecovery(edge.node(), opcode, left, right, format);
-    }
-    
     void setInt32(Node* node, LValue value)
     {
         m_int32Values.set(node, LoweredNodeValue(value, m_highBlock));
@@ -16944,7 +17581,17 @@ private:
             return false;
         return true;
     }
-    
+
+    void keepAlive(LValue value)
+    {
+        PatchpointValue* patchpoint = m_out.patchpoint(Void);
+        patchpoint->effects = Effects::none();
+        patchpoint->effects.writesLocalState = true;
+        patchpoint->effects.reads = HeapRange::top();
+        patchpoint->append(value, ValueRep::ColdAny);
+        patchpoint->setGenerator([=] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
     void addWeakReference(JSCell* target)
     {
         m_graph.m_plan.weakReferences().addLazily(target);
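The new keepAlive() helper added above pins a value without emitting any machine code: the empty patchpoint takes the value as a ColdAny operand, and its effects claim to read the entire heap and write local state, so B3 can neither delete nor sink it, which keeps the value live up to that point. A hedged usage sketch (hypothetical call site; the names are illustrative and not taken from this patch):

    // Keep the base cell live across dependent loads so it stays reachable
    // (and thus marked) until we are done reading through its butterfly.
    LValue base = lowCell(m_node->child1());
    LValue butterfly = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
    // ... loads off 'butterfly' ...
    keepAlive(base);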
@@ -16952,30 +17599,24 @@ private:
 
     LValue loadStructure(LValue value)
     {
-        LValue tableIndex = m_out.load32(value, m_heaps.JSCell_structureID);
-        LValue tableBase = m_out.loadPtr(
-            m_out.absolute(vm().heap.structureIDTable().base()));
-        TypedPointer address = m_out.baseIndex(
-            m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
-        return m_out.loadPtr(address);
+        LValue structureID = m_out.load32(value, m_heaps.JSCell_structureID);
+        LValue tableBase = m_out.loadPtr(m_out.absolute(vm().heap.structureIDTable().base()));
+        LValue tableIndex = m_out.aShr(structureID, m_out.constInt32(StructureIDTable::s_numberOfEntropyBits));
+        LValue entropyBits = m_out.shl(m_out.zeroExtPtr(structureID), m_out.constInt32(StructureIDTable::s_entropyBitsShiftForStructurePointer));
+        TypedPointer address = m_out.baseIndex(m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
+        LValue encodedStructureBits = m_out.loadPtr(address);
+        return m_out.bitXor(encodedStructureBits, entropyBits);
     }
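The rewritten loadStructure() now decodes the entropy-encoded StructureIDTable entries instead of loading a raw Structure pointer: the upper bits of the StructureID select the table slot, and the low entropy bits, shifted into the high end of a pointer, are XORed with the stored value to recover the Structure*. A minimal C++ sketch of the same decode, assuming it mirrors StructureIDTable::get and reusing the constant names from the lowering above:

    Structure* decodeStructureID(StructureID id, const uintptr_t* tableBase)
    {
        uint32_t index = id >> StructureIDTable::s_numberOfEntropyBits;
        uintptr_t entropyBits = static_cast<uintptr_t>(id) << StructureIDTable::s_entropyBitsShiftForStructurePointer;
        return reinterpret_cast<Structure*>(tableBase[index] ^ entropyBits);
    }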
 
     LValue weakPointer(JSCell* pointer)
     {
         addWeakReference(pointer);
-        return m_out.weakPointer(m_graph, pointer);
-    }
-    
-    template<typename Key>
-    LValue weakPoisonedPointer(JSCell* pointer)
-    {
-        addWeakReference(pointer);
-        return m_out.weakPoisonedPointer<Key>(m_graph, pointer);
+        return m_out.alreadyRegisteredWeakPointer(m_graph, pointer);
     }
 
     LValue frozenPointer(FrozenValue* value)
     {
-        return m_out.weakPointer(value);
+        return m_out.alreadyRegisteredFrozenPointer(value);
     }
 
     LValue weakStructureID(RegisteredStructure structure)
@@ -16986,7 +17627,7 @@ private:
     LValue weakStructure(RegisteredStructure structure)
     {
         ASSERT(!!structure.get());
-        return m_out.weakPointer(m_graph, structure.get());
+        return m_out.alreadyRegisteredWeakPointer(m_graph, structure.get());
     }
     
     TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
@@ -17111,8 +17752,8 @@ private:
     
     LValue m_callFrame;
     LValue m_captured;
-    LValue m_tagTypeNumber;
-    LValue m_tagMask;
+    LValue m_numberTag;
+    LValue m_notCellMask;
     
     HashMap<Node*, LoweredNodeValue> m_int32Values;
     HashMap<Node*, LoweredNodeValue> m_strictInt52Values;
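For orientation, the renamed cached constants above correspond to the 64-bit JSValue encoding: a value is a number exactly when any NumberTag bit is set, and a cell exactly when no NotCellMask bit is set. A hedged sketch of how these members are typically consumed when lowering type checks (illustrative only; not lifted from this patch):

    // Illustrative lowering of the two basic tag tests.
    LValue isNumberValue(LValue jsValue)
    {
        return m_out.testNonZero64(jsValue, m_numberTag);   // some high tag bit set => boxed number
    }
    LValue isCellValue(LValue jsValue)
    {
        return m_out.testIsZero64(jsValue, m_notCellMask);  // no tag bits set => cell pointer
    }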
@@ -17126,8 +17767,6 @@ private:
     
     LocalOSRAvailabilityCalculator m_availabilityCalculator;
     
-    Vector<AvailableRecovery, 3> m_availableRecoveries;
-    
     InPlaceAbstractState m_state;
     AbstractInterpreter<InPlaceAbstractState> m_interpreter;
     DFG::BasicBlock* m_highBlock;
@@ -17141,6 +17780,11 @@ private:
     NodeOrigin m_origin;
     unsigned m_nodeIndex;
     Node* m_node;
+
+    // These are used for validating AI state.
+    HashMap<Node*, NodeSet> m_liveInToNode;
+    HashMap<Node*, AbstractValue> m_aiCheckedNodes;
+    String m_graphDump;
 };
 
 } // anonymous namespace