Unreviewed, rolling out r227592.
author    commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Thu, 25 Jan 2018 17:21:09 +0000 (17:21 +0000)
committer commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Thu, 25 Jan 2018 17:21:09 +0000 (17:21 +0000)
https://bugs.webkit.org/show_bug.cgi?id=182110

It made ARM64 (Linux and iOS) crash (Requested by pizlo-mbp on
#webkit).

Reverted changeset:

"JSC GC should support TLCs (thread local caches)"
https://bugs.webkit.org/show_bug.cgi?id=181559
https://trac.webkit.org/changeset/227592

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@227609 268f45cc-cd09-0410-ab3c-d52691b4dbfc

70 files changed:
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/Sources.txt
Source/JavaScriptCore/b3/B3LowerToAir.cpp
Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp
Source/JavaScriptCore/b3/B3StackmapSpecial.cpp
Source/JavaScriptCore/b3/B3StackmapValue.cpp
Source/JavaScriptCore/b3/B3StackmapValue.h
Source/JavaScriptCore/b3/B3Validate.cpp
Source/JavaScriptCore/b3/B3ValueRep.cpp
Source/JavaScriptCore/b3/B3ValueRep.h
Source/JavaScriptCore/bytecode/AccessCase.cpp
Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
Source/JavaScriptCore/bytecode/ObjectAllocationProfileInlines.h
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h
Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
Source/JavaScriptCore/heap/Allocator.cpp [deleted file]
Source/JavaScriptCore/heap/Allocator.h [deleted file]
Source/JavaScriptCore/heap/AllocatorInlines.h [deleted file]
Source/JavaScriptCore/heap/BlockDirectory.cpp
Source/JavaScriptCore/heap/BlockDirectory.h
Source/JavaScriptCore/heap/BlockDirectoryInlines.h
Source/JavaScriptCore/heap/CompleteSubspace.cpp
Source/JavaScriptCore/heap/CompleteSubspace.h
Source/JavaScriptCore/heap/FreeList.h
Source/JavaScriptCore/heap/GCDeferralContext.h
Source/JavaScriptCore/heap/Heap.cpp
Source/JavaScriptCore/heap/Heap.h
Source/JavaScriptCore/heap/IsoCellSet.h
Source/JavaScriptCore/heap/IsoSubspace.cpp
Source/JavaScriptCore/heap/IsoSubspace.h
Source/JavaScriptCore/heap/LocalAllocator.cpp [deleted file]
Source/JavaScriptCore/heap/LocalAllocator.h [deleted file]
Source/JavaScriptCore/heap/LocalAllocatorInlines.h [deleted file]
Source/JavaScriptCore/heap/MarkedBlock.cpp
Source/JavaScriptCore/heap/MarkedSpace.cpp
Source/JavaScriptCore/heap/MarkedSpace.h
Source/JavaScriptCore/heap/SlotVisitor.cpp
Source/JavaScriptCore/heap/SlotVisitor.h
Source/JavaScriptCore/heap/Subspace.h
Source/JavaScriptCore/heap/ThreadLocalCache.cpp [deleted file]
Source/JavaScriptCore/heap/ThreadLocalCache.h [deleted file]
Source/JavaScriptCore/heap/ThreadLocalCacheInlines.h [deleted file]
Source/JavaScriptCore/heap/ThreadLocalCacheLayout.cpp [deleted file]
Source/JavaScriptCore/heap/ThreadLocalCacheLayout.h [deleted file]
Source/JavaScriptCore/jit/AssemblyHelpers.cpp
Source/JavaScriptCore/jit/AssemblyHelpers.h
Source/JavaScriptCore/jit/JITAllocator.h [deleted file]
Source/JavaScriptCore/jit/JITOpcodes.cpp
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
Source/JavaScriptCore/runtime/ButterflyInlines.h
Source/JavaScriptCore/runtime/DirectArguments.cpp
Source/JavaScriptCore/runtime/GenericArgumentsInlines.h
Source/JavaScriptCore/runtime/HashMapImpl.h
Source/JavaScriptCore/runtime/JSArray.cpp
Source/JavaScriptCore/runtime/JSArray.h
Source/JavaScriptCore/runtime/JSArrayBufferView.cpp
Source/JavaScriptCore/runtime/JSCellInlines.h
Source/JavaScriptCore/runtime/JSGlobalObject.cpp
Source/JavaScriptCore/runtime/JSGlobalObject.h
Source/JavaScriptCore/runtime/JSLock.cpp
Source/JavaScriptCore/runtime/Options.h
Source/JavaScriptCore/runtime/RegExpMatchesArray.h
Source/JavaScriptCore/runtime/VM.cpp
Source/JavaScriptCore/runtime/VM.h
Source/JavaScriptCore/runtime/VMEntryScope.cpp
Source/WTF/ChangeLog
Source/WTF/wtf/Bitmap.h

index c832fc0..3b960d3 100644
@@ -1,3 +1,17 @@
+2018-01-25  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, rolling out r227592.
+        https://bugs.webkit.org/show_bug.cgi?id=182110
+
+        It made ARM64 (Linux and iOS) crash (Requested by pizlo-mbp on
+        #webkit).
+
+        Reverted changeset:
+
+        "JSC GC should support TLCs (thread local caches)"
+        https://bugs.webkit.org/show_bug.cgi?id=181559
+        https://trac.webkit.org/changeset/227592
+
 2018-01-25  Alejandro G. Castro  <alex@igalia.com>
 
         undefined reference to 'JSC::B3::BasicBlock::fallThrough() const
index de3e290..9cc9ef4 100644
                0F725CAA1C503DED00AD943A /* B3PureCSE.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F725CA61C503DED00AD943A /* B3PureCSE.h */; };
                0F725CB01C506D3B00AD943A /* B3FoldPathConstants.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F725CAE1C506D3B00AD943A /* B3FoldPathConstants.h */; };
                0F74B93B1F89614800B935D3 /* PrototypeKey.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F74B93A1F89614500B935D3 /* PrototypeKey.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F75A05E200D25F60038E2CF /* ThreadLocalCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F75A055200D25EF0038E2CF /* ThreadLocalCache.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F75A060200D260B0038E2CF /* LocalAllocatorInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F75A05A200D25F00038E2CF /* LocalAllocatorInlines.h */; };
-               0F75A061200D26180038E2CF /* LocalAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F75A057200D25F00038E2CF /* LocalAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F75A062200D261D0038E2CF /* AllocatorInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F75A05D200D25F10038E2CF /* AllocatorInlines.h */; };
-               0F75A063200D261F0038E2CF /* Allocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F75A054200D25EF0038E2CF /* Allocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F75A064200D26280038E2CF /* ThreadLocalCacheLayout.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F75A05C200D25F10038E2CF /* ThreadLocalCacheLayout.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F75A0662013E4F10038E2CF /* JITAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F75A0652013E4EF0038E2CF /* JITAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F766D2C15A8CC3A008F363E /* JITStubRoutineSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D2A15A8CC34008F363E /* JITStubRoutineSet.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F766D3015A8DCE2008F363E /* GCAwareJITStubRoutine.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D2E15A8DCDD008F363E /* GCAwareJITStubRoutine.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F766D3115AA8112008F363E /* JITStubRoutine.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D1C15A5028D008F363E /* JITStubRoutine.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F725CAD1C506D3B00AD943A /* B3FoldPathConstants.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = B3FoldPathConstants.cpp; path = b3/B3FoldPathConstants.cpp; sourceTree = "<group>"; };
                0F725CAE1C506D3B00AD943A /* B3FoldPathConstants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = B3FoldPathConstants.h; path = b3/B3FoldPathConstants.h; sourceTree = "<group>"; };
                0F74B93A1F89614500B935D3 /* PrototypeKey.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PrototypeKey.h; sourceTree = "<group>"; };
-               0F75A054200D25EF0038E2CF /* Allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Allocator.h; sourceTree = "<group>"; };
-               0F75A055200D25EF0038E2CF /* ThreadLocalCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadLocalCache.h; sourceTree = "<group>"; };
-               0F75A056200D25EF0038E2CF /* ThreadLocalCacheInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadLocalCacheInlines.h; sourceTree = "<group>"; };
-               0F75A057200D25F00038E2CF /* LocalAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LocalAllocator.h; sourceTree = "<group>"; };
-               0F75A058200D25F00038E2CF /* ThreadLocalCache.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ThreadLocalCache.cpp; sourceTree = "<group>"; };
-               0F75A059200D25F00038E2CF /* LocalAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LocalAllocator.cpp; sourceTree = "<group>"; };
-               0F75A05A200D25F00038E2CF /* LocalAllocatorInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LocalAllocatorInlines.h; sourceTree = "<group>"; };
-               0F75A05B200D25F10038E2CF /* ThreadLocalCacheLayout.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ThreadLocalCacheLayout.cpp; sourceTree = "<group>"; };
-               0F75A05C200D25F10038E2CF /* ThreadLocalCacheLayout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadLocalCacheLayout.h; sourceTree = "<group>"; };
-               0F75A05D200D25F10038E2CF /* AllocatorInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AllocatorInlines.h; sourceTree = "<group>"; };
-               0F75A0652013E4EF0038E2CF /* JITAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITAllocator.h; sourceTree = "<group>"; };
                0F766D1C15A5028D008F363E /* JITStubRoutine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITStubRoutine.h; sourceTree = "<group>"; };
                0F766D2615A8CC1B008F363E /* JITStubRoutine.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITStubRoutine.cpp; sourceTree = "<group>"; };
                0F766D2915A8CC34008F363E /* JITStubRoutineSet.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITStubRoutineSet.cpp; sourceTree = "<group>"; };
                                1429D92E0ED22D7000B89619 /* JIT.h */,
                                FE1220251BE7F5640039E6F2 /* JITAddGenerator.cpp */,
                                FE1220261BE7F5640039E6F2 /* JITAddGenerator.h */,
-                               0F75A0652013E4EF0038E2CF /* JITAllocator.h */,
                                86A90ECF0EE7D51F00AB350D /* JITArithmetic.cpp */,
                                A75706DD118A2BCF0057F88F /* JITArithmetic32_64.cpp */,
                                FE3A06AD1C10CB6F00390FDD /* JITBitAndGenerator.cpp */,
                142E312A134FF0A600AFADB5 /* heap */ = {
                        isa = PBXGroup;
                        children = (
-                               0F75A054200D25EF0038E2CF /* Allocator.h */,
-                               0F75A05D200D25F10038E2CF /* AllocatorInlines.h */,
-                               0F75A059200D25F00038E2CF /* LocalAllocator.cpp */,
-                               0F75A057200D25F00038E2CF /* LocalAllocator.h */,
-                               0F75A05A200D25F00038E2CF /* LocalAllocatorInlines.h */,
-                               0F75A058200D25F00038E2CF /* ThreadLocalCache.cpp */,
-                               0F75A055200D25EF0038E2CF /* ThreadLocalCache.h */,
-                               0F75A056200D25EF0038E2CF /* ThreadLocalCacheInlines.h */,
-                               0F75A05B200D25F10038E2CF /* ThreadLocalCacheLayout.cpp */,
-                               0F75A05C200D25F10038E2CF /* ThreadLocalCacheLayout.h */,
                                0FEC3C501F33A41600F59B6C /* AlignedMemoryAllocator.cpp */,
                                0FEC3C511F33A41600F59B6C /* AlignedMemoryAllocator.h */,
                                0FA7620A1DB959F600B7A2FD /* AllocatingScope.h */,
                                0F2AC5711E8EE4540001EE3F /* AirFormTable.h in Headers */,
                                0FEC85771BDACDC70080FF74 /* AirFrequentedBlock.h in Headers */,
                                0FEC85791BDACDC70080FF74 /* AirGenerate.h in Headers */,
-                               0F75A061200D26180038E2CF /* LocalAllocator.h in Headers */,
                                0FEC857A1BDACDC70080FF74 /* AirGenerationContext.h in Headers */,
                                0FEC857C1BDACDC70080FF74 /* AirHandleCalleeSaves.h in Headers */,
                                0FEC857E1BDACDC70080FF74 /* AirInsertionSet.h in Headers */,
                                0F6B8ADD1C4EFAC300969052 /* B3SSACalculator.h in Headers */,
                                0F33FCF81C136E2500323F67 /* B3StackmapGenerationParams.h in Headers */,
                                0FEC85311BDACDAC0080FF74 /* B3StackmapSpecial.h in Headers */,
-                               0F75A064200D26280038E2CF /* ThreadLocalCacheLayout.h in Headers */,
                                0F338DF21BE93AD10013C88F /* B3StackmapValue.h in Headers */,
                                0F9495881C57F47500413A48 /* B3StackSlot.h in Headers */,
                                0FEC85361BDACDAC0080FF74 /* B3SuccessorCollection.h in Headers */,
                                0F1E3A461534CBAF000F9456 /* DFGArgumentPosition.h in Headers */,
                                0F2DD8121AB3D8BE00BBB8E8 /* DFGArgumentsEliminationPhase.h in Headers */,
                                0F2DD8141AB3D8BE00BBB8E8 /* DFGArgumentsUtilities.h in Headers */,
-                               0F75A063200D261F0038E2CF /* Allocator.h in Headers */,
                                0F485322187750560083B687 /* DFGArithMode.h in Headers */,
                                0F05C3B41683CF9200BAF45B /* DFGArrayifySlowPathGenerator.h in Headers */,
                                0F63948515E4811B006A597C /* DFGArrayMode.h in Headers */,
                                FEA0C4031CDD7D1D00481991 /* FunctionWhitelist.h in Headers */,
                                2AACE63D18CA5A0300ED0191 /* GCActivityCallback.h in Headers */,
                                BCBE2CAE14E985AA000593AD /* GCAssertions.h in Headers */,
-                               0F75A060200D260B0038E2CF /* LocalAllocatorInlines.h in Headers */,
                                0F766D3015A8DCE2008F363E /* GCAwareJITStubRoutine.h in Headers */,
                                0FD0E5EA1E43D34D0006AB08 /* GCConductor.h in Headers */,
                                0FB4767E1D99AEA9008EA6CB /* GCDeferralContext.h in Headers */,
                                0FB7F39A15ED8E4600F167B2 /* IndexingHeaderInlines.h in Headers */,
                                0FB7F39B15ED8E4600F167B2 /* IndexingType.h in Headers */,
                                14386A791DD6989C008652C4 /* IndirectEvalExecutable.h in Headers */,
-                               0F75A062200D261D0038E2CF /* AllocatorInlines.h in Headers */,
                                0FBF92B91FD76FFF00AC28A8 /* InferredStructure.h in Headers */,
                                0FBF92BA1FD7700400AC28A8 /* InferredStructureWatchpoint.h in Headers */,
                                0F0A75231B94BFA900110660 /* InferredType.h in Headers */,
                                E33F50791B84225700413856 /* JSInternalPromiseConstructor.h in Headers */,
                                E33F50871B8449EF00413856 /* JSInternalPromiseConstructor.lut.h in Headers */,
                                E33F50851B8437A000413856 /* JSInternalPromiseDeferred.h in Headers */,
-                               0F75A05E200D25F60038E2CF /* ThreadLocalCache.h in Headers */,
                                E33F50751B8421C000413856 /* JSInternalPromisePrototype.h in Headers */,
                                A503FA1E188E0FB000110F14 /* JSJavaScriptCallFramePrototype.h in Headers */,
                                7013CA8C1B491A9400CAE613 /* JSJob.h in Headers */,
                                14E84FA214EE1ACC00D6D5D4 /* WeakImpl.h in Headers */,
                                14BE7D3317135CF400D1807A /* WeakInlines.h in Headers */,
                                A7CA3AE417DA41AE006538AF /* WeakMapConstructor.h in Headers */,
-                               0F75A0662013E4F10038E2CF /* JITAllocator.h in Headers */,
                                E3A32BC71FC83147007D7E76 /* WeakMapImpl.h in Headers */,
                                E393ADD81FE702D00022D681 /* WeakMapImplInlines.h in Headers */,
                                A7CA3AE617DA41AE006538AF /* WeakMapPrototype.h in Headers */,
index 86f56c1..3cd9a88 100644
@@ -466,7 +466,6 @@ ftl/FTLThunks.cpp
 ftl/FTLValueRange.cpp
 
 heap/AlignedMemoryAllocator.cpp
-heap/Allocator.cpp
 heap/BlockDirectory.cpp
 heap/CellAttributes.cpp
 heap/CellContainer.cpp
@@ -501,7 +500,6 @@ heap/IsoCellSet.cpp
 heap/IsoSubspace.cpp
 heap/JITStubRoutineSet.cpp
 heap/LargeAllocation.cpp
-heap/LocalAllocator.cpp
 heap/MachineStackMarker.cpp
 heap/MarkStack.cpp
 heap/MarkStackMergingConstraint.cpp
@@ -520,8 +518,6 @@ heap/StopIfNecessaryTimer.cpp
 heap/Subspace.cpp
 heap/SynchronousStopTheWorldMutatorScheduler.cpp
 heap/Synchronousness.cpp
-heap/ThreadLocalCache.cpp
-heap/ThreadLocalCacheLayout.cpp
 heap/VisitRaceKey.cpp
 heap/Weak.cpp
 heap/WeakBlock.cpp
index a08e1b4..850d641 100644
@@ -1277,12 +1277,6 @@ private:
             case ValueRep::SomeRegister:
                 arg = tmp(value.value());
                 break;
-            case ValueRep::SomeRegisterWithClobber: {
-                Tmp dstTmp = m_code.newTmp(value.value()->resultBank());
-                append(relaxedMoveForType(value.value()->type()), immOrTmp(value.value()), dstTmp);
-                arg = dstTmp;
-                break;
-            }
             case ValueRep::LateRegister:
             case ValueRep::Register:
                 stackmap->earlyClobbered().clear(value.rep().reg());
index 516917b..b375cdd 100644
@@ -118,7 +118,6 @@ bool PatchpointSpecial::admitsStack(Inst& inst, unsigned argIndex)
         case ValueRep::StackArgument:
             return true;
         case ValueRep::SomeRegister:
-        case ValueRep::SomeRegisterWithClobber:
         case ValueRep::SomeEarlyRegister:
         case ValueRep::Register:
         case ValueRep::LateRegister:
index 620d6f8..5eb5b9b 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -110,9 +110,6 @@ void StackmapSpecial::forEachArgImpl(
             case ValueRep::Constant:
                 role = Arg::Use;
                 break;
-            case ValueRep::SomeRegisterWithClobber:
-                role = Arg::UseDef;
-                break;
             case ValueRep::LateRegister:
                 role = Arg::LateUse;
                 break;
@@ -131,10 +128,6 @@ void StackmapSpecial::forEachArgImpl(
             // be able to recover the stackmap value. So, force LateColdUse to preserve the
             // original stackmap value across the Special operation.
             if (!Arg::isLateUse(role) && optionalDefArgWidth && *optionalDefArgWidth < child.value()->resultWidth()) {
-                // The role can only be some kind of def if we did SomeRegisterWithClobber, which is
-                // only allowed for patchpoints. Patchpoints don't use the defArgWidth feature.
-                RELEASE_ASSERT(!Arg::isAnyDef(role));
-                
                 if (Arg::isWarmUse(role))
                     role = Arg::LateUse;
                 else
@@ -252,7 +245,6 @@ bool StackmapSpecial::isArgValidForRep(Air::Code& code, const Air::Arg& arg, con
         // We already verified by isArgValidForValue().
         return true;
     case ValueRep::SomeRegister:
-    case ValueRep::SomeRegisterWithClobber:
     case ValueRep::SomeEarlyRegister:
         return arg.isTmp();
     case ValueRep::LateRegister:
index 0ebaf8a..9b0db2f 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -53,11 +53,6 @@ void StackmapValue::appendSomeRegister(Value* value)
     append(ConstrainedValue(value, ValueRep::SomeRegister));
 }
 
-void StackmapValue::appendSomeRegisterWithClobber(Value* value)
-{
-    append(ConstrainedValue(value, ValueRep::SomeRegisterWithClobber));
-}
-
 void StackmapValue::setConstrainedChild(unsigned index, const ConstrainedValue& constrainedValue)
 {
     child(index) = constrainedValue.value();
index 52b2101..66fc644 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -99,8 +99,7 @@ public:
     // This is a helper for something you might do a lot of: append a value that should be constrained
     // to SomeRegister.
     void appendSomeRegister(Value*);
-    void appendSomeRegisterWithClobber(Value*);
-    
+
     const Vector<ValueRep>& reps() const { return m_reps; }
 
     // Stackmaps allow you to specify that the operation may clobber some registers. Clobbering a register
index bb9de9d..8220eb9 100644
@@ -432,8 +432,21 @@ public:
                 VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
                 if (value->type() == Void)
                     VALIDATE(value->as<PatchpointValue>()->resultConstraint == ValueRep::WarmAny, ("At ", *value));
-                else
+                else {
+                    switch (value->as<PatchpointValue>()->resultConstraint.kind()) {
+                    case ValueRep::WarmAny:
+                    case ValueRep::SomeRegister:
+                    case ValueRep::SomeEarlyRegister:
+                    case ValueRep::Register:
+                    case ValueRep::StackArgument:
+                        break;
+                    default:
+                        VALIDATE(false, ("At ", *value));
+                        break;
+                    }
+                    
                     validateStackmapConstraint(value, ConstrainedValue(value, value->as<PatchpointValue>()->resultConstraint), ConstraintRole::Def);
+                }
                 validateStackmap(value);
                 break;
             case CheckAdd:
@@ -558,24 +571,16 @@ private:
     {
         switch (value.rep().kind()) {
         case ValueRep::WarmAny:
+        case ValueRep::ColdAny:
+        case ValueRep::LateColdAny:
         case ValueRep::SomeRegister:
         case ValueRep::StackArgument:
             break;
-        case ValueRep::LateColdAny:
-        case ValueRep::ColdAny:
-            VALIDATE(role == ConstraintRole::Use, ("At ", *context, ": ", value));
-            break;
-        case ValueRep::SomeRegisterWithClobber:
-            VALIDATE(role == ConstraintRole::Use, ("At ", *context, ": ", value));
-            VALIDATE(context->as<PatchpointValue>(), ("At ", *context));
-            break;
         case ValueRep::SomeEarlyRegister:
             VALIDATE(role == ConstraintRole::Def, ("At ", *context, ": ", value));
             break;
         case ValueRep::Register:
         case ValueRep::LateRegister:
-            if (value.rep().kind() == ValueRep::LateRegister)
-                VALIDATE(role == ConstraintRole::Use, ("At ", *context, ": ", value));
             if (value.rep().reg().isGPR())
                 VALIDATE(isInt(value.value()->type()), ("At ", *context, ": ", value));
             else
index 45a1113..9888d22 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -40,7 +40,6 @@ void ValueRep::addUsedRegistersTo(RegisterSet& set) const
     case ColdAny:
     case LateColdAny:
     case SomeRegister:
-    case SomeRegisterWithClobber:
     case SomeEarlyRegister:
     case Constant:
         return;
@@ -72,7 +71,6 @@ void ValueRep::dump(PrintStream& out) const
     case ColdAny:
     case LateColdAny:
     case SomeRegister:
-    case SomeRegisterWithClobber:
     case SomeEarlyRegister:
         return;
     case LateRegister:
@@ -177,9 +175,6 @@ void printInternal(PrintStream& out, ValueRep::Kind kind)
     case ValueRep::SomeRegister:
         out.print("SomeRegister");
         return;
-    case ValueRep::SomeRegisterWithClobber:
-        out.print("SomeRegisterWithClobber");
-        return;
     case ValueRep::SomeEarlyRegister:
         out.print("SomeEarlyRegister");
         return;
index ced604e..cbf400a 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -65,10 +65,6 @@ public:
         // As an input representation, this means that B3 should pick some register. It could be a
         // register that this claims to clobber!
         SomeRegister,
-        
-        // As an input representation, this means that B3 should pick some register but that this
-        // register is then clobbered with garbage. This only works for patchpoints.
-        SomeRegisterWithClobber,
 
         // As an input representation, this tells us that B3 should pick some register, but implies
         // that the def happens before any of the effects of the stackmap. This is only valid for
@@ -111,7 +107,7 @@ public:
     ValueRep(Kind kind)
         : m_kind(kind)
     {
-        ASSERT(kind == WarmAny || kind == ColdAny || kind == LateColdAny || kind == SomeRegister || kind == SomeRegisterWithClobber || kind == SomeEarlyRegister);
+        ASSERT(kind == WarmAny || kind == ColdAny || kind == LateColdAny || kind == SomeRegister || kind == SomeEarlyRegister);
     }
 
     static ValueRep reg(Reg reg)
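
Note: the removed constraint's one in-tree consumer was FTL's allocateHeapCell (see the FTLLowerDFGToB3.cpp hunks further down). When the allocator was not a compile-time constant, the patchpoint took it in some register that the inline allocation sequence then destroyed. A minimal sketch of that reverted usage, assuming an FTL lowering context with an m_out builder and an allocator LValue; Air treats the constrained input as UseDef, so its register holds garbage afterward:

    // Reverted pattern (patchpoints only): consume the allocator in some
    // register and tell B3 that the register's contents are clobbered.
    PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
    patchpoint->appendSomeRegisterWithClobber(allocator); // input register becomes garbage
    patchpoint->numGPScratchRegisters++;                  // one extra scratch GPR
    patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
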
index 0ce8679..c8f4a49 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -955,9 +955,15 @@ void AccessCase::generateImpl(AccessGenerationState& state)
             size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
 
             if (allocatingInline) {
-                Allocator allocator = vm.jsValueGigacageAuxiliarySpace.allocatorFor(newSize, AllocatorForMode::AllocatorIfExists);
+                BlockDirectory* allocator = vm.jsValueGigacageAuxiliarySpace.allocatorFor(newSize, AllocatorForMode::AllocatorIfExists);
 
-                jit.emitAllocate(scratchGPR, JITAllocator::constant(allocator), scratchGPR2, scratchGPR3, slowPath);
+                if (!allocator) {
+                    // Yuck, this case would suck!
+                    slowPath.append(jit.jump());
+                }
+
+                jit.move(CCallHelpers::TrustedImmPtr(allocator), scratchGPR2);
+                jit.emitAllocate(scratchGPR, allocator, scratchGPR2, scratchGPR3, slowPath);
                 jit.addPtr(CCallHelpers::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR);
 
                 size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
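
The hunk above captures the emitAllocate calling-convention change. A hedged before/after sketch; the two calls come from different revisions and cannot coexist in one build, register names follow the hunk, and "directory" is an illustrative name for the restored BlockDirectory* allocator:

    // Reverted API: a JITAllocator wrapper carries either a constant
    // Allocator handle or JITAllocator::variable() when the allocator is
    // only known at runtime.
    jit.emitAllocate(scratchGPR, JITAllocator::constant(allocator), scratchGPR2, scratchGPR3, slowPath);

    // Restored API: a raw BlockDirectory* that the caller must also preload
    // into a GPR; a null directory means the fast path is never taken.
    if (!directory)
        slowPath.append(jit.jump());
    jit.move(CCallHelpers::TrustedImmPtr(directory), scratchGPR2);
    jit.emitAllocate(scratchGPR, directory, scratchGPR2, scratchGPR3, slowPath);
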
index 6177191..41e5585 100644
@@ -43,7 +43,8 @@ public:
     static ptrdiff_t offsetOfInlineCapacity() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_inlineCapacity); }
 
     ObjectAllocationProfile()
-        : m_inlineCapacity(0)
+        : m_allocator(0)
+        , m_inlineCapacity(0)
     {
     }
 
@@ -62,7 +63,7 @@ public:
 
     void clear()
     {
-        m_allocator = Allocator();
+        m_allocator = nullptr;
         m_structure.clear();
         m_inlineCapacity = 0;
         ASSERT(isNull());
@@ -76,7 +77,7 @@ public:
 private:
     unsigned possibleDefaultPropertyCount(VM&, JSObject* prototype);
 
-    Allocator m_allocator; // Precomputed to make things easier for generated code.
+    BlockDirectory* m_allocator; // Precomputed to make things easier for generated code.
     WriteBarrier<Structure> m_structure;
     unsigned m_inlineCapacity;
 };
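
The two shapes of the m_allocator field above differ in how the null state is encoded and how the cell size is recovered. A non-authoritative sketch, using only names from this hunk and the ObjectAllocationProfileInlines.h hunk below; the two halves come from different revisions and cannot coexist:

    size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
    size_t slop = 0;

    // Reverted design (r227592): Allocator wraps a 32-bit offset into the
    // thread-local cache; offset == UINT_MAX is the null state, and the
    // cell size is resolved through the Heap's ThreadLocalCacheLayout.
    Allocator handle = vm.cellSpace.allocatorForNonVirtual(allocationSize, AllocatorForMode::EnsureAllocator);
    if (handle)
        slop = (handle.cellSize(vm.heap) - allocationSize) / sizeof(WriteBarrier<Unknown>);

    // Restored design: a raw BlockDirectory* whose null state is nullptr
    // and which knows its own cell size directly.
    BlockDirectory* directory = vm.cellSpace.allocatorForNonVirtual(allocationSize, AllocatorForMode::EnsureAllocator);
    if (directory)
        slop = (directory->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>);
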
index 5809d9d..9e21913 100644
@@ -53,7 +53,7 @@ ALWAYS_INLINE void ObjectAllocationProfile::initializeProfile(VM& vm, JSGlobalOb
 
         if (Structure* structure = executable->cachedPolyProtoStructure()) {
             RELEASE_ASSERT(structure->typeInfo().type() == FinalObjectType);
-            m_allocator = Allocator();
+            m_allocator = nullptr;
             m_structure.set(vm, owner, structure);
             m_inlineCapacity = structure->inlineCapacity();
             return;
@@ -99,11 +99,11 @@ ALWAYS_INLINE void ObjectAllocationProfile::initializeProfile(VM& vm, JSGlobalOb
     ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
 
     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
-    Allocator allocator = vm.cellSpace.allocatorForNonVirtual(allocationSize, AllocatorForMode::EnsureAllocator);
+    BlockDirectory* allocator = vm.cellSpace.allocatorForNonVirtual(allocationSize, AllocatorForMode::EnsureAllocator);
 
     // Take advantage of extra inline capacity available in the size class.
     if (allocator) {
-        size_t slop = (allocator.cellSize(vm.heap) - allocationSize) / sizeof(WriteBarrier<Unknown>);
+        size_t slop = (allocator->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>);
         inlineCapacity += slop;
         if (inlineCapacity > JSFinalObject::maxInlineCapacity())
             inlineCapacity = JSFinalObject::maxInlineCapacity();
@@ -113,7 +113,7 @@ ALWAYS_INLINE void ObjectAllocationProfile::initializeProfile(VM& vm, JSGlobalOb
 
     if (isPolyProto) {
         ASSERT(structure->hasPolyProto());
-        m_allocator = Allocator();
+        m_allocator = nullptr;
         executable->setCachedPolyProtoStructure(vm, structure);
     } else {
         if (executable) {
index d2d3e36..841532f 100644
@@ -113,8 +113,9 @@ void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure
     m_jit.move(TrustedImmPtr(0), storageGPR);
 
     if (size) {
-        if (Allocator allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists)) {
-            m_jit.emitAllocate(storageGPR, JITAllocator::constant(allocator), scratchGPR, scratch2GPR, slowCases);
+        if (BlockDirectory* allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists)) {
+            m_jit.move(TrustedImmPtr(allocator), scratchGPR);
+            m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
             
             m_jit.addPtr(
                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
@@ -127,10 +128,11 @@ void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure
     }
 
     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
-    Allocator allocator = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
-    if (allocator) {
+    BlockDirectory* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
+    if (allocatorPtr) {
+        m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
         uint32_t mask = WTF::computeIndexingMask(vectorLength);
-        emitAllocateJSObject(resultGPR, JITAllocator::constant(allocator), scratchGPR, TrustedImmPtr(structure), storageGPR, TrustedImm32(mask), scratch2GPR, slowCases);
+        emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, TrustedImm32(mask), scratch2GPR, slowCases);
         m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
     } else
         slowCases.append(m_jit.jump());
@@ -4206,8 +4208,9 @@ void SpeculativeJIT::compileMakeRope(Node* node)
     GPRReg scratchGPR = scratch.gpr();
     
     JITCompiler::JumpList slowPath;
-    Allocator allocatorValue = subspaceFor<JSRopeString>(*m_jit.vm())->allocatorForNonVirtual(sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);
-    emitAllocateJSCell(resultGPR, JITAllocator::constant(allocatorValue), allocatorGPR, TrustedImmPtr(m_jit.graph().registerStructure(m_jit.vm()->stringStructure.get())), scratchGPR, slowPath);
+    BlockDirectory* blockDirectory = subspaceFor<JSRopeString>(*m_jit.vm())->allocatorForNonVirtual(sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);
+    m_jit.move(TrustedImmPtr(blockDirectory), allocatorGPR);
+    emitAllocateJSCell(resultGPR, blockDirectory, allocatorGPR, TrustedImmPtr(m_jit.graph().registerStructure(m_jit.vm()->stringStructure.get())), scratchGPR, slowPath);
         
     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
     for (unsigned i = 0; i < numOpGPRs; ++i)
@@ -8381,7 +8384,7 @@ void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
     
     size_t size = initialOutOfLineCapacity * sizeof(JSValue);
 
-    Allocator allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);
+    BlockDirectory* allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);
 
     if (!allocator || node->transition()->previous->couldHaveIndexingHeader()) {
         SpeculateCellOperand base(this, node->child1());
@@ -8406,8 +8409,9 @@ void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
     GPRReg scratchGPR2 = scratch2.gpr();
     GPRReg scratchGPR3 = scratch3.gpr();
         
+    m_jit.move(TrustedImmPtr(allocator), scratchGPR2);
     JITCompiler::JumpList slowPath;
-    m_jit.emitAllocate(scratchGPR1, JITAllocator::constant(allocator), scratchGPR2, scratchGPR3, slowPath);
+    m_jit.emitAllocate(scratchGPR1, allocator, scratchGPR2, scratchGPR3, slowPath);
     m_jit.addPtr(JITCompiler::TrustedImm32(size + sizeof(IndexingHeader)), scratchGPR1);
     
     for (ptrdiff_t offset = 0; offset < static_cast<ptrdiff_t>(size); offset += sizeof(void*))
@@ -8425,7 +8429,7 @@ void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
     size_t newSize = oldSize * outOfLineGrowthFactor;
     ASSERT(newSize == node->transition()->next->outOfLineCapacity() * sizeof(JSValue));
     
-    Allocator allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(newSize, AllocatorForMode::AllocatorIfExists);
+    BlockDirectory* allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(newSize, AllocatorForMode::AllocatorIfExists);
 
     if (!allocator || node->transition()->previous->couldHaveIndexingHeader()) {
         SpeculateCellOperand base(this, node->child1());
@@ -8453,7 +8457,8 @@ void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
     GPRReg scratchGPR3 = scratch3.gpr();
     
     JITCompiler::JumpList slowPath;
-    m_jit.emitAllocate(scratchGPR1, JITAllocator::constant(allocator), scratchGPR2, scratchGPR3, slowPath);
+    m_jit.move(TrustedImmPtr(allocator), scratchGPR2);
+    m_jit.emitAllocate(scratchGPR1, allocator, scratchGPR2, scratchGPR3, slowPath);
     
     m_jit.addPtr(JITCompiler::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR1);
         
@@ -11443,14 +11448,14 @@ void SpeculativeJIT::compileCreateThis(Node* node)
         JITCompiler::Address(calleeGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(JSFunctionType)));
     m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR);
     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR));
-    m_jit.load32(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
+    m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
     m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR);
 
-    slowPath.append(m_jit.branch32(MacroAssembler::Equal, allocatorGPR, TrustedImm32(Allocator().offset())));
+    slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR));
 
     auto butterfly = TrustedImmPtr(nullptr);
     auto mask = TrustedImm32(0);
-    emitAllocateJSObject(resultGPR, JITAllocator::variable(), allocatorGPR, structureGPR, butterfly, mask, scratchGPR, slowPath);
+    emitAllocateJSObject(resultGPR, nullptr, allocatorGPR, structureGPR, butterfly, mask, scratchGPR, slowPath);
 
     m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR);
     m_jit.load32(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfInlineCapacity()), inlineCapacityGPR);
@@ -11476,17 +11481,14 @@ void SpeculativeJIT::compileNewObject(Node* node)
 
     RegisteredStructure structure = node->structure();
     size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
-    Allocator allocatorValue = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
+    BlockDirectory* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
 
-    if (!allocatorValue)
-        slowPath.append(m_jit.jump());
-    else {
-        auto butterfly = TrustedImmPtr(nullptr);
-        auto mask = TrustedImm32(0);
-        emitAllocateJSObject(resultGPR, JITAllocator::constant(allocatorValue), allocatorGPR, TrustedImmPtr(structure), butterfly, mask, scratchGPR, slowPath);
-        m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
-        m_jit.mutatorFence(*m_jit.vm());
-    }
+    m_jit.move(TrustedImmPtr(allocatorPtr), allocatorGPR);
+    auto butterfly = TrustedImmPtr(nullptr);
+    auto mask = TrustedImm32(0);
+    emitAllocateJSObject(resultGPR, allocatorPtr, allocatorGPR, TrustedImmPtr(structure), butterfly, mask, scratchGPR, slowPath);
+    m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
+    m_jit.mutatorFence(*m_jit.vm());
 
     addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR, structure));
 
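The compileCreateThis hunk above also shows how the null check on the profiled allocator changes width. A before/after sketch taken from that hunk; the two checks come from different revisions:

    // Reverted: the allocator is a 32-bit offset, so load 32 bits and
    // compare against the null handle's offset (UINT_MAX).
    m_jit.load32(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
    slowPath.append(m_jit.branch32(MacroAssembler::Equal, allocatorGPR, TrustedImm32(Allocator().offset())));

    // Restored: the allocator is a pointer, so load pointer-width and
    // test for nullptr.
    m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
    slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR));
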
index b45c89a..808c837 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -3142,7 +3142,7 @@ public:
     // Allocator for a cell of a specific size.
     template <typename StructureType> // StructureType can be GPR or ImmPtr.
     void emitAllocateJSCell(
-        GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure,
+        GPRReg resultGPR, BlockDirectory* allocator, GPRReg allocatorGPR, StructureType structure,
         GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
     {
         m_jit.emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath);
@@ -3151,7 +3151,7 @@ public:
     // Allocator for an object of a specific size.
     template <typename StructureType, typename StorageType, typename MaskType> // StructureType, StorageType and, MaskType can be GPR or ImmPtr.
     void emitAllocateJSObject(
-        GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure,
+        GPRReg resultGPR, BlockDirectory* allocator, GPRReg allocatorGPR, StructureType structure,
         StorageType storage, MaskType mask, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
     {
         m_jit.emitAllocateJSObject(
index af6eddf..862e735 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -131,7 +131,7 @@ namespace JSC { namespace FTL {
 
 #define FOR_EACH_INDEXED_ABSTRACT_HEAP(macro) \
     macro(ArrayStorage_vector, ArrayStorage::vectorOffset(), sizeof(WriteBarrier<Unknown>)) \
-    macro(CompleteSubspace_allocatorForSizeStep, CompleteSubspace::offsetOfAllocatorForSizeStep(), sizeof(Allocator)) \
+    macro(CompleteSubspace_allocatorForSizeStep, CompleteSubspace::offsetOfAllocatorForSizeStep(), sizeof(BlockDirectory*)) \
     macro(DirectArguments_storage, DirectArguments::storageOffset(), sizeof(EncodedJSValue)) \
     macro(JSLexicalEnvironment_variables, JSLexicalEnvironment::offsetOfVariables(), sizeof(EncodedJSValue)) \
     macro(JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, 0, sizeof(WriteBarrier<JSString>)) \
index 4bf932a..dedd61c 100644
@@ -5876,10 +5876,10 @@ private:
         
         LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
         
-        Allocator allocator = subspaceFor<JSRopeString>(vm())->allocatorForNonVirtual(sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);
+        BlockDirectory* allocator = subspaceFor<JSRopeString>(vm())->allocatorForNonVirtual(sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);
         
         LValue result = allocateCell(
-            m_out.constInt32(allocator.offset()), vm().stringStructure.get(), slowPath);
+            m_out.constIntPtr(allocator), vm().stringStructure.get(), slowPath);
         
         m_out.storePtr(m_out.intPtrZero, result, m_heaps.JSString_value);
         for (unsigned i = 0; i < numKids; ++i)
@@ -9909,7 +9909,7 @@ private:
             
             if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
                 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
-                Allocator cellAllocator = subspaceFor<JSFinalObject>(vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
+                BlockDirectory* cellAllocator = subspaceFor<JSFinalObject>(vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
 
                 bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
                 unsigned indexingHeaderSize = 0;
@@ -9968,7 +9968,7 @@ private:
 
                 LValue mask = computeButterflyIndexingMask(vectorLength);
                 LValue fastObjectValue = allocateObject(
-                    m_out.constInt32(cellAllocator.offset()), structure, fastButterflyValue, mask, slowPath);
+                    m_out.constIntPtr(cellAllocator), structure, fastButterflyValue, mask, slowPath);
 
                 ValueFromBlock fastObject = m_out.anchor(fastObjectValue);
                 ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue);
@@ -10955,8 +10955,8 @@ private:
         LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
 
         size_t sizeInBytes = sizeInValues * sizeof(JSValue);
-        Allocator allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(sizeInBytes, AllocatorForMode::AllocatorIfExists);
-        LValue startOfStorage = allocateHeapCell(m_out.constInt32(allocator.offset()), slowPath);
+        BlockDirectory* allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(sizeInBytes, AllocatorForMode::AllocatorIfExists);
+        LValue startOfStorage = allocateHeapCell(m_out.constIntPtr(allocator), slowPath);
         ValueFromBlock fastButterfly = m_out.anchor(
             m_out.add(m_out.constIntPtr(sizeInBytes + sizeof(IndexingHeader)), startOfStorage));
         m_out.jump(continuation);
@@ -11988,28 +11988,16 @@ private:
 
     LValue allocateHeapCell(LValue allocator, LBasicBlock slowPath)
     {
-        JITAllocator actualAllocator;
-        if (allocator->hasInt32())
-            actualAllocator = JITAllocator::constant(Allocator(allocator->asInt32()));
-        else
-            actualAllocator = JITAllocator::variable();
+        BlockDirectory* actualAllocator = nullptr;
+        if (allocator->hasIntPtr())
+            actualAllocator = bitwise_cast<BlockDirectory*>(allocator->asIntPtr());
         
-        if (actualAllocator.isConstant()) {
-            if (!actualAllocator.allocator()) {
-                LBasicBlock haveAllocator = m_out.newBlock();
-                LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
-                m_out.jump(slowPath);
-                m_out.appendTo(haveAllocator, lastNext);
-                return m_out.intPtrZero;
-            }
-        } else {
+        if (!actualAllocator) {
             // This means that either we know that the allocator is null or we don't know what the
             // allocator is. In either case, we need the null check.
             LBasicBlock haveAllocator = m_out.newBlock();
             LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
-            m_out.branch(
-                m_out.notEqual(allocator, m_out.constInt32(Allocator().offset())),
-                usually(haveAllocator), rarely(slowPath));
+            m_out.branch(allocator, usually(haveAllocator), rarely(slowPath));
             m_out.appendTo(haveAllocator, lastNext);
         }
         
@@ -12019,10 +12007,7 @@ private:
         
         PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
         patchpoint->effects.terminal = true;
-        if (actualAllocator.isConstant())
-            patchpoint->numGPScratchRegisters++;
-        else
-            patchpoint->appendSomeRegisterWithClobber(allocator);
+        patchpoint->appendSomeRegister(allocator);
         patchpoint->numGPScratchRegisters++;
         patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
         
@@ -12033,12 +12018,6 @@ private:
             [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                 CCallHelpers::JumpList jumpToSlowPath;
                 
-                GPRReg allocatorGPR;
-                if (actualAllocator.isConstant())
-                    allocatorGPR = params.gpScratch(1);
-                else
-                    allocatorGPR = params[1].gpr();
-                
                 // We use a patchpoint to emit the allocation path because whenever we mess with
                 // allocation paths, we already reason about them at the machine code level. We know
                 // exactly what instruction sequence we want. We're confident that no compiler
@@ -12046,7 +12025,7 @@ private:
                 // AssemblyHelpers::emitAllocate(). That way, the same optimized path is shared by
                 // all of the compiler tiers.
                 jit.emitAllocateWithNonNullAllocator(
-                    params[0].gpr(), actualAllocator, allocatorGPR, params.gpScratch(0),
+                    params[0].gpr(), actualAllocator, params[1].gpr(), params.gpScratch(0),
                     jumpToSlowPath);
                 
                 CCallHelpers::Jump jumpToSuccess;
@@ -12138,8 +12117,8 @@ private:
     LValue allocateObject(
         size_t size, StructureType structure, LValue butterfly, LValue indexingMask, LBasicBlock slowPath)
     {
-        Allocator allocator = subspaceFor<ClassType>(vm())->allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);
-        return allocateObject(m_out.constInt32(allocator.offset()), structure, butterfly, indexingMask, slowPath);
+        BlockDirectory* allocator = subspaceFor<ClassType>(vm())->allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);
+        return allocateObject(m_out.constIntPtr(allocator), structure, butterfly, indexingMask, slowPath);
     }
     
     template<typename ClassType, typename StructureType>
@@ -12158,16 +12137,16 @@ private:
             CompleteSubspace* actualSubspace = bitwise_cast<CompleteSubspace*>(subspace->asIntPtr());
             size_t actualSize = size->asIntPtr();
             
-            Allocator actualAllocator = actualSubspace->allocatorForNonVirtual(actualSize, AllocatorForMode::AllocatorIfExists);
+            BlockDirectory* actualAllocator = actualSubspace->allocatorForNonVirtual(actualSize, AllocatorForMode::AllocatorIfExists);
             if (!actualAllocator) {
                 LBasicBlock continuation = m_out.newBlock();
                 LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
                 m_out.jump(slowPath);
                 m_out.appendTo(continuation, lastNext);
-                return m_out.int32Zero;
+                return m_out.intPtrZero;
             }
             
-            return m_out.constInt32(actualAllocator.offset());
+            return m_out.constIntPtr(actualAllocator);
         }
         
         unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);
@@ -12186,7 +12165,7 @@ private:
         
         m_out.appendTo(continuation, lastNext);
         
-        return m_out.load32(
+        return m_out.loadPtr(
             m_out.baseIndex(
                 m_heaps.CompleteSubspace_allocatorForSizeStep,
                 subspace, m_out.sub(sizeClassIndex, m_out.intPtrOne)));
@@ -12201,7 +12180,8 @@ private:
     LValue allocateVariableSizedObject(
         LValue size, RegisteredStructure structure, LValue butterfly, LValue butterflyIndexingMask, LBasicBlock slowPath)
     {
-        LValue allocator = allocatorForSize(*subspaceFor<ClassType>(vm()), size, slowPath);
+        LValue allocator = allocatorForSize(
+            *subspaceFor<ClassType>(vm()), size, slowPath);
         return allocateObject(allocator, structure, butterfly, butterflyIndexingMask, slowPath);
     }
 
@@ -12209,14 +12189,15 @@ private:
     LValue allocateVariableSizedCell(
         LValue size, Structure* structure, LBasicBlock slowPath)
     {
-        LValue allocator = allocatorForSize(*subspaceFor<ClassType>(vm()), size, slowPath);
+        LValue allocator = allocatorForSize(
+            *subspaceFor<ClassType>(vm()), size, slowPath);
         return allocateCell(allocator, structure, slowPath);
     }
     
     LValue allocateObject(RegisteredStructure structure)
     {
         size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
-        Allocator allocator = subspaceFor<JSFinalObject>(vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
+        BlockDirectory* allocator = subspaceFor<JSFinalObject>(vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
         
         // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
         // instead of putting it on the slow path.
@@ -12228,7 +12209,7 @@ private:
         LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
         
         ValueFromBlock fastResult = m_out.anchor(allocateObject(
-            m_out.constInt32(allocator.offset()), structure, m_out.intPtrZero, m_out.int32Zero, slowPath));
+            m_out.constIntPtr(allocator), structure, m_out.intPtrZero, m_out.int32Zero, slowPath));
         
         m_out.jump(continuation);
         
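The load32/loadPtr switch in allocatorForSize above follows from the width of the CompleteSubspace size-class table entries: the reverted Allocator handle is a 32-bit offset (compare sizeof(Allocator) with sizeof(BlockDirectory*) in the FTLAbstractHeapRepository.h hunk), while the restored table stores full pointers. A before/after sketch of the table lookup, lifted from the hunk; the two loads come from different revisions, and the result names are illustrative:

    // Reverted: the per-size-step table holds 32-bit Allocator offsets.
    LValue allocatorOffset = m_out.load32(
        m_out.baseIndex(
            m_heaps.CompleteSubspace_allocatorForSizeStep,
            subspace, m_out.sub(sizeClassIndex, m_out.intPtrOne)));

    // Restored: the same table holds pointer-width BlockDirectory* entries.
    LValue allocatorPointer = m_out.loadPtr(
        m_out.baseIndex(
            m_heaps.CompleteSubspace_allocatorForSizeStep,
            subspace, m_out.sub(sizeClassIndex, m_out.intPtrOne)));
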
diff --git a/Source/JavaScriptCore/heap/Allocator.cpp b/Source/JavaScriptCore/heap/Allocator.cpp
deleted file mode 100644
index b1281e4..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "Allocator.h"
-
-#include "Heap.h"
-#include "ThreadLocalCacheLayout.h"
-
-namespace JSC {
-
-unsigned Allocator::cellSize(Heap& heap) const
-{
-    return heap.threadLocalCacheLayout().directory(m_offset)->cellSize();
-}
-
-} // namespace JSC
-
diff --git a/Source/JavaScriptCore/heap/Allocator.h b/Source/JavaScriptCore/heap/Allocator.h
deleted file mode 100644
index 7c3da48..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include "AllocationFailureMode.h"
-#include <climits>
-
-namespace JSC {
-
-class GCDeferralContext;
-class Heap;
-class VM;
-
-class Allocator {
-public:
-    Allocator() { }
-    
-    explicit Allocator(unsigned offset)
-        : m_offset(offset)
-    {
-    }
-    
-    void* allocate(VM&, GCDeferralContext*, AllocationFailureMode) const;
-    
-    // This version calls FailureFunc if we have a null allocator or if the TLC hasn't been resized
-    // to include this allocator.
-    template<typename FailureFunc>
-    void* tryAllocate(VM&, GCDeferralContext*, AllocationFailureMode, const FailureFunc&) const;
-    
-    unsigned cellSize(Heap&) const;
-    
-    unsigned offset() const { return m_offset; }
-    
-    bool operator==(const Allocator& other) const { return m_offset == other.offset(); }
-    bool operator!=(const Allocator& other) const { return !(*this == other); }
-    explicit operator bool() const { return *this != Allocator(); }
-    
-private:
-    unsigned m_offset { UINT_MAX };
-};
-
-} // namespace JSC
-
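
Note: the Allocator deleted above was a plain offset handle: UINT_MAX means null, and any other value is a byte offset into the thread-local cache. A minimal sketch of the call-site idiom the header supported (illustrative only, not compilable outside JSC; `subspace`, `size`, and `vm` are hypothetical locals):

    Allocator allocator = subspace.allocatorFor(size, AllocatorForMode::EnsureAllocator);
    void* cell = nullptr;
    if (allocator) // operator bool(): true when the offset is not the null UINT_MAX
        cell = allocator.allocate(vm, nullptr, AllocationFailureMode::ReturnNull);
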
diff --git a/Source/JavaScriptCore/heap/AllocatorInlines.h b/Source/JavaScriptCore/heap/AllocatorInlines.h
deleted file mode 100644 (file)
index 5f0c73e..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include "Allocator.h"
-#include "ThreadLocalCache.h"
-
-namespace JSC {
-
-inline void* Allocator::allocate(VM& vm, GCDeferralContext* context, AllocationFailureMode mode) const
-{
-    return ThreadLocalCache::allocator(vm, m_offset).allocate(context, mode);
-}
-
-template<typename FailureFunc>
-void* Allocator::tryAllocate(VM& vm, GCDeferralContext* context, AllocationFailureMode mode, const FailureFunc& failureFunc) const
-{
-    void* result;
-    ThreadLocalCache::tryGetAllocator(
-        vm, m_offset,
-        [&] (LocalAllocator& allocator) {
-            result = allocator.allocate(context, mode);
-        },
-        [&] () {
-            result = failureFunc();
-        });
-    return result;
-}
-
-} // namespace JSC
-
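
The deleted tryAllocate() above implemented a two-lambda dispatch: the success lambda ran against the thread's LocalAllocator; the failure lambda ran when the allocator was null or the TLC had not yet grown to cover its offset. A hedged sketch of that contract, mirroring the CompleteSubspace caller also removed in this patch:

    void* result = allocator.tryAllocate(
        vm, deferralContext, AllocationFailureMode::ReturnNull,
        [&] () {
            // Fallback: create the allocator if needed and/or take the slow path.
            return subspace.allocateSlow(vm, size, deferralContext, AllocationFailureMode::ReturnNull);
        });
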
index 6cdbc01..8118468 100644 (file)
@@ -26,6 +26,7 @@
 #include "config.h"
 #include "BlockDirectory.h"
 
+#include "AllocatingScope.h"
 #include "BlockDirectoryInlines.h"
 #include "GCActivityCallback.h"
 #include "Heap.h"
 
 namespace JSC {
 
+static constexpr bool tradeDestructorBlocks = true;
+
 BlockDirectory::BlockDirectory(Heap* heap, size_t cellSize)
-    : m_cellSize(static_cast<unsigned>(cellSize))
+    : m_freeList(cellSize)
+    , m_currentBlock(nullptr)
+    , m_lastActiveBlock(nullptr)
+    , m_cellSize(static_cast<unsigned>(cellSize))
     , m_heap(heap)
 {
-    heap->threadLocalCacheLayout().allocateOffset(this);
 }
 
 void BlockDirectory::setSubspace(Subspace* subspace)
@@ -76,14 +81,143 @@ MarkedBlock::Handle* BlockDirectory::findEmptyBlockToSteal()
     return m_blocks[m_emptyCursor];
 }
 
-MarkedBlock::Handle* BlockDirectory::findBlockForAllocation()
+void BlockDirectory::didConsumeFreeList()
+{
+    if (m_currentBlock)
+        m_currentBlock->didConsumeFreeList();
+    
+    m_freeList.clear();
+    m_currentBlock = nullptr;
+}
+
+void* BlockDirectory::tryAllocateWithoutCollecting()
 {
-    m_allocationCursor = (m_canAllocateButNotEmpty | m_empty).findBit(m_allocationCursor, true);
-    if (m_allocationCursor >= m_blocks.size())
+    SuperSamplerScope superSamplerScope(false);
+    
+    ASSERT(!m_currentBlock);
+    ASSERT(m_freeList.allocationWillFail());
+    
+    for (;;) {
+        m_allocationCursor = (m_canAllocateButNotEmpty | m_empty).findBit(m_allocationCursor, true);
+        if (m_allocationCursor >= m_blocks.size())
+            break;
+        
+        setIsCanAllocateButNotEmpty(NoLockingNecessary, m_allocationCursor, false);
+
+        if (void* result = tryAllocateIn(m_blocks[m_allocationCursor]))
+            return result;
+    }
+    
+    if (Options::stealEmptyBlocksFromOtherAllocators()
+        && (tradeDestructorBlocks || !needsDestruction())) {
+        if (MarkedBlock::Handle* block = m_subspace->findEmptyBlockToSteal()) {
+            RELEASE_ASSERT(block->alignedMemoryAllocator() == m_subspace->alignedMemoryAllocator());
+            
+            block->sweep(nullptr);
+            
+            // It's good that this clears canAllocateButNotEmpty as well as all other bits,
+            // because there is a remote chance that a block may have both canAllocateButNotEmpty
+            // and empty set at the same time.
+            block->removeFromDirectory();
+            addBlock(block);
+            return allocateIn(block);
+        }
+    }
+    
+    return nullptr;
+}
+
+void* BlockDirectory::allocateIn(MarkedBlock::Handle* block)
+{
+    void* result = tryAllocateIn(block);
+    RELEASE_ASSERT(result);
+    return result;
+}
+
+void* BlockDirectory::tryAllocateIn(MarkedBlock::Handle* block)
+{
+    ASSERT(block);
+    ASSERT(!block->isFreeListed());
+    
+    block->sweep(&m_freeList);
+    
+    // It's possible to stumble on a completely full block. Marking tries to retire these, but
+    // that algorithm is racy and may forget to do it sometimes.
+    if (m_freeList.allocationWillFail()) {
+        ASSERT(block->isFreeListed());
+        block->unsweepWithNoNewlyAllocated();
+        ASSERT(!block->isFreeListed());
+        ASSERT(!isEmpty(NoLockingNecessary, block));
+        ASSERT(!isCanAllocateButNotEmpty(NoLockingNecessary, block));
         return nullptr;
+    }
+    
+    m_currentBlock = block;
+    
+    void* result = m_freeList.allocate(
+        [] () -> HeapCell* {
+            RELEASE_ASSERT_NOT_REACHED();
+            return nullptr;
+        });
+    setIsEden(NoLockingNecessary, m_currentBlock, true);
+    markedSpace().didAllocateInBlock(m_currentBlock);
+    return result;
+}
+
+ALWAYS_INLINE void BlockDirectory::doTestCollectionsIfNeeded(GCDeferralContext* deferralContext)
+{
+    if (!Options::slowPathAllocsBetweenGCs())
+        return;
+
+    static unsigned allocationCount = 0;
+    if (!allocationCount) {
+        if (!m_heap->isDeferred()) {
+            if (deferralContext)
+                deferralContext->m_shouldGC = true;
+            else
+                m_heap->collectNow(Sync, CollectionScope::Full);
+        }
+    }
+    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
+        allocationCount = 0;
+}
+
+void* BlockDirectory::allocateSlowCase(GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
+{
+    SuperSamplerScope superSamplerScope(false);
+    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
+    doTestCollectionsIfNeeded(deferralContext);
+
+    ASSERT(!markedSpace().isIterating());
+    m_heap->didAllocate(m_freeList.originalSize());
+    
+    didConsumeFreeList();
+    
+    AllocatingScope helpingHeap(*m_heap);
+
+    m_heap->collectIfNecessaryOrDefer(deferralContext);
     
-    setIsCanAllocateButNotEmpty(NoLockingNecessary, m_allocationCursor, false);
-    return m_blocks[m_allocationCursor];
+    // Goofy corner case: the GC called a callback and now this directory has a currentBlock. This only
+    // happens when running WebKit tests, which inject a callback into the GC's finalization.
+    if (UNLIKELY(m_currentBlock))
+        return allocate(deferralContext, failureMode);
+    
+    void* result = tryAllocateWithoutCollecting();
+    
+    if (LIKELY(result))
+        return result;
+    
+    MarkedBlock::Handle* block = tryAllocateBlock();
+    if (!block) {
+        if (failureMode == AllocationFailureMode::Assert)
+            RELEASE_ASSERT_NOT_REACHED();
+        else
+            return nullptr;
+    }
+    addBlock(block);
+    result = allocateIn(block);
+    ASSERT(result);
+    return result;
 }
 
 static size_t blockHeaderSize()
@@ -179,19 +313,24 @@ void BlockDirectory::stopAllocating()
 {
     if (false)
         dataLog(RawPointer(this), ": BlockDirectory::stopAllocating!\n");
-    m_localAllocators.forEach(
-        [&] (LocalAllocator* allocator) {
-            allocator->stopAllocating();
-        });
+    ASSERT(!m_lastActiveBlock);
+    if (!m_currentBlock) {
+        ASSERT(m_freeList.allocationWillFail());
+        return;
+    }
+    
+    m_currentBlock->stopAllocating(m_freeList);
+    m_lastActiveBlock = m_currentBlock;
+    m_currentBlock = nullptr;
+    m_freeList.clear();
 }
 
 void BlockDirectory::prepareForAllocation()
 {
-    m_localAllocators.forEach(
-        [&] (LocalAllocator* allocator) {
-            allocator->prepareForAllocation();
-        });
-    
+    m_lastActiveBlock = nullptr;
+    m_currentBlock = nullptr;
+    m_freeList.clear();
+
     m_allocationCursor = 0;
     m_emptyCursor = 0;
     m_unsweptCursor = 0;
@@ -205,21 +344,6 @@ void BlockDirectory::prepareForAllocation()
     }
 }
 
-void BlockDirectory::stopAllocatingForGood()
-{
-    if (false)
-        dataLog(RawPointer(this), ": BlockDirectory::stopAllocatingForGood!\n");
-    
-    m_localAllocators.forEach(
-        [&] (LocalAllocator* allocator) {
-            allocator->stopAllocatingForGood();
-        });
-
-    auto locker = holdLock(m_localAllocatorsLock);
-    while (!m_localAllocators.isEmpty())
-        m_localAllocators.begin()->remove();
-}
-
 void BlockDirectory::lastChanceToFinalize()
 {
     forEachBlock(
@@ -230,10 +354,12 @@ void BlockDirectory::lastChanceToFinalize()
 
 void BlockDirectory::resumeAllocating()
 {
-    m_localAllocators.forEach(
-        [&] (LocalAllocator* allocator) {
-            allocator->resumeAllocating();
-        });
+    if (!m_lastActiveBlock)
+        return;
+
+    m_lastActiveBlock->resumeAllocating(m_freeList);
+    m_currentBlock = m_lastActiveBlock;
+    m_lastActiveBlock = nullptr;
 }
 
 void BlockDirectory::beginMarkingForFullCollection()
@@ -253,7 +379,7 @@ void BlockDirectory::endMarking()
     // know what kind of collection it is. That knowledge is already encoded in the m_markingXYZ
     // vectors.
     
-    if (!Options::tradeDestructorBlocks() && needsDestruction()) {
+    if (!tradeDestructorBlocks && needsDestruction()) {
         ASSERT(m_empty.isEmpty());
         m_canAllocateButNotEmpty = m_live & ~m_markingRetired;
     } else {
@@ -386,15 +512,5 @@ MarkedSpace& BlockDirectory::markedSpace() const
     return m_subspace->space();
 }
 
-bool BlockDirectory::isFreeListedCell(const void* target)
-{
-    bool result = false;
-    m_localAllocators.forEach(
-        [&] (LocalAllocator* allocator) {
-            result |= allocator->isFreeListedCell(target);
-        });
-    return result;
-}
-
 } // namespace JSC
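
With TLCs reverted, BlockDirectory itself owns the allocation state again. A condensed map of the restored slow path shown in the hunks above (summary only; no behavior beyond what the patch restores):

    // BlockDirectory::allocateSlowCase, in order:
    //   1. didConsumeFreeList()            -- retire the current block, clear the free list
    //   2. collectIfNecessaryOrDefer()     -- possibly run a GC first
    //   3. tryAllocateWithoutCollecting()  -- re-sweep candidate blocks, or steal an empty one
    //   4. tryAllocateBlock() + addBlock() -- finally, allocate a fresh MarkedBlock
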
 
index 636164f..cd0d22b 100644 (file)
 #pragma once
 
 #include "AllocationFailureMode.h"
-#include "Allocator.h"
 #include "CellAttributes.h"
 #include "FreeList.h"
-#include "LocalAllocator.h"
 #include "MarkedBlock.h"
 #include <wtf/DataLog.h>
 #include <wtf/FastBitVector.h>
@@ -43,7 +41,6 @@ class Heap;
 class IsoCellSet;
 class MarkedSpace;
 class LLIntOffsetsExtractor;
-class ThreadLocalCacheLayout;
 
 #define FOR_EACH_BLOCK_DIRECTORY_BIT(macro) \
     macro(live, Live) /* The set of block indices that have actual blocks. */\
@@ -79,12 +76,14 @@ class BlockDirectory {
     friend class LLIntOffsetsExtractor;
 
 public:
+    static ptrdiff_t offsetOfFreeList();
+    static ptrdiff_t offsetOfCellSize();
+
     BlockDirectory(Heap*, size_t cellSize);
     void setSubspace(Subspace*);
     void lastChanceToFinalize();
     void prepareForAllocation();
     void stopAllocating();
-    void stopAllocatingForGood();
     void resumeAllocating();
     void beginMarkingForFullCollection();
     void endMarking();
@@ -98,9 +97,10 @@ public:
     bool needsDestruction() const { return m_attributes.destruction == NeedsDestruction; }
     DestructionMode destruction() const { return m_attributes.destruction; }
     HeapCell::Kind cellKind() const { return m_attributes.cellKind; }
+    void* allocate(GCDeferralContext*, AllocationFailureMode);
     Heap* heap() { return m_heap; }
 
-    bool isFreeListedCell(const void* target);
+    bool isFreeListedCell(const void* target) const;
 
     template<typename Functor> void forEachBlock(const Functor&);
     template<typename Functor> void forEachNotEmptyBlock(const Functor&);
@@ -157,20 +157,24 @@ public:
     Subspace* subspace() const { return m_subspace; }
     MarkedSpace& markedSpace() const;
     
-    Allocator allocator() const { return Allocator(m_tlcOffset); }
+    const FreeList& freeList() const { return m_freeList; }
     
     void dump(PrintStream&) const;
     void dumpBits(PrintStream& = WTF::dataFile());
     
 private:
-    friend class LocalAllocator;
     friend class IsoCellSet;
     friend class MarkedBlock;
-    friend class ThreadLocalCacheLayout;
-    
-    MarkedBlock::Handle* findBlockForAllocation();
     
+    JS_EXPORT_PRIVATE void* allocateSlowCase(GCDeferralContext*, AllocationFailureMode failureMode);
+    void didConsumeFreeList();
+    void* tryAllocateWithoutCollecting();
     MarkedBlock::Handle* tryAllocateBlock();
+    void* tryAllocateIn(MarkedBlock::Handle*);
+    void* allocateIn(MarkedBlock::Handle*);
+    ALWAYS_INLINE void doTestCollectionsIfNeeded(GCDeferralContext*);
+    
+    FreeList m_freeList;
     
     Vector<MarkedBlock::Handle*> m_blocks;
     Vector<unsigned> m_freeBlockIndices;
@@ -189,6 +193,10 @@ private:
     size_t m_emptyCursor { 0 }; // Points to the next block that is a candidate for empty allocation (allocating in empty blocks).
     size_t m_unsweptCursor { 0 }; // Points to the next block that is a candidate for incremental sweeping.
     
+    MarkedBlock::Handle* m_currentBlock;
+    MarkedBlock::Handle* m_lastActiveBlock;
+
+    Lock m_lock;
     unsigned m_cellSize;
     CellAttributes m_attributes;
     // FIXME: All of these should probably be references.
@@ -198,10 +206,16 @@ private:
     BlockDirectory* m_nextDirectory { nullptr };
     BlockDirectory* m_nextDirectoryInSubspace { nullptr };
     BlockDirectory* m_nextDirectoryInAlignedMemoryAllocator { nullptr };
-    
-    Lock m_localAllocatorsLock;
-    size_t m_tlcOffset;
-    SentinelLinkedList<LocalAllocator, BasicRawSentinelNode<LocalAllocator>> m_localAllocators;
 };
 
+inline ptrdiff_t BlockDirectory::offsetOfFreeList()
+{
+    return OBJECT_OFFSETOF(BlockDirectory, m_freeList);
+}
+
+inline ptrdiff_t BlockDirectory::offsetOfCellSize()
+{
+    return OBJECT_OFFSETOF(BlockDirectory, m_cellSize);
+}
+
 } // namespace JSC
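
offsetOfFreeList() and offsetOfCellSize() exist so JIT-emitted inline allocators can address the directory's fields directly from generated code. Illustrative only; directoryGPR and scratchGPR are hypothetical register names, while load32 and Address are standard MacroAssembler operations:

    // Load the directory's cell size straight from memory in generated code.
    jit.load32(
        MacroAssembler::Address(directoryGPR, BlockDirectory::offsetOfCellSize()),
        scratchGPR);
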
index 41cbb49..5001a42 100644 (file)
 
 namespace JSC {
 
+inline bool BlockDirectory::isFreeListedCell(const void* target) const
+{
+    return m_freeList.contains(bitwise_cast<HeapCell*>(target));
+}
+
+ALWAYS_INLINE void* BlockDirectory::allocate(GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
+{
+    return m_freeList.allocate(
+        [&] () -> HeapCell* {
+            sanitizeStackForVM(heap()->vm());
+            return static_cast<HeapCell*>(allocateSlowCase(deferralContext, failureMode));
+        });
+}
+
 template <typename Functor> inline void BlockDirectory::forEachBlock(const Functor& functor)
 {
     m_live.forEachSetBit(
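
forEachBlock() walks the live-bit vector and hands each MarkedBlock::Handle to the functor. A small hedged usage example:

    // Illustrative: sweep every live block in a directory.
    directory.forEachBlock(
        [&] (MarkedBlock::Handle* handle) {
            handle->sweep(nullptr);
        });
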
index bb04c54..b35a83b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #include "config.h"
 #include "Subspace.h"
 
-#include "AlignedMemoryAllocator.h"
-#include "AllocatorInlines.h"
 #include "BlockDirectoryInlines.h"
 #include "JSCInlines.h"
-#include "LocalAllocatorInlines.h"
 #include "MarkedBlockInlines.h"
 #include "PreventCollectionScope.h"
 #include "SubspaceInlines.h"
-#include "ThreadLocalCacheInlines.h"
 
 namespace JSC {
 
@@ -42,38 +38,40 @@ CompleteSubspace::CompleteSubspace(CString name, Heap& heap, HeapCellType* heapC
     : Subspace(name, heap)
 {
     initialize(heapCellType, alignedMemoryAllocator);
+    for (size_t i = MarkedSpace::numSizeClasses; i--;)
+        m_allocatorForSizeStep[i] = nullptr;
 }
 
 CompleteSubspace::~CompleteSubspace()
 {
 }
 
-Allocator CompleteSubspace::allocatorFor(size_t size, AllocatorForMode mode)
+BlockDirectory* CompleteSubspace::allocatorFor(size_t size, AllocatorForMode mode)
 {
     return allocatorForNonVirtual(size, mode);
 }
 
-void* CompleteSubspace::allocate(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
+void* CompleteSubspace::allocate(size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
 {
-    return allocateNonVirtual(vm, size, deferralContext, failureMode);
+    return allocateNonVirtual(size, deferralContext, failureMode);
 }
 
-void* CompleteSubspace::allocateNonVirtual(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
+void* CompleteSubspace::allocateNonVirtual(size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
 {
-    Allocator allocator = allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);
-    return allocator.tryAllocate(
-        vm, deferralContext, failureMode,
-        [&] () {
-            return allocateSlow(vm, size, deferralContext, failureMode);
-        });
+    void* result;
+    if (BlockDirectory* allocator = allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists))
+        result = allocator->allocate(deferralContext, failureMode);
+    else
+        result = allocateSlow(size, deferralContext, failureMode);
+    return result;
 }
 
-Allocator CompleteSubspace::allocatorForSlow(size_t size)
+BlockDirectory* CompleteSubspace::allocatorForSlow(size_t size)
 {
     size_t index = MarkedSpace::sizeClassToIndex(size);
     size_t sizeClass = MarkedSpace::s_sizeClassForSizeStep[index];
     if (!sizeClass)
-        return Allocator();
+        return nullptr;
     
     // This is written in such a way that it's OK for the JIT threads to end up here if they want
     // to generate code that uses some allocator that hadn't been used yet. Note that a possibly-
@@ -84,7 +82,7 @@ Allocator CompleteSubspace::allocatorForSlow(size_t size)
     // that any "forEachAllocator" traversals will only see this allocator after it's initialized
     // enough: it will have 
     auto locker = holdLock(m_space.directoryLock());
-    if (Allocator allocator = m_allocatorForSizeStep[index])
+    if (BlockDirectory* allocator = m_allocatorForSizeStep[index])
         return allocator;
 
     if (false)
@@ -100,7 +98,7 @@ Allocator CompleteSubspace::allocatorForSlow(size_t size)
         if (MarkedSpace::s_sizeClassForSizeStep[index] != sizeClass)
             break;
 
-        m_allocatorForSizeStep[index] = directory->allocator();
+        m_allocatorForSizeStep[index] = directory;
         
         if (!index--)
             break;
@@ -109,23 +107,23 @@ Allocator CompleteSubspace::allocatorForSlow(size_t size)
     m_alignedMemoryAllocator->registerDirectory(directory);
     WTF::storeStoreFence();
     m_firstDirectory = directory;
-    return directory->allocator();
+    return directory;
 }
 
-void* CompleteSubspace::allocateSlow(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
+void* CompleteSubspace::allocateSlow(size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
 {
-    void* result = tryAllocateSlow(vm, size, deferralContext);
+    void* result = tryAllocateSlow(size, deferralContext);
     if (failureMode == AllocationFailureMode::Assert)
         RELEASE_ASSERT(result);
     return result;
 }
 
-void* CompleteSubspace::tryAllocateSlow(VM& vm, size_t size, GCDeferralContext* deferralContext)
+void* CompleteSubspace::tryAllocateSlow(size_t size, GCDeferralContext* deferralContext)
 {
-    sanitizeStackForVM(&vm);
+    sanitizeStackForVM(m_space.heap()->vm());
     
-    if (Allocator allocator = allocatorFor(size, AllocatorForMode::EnsureAllocator))
-        return allocator.allocate(vm, deferralContext, AllocationFailureMode::ReturnNull);
+    if (BlockDirectory* allocator = allocatorFor(size, AllocatorForMode::EnsureAllocator))
+        return allocator->allocate(deferralContext, AllocationFailureMode::ReturnNull);
     
     if (size <= Options::largeAllocationCutoff()
         && size <= MarkedSpace::largeCutoff) {
@@ -134,15 +132,15 @@ void* CompleteSubspace::tryAllocateSlow(VM& vm, size_t size, GCDeferralContext*
         RELEASE_ASSERT_NOT_REACHED();
     }
     
-    vm.heap.collectIfNecessaryOrDefer(deferralContext);
+    m_space.heap()->collectIfNecessaryOrDefer(deferralContext);
     
     size = WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(size);
-    LargeAllocation* allocation = LargeAllocation::tryCreate(vm.heap, size, this);
+    LargeAllocation* allocation = LargeAllocation::tryCreate(*m_space.m_heap, size, this);
     if (!allocation)
         return nullptr;
     
     m_space.m_largeAllocations.append(allocation);
-    vm.heap.didAllocate(size);
+    m_space.m_heap->didAllocate(size);
     m_space.m_capacity += size;
     
     m_largeAllocations.append(allocation);
index e0c30d4..475d4fa 100644 (file)
@@ -39,31 +39,31 @@ public:
     
     // FIXME: Currently subspaces speak of BlockDirectories as "allocators", but that's temporary.
     // https://bugs.webkit.org/show_bug.cgi?id=181559
-    Allocator allocatorFor(size_t, AllocatorForMode) override;
-    Allocator allocatorForNonVirtual(size_t, AllocatorForMode);
+    BlockDirectory* allocatorFor(size_t, AllocatorForMode) override;
+    BlockDirectory* allocatorForNonVirtual(size_t, AllocatorForMode);
     
-    void* allocate(VM&, size_t, GCDeferralContext*, AllocationFailureMode) override;
-    JS_EXPORT_PRIVATE void* allocateNonVirtual(VM&, size_t, GCDeferralContext*, AllocationFailureMode);
+    void* allocate(size_t, GCDeferralContext*, AllocationFailureMode) override;
+    JS_EXPORT_PRIVATE void* allocateNonVirtual(size_t, GCDeferralContext*, AllocationFailureMode);
     
     static ptrdiff_t offsetOfAllocatorForSizeStep() { return OBJECT_OFFSETOF(CompleteSubspace, m_allocatorForSizeStep); }
     
-    Allocator* allocatorForSizeStep() { return &m_allocatorForSizeStep[0]; }
+    BlockDirectory** allocatorForSizeStep() { return &m_allocatorForSizeStep[0]; }
 
 private:
-    Allocator allocatorForSlow(size_t);
+    BlockDirectory* allocatorForSlow(size_t);
     
     // These slow paths are concerned with large allocations and allocator creation.
-    void* allocateSlow(VM&, size_t, GCDeferralContext*, AllocationFailureMode);
-    void* tryAllocateSlow(VM&, size_t, GCDeferralContext*);
+    void* allocateSlow(size_t, GCDeferralContext*, AllocationFailureMode);
+    void* tryAllocateSlow(size_t, GCDeferralContext*);
     
-    std::array<Allocator, MarkedSpace::numSizeClasses> m_allocatorForSizeStep;
+    std::array<BlockDirectory*, MarkedSpace::numSizeClasses> m_allocatorForSizeStep;
     Vector<std::unique_ptr<BlockDirectory>> m_directories;
 };
 
-ALWAYS_INLINE Allocator CompleteSubspace::allocatorForNonVirtual(size_t size, AllocatorForMode mode)
+ALWAYS_INLINE BlockDirectory* CompleteSubspace::allocatorForNonVirtual(size_t size, AllocatorForMode mode)
 {
     if (size <= MarkedSpace::largeCutoff) {
-        Allocator result = m_allocatorForSizeStep[MarkedSpace::sizeClassToIndex(size)];
+        BlockDirectory* result = m_allocatorForSizeStep[MarkedSpace::sizeClassToIndex(size)];
         switch (mode) {
         case AllocatorForMode::MustAlreadyHaveAllocator:
             RELEASE_ASSERT(result);
@@ -78,7 +78,7 @@ ALWAYS_INLINE Allocator CompleteSubspace::allocatorForNonVirtual(size_t size, Al
         return result;
     }
     RELEASE_ASSERT(mode != AllocatorForMode::MustAlreadyHaveAllocator);
-    return Allocator();
+    return nullptr;
 }
 
 } // namespace JSC
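
allocatorForNonVirtual() above is a table lookup keyed by size class. A hedged sketch of the dispatch it performs for a small allocation (`bytes` and `subspace` are hypothetical locals):

    // sizeClassToIndex maps a byte size to a size-step slot; the table holds one
    // BlockDirectory* per slot (null until allocatorForSlow populates it).
    size_t index = MarkedSpace::sizeClassToIndex(bytes);
    BlockDirectory* directory = subspace.allocatorForSizeStep()[index]; // may be null
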
index e3ecfb8..7a8edd4 100644 (file)
@@ -57,6 +57,8 @@ struct FreeCell {
 };
 
 class FreeList {
+    WTF_MAKE_NONCOPYABLE(FreeList);
+    
 public:
     FreeList(unsigned cellSize);
     ~FreeList();
index c60dd0a..95e710f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -29,12 +29,10 @@ namespace JSC {
 
 class Heap;
 class BlockDirectory;
-class LocalAllocator;
 
 class GCDeferralContext {
     friend class Heap;
     friend class BlockDirectory;
-    friend class LocalAllocator;
 public:
     inline GCDeferralContext(Heap&);
     inline ~GCDeferralContext();
index 988be1d..76b5c68 100644 (file)
@@ -68,7 +68,6 @@
 #include "StopIfNecessaryTimer.h"
 #include "SweepingScope.h"
 #include "SynchronousStopTheWorldMutatorScheduler.h"
-#include "ThreadLocalCacheLayout.h"
 #include "TypeProfiler.h"
 #include "TypeProfilerLog.h"
 #include "UnlinkedCodeBlock.h"
@@ -313,7 +312,6 @@ Heap::Heap(VM* vm, HeapType heapType)
     , m_helperClient(&heapHelperPool())
     , m_threadLock(Box<Lock>::create())
     , m_threadCondition(AutomaticThreadCondition::create())
-    , m_threadLocalCacheLayout(std::make_unique<ThreadLocalCacheLayout>())
 {
     m_worldState.store(0);
 
@@ -448,7 +446,7 @@ void Heap::lastChanceToFinalize()
         dataLog("5 ");
     
     m_arrayBuffers.lastChanceToFinalize();
-    m_objectSpace.stopAllocatingForGood();
+    m_objectSpace.stopAllocating();
     m_objectSpace.lastChanceToFinalize();
     releaseDelayedReleasedObjects();
 
index a08471b..58e1a63 100644 (file)
@@ -84,7 +84,6 @@ class SlotVisitor;
 class SpaceTimeMutatorScheduler;
 class StopIfNecessaryTimer;
 class SweepingScope;
-class ThreadLocalCacheLayout;
 class VM;
 class WeakGCMapBase;
 struct CurrentThreadState;
@@ -373,8 +372,6 @@ public:
 
     template<typename Func>
     void forEachSlotVisitor(const Func&);
-    
-    ThreadLocalCacheLayout& threadLocalCacheLayout() { return *m_threadLocalCacheLayout; }
 
 private:
     friend class AllocatingScope;
@@ -717,8 +714,6 @@ private:
     
     CurrentThreadState* m_currentThreadState { nullptr };
     WTF::Thread* m_currentThread { nullptr }; // It's OK if this becomes a dangling pointer.
-    
-    std::unique_ptr<ThreadLocalCacheLayout> m_threadLocalCacheLayout;
 };
 
 } // namespace JSC
index 16c9cbb..38695f2 100644 (file)
 
 #pragma once
 
-#include "MarkedBlock.h"
 #include <wtf/Bitmap.h>
 #include <wtf/ConcurrentVector.h>
 #include <wtf/FastBitVector.h>
-#include <wtf/SentinelLinkedList.h>
-#include <wtf/SharedTask.h>
 
 namespace JSC {
 
index 8174b54..194936f 100644 (file)
 #include "config.h"
 #include "IsoSubspace.h"
 
-#include "AllocatorInlines.h"
 #include "BlockDirectoryInlines.h"
 #include "IsoAlignedMemoryAllocator.h"
-#include "LocalAllocatorInlines.h"
-#include "ThreadLocalCacheInlines.h"
 
 namespace JSC {
 
@@ -38,7 +35,6 @@ IsoSubspace::IsoSubspace(CString name, Heap& heap, HeapCellType* heapCellType, s
     : Subspace(name, heap)
     , m_size(size)
     , m_directory(&heap, WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(size))
-    , m_allocator(m_directory.allocator())
     , m_isoAlignedMemoryAllocator(std::make_unique<IsoAlignedMemoryAllocator>())
 {
     initialize(heapCellType, m_isoAlignedMemoryAllocator.get());
@@ -54,20 +50,20 @@ IsoSubspace::~IsoSubspace()
 {
 }
 
-Allocator IsoSubspace::allocatorFor(size_t size, AllocatorForMode mode)
+BlockDirectory* IsoSubspace::allocatorFor(size_t size, AllocatorForMode mode)
 {
     return allocatorForNonVirtual(size, mode);
 }
 
-void* IsoSubspace::allocate(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
+void* IsoSubspace::allocate(size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
 {
-    return allocateNonVirtual(vm, size, deferralContext, failureMode);
+    return allocateNonVirtual(size, deferralContext, failureMode);
 }
 
-void* IsoSubspace::allocateNonVirtual(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
+void* IsoSubspace::allocateNonVirtual(size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
 {
     RELEASE_ASSERT(size == this->size());
-    void* result = m_allocator.allocate(vm, deferralContext, failureMode);
+    void* result = m_directory.allocate(deferralContext, failureMode);
     return result;
 }
 
index bafe99d..374f31d 100644 (file)
@@ -41,11 +41,11 @@ public:
 
     size_t size() const { return m_size; }
 
-    Allocator allocatorFor(size_t, AllocatorForMode) override;
-    Allocator allocatorForNonVirtual(size_t, AllocatorForMode);
+    BlockDirectory* allocatorFor(size_t, AllocatorForMode) override;
+    BlockDirectory* allocatorForNonVirtual(size_t, AllocatorForMode);
 
-    void* allocate(VM&, size_t, GCDeferralContext*, AllocationFailureMode) override;
-    JS_EXPORT_PRIVATE void* allocateNonVirtual(VM&, size_t, GCDeferralContext*, AllocationFailureMode);
+    void* allocate(size_t, GCDeferralContext*, AllocationFailureMode) override;
+    JS_EXPORT_PRIVATE void* allocateNonVirtual(size_t, GCDeferralContext*, AllocationFailureMode);
 
 private:
     friend class IsoCellSet;
@@ -56,15 +56,14 @@ private:
     
     size_t m_size;
     BlockDirectory m_directory;
-    Allocator m_allocator;
     std::unique_ptr<IsoAlignedMemoryAllocator> m_isoAlignedMemoryAllocator;
     SentinelLinkedList<IsoCellSet, BasicRawSentinelNode<IsoCellSet>> m_cellSets;
 };
 
-inline Allocator IsoSubspace::allocatorForNonVirtual(size_t size, AllocatorForMode)
+inline BlockDirectory* IsoSubspace::allocatorForNonVirtual(size_t size, AllocatorForMode)
 {
     RELEASE_ASSERT(size == this->size());
-    return m_allocator;
+    return &m_directory;
 }
 
 #define ISO_SUBSPACE_INIT(heap, heapCellType, type) ("Isolated " #type " Space", (heap), (heapCellType), sizeof(type))
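
ISO_SUBSPACE_INIT packs the constructor arguments, including a stringified space name derived from the type. A hedged example of the intended direct-initialization use:

    // JSFoo and fooHeapCellType are hypothetical stand-ins for a real cell
    // type and its HeapCellType; the macro supplies sizeof(JSFoo) itself.
    IsoSubspace fooSpace ISO_SUBSPACE_INIT(heap, fooHeapCellType, JSFoo);
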
diff --git a/Source/JavaScriptCore/heap/LocalAllocator.cpp b/Source/JavaScriptCore/heap/LocalAllocator.cpp
deleted file mode 100644 (file)
index 6e38f32..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "LocalAllocator.h"
-
-#include "AllocatingScope.h"
-#include "LocalAllocatorInlines.h"
-#include "Options.h"
-
-namespace JSC {
-
-LocalAllocator::LocalAllocator(BlockDirectory* directory)
-    : m_directory(directory)
-    , m_cellSize(directory->m_cellSize)
-    , m_freeList(m_cellSize)
-{
-    auto locker = holdLock(directory->m_localAllocatorsLock);
-    directory->m_localAllocators.append(this);
-}
-
-LocalAllocator::LocalAllocator(LocalAllocator&& other)
-    : m_directory(other.m_directory)
-    , m_cellSize(other.m_cellSize)
-    , m_freeList(WTFMove(other.m_freeList))
-    , m_currentBlock(other.m_currentBlock)
-    , m_lastActiveBlock(other.m_lastActiveBlock)
-{
-    other.reset();
-    if (other.isOnList()) {
-        auto locker = holdLock(m_directory->m_localAllocatorsLock);
-        other.remove();
-        m_directory->m_localAllocators.append(this);
-    }
-}
-
-void LocalAllocator::reset()
-{
-    m_freeList.clear();
-    m_currentBlock = nullptr;
-    m_lastActiveBlock = nullptr;
-}
-
-LocalAllocator::~LocalAllocator()
-{
-    if (isOnList()) {
-        auto locker = holdLock(m_directory->m_localAllocatorsLock);
-        remove();
-    }
-    
-    // Assert that this allocator isn't holding onto any memory. This is a valid assertion for the
-    // following two use cases:
-    // 
-    // - Immortal TLC. Those destruct after the heap is done destructing, so they should not have
-    //   any state left in them.
-    //
-    // - TLC owned by an object. Such a TLC gets destroyed after a GC flip during which we proved
-    //   that it is not reachable. Therefore, the TLC should still be in a fully reset state at the
-    //   time of destruction because for it to get into any other state, someone must have allocated
-    //   in it (which is impossible because it's supposedly unreachable).
-    bool ok = true;
-    if (!m_freeList.allocationWillFail()) {
-        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-empty free-list.\n");
-        ok = false;
-    }
-    if (m_currentBlock) {
-        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-null current block.\n");
-        ok = false;
-    }
-    if (m_lastActiveBlock) {
-        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-null last active block.\n");
-        ok = false;
-    }
-    RELEASE_ASSERT(ok);
-}
-
-void LocalAllocator::stopAllocating()
-{
-    ASSERT(!m_lastActiveBlock);
-    if (!m_currentBlock) {
-        ASSERT(m_freeList.allocationWillFail());
-        return;
-    }
-    
-    m_currentBlock->stopAllocating(m_freeList);
-    m_lastActiveBlock = m_currentBlock;
-    m_currentBlock = nullptr;
-    m_freeList.clear();
-}
-
-void LocalAllocator::resumeAllocating()
-{
-    if (!m_lastActiveBlock)
-        return;
-
-    m_lastActiveBlock->resumeAllocating(m_freeList);
-    m_currentBlock = m_lastActiveBlock;
-    m_lastActiveBlock = nullptr;
-}
-
-void LocalAllocator::prepareForAllocation()
-{
-    reset();
-}
-
-void LocalAllocator::stopAllocatingForGood()
-{
-    stopAllocating();
-    reset();
-}
-
-void* LocalAllocator::allocateSlowCase(GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
-{
-    SuperSamplerScope superSamplerScope(false);
-    Heap& heap = *m_directory->m_heap;
-    ASSERT(heap.vm()->currentThreadIsHoldingAPILock());
-    doTestCollectionsIfNeeded(deferralContext);
-
-    ASSERT(!m_directory->markedSpace().isIterating());
-    heap.didAllocate(m_freeList.originalSize());
-    
-    didConsumeFreeList();
-    
-    AllocatingScope helpingHeap(heap);
-
-    heap.collectIfNecessaryOrDefer(deferralContext);
-    
-    // Goofy corner case: the GC called a callback and now this directory has a currentBlock. This only
-    // happens when running WebKit tests, which inject a callback into the GC's finalization.
-    if (UNLIKELY(m_currentBlock))
-        return allocate(deferralContext, failureMode);
-    
-    void* result = tryAllocateWithoutCollecting();
-    
-    if (LIKELY(result != 0))
-        return result;
-    
-    MarkedBlock::Handle* block = m_directory->tryAllocateBlock();
-    if (!block) {
-        if (failureMode == AllocationFailureMode::Assert)
-            RELEASE_ASSERT_NOT_REACHED();
-        else
-            return nullptr;
-    }
-    m_directory->addBlock(block);
-    result = allocateIn(block);
-    ASSERT(result);
-    return result;
-}
-
-void LocalAllocator::didConsumeFreeList()
-{
-    if (m_currentBlock)
-        m_currentBlock->didConsumeFreeList();
-    
-    m_freeList.clear();
-    m_currentBlock = nullptr;
-}
-
-void* LocalAllocator::tryAllocateWithoutCollecting()
-{
-    // FIXME: If we wanted this to be used for real multi-threaded allocations then we would have to
-    // come up with some concurrency protocol here. That protocol would need to be able to handle:
-    //
-    // - The basic case of multiple LocalAllocators trying to do an allocationCursor search on the
-    //   same bitvector. That probably needs the bitvector lock at least.
-    //
-    // - The harder case of some LocalAllocator triggering a steal from a different BlockDirectory
-    //   via a search in the AlignedMemoryAllocator's list. Who knows what locks that needs.
-    // 
-    // One way to make this work is to have a single per-Heap lock that protects all mutator lock
-    // allocation slow paths. That would probably be scalable enough for years. It would certainly be
-    // for using TLC allocation from JIT threads.
-    // https://bugs.webkit.org/show_bug.cgi?id=181635
-    
-    SuperSamplerScope superSamplerScope(false);
-    
-    ASSERT(!m_currentBlock);
-    ASSERT(m_freeList.allocationWillFail());
-    
-    for (;;) {
-        MarkedBlock::Handle* block = m_directory->findBlockForAllocation();
-        if (!block)
-            break;
-
-        if (void* result = tryAllocateIn(block))
-            return result;
-    }
-    
-    if (Options::stealEmptyBlocksFromOtherAllocators()
-        && (Options::tradeDestructorBlocks() || !m_directory->needsDestruction())) {
-        if (MarkedBlock::Handle* block = m_directory->m_subspace->findEmptyBlockToSteal()) {
-            RELEASE_ASSERT(block->alignedMemoryAllocator() == m_directory->m_subspace->alignedMemoryAllocator());
-            
-            block->sweep(nullptr);
-            
-            // It's good that this clears canAllocateButNotEmpty as well as all other bits,
-            // because there is a remote chance that a block may have both canAllocateButNotEmpty
-            // and empty set at the same time.
-            block->removeFromDirectory();
-            m_directory->addBlock(block);
-            return allocateIn(block);
-        }
-    }
-    
-    return nullptr;
-}
-
-void* LocalAllocator::allocateIn(MarkedBlock::Handle* block)
-{
-    void* result = tryAllocateIn(block);
-    RELEASE_ASSERT(result);
-    return result;
-}
-
-void* LocalAllocator::tryAllocateIn(MarkedBlock::Handle* block)
-{
-    ASSERT(block);
-    ASSERT(!block->isFreeListed());
-    
-    block->sweep(&m_freeList);
-    
-    // It's possible to stumble on a completely full block. Marking tries to retire these, but
-    // that algorithm is racy and may forget to do it sometimes.
-    if (m_freeList.allocationWillFail()) {
-        ASSERT(block->isFreeListed());
-        block->unsweepWithNoNewlyAllocated();
-        ASSERT(!block->isFreeListed());
-        ASSERT(!m_directory->isEmpty(NoLockingNecessary, block));
-        ASSERT(!m_directory->isCanAllocateButNotEmpty(NoLockingNecessary, block));
-        return nullptr;
-    }
-    
-    m_currentBlock = block;
-    
-    void* result = m_freeList.allocate(
-        [] () -> HeapCell* {
-            RELEASE_ASSERT_NOT_REACHED();
-            return nullptr;
-        });
-    m_directory->setIsEden(NoLockingNecessary, m_currentBlock, true);
-    m_directory->markedSpace().didAllocateInBlock(m_currentBlock);
-    return result;
-}
-
-void LocalAllocator::doTestCollectionsIfNeeded(GCDeferralContext* deferralContext)
-{
-    if (!Options::slowPathAllocsBetweenGCs())
-        return;
-
-    static unsigned allocationCount = 0;
-    if (!allocationCount) {
-        if (!m_directory->m_heap->isDeferred()) {
-            if (deferralContext)
-                deferralContext->m_shouldGC = true;
-            else
-                m_directory->m_heap->collectNow(Sync, CollectionScope::Full);
-        }
-    }
-    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
-        allocationCount = 0;
-}
-
-bool LocalAllocator::isFreeListedCell(const void* target) const
-{
-    // This abomination exists to detect when an object is in the dead-but-not-destructed state.
-    // Therefore, it's not even clear that this needs to do anything beyond returning "false", since
-    // if we know that the block owning the object is free-listed, then it's impossible for any
-    // objects to be in the dead-but-not-destructed state.
-    // FIXME: Get rid of this abomination. https://bugs.webkit.org/show_bug.cgi?id=181655
-    return m_freeList.contains(bitwise_cast<HeapCell*>(target));
-}
-
-} // namespace JSC
-
diff --git a/Source/JavaScriptCore/heap/LocalAllocator.h b/Source/JavaScriptCore/heap/LocalAllocator.h
deleted file mode 100644 (file)
index 37db973..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include "FreeList.h"
-#include "MarkedBlock.h"
-#include <wtf/Noncopyable.h>
-
-namespace JSC {
-
-class BlockDirectory;
-class GCDeferralContext;
-
-class LocalAllocator : public BasicRawSentinelNode<LocalAllocator> {
-    WTF_MAKE_NONCOPYABLE(LocalAllocator);
-    
-public:
-    LocalAllocator(BlockDirectory*);
-    LocalAllocator(LocalAllocator&&);
-    ~LocalAllocator();
-    
-    void* allocate(GCDeferralContext*, AllocationFailureMode);
-
-    void stopAllocating();
-    void prepareForAllocation();
-    void resumeAllocating();
-    void stopAllocatingForGood();
-    
-    static ptrdiff_t offsetOfFreeList();
-    static ptrdiff_t offsetOfCellSize();
-    
-    bool isFreeListedCell(const void*) const;
-
-private:
-    void reset();
-    JS_EXPORT_PRIVATE void* allocateSlowCase(GCDeferralContext*, AllocationFailureMode failureMode);
-    void didConsumeFreeList();
-    void* tryAllocateWithoutCollecting();
-    void* tryAllocateIn(MarkedBlock::Handle*);
-    void* allocateIn(MarkedBlock::Handle*);
-    ALWAYS_INLINE void doTestCollectionsIfNeeded(GCDeferralContext*);
-
-    BlockDirectory* m_directory;
-    unsigned m_cellSize;
-    FreeList m_freeList;
-    MarkedBlock::Handle* m_currentBlock { nullptr };
-    MarkedBlock::Handle* m_lastActiveBlock { nullptr };
-};
-
-inline ptrdiff_t LocalAllocator::offsetOfFreeList()
-{
-    return OBJECT_OFFSETOF(LocalAllocator, m_freeList);
-}
-
-inline ptrdiff_t LocalAllocator::offsetOfCellSize()
-{
-    return OBJECT_OFFSETOF(LocalAllocator, m_cellSize);
-}
-
-} // namespace JSC
-
diff --git a/Source/JavaScriptCore/heap/LocalAllocatorInlines.h b/Source/JavaScriptCore/heap/LocalAllocatorInlines.h
deleted file mode 100644 (file)
index c97b69e..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include "LocalAllocator.h"
-
-namespace JSC {
-
-ALWAYS_INLINE void* LocalAllocator::allocate(GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
-{
-    return m_freeList.allocate(
-        [&] () -> HeapCell* {
-            sanitizeStackForVM(m_directory->heap()->vm());
-            return static_cast<HeapCell*>(allocateSlowCase(deferralContext, failureMode));
-        });
-}
-
-} // namespace JSC
-
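
Taken together, the three files deleted above removed the per-thread allocation fast path. Its shape, end to end (names as in the deleted code; summary only):

    // Allocator::allocate(vm, ctx, mode)
    //   -> ThreadLocalCache::allocator(vm, m_offset)   // TLS lookup + bounds check
    //   -> LocalAllocator::allocate(ctx, mode)          // inline FreeList pop
    //   -> LocalAllocator::allocateSlowCase(ctx, mode)  // on free-list exhaustion
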
index dc7f579..34a40b5 100644 (file)
@@ -399,15 +399,8 @@ void MarkedBlock::Handle::sweep(FreeList* freeList)
     if (sweepMode == SweepOnly && !needsDestruction)
         return;
 
-    if (m_isFreeListed) {
-        dataLog("FATAL: ", RawPointer(this), "->sweep: block is free-listed.\n");
-        RELEASE_ASSERT_NOT_REACHED();
-    }
-    
-    if (isAllocated()) {
-        dataLog("FATAL: ", RawPointer(this), "->sweep: block is allocated.\n");
-        RELEASE_ASSERT_NOT_REACHED();
-    }
+    RELEASE_ASSERT(!m_isFreeListed);
+    RELEASE_ASSERT(!isAllocated());
     
     if (space()->isMarking())
         block().m_lock.lock();
index 94aafd8..56da3be 100644 (file)
@@ -302,16 +302,6 @@ void MarkedSpace::stopAllocating()
         });
 }
 
-void MarkedSpace::stopAllocatingForGood()
-{
-    ASSERT(!isIterating());
-    forEachDirectory(
-        [&] (BlockDirectory& directory) -> IterationStatus {
-            directory.stopAllocatingForGood();
-            return IterationStatus::Continue;
-        });
-}
-
 void MarkedSpace::prepareForConservativeScan()
 {
     m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
index 9c92cd0..2d5be24 100644 (file)
@@ -94,7 +94,7 @@ public:
     
     Heap* heap() const { return m_heap; }
     
-    void lastChanceToFinalize(); // Must call stopAllocatingForGood first.
+    void lastChanceToFinalize(); // You must call stopAllocating before you call this.
     void freeMemory();
 
     static size_t optimalSizeFor(size_t);
@@ -111,7 +111,6 @@ public:
     void didFinishIterating();
 
     void stopAllocating();
-    void stopAllocatingForGood();
     void resumeAllocating(); // If we just stopped allocation but we didn't do a collection, we need to resume allocation.
     
     void prepareForMarking();
index 2d8b698..13c48fc 100644 (file)
@@ -37,7 +37,6 @@
 #include "JSObject.h"
 #include "JSString.h"
 #include "JSCInlines.h"
-#include "MarkingConstraintSolver.h"
 #include "SlotVisitorInlines.h"
 #include "StopIfNecessaryTimer.h"
 #include "SuperSampler.h"
index 2a76d7d..a4e9cc6 100644 (file)
@@ -31,7 +31,6 @@
 #include "VisitRaceKey.h"
 #include <wtf/Forward.h>
 #include <wtf/MonotonicTime.h>
-#include <wtf/SharedTask.h>
 #include <wtf/text/CString.h>
 
 namespace JSC {
@@ -42,7 +41,6 @@ class Heap;
 class HeapCell;
 class HeapSnapshotBuilder;
 class MarkedBlock;
-class MarkingConstraint;
 class MarkingConstraintSolver;
 class UnconditionalFinalizer;
 template<typename T> class Weak;
index ba3a14c..17cc4fe 100644 (file)
@@ -27,7 +27,6 @@
 
 #include "AllocationFailureMode.h"
 #include "AllocatorForMode.h"
-#include "Allocator.h"
 #include "MarkedBlock.h"
 #include "MarkedSpace.h"
 #include <wtf/text/CString.h>
@@ -57,8 +56,8 @@ public:
     void finishSweep(MarkedBlock::Handle&, FreeList*);
     void destroy(VM&, JSCell*);
 
-    virtual Allocator allocatorFor(size_t, AllocatorForMode) = 0;
-    virtual void* allocate(VM&, size_t, GCDeferralContext*, AllocationFailureMode) = 0;
+    virtual BlockDirectory* allocatorFor(size_t, AllocatorForMode) = 0;
+    virtual void* allocate(size_t, GCDeferralContext*, AllocationFailureMode) = 0;
     
     void prepareForAllocation();
     
diff --git a/Source/JavaScriptCore/heap/ThreadLocalCache.cpp b/Source/JavaScriptCore/heap/ThreadLocalCache.cpp
deleted file mode 100644 (file)
index d2ebbb7..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "ThreadLocalCache.h"
-
-#include "ThreadLocalCacheInlines.h"
-#include "ThreadLocalCacheLayout.h"
-#include <wtf/StdLibExtras.h>
-
-namespace JSC {
-
-RefPtr<ThreadLocalCache> ThreadLocalCache::create(Heap& heap)
-{
-    return adoptRef(new ThreadLocalCache(heap));
-}
-
-ThreadLocalCache::ThreadLocalCache(Heap& heap)
-    : m_heap(heap)
-{
-    m_data = allocateData();
-}
-
-ThreadLocalCache::~ThreadLocalCache()
-{
-    destroyData(m_data);
-}
-
-ThreadLocalCache::Data* ThreadLocalCache::allocateData()
-{
-    size_t oldSize = m_data ? m_data->size : 0;
-    ThreadLocalCacheLayout::Snapshot layout = m_heap.threadLocalCacheLayout().snapshot();
-    
-    Data* result = static_cast<Data*>(fastMalloc(OBJECT_OFFSETOF(Data, allocator) + layout.size));
-    result->size = layout.size;
-    result->cache = this;
-    for (size_t offset = 0; offset < oldSize; offset += sizeof(LocalAllocator))
-        new (&allocator(*result, offset)) LocalAllocator(WTFMove(allocator(*m_data, offset)));
-    for (size_t offset = oldSize; offset < layout.size; offset += sizeof(LocalAllocator))
-        new (&allocator(*result, offset)) LocalAllocator(layout.directories[offset / sizeof(LocalAllocator)]);
-    return result;
-}
-
-void ThreadLocalCache::destroyData(Data* data)
-{
-    for (size_t offset = 0; offset < data->size; offset += sizeof(LocalAllocator))
-        allocator(*data, offset).~LocalAllocator();
-    fastFree(data);
-}
-
-void ThreadLocalCache::installSlow(VM& vm)
-{
-#if ENABLE(FAST_TLS_JIT)
-    static std::once_flag onceFlag;
-    std::call_once(
-        onceFlag,
-        [] () {
-            pthread_key_init_np(tlsKey, destructor);
-        });
-#endif
-    
-    ref();
-    
-    if (RefPtr<ThreadLocalCache> oldCache = get(vm))
-        oldCache->deref();
-    
-    installData(vm, m_data);
-}
-
-void ThreadLocalCache::installData(VM& vm, Data* data)
-{
-#if ENABLE(FAST_TLS_JIT)
-    UNUSED_PARAM(vm);
-    _pthread_setspecific_direct(tlsKey, data);
-#else
-    vm.threadLocalCacheData = data;
-#endif
-}
-
-LocalAllocator& ThreadLocalCache::allocatorSlow(VM& vm, size_t offset)
-{
-    Data* oldData = m_data;
-    m_data = allocateData();
-    destroyData(oldData);
-    installData(vm, m_data);
-    RELEASE_ASSERT(offset < m_data->size);
-    return allocator(*m_data, offset);
-}
-
-void ThreadLocalCache::destructor(void* arg)
-{
-    if (!arg)
-        return;
-    Data* data = static_cast<Data*>(arg);
-    data->cache->deref();
-}
-
-} // namespace JSC
-
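
The deleted cache managed its own lifetime through the TLS destructor. The reference-counting flow, as implemented above (summary only):

    // install():   ref() the incoming cache, deref() the displaced one.
    // thread exit: pthread TLS destructor -> ThreadLocalCache::destructor(data)
    //              -> data->cache->deref()
    // last deref:  ~ThreadLocalCache() -> destroyData(m_data)
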
diff --git a/Source/JavaScriptCore/heap/ThreadLocalCache.h b/Source/JavaScriptCore/heap/ThreadLocalCache.h
deleted file mode 100644 (file)
index f79e942..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include <wtf/FastMalloc.h>
-#include <wtf/FastTLS.h>
-#include <wtf/ThreadSafeRefCounted.h>
-
-namespace JSC {
-
-class Heap;
-class VM;
-
-class ThreadLocalCache : public ThreadSafeRefCounted<ThreadLocalCache> {
-    WTF_MAKE_NONCOPYABLE(ThreadLocalCache);
-    WTF_MAKE_FAST_ALLOCATED;
-    
-public:
-    static RefPtr<ThreadLocalCache> create(Heap&);
-    
-    JS_EXPORT_PRIVATE ~ThreadLocalCache();
-
-    static RefPtr<ThreadLocalCache> get(VM&);
-    
-    // This is designed to be fast enough that you could even call it before every allocation, by
-    // optimizing for the case that you're just installing the cache that is already installed. This
-    // assumes a relatively small number of caches or low chance of actual context switch combined
-    // with possibly high rate of "I may have context switched" sites that call this out of paranoia.
-    void install(VM&);
-    
-    static LocalAllocator& allocator(VM&, size_t offset);
-    
-    template<typename SuccessFunc, typename FailureFunc>
-    static void tryGetAllocator(VM&, size_t offset, const SuccessFunc&, const FailureFunc&);
-    
-    static ptrdiff_t offsetOfSizeInData() { return OBJECT_OFFSETOF(Data, size); }
-    static ptrdiff_t offsetOfFirstAllocatorInData() { return OBJECT_OFFSETOF(Data, allocator); }
-    
-private:
-    friend class VM;
-    
-    ThreadLocalCache(Heap&);
-    
-    struct Data {
-        size_t size;
-        ThreadLocalCache* cache;
-        LocalAllocator allocator[1];
-    };
-
-    static Data* getImpl(VM&);
-    
-    Data* allocateData();
-    void destroyData(Data*);
-    static LocalAllocator& allocator(Data& data, size_t offset);
-
-    void installSlow(VM&);
-    static void installData(VM&, Data*);
-    
-    LocalAllocator& allocatorSlow(VM&, size_t offset);
-    
-    static void destructor(void*);
-    
-    // When installed, we ref() the cache. Uninstalling deref()s the cache. TLS destruction deref()s
-    // the cache. When the cache is destructed, it needs to return all of its state to the GC. I
-    // think that just means stopAllocating(), but with some seriously heavy caveats having to do
-    // with when it's valid to make that call. Alternatively, we could RELEASE_ASSERT that the cache
-    // is empty when we destruct it. This is correct for caches that are kept live by GC objects and
-    // it's correct for immortal caches.
-    Heap& m_heap;
-    Data* m_data { nullptr };
-
-#if ENABLE(FAST_TLS_JIT)
-    static const pthread_key_t tlsKey = WTF_GC_TLC_KEY;
-#endif
-};
-
-} // namespace JSC
-
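
The lifecycle comment in the header above is the heart of the deleted design: installing a cache refs it, and the TLS destructor (ThreadLocalCache::destructor, removed further up) derefs it when the thread dies. A minimal sketch of that protocol follows; getImpl and installData are the real helpers declared above, but the body of installSlow is reconstructed here purely for illustration and is not the patch's code:

    // Illustrative sketch of the install/uninstall ref protocol; the body is
    // an assumption, not the reverted implementation.
    void ThreadLocalCache::installSlow(VM& vm)
    {
        ref();                            // the TLS slot keeps this cache alive
        if (Data* old = getImpl(vm))      // displacing an installed cache...
            old->cache->deref();          // ...drops the ref its slot held
        installData(vm, m_data);          // pthread TLS or vm.threadLocalCacheData
    }
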
diff --git a/Source/JavaScriptCore/heap/ThreadLocalCacheInlines.h b/Source/JavaScriptCore/heap/ThreadLocalCacheInlines.h
deleted file mode 100644
index 9fa5244..0000000
--- a/Source/JavaScriptCore/heap/ThreadLocalCacheInlines.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include "ThreadLocalCache.h"
-
-namespace JSC {
-
-inline ThreadLocalCache::Data* ThreadLocalCache::getImpl(VM& vm)
-{
-#if ENABLE(FAST_TLS_JIT)
-    UNUSED_PARAM(vm);
-    return static_cast<Data*>(_pthread_getspecific_direct(tlsKey));
-#else
-    return vm.threadLocalCacheData;
-#endif
-}
-
-inline RefPtr<ThreadLocalCache> ThreadLocalCache::get(VM& vm)
-{
-    ThreadLocalCache::Data* data = getImpl(vm);
-    if (LIKELY(data))
-        return data->cache;
-    return nullptr;
-}
-
-inline void ThreadLocalCache::install(VM& vm)
-{
-    if (getImpl(vm) == m_data)
-        return;
-    installSlow(vm);
-}
-
-inline LocalAllocator& ThreadLocalCache::allocator(VM& vm, size_t offset)
-{
-    ThreadLocalCache::Data* data = getImpl(vm);
-    if (LIKELY(offset < data->size))
-        return allocator(*data, offset);
-    return data->cache->allocatorSlow(vm, offset);
-}
-
-template<typename SuccessFunc, typename FailureFunc>
-void ThreadLocalCache::tryGetAllocator(VM& vm, size_t offset, const SuccessFunc& successFunc, const FailureFunc& failureFunc)
-{
-    ThreadLocalCache::Data* data = getImpl(vm);
-    if (LIKELY(offset < data->size))
-        successFunc(allocator(*data, offset));
-    else
-        failureFunc();
-}
-
-inline LocalAllocator& ThreadLocalCache::allocator(Data& data, size_t offset)
-{
-    return *bitwise_cast<LocalAllocator*>(bitwise_cast<char*>(&data.allocator[0]) + offset);
-}
-
-} // namespace JSC
-
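
The inlines above are the whole fast path: one TLS load (getImpl) plus one bounds check against Data::size. The tryGetAllocator template exists so callers can act on that check without an extra lookup; a hypothetical call site (allocateFromTLC and allocateSlowCase are placeholder names, not API from the patch):

    // Hypothetical caller: fast path when this thread's Data already covers
    // the directory's offset, slow path otherwise.
    ThreadLocalCache::tryGetAllocator(vm, directoryOffset,
        [&] (LocalAllocator& allocator) { result = allocateFromTLC(allocator); },
        [&] { result = allocateSlowCase(vm, directoryOffset); });
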
diff --git a/Source/JavaScriptCore/heap/ThreadLocalCacheLayout.cpp b/Source/JavaScriptCore/heap/ThreadLocalCacheLayout.cpp
deleted file mode 100644
index c1ac87f..0000000
--- a/Source/JavaScriptCore/heap/ThreadLocalCacheLayout.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "ThreadLocalCacheLayout.h"
-
-#include "BlockDirectory.h"
-
-namespace JSC {
-
-ThreadLocalCacheLayout::ThreadLocalCacheLayout()
-{
-}
-
-ThreadLocalCacheLayout::~ThreadLocalCacheLayout()
-{
-}
-
-void ThreadLocalCacheLayout::allocateOffset(BlockDirectory* directory)
-{
-    auto locker = holdLock(m_lock);
-    directory->m_tlcOffset = m_size;
-    m_size += sizeof(LocalAllocator);
-    m_directories.append(directory);
-}
-
-ThreadLocalCacheLayout::Snapshot ThreadLocalCacheLayout::snapshot()
-{
-    auto locker = holdLock(m_lock);
-    Snapshot result;
-    result.size = m_size;
-    result.directories = m_directories;
-    return result;
-}
-
-BlockDirectory* ThreadLocalCacheLayout::directory(unsigned offset)
-{
-    auto locker = holdLock(m_lock);
-    return m_directories[offset / sizeof(LocalAllocator)];
-}
-
-} // namespace JSC
-
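
The arithmetic above is the entire layout scheme: offsets are handed out in units of sizeof(LocalAllocator), so a byte offset and a directory index are interconvertible. Equivalently:

    // Equivalent formulation of the offset math in allocateOffset/directory.
    size_t offsetForIndex(size_t i)      { return i * sizeof(LocalAllocator); }
    size_t indexForOffset(size_t offset) { return offset / sizeof(LocalAllocator); }
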
diff --git a/Source/JavaScriptCore/heap/ThreadLocalCacheLayout.h b/Source/JavaScriptCore/heap/ThreadLocalCacheLayout.h
deleted file mode 100644
index e5ea1b6..0000000
--- a/Source/JavaScriptCore/heap/ThreadLocalCacheLayout.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include <wtf/FastMalloc.h>
-#include <wtf/Lock.h>
-#include <wtf/Noncopyable.h>
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class BlockDirectory;
-
-// Each Heap has a ThreadLocalCacheLayout that helps us figure out how to allocate a ThreadLocalCache
-// for that Heap.
-
-class ThreadLocalCacheLayout {
-    WTF_MAKE_NONCOPYABLE(ThreadLocalCacheLayout);
-    WTF_MAKE_FAST_ALLOCATED;
-    
-public:
-    ThreadLocalCacheLayout();
-    ~ThreadLocalCacheLayout();
-
-    // BlockDirectory calls this during creation, and it fills in BlockDirectory::m_tlcOffset.
-    void allocateOffset(BlockDirectory*);
-    
-    struct Snapshot {
-        size_t size;
-        Vector<BlockDirectory*> directories;
-    };
-    
-    Snapshot snapshot();
-    
-    BlockDirectory* directory(unsigned offset);
-    
-private:
-    Lock m_lock;
-    size_t m_size { 0 };
-    Vector<BlockDirectory*> m_directories;
-};
-
-} // namespace JSC
-
index 476d31b..b7715df 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -582,103 +582,6 @@ void AssemblyHelpers::emitRandomThunk(VM& vm, GPRReg scratch0, GPRReg scratch1,
 }
 #endif
 
-void AssemblyHelpers::emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
-{
-    // NOTE: This is carefully written so that we can call it while we disallow scratch
-    // register usage.
-        
-    if (Options::forceGCSlowPaths()) {
-        slowPath.append(jump());
-        return;
-    }
-    
-    Jump popPath;
-    Jump done;
-    
-#if ENABLE(FAST_TLS_JIT)
-    loadFromTLSPtr(fastTLSOffsetForKey(WTF_GC_TLC_KEY), scratchGPR);
-#else
-    loadPtr(&vm().threadLocalCacheData, scratchGPR);
-#endif
-    if (allocator.isConstant()) {
-        slowPath.append(branch32(BelowOrEqual, Address(scratchGPR, ThreadLocalCache::offsetOfSizeInData()), TrustedImm32(allocator.allocator().offset())));
-        addPtr(TrustedImm32(ThreadLocalCache::offsetOfFirstAllocatorInData() + allocator.allocator().offset()), scratchGPR, allocatorGPR);
-    } else {
-        slowPath.append(branch32(BelowOrEqual, Address(scratchGPR, ThreadLocalCache::offsetOfSizeInData()), allocatorGPR));
-        addPtr(TrustedImm32(ThreadLocalCache::offsetOfFirstAllocatorInData()), allocatorGPR);
-        addPtr(scratchGPR, allocatorGPR);
-    }
-
-    load32(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()), resultGPR);
-    popPath = branchTest32(Zero, resultGPR);
-    if (allocator.isConstant())
-        add32(TrustedImm32(-allocator.allocator().cellSize(vm().heap)), resultGPR, scratchGPR);
-    else {
-        if (isX86()) {
-            move(resultGPR, scratchGPR);
-            sub32(Address(allocatorGPR, LocalAllocator::offsetOfCellSize()), scratchGPR);
-        } else {
-            load32(Address(allocatorGPR, LocalAllocator::offsetOfCellSize()), scratchGPR);
-            sub32(resultGPR, scratchGPR, scratchGPR);
-        }
-    }
-    negPtr(resultGPR);
-    store32(scratchGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()));
-    Address payloadEndAddr = Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfPayloadEnd());
-    if (isX86())
-        addPtr(payloadEndAddr, resultGPR);
-    else {
-        loadPtr(payloadEndAddr, scratchGPR);
-        addPtr(scratchGPR, resultGPR);
-    }
-        
-    done = jump();
-        
-    popPath.link(this);
-        
-    loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()), resultGPR);
-    if (isX86())
-        xorPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
-    else {
-        loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), scratchGPR);
-        xorPtr(scratchGPR, resultGPR);
-    }
-    slowPath.append(branchTestPtr(Zero, resultGPR));
-        
-    // The object is half-allocated: we have what we know is a fresh object, but
-    // it's still on the GC's free list.
-    loadPtr(Address(resultGPR), scratchGPR);
-    storePtr(scratchGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()));
-        
-    done.link(this);
-}
-
-void AssemblyHelpers::emitAllocate(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
-{
-    if (allocator.isConstant()) {
-        if (!allocator.allocator()) {
-            slowPath.append(jump());
-            return;
-        }
-    }
-    emitAllocateWithNonNullAllocator(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
-}
-
-void AssemblyHelpers::emitAllocateVariableSized(GPRReg resultGPR, CompleteSubspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
-{
-    static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");
-    
-    unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);
-    
-    add32(TrustedImm32(MarkedSpace::sizeStep - 1), allocationSize, scratchGPR1);
-    urshift32(TrustedImm32(stepShift), scratchGPR1);
-    slowPath.append(branch32(Above, scratchGPR1, TrustedImm32(MarkedSpace::largeCutoff >> stepShift)));
-    move(TrustedImmPtr(subspace.allocatorForSizeStep() - 1), scratchGPR2);
-    load32(BaseIndex(scratchGPR2, scratchGPR1, TimesFour), scratchGPR1);
-    
-    emitAllocate(resultGPR, JITAllocator::variable(), scratchGPR1, scratchGPR2, slowPath);
-}
-
 void AssemblyHelpers::restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame)
 {
 #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
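
The assembly deleted above implements the free-list fast path in two flavors: a bump path that counts `remaining` down while handing cells out from the front of the payload, and a pop path that descrambles the free-list head. Rendered as C++, under the assumption that the FreeList accessors line up with the offsets used above (a sketch of the logic, not the actual FreeList implementation):

    // Sketch of the fast path emitAllocateWithNonNullAllocator emits.
    char* allocateFast(FreeList& freeList, unsigned cellSize)
    {
        if (uintptr_t remaining = freeList.remaining()) {
            // Bump path: remaining is kept a multiple of cellSize, so the next
            // cell sits remaining bytes before the end of the payload.
            char* result = freeList.payloadEnd() - remaining;
            freeList.setRemaining(remaining - cellSize);
            return result;
        }
        // Pop path: the head pointer is stored XOR-scrambled with a secret.
        uintptr_t head = freeList.scrambledHead() ^ freeList.secret();
        if (!head)
            return nullptr; // both paths empty: take the JIT slow path
        // The cell's next pointer is stored scrambled too, so it can be
        // written back to scrambledHead as-is.
        freeList.setScrambledHead(*bitwise_cast<uintptr_t*>(head));
        return bitwise_cast<char*>(head);
    }
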
index 403a114..ee5732a 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -32,7 +32,6 @@
 #include "GPRInfo.h"
 #include "Heap.h"
 #include "InlineCallFrame.h"
-#include "JITAllocator.h"
 #include "JITCode.h"
 #include "MacroAssembler.h"
 #include "MarkedSpace.h"
@@ -60,7 +59,6 @@ public:
     }
 
     CodeBlock* codeBlock() { return m_codeBlock; }
-    VM& vm() { return *m_codeBlock->vm(); }
     AssemblerType_T& assembler() { return m_assembler; }
 
     void checkStackPointerAlignment()
@@ -1493,21 +1491,80 @@ public:
 
     // Call this if you know that the value held in allocatorGPR is non-null. This DOES NOT mean
     // that allocator is non-null; allocator can be null as a signal that we don't know what the
-    // value of allocatorGPR is. Additionally, if the allocator is not null, then there is no need
-    // to populate allocatorGPR - this code will ignore the contents of allocatorGPR.
-    void emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath);
+    // value of allocatorGPR is.
+    void emitAllocateWithNonNullAllocator(GPRReg resultGPR, BlockDirectory* allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
+    {
+        // NOTE: This is carefully written so that we can call it while we disallow scratch
+        // register usage.
+        
+        if (Options::forceGCSlowPaths()) {
+            slowPath.append(jump());
+            return;
+        }
+        
+        Jump popPath;
+        Jump done;
+        
+        load32(Address(allocatorGPR, BlockDirectory::offsetOfFreeList() + FreeList::offsetOfRemaining()), resultGPR);
+        popPath = branchTest32(Zero, resultGPR);
+        if (allocator)
+            add32(TrustedImm32(-allocator->cellSize()), resultGPR, scratchGPR);
+        else {
+            if (isX86()) {
+                move(resultGPR, scratchGPR);
+                sub32(Address(allocatorGPR, BlockDirectory::offsetOfCellSize()), scratchGPR);
+            } else {
+                load32(Address(allocatorGPR, BlockDirectory::offsetOfCellSize()), scratchGPR);
+                sub32(resultGPR, scratchGPR, scratchGPR);
+            }
+        }
+        negPtr(resultGPR);
+        store32(scratchGPR, Address(allocatorGPR, BlockDirectory::offsetOfFreeList() + FreeList::offsetOfRemaining()));
+        Address payloadEndAddr = Address(allocatorGPR, BlockDirectory::offsetOfFreeList() + FreeList::offsetOfPayloadEnd());
+        if (isX86())
+            addPtr(payloadEndAddr, resultGPR);
+        else {
+            loadPtr(payloadEndAddr, scratchGPR);
+            addPtr(scratchGPR, resultGPR);
+        }
+        
+        done = jump();
+        
+        popPath.link(this);
+        
+        loadPtr(Address(allocatorGPR, BlockDirectory::offsetOfFreeList() + FreeList::offsetOfScrambledHead()), resultGPR);
+        if (isX86())
+            xorPtr(Address(allocatorGPR, BlockDirectory::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
+        else {
+            loadPtr(Address(allocatorGPR, BlockDirectory::offsetOfFreeList() + FreeList::offsetOfSecret()), scratchGPR);
+            xorPtr(scratchGPR, resultGPR);
+        }
+        slowPath.append(branchTestPtr(Zero, resultGPR));
+        
+        // The object is half-allocated: we have what we know is a fresh object, but
+        // it's still on the GC's free list.
+        loadPtr(Address(resultGPR), scratchGPR);
+        storePtr(scratchGPR, Address(allocatorGPR, BlockDirectory::offsetOfFreeList() + FreeList::offsetOfScrambledHead()));
+        
+        done.link(this);
+    }
     
-    void emitAllocate(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath);
+    void emitAllocate(GPRReg resultGPR, BlockDirectory* allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
+    {
+        if (!allocator)
+            slowPath.append(branchTestPtr(Zero, allocatorGPR));
+        emitAllocateWithNonNullAllocator(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
+    }
     
     template<typename StructureType>
-    void emitAllocateJSCell(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure, GPRReg scratchGPR, JumpList& slowPath)
+    void emitAllocateJSCell(GPRReg resultGPR, BlockDirectory* allocator, GPRReg allocatorGPR, StructureType structure, GPRReg scratchGPR, JumpList& slowPath)
     {
         emitAllocate(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
         emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR);
     }
     
     template<typename StructureType, typename StorageType, typename MaskType>
-    void emitAllocateJSObject(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure, StorageType storage, MaskType mask, GPRReg scratchGPR, JumpList& slowPath)
+    void emitAllocateJSObject(GPRReg resultGPR, BlockDirectory* allocator, GPRReg allocatorGPR, StructureType structure, StorageType storage, MaskType mask, GPRReg scratchGPR, JumpList& slowPath)
     {
         emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath);
         storePtr(storage, Address(resultGPR, JSObject::butterflyOffset()));
@@ -1519,8 +1576,13 @@ public:
         VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, MaskType mask, GPRReg scratchGPR1,
         GPRReg scratchGPR2, JumpList& slowPath, size_t size)
     {
-        Allocator allocator = subspaceFor<ClassType>(vm)->allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);
-        emitAllocateJSObject(resultGPR, JITAllocator::constant(allocator), scratchGPR1, structure, storage, mask, scratchGPR2, slowPath);
+        BlockDirectory* allocator = subspaceFor<ClassType>(vm)->allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);
+        if (!allocator) {
+            slowPath.append(jump());
+            return;
+        }
+        move(TrustedImmPtr(allocator), scratchGPR1);
+        emitAllocateJSObject(resultGPR, allocator, scratchGPR1, structure, storage, mask, scratchGPR2, slowPath);
     }
     
     template<typename ClassType, typename StructureType, typename StorageType, typename MaskType>
@@ -1531,7 +1593,20 @@ public:
     
     // allocationSize can be aliased with any of the other input GPRs. If it's not aliased then it
     // won't be clobbered.
-    void emitAllocateVariableSized(GPRReg resultGPR, CompleteSubspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath);
+    void emitAllocateVariableSized(GPRReg resultGPR, CompleteSubspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    {
+        static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");
+        
+        unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);
+        
+        add32(TrustedImm32(MarkedSpace::sizeStep - 1), allocationSize, scratchGPR1);
+        urshift32(TrustedImm32(stepShift), scratchGPR1);
+        slowPath.append(branch32(Above, scratchGPR1, TrustedImm32(MarkedSpace::largeCutoff >> stepShift)));
+        move(TrustedImmPtr(subspace.allocatorForSizeStep() - 1), scratchGPR2);
+        loadPtr(BaseIndex(scratchGPR2, scratchGPR1, timesPtr()), scratchGPR1);
+        
+        emitAllocate(resultGPR, nullptr, scratchGPR1, scratchGPR2, slowPath);
+    }
     
     template<typename ClassType, typename StructureType>
     void emitAllocateVariableSizedCell(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
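
The restored emitAllocateVariableSized above rounds the request up to a size class and indexes a directory table whose base pointer is pre-decremented by one slot (a nonzero rounded-up size always yields index >= 1). In scalar terms:

    // Scalar equivalent of the size-class lookup above (illustration only).
    BlockDirectory* directoryFor(CompleteSubspace& subspace, size_t allocationSize)
    {
        unsigned stepShift = getLSBSet(MarkedSpace::sizeStep); // power of two
        size_t index = (allocationSize + MarkedSpace::sizeStep - 1) >> stepShift;
        if (index > (MarkedSpace::largeCutoff >> stepShift))
            return nullptr;                                    // too large: slow path
        return subspace.allocatorForSizeStep()[index - 1];     // table base is at -1
    }
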
diff --git a/Source/JavaScriptCore/jit/JITAllocator.h b/Source/JavaScriptCore/jit/JITAllocator.h
deleted file mode 100644
index 051c76e..0000000
--- a/Source/JavaScriptCore/jit/JITAllocator.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (C) 2018 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include "Allocator.h"
-
-namespace JSC {
-
-class JITAllocator {
-public:
-    enum Kind {
-        Constant,
-        Variable
-    };
-    
-    JITAllocator() { }
-    
-    static JITAllocator constant(Allocator allocator)
-    {
-        JITAllocator result;
-        result.m_kind = Constant;
-        result.m_allocator = allocator;
-        return result;
-    }
-    
-    static JITAllocator variable()
-    {
-        JITAllocator result;
-        result.m_kind = Variable;
-        return result;
-    }
-    
-    bool operator==(const JITAllocator& other) const
-    {
-        return m_kind == other.m_kind
-            && m_allocator == other.m_allocator;
-    }
-    
-    bool operator!=(const JITAllocator& other) const
-    {
-        return !(*this == other);
-    }
-    
-    explicit operator bool() const
-    {
-        return *this != JITAllocator();
-    }
-    
-    Kind kind() const { return m_kind; }
-    bool isConstant() const { return m_kind == Constant; }
-    bool isVariable() const { return m_kind == Variable; }
-    
-    Allocator allocator() const
-    {
-        RELEASE_ASSERT(isConstant());
-        return m_allocator;
-    }
-    
-private:
-    Kind m_kind { Constant };
-    Allocator m_allocator;
-};
-
-} // namespace JSC
-
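
JITAllocator existed to make the constant/variable distinction explicit at allocation sites, instead of overloading a null BlockDirectory* to mean "unknown at compile time". Condensed from the JITOpcodes.cpp hunks below, with arguments abbreviated:

    // The two shapes call sites used (see the deleted lines below):
    if (!allocator)
        addSlowCase(jump());  // no allocator at compile time: always go slow
    else
        emitAllocateJSObject(resultReg, JITAllocator::constant(allocator),
            allocatorReg, TrustedImmPtr(structure), butterfly, mask, scratchReg, slowCases);

    // op_create_this: the allocator is only known at runtime, in allocatorReg.
    emitAllocateJSObject(resultReg, JITAllocator::variable(),
        allocatorReg, structureReg, butterfly, mask, scratchReg, slowCases);
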
index 2a3de0d..a2b6f6d 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2009-2017 Apple Inc. All rights reserved.
  * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -81,23 +81,22 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
 {
     Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
     size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
-    Allocator allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
+    BlockDirectory* allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
 
     RegisterID resultReg = regT0;
     RegisterID allocatorReg = regT1;
     RegisterID scratchReg = regT2;
 
-    if (!allocator)
-        addSlowCase(jump());
-    else {
-        JumpList slowCases;
-        auto butterfly = TrustedImmPtr(nullptr);
-        auto mask = TrustedImm32(0);
-        emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, mask, scratchReg, slowCases);
-        emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
-        addSlowCase(slowCases);
-        emitPutVirtualRegister(currentInstruction[1].u.operand);
-    }
+    move(TrustedImmPtr(allocator), allocatorReg);
+    if (allocator)
+        addSlowCase(Jump());
+    JumpList slowCases;
+    auto butterfly = TrustedImmPtr(nullptr);
+    auto mask = TrustedImm32(0);
+    emitAllocateJSObject(resultReg, allocator, allocatorReg, TrustedImmPtr(structure), butterfly, mask, scratchReg, slowCases);
+    emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
+    addSlowCase(slowCases);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
 }
 
 void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -767,9 +766,9 @@ void JIT::emit_op_create_this(Instruction* currentInstruction)
     addSlowCase(branch8(NotEqual, Address(calleeReg, JSCell::typeInfoTypeOffset()), TrustedImm32(JSFunctionType)));
     loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
     addSlowCase(branchTestPtr(Zero, rareDataReg));
-    load32(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
     loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
-    addSlowCase(branch32(Equal, allocatorReg, TrustedImm32(Allocator().offset())));
+    addSlowCase(branchTestPtr(Zero, allocatorReg));
 
     loadPtr(cachedFunction, cachedFunctionReg);
     Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
@@ -779,7 +778,7 @@ void JIT::emit_op_create_this(Instruction* currentInstruction)
     JumpList slowCases;
     auto butterfly = TrustedImmPtr(nullptr);
     auto mask = TrustedImm32(0);
-    emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, mask, scratchReg, slowCases);
+    emitAllocateJSObject(resultReg, nullptr, allocatorReg, structureReg, butterfly, mask, scratchReg, slowCases);
     emitGetVirtualRegister(callee, scratchReg);
     loadPtr(Address(scratchReg, JSFunction::offsetOfRareData()), scratchReg);
     load32(Address(scratchReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfInlineCapacity()), scratchReg);
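
The restored `if (allocator) addSlowCase(Jump());` above looks like a typo but is deliberate, as far as I can tell: the shared slow-path handler links a fixed number of slow-case entries per opcode, and when the allocator is a non-null compile-time constant, emitAllocate emits no null-check branch, so an unset Jump is appended to keep the entry count aligned with the compile-time-unknown case. Schematically:

    // The two arms that must contribute the same number of slow-case entries:
    if (allocator)
        addSlowCase(Jump());  // placeholder entry; an unset Jump is never linked
    // ...while emitAllocate supplies the real entry in the other case:
    if (!allocator)
        slowPath.append(branchTestPtr(Zero, allocatorGPR)); // runtime null check
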
index 0b05204..1438e10 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2009-2017 Apple Inc. All rights reserved.
  * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -79,23 +79,22 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
 {
     Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
     size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
-    Allocator allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
+    BlockDirectory* allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
 
     RegisterID resultReg = returnValueGPR;
     RegisterID allocatorReg = regT1;
     RegisterID scratchReg = regT3;
 
-    if (!allocator)
-        addSlowCase(jump());
-    else {
-        JumpList slowCases;
-        auto butterfly = TrustedImmPtr(nullptr);
-        auto mask = TrustedImm32(0);
-        emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, mask, scratchReg, slowCases);
-        emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
-        addSlowCase(slowCases);
-        emitStoreCell(currentInstruction[1].u.operand, resultReg);
-    }
+    move(TrustedImmPtr(allocator), allocatorReg);
+    if (allocator)
+        addSlowCase(Jump());
+    JumpList slowCases;
+    auto butterfly = TrustedImmPtr(nullptr);
+    auto mask = TrustedImm32(0);
+    emitAllocateJSObject(resultReg, allocator, allocatorReg, TrustedImmPtr(structure), butterfly, mask, scratchReg, slowCases);
+    emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
+    addSlowCase(slowCases);
+    emitStoreCell(currentInstruction[1].u.operand, resultReg);
 }
 
 void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -857,9 +856,9 @@ void JIT::emit_op_create_this(Instruction* currentInstruction)
     addSlowCase(branch8(NotEqual, Address(calleeReg, JSCell::typeInfoTypeOffset()), TrustedImm32(JSFunctionType)));
     loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
     addSlowCase(branchTestPtr(Zero, rareDataReg));
-    load32(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
     loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
-    addSlowCase(branch32(Equal, allocatorReg, TrustedImm32(Allocator().offset())));
+    addSlowCase(branchTestPtr(Zero, allocatorReg));
 
     loadPtr(cachedFunction, cachedFunctionReg);
     Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
@@ -869,7 +868,7 @@ void JIT::emit_op_create_this(Instruction* currentInstruction)
     JumpList slowCases;
     auto butterfly = TrustedImmPtr(nullptr);
     auto mask = TrustedImm32(0);
-    emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, mask, scratchReg, slowCases);
+    emitAllocateJSObject(resultReg, nullptr, allocatorReg, structureReg, butterfly, mask, scratchReg, slowCases);
     addSlowCase(slowCases);
     emitStoreCell(currentInstruction[1].u.operand, resultReg);
 }
index 6265f58..a6728a1 100644
@@ -79,7 +79,7 @@ ALWAYS_INLINE unsigned Butterfly::optimalContiguousVectorLength(Structure* struc
 inline Butterfly* Butterfly::createUninitialized(VM& vm, JSCell*, size_t preCapacity, size_t propertyCapacity, bool hasIndexingHeader, size_t indexingPayloadSizeInBytes)
 {
     size_t size = totalSize(preCapacity, propertyCapacity, hasIndexingHeader, indexingPayloadSizeInBytes);
-    void* base = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(vm, size, nullptr, AllocationFailureMode::Assert);
+    void* base = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(size, nullptr, AllocationFailureMode::Assert);
     Butterfly* result = fromBase(base, preCapacity, propertyCapacity);
     return result;
 }
@@ -87,7 +87,7 @@ inline Butterfly* Butterfly::createUninitialized(VM& vm, JSCell*, size_t preCapa
 inline Butterfly* Butterfly::tryCreate(VM& vm, JSCell*, size_t preCapacity, size_t propertyCapacity, bool hasIndexingHeader, const IndexingHeader& indexingHeader, size_t indexingPayloadSizeInBytes)
 {
     size_t size = totalSize(preCapacity, propertyCapacity, hasIndexingHeader, indexingPayloadSizeInBytes);
-    void* base = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(vm, size, nullptr, AllocationFailureMode::ReturnNull);
+    void* base = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(size, nullptr, AllocationFailureMode::ReturnNull);
     if (!base)
         return nullptr;
     Butterfly* result = fromBase(base, preCapacity, propertyCapacity);
@@ -165,7 +165,7 @@ inline Butterfly* Butterfly::growArrayRight(
     void* theBase = base(0, propertyCapacity);
     size_t oldSize = totalSize(0, propertyCapacity, hadIndexingHeader, oldIndexingPayloadSizeInBytes);
     size_t newSize = totalSize(0, propertyCapacity, true, newIndexingPayloadSizeInBytes);
-    void* newBase = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(vm, newSize, nullptr, AllocationFailureMode::ReturnNull);
+    void* newBase = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(newSize, nullptr, AllocationFailureMode::ReturnNull);
     if (!newBase)
         return nullptr;
     // FIXME: This probably shouldn't be a memcpy.
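
This hunk and most of the remaining runtime hunks are one mechanical change repeated: with per-thread caches gone, allocateNonVirtual no longer needs the VM to locate the current thread's cache, so its leading VM& argument disappears. Schematically, with the signatures reconstructed from the call sites:

    // TLC version being reverted:
    void* allocateNonVirtual(VM&, size_t, GCDeferralContext*, AllocationFailureMode);
    // Restored version:
    void* allocateNonVirtual(size_t, GCDeferralContext*, AllocationFailureMode);
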
index 68bf53e..aa84b59 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -118,7 +118,7 @@ void DirectArguments::overrideThings(VM& vm)
     putDirect(vm, vm.propertyNames->callee, m_callee.get(), static_cast<unsigned>(PropertyAttribute::DontEnum));
     putDirect(vm, vm.propertyNames->iteratorSymbol, globalObject()->arrayProtoValuesFunction(), static_cast<unsigned>(PropertyAttribute::DontEnum));
     
-    void* backingStore = vm.gigacageAuxiliarySpace(m_mappedArguments.kind).allocateNonVirtual(vm, mappedArgumentsSize(), nullptr, AllocationFailureMode::Assert);
+    void* backingStore = vm.gigacageAuxiliarySpace(m_mappedArguments.kind).allocateNonVirtual(mappedArgumentsSize(), nullptr, AllocationFailureMode::Assert);
     bool* overrides = static_cast<bool*>(backingStore);
     m_mappedArguments.set(vm, this, overrides);
     for (unsigned i = m_length; i--;)
index 59882ea..d90e59f 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -263,7 +263,7 @@ void GenericArguments<Type>::initModifiedArgumentsDescriptor(VM& vm, unsigned ar
     RELEASE_ASSERT(!m_modifiedArgumentsDescriptor);
 
     if (argsLength) {
-        void* backingStore = vm.gigacageAuxiliarySpace(m_modifiedArgumentsDescriptor.kind).allocateNonVirtual(vm, WTF::roundUpToMultipleOf<8>(argsLength), nullptr, AllocationFailureMode::Assert);
+        void* backingStore = vm.gigacageAuxiliarySpace(m_modifiedArgumentsDescriptor.kind).allocateNonVirtual(WTF::roundUpToMultipleOf<8>(argsLength), nullptr, AllocationFailureMode::Assert);
         bool* modifiedArguments = static_cast<bool*>(backingStore);
         m_modifiedArgumentsDescriptor.set(vm, this, modifiedArguments);
         for (unsigned i = argsLength; i--;)
index 7d949c2..94ee2e3 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -207,7 +207,7 @@ public:
     {
         auto scope = DECLARE_THROW_SCOPE(vm);
         size_t allocationSize = HashMapBuffer::allocationSize(capacity);
-        void* data = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(vm, allocationSize, nullptr, AllocationFailureMode::ReturnNull);
+        void* data = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(allocationSize, nullptr, AllocationFailureMode::ReturnNull);
         if (!data) {
             throwOutOfMemoryError(exec, scope);
             return nullptr;
index 884b0a2..9932b5a 100644
@@ -1,6 +1,6 @@
 /*
  *  Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
- *  Copyright (C) 2003-2018 Apple Inc. All rights reserved.
+ *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
  *  Copyright (C) 2003 Peter Kelly (pmk@post.com)
  *  Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com)
  *
@@ -81,10 +81,7 @@ JSArray* JSArray::tryCreateUninitializedRestricted(ObjectInitializationScope& sc
             || hasContiguous(indexingType));
 
         unsigned vectorLength = Butterfly::optimalContiguousVectorLength(structure, initialLength);
-        void* temp = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(
-            vm,
-            Butterfly::totalSize(0, outOfLineStorage, true, vectorLength * sizeof(EncodedJSValue)),
-            deferralContext, AllocationFailureMode::ReturnNull);
+        void* temp = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(Butterfly::totalSize(0, outOfLineStorage, true, vectorLength * sizeof(EncodedJSValue)), deferralContext, AllocationFailureMode::ReturnNull);
         if (UNLIKELY(!temp))
             return nullptr;
         butterfly = Butterfly::fromBase(temp, 0, outOfLineStorage);
@@ -101,10 +98,7 @@ JSArray* JSArray::tryCreateUninitializedRestricted(ObjectInitializationScope& sc
     } else {
         static const unsigned indexBias = 0;
         unsigned vectorLength = ArrayStorage::optimalVectorLength(indexBias, structure, initialLength);
-        void* temp = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(
-            vm,
-            Butterfly::totalSize(indexBias, outOfLineStorage, true, ArrayStorage::sizeFor(vectorLength)),
-            deferralContext, AllocationFailureMode::ReturnNull);
+        void* temp = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(Butterfly::totalSize(indexBias, outOfLineStorage, true, ArrayStorage::sizeFor(vectorLength)), deferralContext, AllocationFailureMode::ReturnNull);
         if (UNLIKELY(!temp))
             return nullptr;
         butterfly = Butterfly::fromBase(temp, indexBias, outOfLineStorage);
@@ -374,8 +368,7 @@ bool JSArray::unshiftCountSlowCase(const AbstractLocker&, VM& vm, DeferGC&, bool
         allocatedNewStorage = false;
     } else {
         size_t newSize = Butterfly::totalSize(0, propertyCapacity, true, ArrayStorage::sizeFor(desiredCapacity));
-        newAllocBase = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(
-            vm, newSize, nullptr, AllocationFailureMode::ReturnNull);
+        newAllocBase = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(newSize, nullptr, AllocationFailureMode::ReturnNull);
         if (!newAllocBase)
             return false;
         newStorageCapacity = desiredCapacity;
index 3244d77..b3c9343 100644
@@ -1,6 +1,6 @@
 /*
  *  Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
- *  Copyright (C) 2003-2018 Apple Inc. All rights reserved.
+ *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
  *
  *  This library is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU Lesser General Public
@@ -238,7 +238,6 @@ inline JSArray* JSArray::tryCreate(VM& vm, Structure* structure, unsigned initia
 
         unsigned vectorLength = Butterfly::optimalContiguousVectorLength(structure, vectorLengthHint);
         void* temp = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(
-            vm,
             Butterfly::totalSize(0, outOfLineStorage, true, vectorLength * sizeof(EncodedJSValue)),
             nullptr, AllocationFailureMode::ReturnNull);
         if (!temp)
index 8e0075d..97f688e 100644
@@ -66,7 +66,7 @@ JSArrayBufferView::ConstructionContext::ConstructionContext(
         void* temp;
         size_t size = sizeOf(length, elementSize);
         if (size) {
-            temp = vm.primitiveGigacageAuxiliarySpace.allocateNonVirtual(vm, size, nullptr, AllocationFailureMode::ReturnNull);
+            temp = vm.primitiveGigacageAuxiliarySpace.allocateNonVirtual(size, nullptr, AllocationFailureMode::ReturnNull);
             if (!temp)
                 return;
         } else
index 00b3940..8934c84 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -145,15 +145,14 @@ CompleteSubspace* JSCell::subspaceFor(VM& vm)
 template<typename T>
 ALWAYS_INLINE void* tryAllocateCellHelper(Heap& heap, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
 {
-    VM& vm = *heap.vm();
     ASSERT(deferralContext || !DisallowGC::isInEffectOnCurrentThread());
     ASSERT(size >= sizeof(T));
-    JSCell* result = static_cast<JSCell*>(subspaceFor<T>(vm)->allocateNonVirtual(vm, size, deferralContext, failureMode));
+    JSCell* result = static_cast<JSCell*>(subspaceFor<T>(*heap.vm())->allocateNonVirtual(size, deferralContext, failureMode));
     if (failureMode == AllocationFailureMode::ReturnNull && !result)
         return nullptr;
 #if ENABLE(GC_VALIDATION)
-    ASSERT(!vm.isInitializingObject());
-    vm.setInitializingObjectClass(T::info());
+    ASSERT(!heap.vm()->isInitializingObject());
+    heap.vm()->setInitializingObjectClass(T::info());
 #endif
     result->clearStructure();
     return result;
index e97a9b6..374e464 100644
@@ -335,7 +335,7 @@ static EncodedJSValue JSC_HOST_CALL enqueueJob(ExecState* exec)
     return JSValue::encode(jsUndefined());
 }
 
-JSGlobalObject::JSGlobalObject(VM& vm, Structure* structure, const GlobalObjectMethodTable* globalObjectMethodTable, RefPtr<ThreadLocalCache> threadLocalCache)
+JSGlobalObject::JSGlobalObject(VM& vm, Structure* structure, const GlobalObjectMethodTable* globalObjectMethodTable)
     : Base(vm, structure, 0)
     , m_vm(vm)
     , m_masqueradesAsUndefinedWatchpoint(adoptRef(new WatchpointSet(IsWatched)))
@@ -353,7 +353,6 @@ JSGlobalObject::JSGlobalObject(VM& vm, Structure* structure, const GlobalObjectM
     , m_templateRegistry(vm)
     , m_runtimeFlags()
     , m_globalObjectMethodTable(globalObjectMethodTable ? globalObjectMethodTable : &s_globalObjectMethodTable)
-    , m_threadLocalCache(threadLocalCache ? WTFMove(threadLocalCache) : vm.defaultThreadLocalCache)
 {
 }
 
index 6d1aac8..8ddce14 100644
@@ -491,7 +491,7 @@ public:
     const RuntimeFlags& runtimeFlags() const { return m_runtimeFlags; }
 
 protected:
-    JS_EXPORT_PRIVATE explicit JSGlobalObject(VM&, Structure*, const GlobalObjectMethodTable* = 0, RefPtr<ThreadLocalCache> = nullptr);
+    JS_EXPORT_PRIVATE explicit JSGlobalObject(VM&, Structure*, const GlobalObjectMethodTable* = 0);
 
     JS_EXPORT_PRIVATE void finishCreation(VM&);
 
@@ -893,8 +893,6 @@ public:
     JSWrapperMap* wrapperMap() const { return m_wrapperMap.get(); }
     void setWrapperMap(JSWrapperMap* map) { m_wrapperMap = map; }
 #endif
-    
-    ThreadLocalCache& threadLocalCache() const { return *m_threadLocalCache.get(); }
 
 protected:
     struct GlobalPropertyInfo {
@@ -926,8 +924,6 @@ private:
 #if JSC_OBJC_API_ENABLED
     RetainPtr<JSWrapperMap> m_wrapperMap;
 #endif
-    
-    RefPtr<ThreadLocalCache> m_threadLocalCache;
 };
 
 JSGlobalObject* asGlobalObject(JSValue);
index b6bb130..e63fdbc 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2005-2017 Apple Inc. All rights reserved.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Library General Public
@@ -28,7 +28,6 @@
 #include "JSCInlines.h"
 #include "MachineStackMarker.h"
 #include "SamplingProfiler.h"
-#include "ThreadLocalCacheInlines.h"
 #include "WasmMachineThreads.h"
 #include <thread>
 #include <wtf/Threading.h>
@@ -148,9 +147,7 @@ void JSLock::didAcquireLock()
 
     m_vm->setLastStackTop(thread.savedLastStackTop());
     ASSERT(thread.stack().contains(m_vm->lastStackTop()));
-    
-    m_vm->defaultThreadLocalCache->install(*m_vm);
-    
+
     m_vm->heap.machineThreads().addCurrentThread();
 #if ENABLE(WEBASSEMBLY)
     Wasm::startTrackingCurrentThread();
index f0c8b96..4be5f5d 100644
@@ -236,7 +236,6 @@ constexpr bool enableAsyncIteration = false;
     v(bool, dumpSizeClasses, false, Normal, nullptr) \
     v(bool, useBumpAllocator, true, Normal, nullptr) \
     v(bool, stealEmptyBlocksFromOtherAllocators, true, Normal, nullptr) \
-    v(bool, tradeDestructorBlocks, true, Normal, nullptr) \
     v(bool, eagerlyUpdateTopCallFrame, false, Normal, nullptr) \
     \
     v(bool, useOSREntryToDFG, true, Normal, nullptr) \
index e387cc7..90f8c44 100644
@@ -1,5 +1,5 @@
 /*
- *  Copyright (C) 2008-2018 Apple Inc. All Rights Reserved.
+ *  Copyright (C) 2008-2017 Apple Inc. All Rights Reserved.
  *
  *  This library is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU Lesser General Public
@@ -42,7 +42,7 @@ ALWAYS_INLINE JSArray* tryCreateUninitializedRegExpMatchesArray(ObjectInitializa
 
     JSGlobalObject* globalObject = structure->globalObject();
     bool createUninitialized = globalObject->isOriginalArrayStructure(structure);
-    void* temp = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(vm, Butterfly::totalSize(0, structure->outOfLineCapacity(), true, vectorLength * sizeof(EncodedJSValue)), deferralContext, AllocationFailureMode::ReturnNull);
+    void* temp = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(Butterfly::totalSize(0, structure->outOfLineCapacity(), true, vectorLength * sizeof(EncodedJSValue)), deferralContext, AllocationFailureMode::ReturnNull);
     if (UNLIKELY(!temp))
         return nullptr;
     Butterfly* butterfly = Butterfly::fromBase(temp, 0, structure->outOfLineCapacity());
index 18afa2f..d131dc6 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #include "StrongInlines.h"
 #include "StructureInlines.h"
 #include "TestRunnerUtils.h"
-#include "ThreadLocalCacheInlines.h"
 #include "ThunkGenerators.h"
 #include "TypeProfiler.h"
 #include "TypeProfilerLog.h"
@@ -307,9 +306,6 @@ VM::VM(VMType vmType, HeapType heapType)
     updateSoftReservedZoneSize(Options::softReservedZoneSize());
     setLastStackTop(stack.origin());
 
-    defaultThreadLocalCache = ThreadLocalCache::create(heap);
-    defaultThreadLocalCache->install(*this);
-
     // Need to be careful to keep everything consistent here
     JSLockHolder lock(this);
     AtomicStringTable* existingEntryAtomicStringTable = Thread::current().setCurrentAtomicStringTable(m_atomicStringTable);
@@ -498,10 +494,6 @@ VM::~VM()
     ASSERT(currentThreadIsHoldingAPILock());
     m_apiLock->willDestroyVM(this);
     heap.lastChanceToFinalize();
-    
-#if !ENABLE(FAST_TLS_JIT)
-    ThreadLocalCache::destructor(threadLocalCacheData);
-#endif
 
     delete interpreter;
 #ifndef NDEBUG
index 55da9ac..be2f786 100644
@@ -54,7 +54,6 @@
 #include "TemplateRegistryKeyTable.h"
 #include "VMEntryRecord.h"
 #include "VMTraps.h"
-#include "ThreadLocalCache.h"
 #include "WasmContext.h"
 #include "Watchpoint.h"
 #include <wtf/BumpPointerAllocator.h>
@@ -657,11 +656,6 @@ public:
 
     JSObject* stringRecursionCheckFirstObject { nullptr };
     HashSet<JSObject*> stringRecursionCheckVisitedObjects;
-    
-#if !ENABLE(FAST_TLS_JIT)
-    ThreadLocalCache::Data* threadLocalCacheData { nullptr };
-#endif
-    RefPtr<ThreadLocalCache> defaultThreadLocalCache;
 
     LocalTimeOffsetCache localTimeOffsetCache;
 
index 4ce5ef9..d11a8ff 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -29,7 +29,6 @@
 #include "DisallowVMReentry.h"
 #include "Options.h"
 #include "SamplingProfiler.h"
-#include "ThreadLocalCacheInlines.h"
 #include "VM.h"
 #include "Watchdog.h"
 #include <wtf/StackBounds.h>
@@ -41,7 +40,6 @@ VMEntryScope::VMEntryScope(VM& vm, JSGlobalObject* globalObject)
     : m_vm(vm)
     , m_globalObject(globalObject)
 {
-    globalObject->threadLocalCache().install(vm);
     ASSERT(!DisallowVMReentry::isInEffectOnCurrentThread());
     ASSERT(Thread::current().stack().isGrowingDownward());
     if (!vm.entryScope) {
index 1e9ff3b..af2291d 100644
@@ -1,3 +1,17 @@
+2018-01-25  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, rolling out r227592.
+        https://bugs.webkit.org/show_bug.cgi?id=182110
+
+        it made ARM64 (Linux and iOS) crash (Requested by pizlo-mbp on
+        #webkit).
+
+        Reverted changeset:
+
+        "JSC GC should support TLCs (thread local caches)"
+        https://bugs.webkit.org/show_bug.cgi?id=181559
+        https://trac.webkit.org/changeset/227592
+
 2018-01-20  Filip Pizlo  <fpizlo@apple.com>
 
         JSC GC should support TLCs (thread local caches)
index 6345a60..dd33470 100644
@@ -21,7 +21,6 @@
 
 #include <array>
 #include <wtf/Atomics.h>
-#include <wtf/HashFunctions.h>
 #include <wtf/StdLibExtras.h>
 #include <stdint.h>
 #include <string.h>