/*
 * Copyright (C) 2007-2008, 2010, 2012-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <atomic>
#include <wtf/StdLibExtras.h>

#if OS(WINDOWS)
#if !COMPILER(GCC_OR_CLANG)
extern "C" void _ReadWriteBarrier(void);
#pragma intrinsic(_ReadWriteBarrier)
#endif
#include <windows.h>
#endif

// Atomic wraps around std::atomic with the sole purpose of making the compare_exchange
// operations not alter the expected value. This is more in line with how we typically
// use CAS in our code.
//
// Atomic is a struct without explicitly defined constructors so that it can be
// initialized at compile time. (A usage sketch follows the struct definition below.)

template<typename T>
struct Atomic {
    // Don't pass a non-default value for the order parameter unless you really know
    // what you are doing and have thought about it very hard. The cost of seq_cst
    // is usually not high enough to justify the risk.

    T load(std::memory_order order = std::memory_order_seq_cst) const { return value.load(order); }

    void store(T desired, std::memory_order order = std::memory_order_seq_cst) { value.store(desired, order); }

    bool compareExchangeWeak(T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
    {
#if OS(WINDOWS)
        // Windows makes strange assertions about the argument to compare_exchange_weak, and anyway,
        // Windows is X86 so seq_cst is cheap.
        order = std::memory_order_seq_cst;
#endif
        T expectedOrActual = expected;
        return value.compare_exchange_weak(expectedOrActual, desired, order);
    }

    bool compareExchangeStrong(T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
    {
#if OS(WINDOWS)
        // See above.
        order = std::memory_order_seq_cst;
#endif
        T expectedOrActual = expected;
        return value.compare_exchange_strong(expectedOrActual, desired, order);
    }

    template<typename U>
    T exchangeAndAdd(U addend, std::memory_order order = std::memory_order_seq_cst)
    {
#if OS(WINDOWS)
        // See above.
        order = std::memory_order_seq_cst;
#endif
        return value.fetch_add(addend, order);
    }

    T exchange(T newValue, std::memory_order order = std::memory_order_seq_cst)
    {
#if OS(WINDOWS)
        // See above.
        order = std::memory_order_seq_cst;
#endif
        return value.exchange(newValue, order);
    }

    std::atomic<T> value;
};
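
// Illustrative usage sketch (not part of the original header): a retry loop built on
// compareExchangeWeak. Unlike std::atomic, a failed CAS here does not overwrite the
// caller's expected value, so the loop reloads explicitly. The function name is a
// hypothetical example.
inline void exampleIncrement(Atomic<unsigned>& counter)
{
    for (;;) {
        unsigned oldValue = counter.load(std::memory_order_relaxed);
        // On failure (including spurious failure), oldValue is left untouched; just retry
        // with a fresh load. The CAS itself uses the default seq_cst ordering.
        if (counter.compareExchangeWeak(oldValue, oldValue + 1))
            return;
    }
}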

// This is a weak CAS function that takes a direct pointer and has no portable fencing guarantees.
template<typename T>
inline bool weakCompareAndSwap(volatile T* location, T expected, T newValue)
{
    return bitwise_cast<Atomic<T>*>(location)->compareExchangeWeak(expected, newValue, std::memory_order_relaxed);
}
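
// Illustrative sketch (not part of the original header): using weakCompareAndSwap to set
// a bit in a plain word that is not wrapped in Atomic. Because the CAS is relaxed, any
// fencing is the caller's responsibility. The function name is a hypothetical example.
inline void exampleSetBit(volatile unsigned* word, unsigned bit)
{
    for (;;) {
        unsigned oldValue = *word;
        if (weakCompareAndSwap(word, oldValue, oldValue | bit))
            return;
    }
}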

// Just a compiler fence. Has no effect on the hardware, but tells the compiler
// not to move things around this call. Should not affect the compiler's ability
// to do things like register allocation and code motion over pure operations.
inline void compilerFence()
{
#if OS(WINDOWS) && !COMPILER(GCC_OR_CLANG)
    _ReadWriteBarrier();
#else
    asm volatile("" ::: "memory");
#endif
}
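
// Illustrative sketch (not part of the original header): a compiler fence is sufficient
// when only the compiler, not the CPU, could reorder the accesses - for example when the
// observer runs on the same thread, such as a signal handler. Names are hypothetical.
inline void examplePublishToSignalHandler(int* slot, volatile bool* ready, int payload)
{
    *slot = payload;
    compilerFence(); // Keep the compiler from sinking the payload store below the flag store.
    *ready = true;
}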

#if CPU(ARM_THUMB2) || CPU(ARM64)

// Full memory fence. No accesses will float above this, and no accesses will sink
// below it.
inline void armV7_dmb()
{
    asm volatile("dmb sy" ::: "memory");
}

// Like the above, but only affects stores.
inline void armV7_dmb_st()
{
    asm volatile("dmb st" ::: "memory");
}

inline void loadLoadFence() { armV7_dmb(); }
inline void loadStoreFence() { armV7_dmb(); }
inline void storeLoadFence() { armV7_dmb(); }
inline void storeStoreFence() { armV7_dmb_st(); }
inline void memoryBarrierAfterLock() { armV7_dmb(); }
inline void memoryBarrierBeforeUnlock() { armV7_dmb(); }

#elif CPU(X86) || CPU(X86_64)

inline void x86_mfence()
{
#if OS(WINDOWS)
    // I think that this does the equivalent of a dummy interlocked instruction,
    // instead of using the 'mfence' instruction, at least according to MSDN. I
    // know that it is equivalent for our purposes, but it would be good to
    // investigate if that is actually better.
    MemoryBarrier();
#else
    asm volatile("mfence" ::: "memory");
#endif
}

inline void loadLoadFence() { compilerFence(); }
inline void loadStoreFence() { compilerFence(); }
inline void storeLoadFence() { x86_mfence(); }
inline void storeStoreFence() { compilerFence(); }
inline void memoryBarrierAfterLock() { compilerFence(); }
inline void memoryBarrierBeforeUnlock() { compilerFence(); }

#else

inline void loadLoadFence() { compilerFence(); }
inline void loadStoreFence() { compilerFence(); }
inline void storeLoadFence() { compilerFence(); }
inline void storeStoreFence() { compilerFence(); }
inline void memoryBarrierAfterLock() { compilerFence(); }
inline void memoryBarrierBeforeUnlock() { compilerFence(); }

#endif
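
// Illustrative sketch (not part of the original header): the classic message-passing
// pattern expressed with the fences above. The producer orders its stores, the consumer
// orders its loads. The function names are hypothetical examples.
inline void examplePublish(int* data, Atomic<bool>* published, int payload)
{
    *data = payload;
    storeStoreFence(); // The payload store cannot sink below the flag store.
    published->store(true, std::memory_order_relaxed);
}

inline bool exampleConsume(int* data, Atomic<bool>* published, int& result)
{
    if (!published->load(std::memory_order_relaxed))
        return false;
    loadLoadFence(); // The payload load cannot float above the flag load.
    result = *data;
    return true;
}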