Update lock benchmarks
[WebKit-https.git] / Source / WTF / benchmarks / ToyLocks.h
1 /*
2  * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #ifndef ToyLocks_h
27 #define ToyLocks_h
28
#include <cstring>
#include <mutex>
#include <thread>
#include <wtf/Atomics.h>
#include <wtf/Lock.h>
#include <wtf/ParkingLot.h>
#include <wtf/WordLock.h>
35
36 #if __has_include(<os/lock.h>)
37 #include <os/lock.h>
38 #define HAS_UNFAIR_LOCK
39 #endif
40
41 #if defined(EXTRA_LOCKS) && EXTRA_LOCKS
42 #include <synchronic>
43 #endif
44
45 namespace {
46
// Number of spin/yield attempts the adaptive locks below (BargingLock,
// ThunderLock, CascadeLock) make before falling back to parking the thread.
unsigned toyLockSpinLimit = 40;
48
49 // This is the old WTF::SpinLock class, included here so that we can still compare our new locks to a
50 // spinlock baseline.
51 class YieldSpinLock {
52 public:
53     YieldSpinLock()
54     {
55         m_lock.store(0, std::memory_order_relaxed);
56     }
57
58     void lock()
59     {
60         while (!m_lock.compareExchangeWeak(0, 1, std::memory_order_acquire))
61             Thread::yield();
62     }
63
64     void unlock()
65     {
66         m_lock.store(0, std::memory_order_release);
67     }
68
69     bool isLocked() const
70     {
71         return m_lock.load(std::memory_order_acquire);
72     }
73
74 private:
75     Atomic<unsigned> m_lock;
76 };
77
78 class PauseSpinLock {
79 public:
80     PauseSpinLock()
81     {
82         m_lock.store(0, std::memory_order_relaxed);
83     }
84
85     void lock()
86     {
87         while (!m_lock.compareExchangeWeak(0, 1, std::memory_order_acquire))
88             asm volatile ("pause");
89     }
90
91     void unlock()
92     {
93         m_lock.store(0, std::memory_order_release);
94     }
95
96     bool isLocked() const
97     {
98         return m_lock.load(std::memory_order_acquire);
99     }
100
101 private:
102     Atomic<unsigned> m_lock;
103 };
104
105 #if defined(EXTRA_LOCKS) && EXTRA_LOCKS
106 class TransactionalSpinLock {
107 public:
108     TransactionalSpinLock()
109     {
110         m_lock = 0;
111     }
112
113     void lock()
114     {
115         for (;;) {
116             unsigned char result;
117             unsigned expected = 0;
118             unsigned desired = 1;
119             asm volatile (
120                 "xacquire; lock; cmpxchgl %3, %2\n\t"
121                 "sete %1"
122                 : "+a"(expected), "=q"(result), "+m"(m_lock)
123                 : "r"(desired)
124                 : "memory");
125             if (result)
126                 return;
127             Thread::yield();
128         }
129     }
130
131     void unlock()
132     {
133         asm volatile (
134             "xrelease; movl $0, %0"
135             :
136             : "m"(m_lock)
137             : "memory");
138     }
139
140     bool isLocked() const
141     {
142         return m_lock;
143     }
144
145 private:
146     unsigned m_lock;
147 };
148
// Lock built on the proposed std::experimental::synchronic waiting
// primitive (only available when EXTRA_LOCKS is enabled).
class SynchronicLock {
public:
    SynchronicLock()
        : m_locked(0)
    {
    }
    
    void lock()
    {
        for (;;) {
            // Try to take the lock (0 -> 1). On CAS failure, `state` holds
            // the value we observed; block until m_locked changes from it.
            int state = 0;
            if (m_locked.compare_exchange_weak(state, 1, std::memory_order_acquire))
                return;
            m_sync.wait_for_change(m_locked, state, std::memory_order_relaxed);
        }
    }
    
    void unlock()
    {
        // Publish the unlocked (0) state through the synchronic so a waiter
        // blocked in wait_for_change can wake.
        m_sync.notify_one(m_locked, 0, std::memory_order_release);
    }
    
    bool isLocked()
    {
        return m_locked.load();
    }

private:
    std::atomic<int> m_locked; // 0 = unlocked, 1 = locked.
    std::experimental::synchronic<int> m_sync;
};
180 #endif
181
// Adaptive, unfair ("barging") lock. Contended threads spin for a while and
// then park in ParkingLot; unlock clears the lock bit before any waiter
// runs, so a newly arriving thread can take the lock ahead of a woken one.
// StateType selects the width of the atomic state word (the benchmarks use
// uint8_t and uint32_t).
template<typename StateType>
class BargingLock {
public:
    BargingLock()
    {
        m_state.store(0);
    }
    
    // Fast path: uncontended acquire with a single weak CAS.
    void lock()
    {
        if (LIKELY(m_state.compareExchangeWeak(0, isLockedBit, std::memory_order_acquire)))
            return;
        
        lockSlow();
    }
    
    // Fast path: release succeeds only if nobody is parked.
    void unlock()
    {
        if (LIKELY(m_state.compareExchangeWeak(isLockedBit, 0, std::memory_order_release)))
            return;
        
        unlockSlow();
    }
    
    bool isLocked() const
    {
        return m_state.load(std::memory_order_acquire) & isLockedBit;
    }
    
private:
    NEVER_INLINE void lockSlow()
    {
        // Phase 1: bounded spinning, yielding between attempts. Stop early if
        // another thread has already parked — we will have to park too.
        for (unsigned i = toyLockSpinLimit; i--;) {
            StateType currentState = m_state.load();
            
            if (!(currentState & isLockedBit)
                && m_state.compareExchangeWeak(currentState, currentState | isLockedBit))
                return;
            
            if (currentState & hasParkedBit)
                break;
            
            Thread::yield();
        }
        
        // Phase 2: advertise that we are going to park by setting
        // hasParkedBit, then park only while the state still reads
        // "locked with parked waiters".
        for (;;) {
            StateType currentState = m_state.load();
            
            if (!(currentState & isLockedBit)
                && m_state.compareExchangeWeak(currentState, currentState | isLockedBit))
                return;
            
            m_state.compareExchangeWeak(isLockedBit, isLockedBit | hasParkedBit);
            
            ParkingLot::compareAndPark(&m_state, isLockedBit | hasParkedBit);
        }
    }
    
    NEVER_INLINE void unlockSlow()
    {
        // Wake one waiter. In the callback, drop the lock bit, keeping
        // hasParkedBit only if more threads may still be parked.
        ParkingLot::unparkOne(
            &m_state,
            [this] (ParkingLot::UnparkResult result) -> intptr_t {
                if (result.mayHaveMoreThreads)
                    m_state.store(hasParkedBit);
                else
                    m_state.store(0);
                return 0;
            });
    }
    
    static const StateType isLockedBit = 1;   // Bit 0: lock is held.
    static const StateType hasParkedBit = 2;  // Bit 1: waiters (may be) parked.
    
    Atomic<StateType> m_state;
};
258
// Adaptive lock that, on contended release, wakes *all* parked waiters at
// once (a deliberate "thundering herd") and lets them race to reacquire.
// Three states: Unlocked, Locked (no waiters), LockedAndParked (waiters
// parked). StateType selects the width of the state word.
template<typename StateType>
class ThunderLock {
public:
    ThunderLock()
    {
        m_state.store(Unlocked);
    }
    
    // Fast path: uncontended Unlocked -> Locked transition.
    void lock()
    {
        if (LIKELY(m_state.compareExchangeWeak(Unlocked, Locked, std::memory_order_acquire)))
            return;
        
        lockSlow();
    }
    
    // Fast path: release succeeds only if nobody is parked.
    void unlock()
    {
        if (LIKELY(m_state.compareExchangeWeak(Locked, Unlocked, std::memory_order_release)))
            return;
        
        unlockSlow();
    }
    
    bool isLocked() const
    {
        return m_state.load(std::memory_order_acquire) != Unlocked;
    }
    
private:
    NEVER_INLINE void lockSlow()
    {
        // Bounded spinning; stop early if someone is already parked, since
        // we will end up parking anyway.
        for (unsigned i = toyLockSpinLimit; i--;) {
            State currentState = m_state.load();
            
            if (currentState == Unlocked
                && m_state.compareExchangeWeak(Unlocked, Locked))
                return;
            
            if (currentState == LockedAndParked)
                break;
            
            Thread::yield();
        }
        
        // Flag the lock as having parked waiters, then park while the state
        // still matches LockedAndParked.
        for (;;) {
            if (m_state.compareExchangeWeak(Unlocked, Locked))
                return;
            
            m_state.compareExchangeWeak(Locked, LockedAndParked);
            ParkingLot::compareAndPark(&m_state, LockedAndParked);
        }
    }
    
    NEVER_INLINE void unlockSlow()
    {
        // Release the lock, then wake every parked thread if there were any.
        if (m_state.exchange(Unlocked) == LockedAndParked)
            ParkingLot::unparkAll(&m_state);
    }
    
    enum State : StateType {
        Unlocked,
        Locked,
        LockedAndParked
    };
    
    Atomic<State> m_state;
};
327
// Variant of ThunderLock that wakes only *one* waiter on release. A thread
// that ever had to park reacquires directly into LockedAndParked (tracked by
// desiredState), so the next unlock still knows to unpark another waiter —
// wakeups cascade one at a time instead of herding.
template<typename StateType>
class CascadeLock {
public:
    CascadeLock()
    {
        m_state.store(Unlocked);
    }
    
    // Fast path: uncontended Unlocked -> Locked transition.
    void lock()
    {
        if (LIKELY(m_state.compareExchangeWeak(Unlocked, Locked, std::memory_order_acquire)))
            return;
        
        lockSlow();
    }
    
    // Fast path: release succeeds only if nobody is parked.
    void unlock()
    {
        if (LIKELY(m_state.compareExchangeWeak(Locked, Unlocked, std::memory_order_release)))
            return;
        
        unlockSlow();
    }
    
    bool isLocked() const
    {
        return m_state.load(std::memory_order_acquire) != Unlocked;
    }
    
private:
    NEVER_INLINE void lockSlow()
    {
        // Bounded spinning; stop early if someone is already parked.
        for (unsigned i = toyLockSpinLimit; i--;) {
            State currentState = m_state.load();
            
            if (currentState == Unlocked
                && m_state.compareExchangeWeak(Unlocked, Locked))
                return;
            
            if (currentState == LockedAndParked)
                break;
            
            Thread::yield();
        }
        
        // Once we have parked (or tried to), acquire as LockedAndParked so a
        // future unlock will wake the next waiter in the cascade.
        State desiredState = Locked;
        for (;;) {
            if (m_state.compareExchangeWeak(Unlocked, desiredState))
                return;
            
            desiredState = LockedAndParked;
            m_state.compareExchangeWeak(Locked, LockedAndParked);
            ParkingLot::compareAndPark(&m_state, LockedAndParked);
        }
    }
    
    NEVER_INLINE void unlockSlow()
    {
        // Release the lock, then wake a single parked thread if there was one.
        if (m_state.exchange(Unlocked) == LockedAndParked)
            ParkingLot::unparkOne(&m_state);
    }
    
    enum State : StateType {
        Unlocked,
        Locked,
        LockedAndParked
    };
    
    Atomic<State> m_state;
};
398
// Fair lock: a contended unlock() hands the lock directly to a parked waiter
// instead of releasing it for anyone to grab. State layout: bit 0 is the
// lock bit; the remaining bits count threads that are parked (or in the
// process of parking).
class HandoffLock {
public:
    HandoffLock()
    {
        m_state.store(0);
    }
    
    // Fast path: uncontended 0 -> locked transition.
    void lock()
    {
        if (LIKELY(m_state.compareExchangeWeak(0, isLockedBit, std::memory_order_acquire)))
            return;

        lockSlow();
    }

    // Fast path: release succeeds only when the parked count is zero.
    void unlock()
    {
        if (LIKELY(m_state.compareExchangeWeak(isLockedBit, 0, std::memory_order_release)))
            return;

        unlockSlow();
    }

    bool isLocked() const
    {
        return m_state.load(std::memory_order_acquire) & isLockedBit;
    }
    
private:
    NEVER_INLINE void lockSlow()
    {
        for (;;) {
            unsigned state = m_state.load();
            
            // Lock looks free: try to grab it directly.
            if (!(state & isLockedBit)) {
                if (m_state.compareExchangeWeak(state, state | isLockedBit))
                    return;
                continue;
            }
            
            // Publish our intent to park by bumping the parked count, then
            // park while the state still matches. If we were actually
            // unparked, unlockSlow() handed us the lock (the lock bit was
            // never cleared), so we own it on return.
            if (m_state.compareExchangeWeak(state, state + parkedCountUnit)) {
                bool result = ParkingLot::compareAndPark(&m_state, state + parkedCountUnit).wasUnparked;
                m_state.exchangeAdd(-parkedCountUnit);
                if (result)
                    return;
            }
        }
    }
    
    NEVER_INLINE void unlockSlow()
    {
        for (;;) {
            unsigned state = m_state.load();
            
            // No parked threads: just clear the lock bit.
            if (!(state >> parkedCountShift)) {
                RELEASE_ASSERT(state == isLockedBit);
                if (m_state.compareExchangeWeak(isLockedBit, 0))
                    return;
                continue;
            }
            
            if (ParkingLot::unparkOne(&m_state).didUnparkThread) {
                // We unparked someone. They are now running and they hold the lock.
                return;
            }
            
            // Nobody unparked. Maybe there isn't anyone waiting. Just try again.
        }
    }
    
    static const unsigned isLockedBit = 1;        // Bit 0: lock is held.
    static const unsigned parkedCountShift = 1;   // Parked count lives in bits 1+.
    static const unsigned parkedCountUnit = 1 << parkedCountShift;
    
    Atomic<unsigned> m_state;
};
475
476 #ifdef HAS_UNFAIR_LOCK
477 class UnfairLock {
478     os_unfair_lock l = OS_UNFAIR_LOCK_INIT;
479 public:
480     void lock()
481     {
482         os_unfair_lock_lock(&l);
483     }
484     void unlock()
485     {
486         os_unfair_lock_unlock(&l);
487     }
488 };
489 #endif
490
491 template<typename Benchmark>
492 void runEverything(const char* what)
493 {
494     if (!strcmp(what, "yieldspinlock") || !strcmp(what, "all"))
495         Benchmark::template run<YieldSpinLock>("YieldSpinLock");
496     if (!strcmp(what, "pausespinlock") || !strcmp(what, "all"))
497         Benchmark::template run<PauseSpinLock>("PauseSpinLock");
498 #if defined(EXTRA_LOCKS) && EXTRA_LOCKS
499     if (!strcmp(what, "transactionalspinlock") || !strcmp(what, "all"))
500         Benchmark::template run<TransactionalSpinLock>("TransactionalSpinLock");
501     if (!strcmp(what, "synchroniclock") || !strcmp(what, "all"))
502         Benchmark::template run<SynchronicLock>("SynchronicLock");
503 #endif
504     if (!strcmp(what, "wordlock") || !strcmp(what, "all"))
505         Benchmark::template run<WordLock>("WTFWordLock");
506     if (!strcmp(what, "lock") || !strcmp(what, "all"))
507         Benchmark::template run<Lock>("WTFLock");
508     if (!strcmp(what, "barginglock") || !strcmp(what, "all"))
509         Benchmark::template run<BargingLock<uint8_t>>("ByteBargingLock");
510     if (!strcmp(what, "bargingwordlock") || !strcmp(what, "all"))
511         Benchmark::template run<BargingLock<uint32_t>>("WordBargingLock");
512     if (!strcmp(what, "thunderlock") || !strcmp(what, "all"))
513         Benchmark::template run<ThunderLock<uint8_t>>("ByteThunderLock");
514     if (!strcmp(what, "thunderwordlock") || !strcmp(what, "all"))
515         Benchmark::template run<ThunderLock<uint32_t>>("WordThunderLock");
516     if (!strcmp(what, "cascadelock") || !strcmp(what, "all"))
517         Benchmark::template run<CascadeLock<uint8_t>>("ByteCascadeLock");
518     if (!strcmp(what, "cascadewordlock") || !strcmp(what, "all"))
519         Benchmark::template run<CascadeLock<uint32_t>>("WordCascadeLock");
520     if (!strcmp(what, "handofflock") || !strcmp(what, "all"))
521         Benchmark::template run<HandoffLock>("HandoffLock");
522 #ifdef HAS_UNFAIR_LOCK
523     if (!strcmp(what, "unfairlock") || !strcmp(what, "all"))
524         Benchmark::template run<UnfairLock>("UnfairLock");
525 #endif
526     if (!strcmp(what, "mutex") || !strcmp(what, "all"))
527         Benchmark::template run<std::mutex>("std::mutex");
528 }
529
530 } // anonymous namespace
531
532 #endif // ToyLocks_h
533