From 386f168ab340791631e4d8979c4370ecef7e6b05 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= <marko.makela@mariadb.com>
Date: Mon, 18 May 2020 14:49:44 +0300
Subject: [PATCH] MDEV-22456 after-merge fix: introduce Atomic_relaxed

In the merge 9e6e43551fc61bc34152f8d60f5d72f0d3814787 we made
Atomic_counter a more generic wrapper of std::atomic so that
dict_index_t would support the implicit assignment operator.

It is better to revert the changes to Atomic_counter and instead
introduce Atomic_relaxed as a generic wrapper of std::atomic. Unlike
Atomic_counter, we will not define operator++, operator+=, or similar,
because we want the operations to be explicit in the users of
Atomic_relaxed: unlike loads and stores, atomic read-modify-write
operations always incur some overhead.
---
 include/my_atomic.h                 | 45 +++++++++++++++++++++++++++++
 include/my_counter.h                | 13 +--------
 storage/innobase/dict/dict0dict.cc  | 11 ++-----
 storage/innobase/include/dict0mem.h |  6 ++--
 storage/innobase/include/sync0rw.h  |  6 ++--
 storage/innobase/include/sync0rw.ic | 11 ++++---
 storage/innobase/sync/sync0rw.cc    |  4 +--
 7 files changed, 61 insertions(+), 35 deletions(-)

diff --git a/include/my_atomic.h b/include/my_atomic.h
index 1c54c24d455..88f6746ba3d 100644
--- a/include/my_atomic.h
+++ b/include/my_atomic.h
@@ -2,6 +2,7 @@
 #define MY_ATOMIC_INCLUDED
 
 /* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2018, 2020, MariaDB
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -169,4 +170,48 @@
 my_atomic_casptr((P), (E), (D))
 #endif
 
+#ifdef __cplusplus
+#include <atomic>
+/**
+  A wrapper for std::atomic, defaulting to std::memory_order_relaxed.
+
+  When it comes to atomic loads or stores at std::memory_order_relaxed
+  on IA-32 or AMD64, this wrapper merely introduces some constraints
+  for the C++ compiler, to prevent some optimizations of loads or
+  stores.
+
+  On POWER and ARM, atomic loads and stores involve different instructions
+  from normal loads and stores and will thus incur some overhead.
+
+  Because atomic read-modify-write operations will always incur
+  overhead, we intentionally do not define
+  operator++(), operator--(), operator+=(), operator-=(), or similar,
+  to make the overhead stand out in the users of this code.
+*/
+template <typename Type> class Atomic_relaxed
+{
+  std::atomic<Type> m;
+public:
+  Atomic_relaxed(const Atomic_relaxed &rhs)
+  { m.store(rhs, std::memory_order_relaxed); }
+  Atomic_relaxed(Type val) : m(val) {}
+  Atomic_relaxed() {}
+
+  operator Type() const { return m.load(std::memory_order_relaxed); }
+  Type operator=(const Type val)
+  { m.store(val, std::memory_order_relaxed); return val; }
+  Type operator=(const Atomic_relaxed &rhs) { return *this= Type{rhs}; }
+  Type fetch_add(const Type i, std::memory_order o= std::memory_order_relaxed)
+  { return m.fetch_add(i, o); }
+  Type fetch_sub(const Type i, std::memory_order o= std::memory_order_relaxed)
+  { return m.fetch_sub(i, o); }
+  bool compare_exchange_strong(Type& i1, const Type i2,
+                               std::memory_order o1= std::memory_order_relaxed,
+                               std::memory_order o2= std::memory_order_relaxed)
+  { return m.compare_exchange_strong(i1, i2, o1, o2); }
+  Type exchange(const Type i, std::memory_order o= std::memory_order_relaxed)
+  { return m.exchange(i, o); }
+};
+#endif /* __cplusplus */
+
 #endif /* MY_ATOMIC_INCLUDED */
diff --git a/include/my_counter.h b/include/my_counter.h
index 432dc7dda3d..c5cbe296df0 100644
--- a/include/my_counter.h
+++ b/include/my_counter.h
@@ -1,7 +1,7 @@
 #ifndef MY_COUNTER_H_INCLUDED
 #define MY_COUNTER_H_INCLUDED
 /*
-  Copyright (C) 2018, 2020, MariaDB
+  Copyright (C) 2018 MariaDB Foundation
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -45,16 +45,5 @@ public:
   operator Type() const { return m_counter.load(std::memory_order_relaxed); }
   Type operator=(const Type val)
   { m_counter.store(val, std::memory_order_relaxed); return val; }
-  Type operator=(const Atomic_counter &rhs) { return *this= Type{rhs}; }
-
-  Type fetch_add(const Type i, std::memory_order m)
-  { return m_counter.fetch_add(i, m); }
-  Type fetch_sub(const Type i, std::memory_order m)
-  { return m_counter.fetch_sub(i, m); }
-  bool compare_exchange_strong(Type& i1, const Type i2,
-                               std::memory_order m1, std::memory_order m2)
-  { return m_counter.compare_exchange_strong(i1, i2, m1, m2); }
-  Type exchange(const Type i, std::memory_order m)
-  { return m_counter.exchange(i, m); }
 };
 #endif /* MY_COUNTER_H_INCLUDED */
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index c9abc03f682..e04d1dcbbe0 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -6158,10 +6158,7 @@ dict_index_zip_pad_update(
 	beyond max pad size. */
 	if (info->pad + ZIP_PAD_INCR
 	    < (srv_page_size * zip_pad_max) / 100) {
-		/* Use atomics even though we have the mutex.
-		This is to ensure that we are able to read
-		info->pad atomically. */
-		info->pad += ZIP_PAD_INCR;
+		info->pad.fetch_add(ZIP_PAD_INCR);
 
 		MONITOR_INC(MONITOR_PAD_INCREMENTS);
 	}
@@ -6178,11 +6175,7 @@ dict_index_zip_pad_update(
 	padding. */
 	if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT
 	    && info->pad > 0) {
-
-		/* Use atomics even though we have the mutex.
-		This is to ensure that we are able to read
-		info->pad atomically. */
-		info->pad -= ZIP_PAD_INCR;
+		info->pad.fetch_sub(ZIP_PAD_INCR);
 
 		info->n_rounds = 0;
 
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index b2be01ec637..78c723b8a76 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -931,7 +931,7 @@ an uncompressed page should be left as padding to avoid compression
 failures. This estimate is based on a self-adapting heuristic.
 */
 struct zip_pad_info_t {
 	SysMutex	mutex;	/*!< mutex protecting the info */
-	Atomic_counter<ulint>
+	Atomic_relaxed<ulint>
 			pad;	/*!< number of bytes used as pad */
 	ulint		success;/*!< successful compression ops during current round */
@@ -1107,10 +1107,10 @@
 	/* @} */
 private:
 	/** R-tree split sequence number */
-	Atomic_counter<node_seq_t> rtr_ssn;
+	Atomic_relaxed<node_seq_t> rtr_ssn;
 public:
 	void set_ssn(node_seq_t ssn) { rtr_ssn= ssn; }
-	node_seq_t assign_ssn() { return ++rtr_ssn; }
+	node_seq_t assign_ssn() { return rtr_ssn.fetch_add(1) + 1; }
 	node_seq_t ssn() const { return rtr_ssn; }
 
 	rtr_info_track_t*
diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h
index 6592988def8..48528eb4d30 100644
--- a/storage/innobase/include/sync0rw.h
+++ b/storage/innobase/include/sync0rw.h
@@ -569,10 +569,10 @@ struct rw_lock_t
 #endif /* UNIV_DEBUG */
 {
 	/** Holds the state of the lock. */
-	Atomic_counter<int32_t> lock_word;
+	Atomic_relaxed<int32_t> lock_word;
 
-	/** 1: there are waiters */
-	Atomic_counter<uint32_t> waiters;
+	/** 0=no waiters, 1=waiters for X or SX lock exist */
+	Atomic_relaxed<uint32_t> waiters;
 
 	/** number of granted SX locks. */
 	volatile ulint	sx_recursive;
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index 2a7b008a532..70723b05944 100644
--- a/storage/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
@@ -355,16 +355,15 @@ rw_lock_s_unlock_func(
 	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));
 
 	/* Increment lock_word to indicate 1 less reader */
-	int32_t	lock_word = ++lock->lock_word;
+	int32_t	lock_word = lock->lock_word.fetch_add(1);
 
-	if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
+	if (lock_word == -1 || lock_word == -X_LOCK_HALF_DECR - 1) {
 		/* wait_ex waiter exists. It may not be asleep, but we signal
 		anyway. We do not wake other waiters, because they can't
 		exist without wait_ex waiter and wait_ex waiter goes first.*/
 		os_event_set(lock->wait_ex_event);
 		sync_array_object_signalled();
 	} else {
-		ut_ad(--lock_word);
 		ut_ad(lock_word > -X_LOCK_DECR);
 		ut_ad(lock_word < X_LOCK_DECR);
 	}
@@ -414,11 +413,11 @@
 	} else if (lock_word == -X_LOCK_DECR
 		   || lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
 		/* There are 2 x-locks */
-		lock->lock_word += X_LOCK_DECR;
+		lock->lock_word.fetch_add(X_LOCK_DECR);
 	} else {
 		/* There are more than 2 x-locks. */
 		ut_ad(lock_word < -X_LOCK_DECR);
-		lock->lock_word++;
+		lock->lock_word.fetch_add(1);
 	}
 
 	ut_ad(rw_lock_validate(lock));
@@ -470,7 +469,7 @@ rw_lock_sx_unlock_func(
 			/* still has x-lock */
 			ut_ad(lock_word == -X_LOCK_HALF_DECR
 			      || lock_word <= -(X_LOCK_DECR
 						+ X_LOCK_HALF_DECR));
-			lock->lock_word += X_LOCK_HALF_DECR;
+			lock->lock_word.fetch_add(X_LOCK_HALF_DECR);
 		}
 	}
diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc
index 22bebdb33da..fea94cc05f9 100644
--- a/storage/innobase/sync/sync0rw.cc
+++ b/storage/innobase/sync/sync0rw.cc
@@ -528,10 +528,10 @@ rw_lock_x_lock_low(
 		exists. Add another. */
 		if (lock_word == 0
 		    || lock_word == -X_LOCK_HALF_DECR) {
-			lock->lock_word -= X_LOCK_DECR;
+			lock->lock_word.fetch_sub(X_LOCK_DECR);
 		} else {
 			ut_ad(lock_word <= -X_LOCK_DECR);
-			lock->lock_word--;
+			lock->lock_word.fetch_sub(1);
 		}
 
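
Note (not part of the patch): below is a minimal standalone sketch of
how Atomic_relaxed is intended to be used. The wrapper is condensed
from the my_atomic.h hunk above to just the members this example
needs; the main() function, its values, and the rtr_ssn-style naming
are illustrative assumptions, not code from the patch.

  #include <atomic>
  #include <cassert>

  /* Condensed copy of Atomic_relaxed from the patch: only a value
  constructor, the relaxed load/store operators, and fetch_add(). */
  template <typename Type> class Atomic_relaxed
  {
    std::atomic<Type> m;
  public:
    Atomic_relaxed(Type val) : m(val) {}
    operator Type() const { return m.load(std::memory_order_relaxed); }
    Type operator=(const Type val)
    { m.store(val, std::memory_order_relaxed); return val; }
    Type fetch_add(const Type i,
                   std::memory_order o= std::memory_order_relaxed)
    { return m.fetch_add(i, o); }
  };

  int main()
  {
    /* A hypothetical counter, mimicking dict_index_t::rtr_ssn */
    Atomic_relaxed<int> rtr_ssn{41};

    /* Plain reads and writes go through operator Type() and
    operator=(), i.e. a relaxed atomic load and store, which compile
    to ordinary loads and stores on IA-32 and AMD64. */
    int ssn = rtr_ssn;
    assert(ssn == 41);
    rtr_ssn = ssn;

    /* There is deliberately no operator++(): a read-modify-write must
    be spelled out, as in dict_index_t::assign_ssn() above. fetch_add()
    returns the value BEFORE the increment, hence the "+ 1" to obtain
    the newly assigned sequence number. */
    assert(rtr_ssn.fetch_add(1) + 1 == 42);
    assert(rtr_ssn == 42);
    return 0;
  }

The explicit rtr_ssn.fetch_add(1) + 1 in assign_ssn() computes the same
value as the ++rtr_ssn it replaces, but it makes the atomic
read-modify-write visible at the call site, which is the stated reason
for omitting the increment and compound-assignment operators.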