Avoid memory barrier in read_seqcount() through load acquire
author    Christoph Lameter (Ampere) <cl@gentwo.org>
          Wed, 28 Aug 2024 17:00:41 +0000 (10:00 -0700)
committer Christoph Lameter <cl@gentwo.org>
          Wed, 28 Aug 2024 17:00:41 +0000 (10:00 -0700)
    Some architectures support load acquire, which can save us a memory
    barrier and some cycles.

    A typical sequence

            do {
                    seq = read_seqcount_begin(&s);
                    <something>
            } while (read_seqcount_retry(&s, seq));

    requires 13 cycles on ARM64 for an empty loop. Two read memory
    barriers are needed, one for each of the seqcount_* functions.

    We can replace the first read barrier with a load acquire of
    the seqcount, which saves us one barrier.
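
    Conceptually the change looks as follows (a minimal sketch, not
    the exact kernel code):

            /* Before: relaxed load followed by a read barrier */
            seq = READ_ONCE(s->sequence);
            smp_rmb();

            /* After: a single acquire load provides the ordering */
            seq = smp_load_acquire(&s->sequence);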

    On ARM64 doing so reduces the cycle count from 13 to 8.

    This is a general improvement for the ARM64 architecture and not
    specific to a certain processor. The cycle count here was
    obtained on a Neoverse N1 (Ampere Altra).

    The ARM documentation states that load acquire is more effective
    than a load plus barrier. In general that tends to be true on all
    compute platforms that support both.

    See (as quoted by Linus Torvalds):
       https://developer.arm.com/documentation/102336/0100/Load-Acquire-and-Store-Release-instructions

     "Weaker ordering requirements that are imposed by Load-Acquire and
      Store-Release instructions allow for micro-architectural
      optimizations, which could reduce some of the performance impacts that
      are otherwise imposed by an explicit memory barrier.

      If the ordering requirement is satisfied using either a Load-Acquire
      or Store-Release, then it would be preferable to use these
      instructions instead of a DMB"
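
    On ARM64 this roughly corresponds to the following instruction
    sequences (a sketch of what the compiler typically emits, not
    verbatim kernel output):

            /* Load plus barrier */
            seq = READ_ONCE(s->sequence);           /* ldr  w0, [x1] */
            smp_rmb();                              /* dmb  ishld    */

            /* Load acquire */
            seq = smp_load_acquire(&s->sequence);   /* ldar w0, [x1] */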

    Macro work by Thomas Gleixner.

Signed-off-by: Christoph Lameter (Ampere) <cl@gentwo.org>
arch/Kconfig
arch/arm64/Kconfig
include/linux/seqlock.h

diff --git a/arch/Kconfig b/arch/Kconfig
index 975dd22a2dbd22dc1fa7ef727149d99e5ebcbe0a..3c270f496231a9b5558d4a8af30d49db54b478d8 100644
@@ -1600,6 +1600,14 @@ config ARCH_HAS_KERNEL_FPU_SUPPORT
          Architectures that select this option can run floating-point code in
          the kernel, as described in Documentation/core-api/floating-point.rst.
 
+config ARCH_HAS_ACQUIRE_RELEASE
+       bool
+       help
+         Setting ARCH_HAS_ACQUIRE_RELEASE indicates that the architecture
+         supports load acquire and release. Typically these are more effective
+         than memory barriers. Code will prefer the use of load acquire and
+         store release over memory barriers if this option is enabled.
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a2f8ff354ca670af50e44c790c6c201b63aa0ddb..19e34fff145f32d689c8312b55a56df5a2217f7d 100644
@@ -39,6 +39,7 @@ config ARM64
        select ARCH_HAS_PTE_DEVMAP
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_HW_PTE_YOUNG
+       select ARCH_HAS_ACQUIRE_RELEASE
        select ARCH_HAS_SETUP_DMA_OPS
        select ARCH_HAS_SET_DIRECT_MAP
        select ARCH_HAS_SET_MEMORY
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index d90d8ee29d811ecfd489581eb43a1efab3f37cdd..68b3af8bd6c604e443d07f533c11c239fd22b97c 100644
@@ -132,6 +132,14 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
 #define seqcount_rwlock_init(s, lock)          seqcount_LOCKNAME_init(s, lock, rwlock)
 #define seqcount_mutex_init(s, lock)           seqcount_LOCKNAME_init(s, lock, mutex)
 
+static __always_inline unsigned __seqprop_load_sequence(const seqcount_t *s, bool acquire)
+{
+       if (acquire && IS_ENABLED(CONFIG_ARCH_HAS_ACQUIRE_RELEASE))
+               return smp_load_acquire(&s->sequence);
+       else
+               return READ_ONCE(s->sequence);
+}
+
 /*
  * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
  * seqprop_LOCKNAME_*()        - Property accessors for seqcount_LOCKNAME_t
@@ -155,9 +163,10 @@ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
 }                                                                      \
                                                                        \
 static __always_inline unsigned                                                \
-__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)      \
+__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s,      \
+                               bool acquire)                           \
 {                                                                      \
-       unsigned seq = READ_ONCE(s->seqcount.sequence);                 \
+       unsigned seq = __seqprop_load_sequence(&s->seqcount, acquire);  \
                                                                        \
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))                             \
                return seq;                                             \
@@ -170,7 +179,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)   \
                 * Re-read the sequence counter since the (possibly     \
                 * preempted) writer made progress.                     \
                 */                                                     \
-               seq = READ_ONCE(s->seqcount.sequence);                  \
+               seq = __seqprop_load_sequence(&s->seqcount, acquire);   \
        }                                                               \
                                                                        \
        return seq;                                                     \
@@ -206,9 +215,9 @@ static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
        return s;
 }
 
-static inline unsigned __seqprop_sequence(const seqcount_t *s)
+static inline unsigned __seqprop_sequence(const seqcount_t *s, bool acquire)
 {
-       return READ_ONCE(s->sequence);
+       return __seqprop_load_sequence(s, acquire);
 }
 
 static inline bool __seqprop_preemptible(const seqcount_t *s)
@@ -258,35 +267,49 @@ SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
 
 #define seqprop_ptr(s)                 __seqprop(s, ptr)(s)
 #define seqprop_const_ptr(s)           __seqprop(s, const_ptr)(s)
-#define seqprop_sequence(s)            __seqprop(s, sequence)(s)
+#define seqprop_sequence(s, a)         __seqprop(s, sequence)(s, a)
 #define seqprop_preemptible(s)         __seqprop(s, preemptible)(s)
 #define seqprop_assert(s)              __seqprop(s, assert)(s)
 
 /**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
- *
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
- * provided before actually loading any of the variables that are to be
- * protected in this critical section.
- *
- * Use carefully, only in critical code, and comment how the barrier is
- * provided.
+ * read_seqcount_begin_cond_acquire() - begin a seqcount_t read section
+ * @s:      Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @acquire: If true, the read of the sequence count uses smp_load_acquire()
+ *          if the architecture provides it and it is enabled.
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-#define __read_seqcount_begin(s)                                       \
+#define read_seqcount_begin_cond_acquire(s, acquire)                   \
 ({                                                                     \
        unsigned __seq;                                                 \
                                                                        \
-       while ((__seq = seqprop_sequence(s)) & 1)                       \
+       while ((__seq = seqprop_sequence(s, acquire)) & 1)              \
                cpu_relax();                                            \
                                                                        \
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);                    \
        __seq;                                                          \
 })
 
+/**
+ * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * __read_seqcount_begin is like read_seqcount_begin, but it neither
+ * provides a smp_rmb() barrier nor does it use smp_load_acquire() on
+ * architectures which provide it.
+ *
+ * Callers should ensure that smp_rmb() or equivalent ordering is provided
+ * before actually loading any of the variables that are to be protected in
+ * this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ *
+ * Return: count to be passed to read_seqcount_retry()
+ */
+#define __read_seqcount_begin(s)                                       \
+       read_seqcount_begin_cond_acquire(s, false)
+
 /**
  * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
@@ -295,9 +318,10 @@ SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
  */
 #define raw_read_seqcount_begin(s)                                     \
 ({                                                                     \
-       unsigned _seq = __read_seqcount_begin(s);                       \
+       unsigned _seq = read_seqcount_begin_cond_acquire(s, true);      \
                                                                        \
-       smp_rmb();                                                      \
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_ACQUIRE_RELEASE))               \
+               smp_rmb();                                              \
        _seq;                                                           \
 })
 
@@ -326,9 +350,10 @@ SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
  */
 #define raw_read_seqcount(s)                                           \
 ({                                                                     \
-       unsigned __seq = seqprop_sequence(s);                           \
+       unsigned __seq = seqprop_sequence(s, true);                     \
                                                                        \
-       smp_rmb();                                                      \
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_ACQUIRE_RELEASE))               \
+               smp_rmb();                                              \
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);                    \
        __seq;                                                          \
 })
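
For reference, a reader that benefits from this change follows the typical
sequence from the commit message, reaching the raw_read_seqcount_begin()
patched above. A minimal sketch (the seqcount and the protected variables
are placeholders, not part of this patch):

        #include <linux/seqlock.h>

        static seqcount_t sc = SEQCNT_ZERO(sc);
        static unsigned int protected_a, protected_b;

        static void read_pair(unsigned int *a, unsigned int *b)
        {
                unsigned int seq;

                do {
                        /*
                         * With CONFIG_ARCH_HAS_ACQUIRE_RELEASE this begins
                         * with an acquire load instead of a load plus
                         * smp_rmb().
                         */
                        seq = raw_read_seqcount_begin(&sc);
                        *a = protected_a;
                        *b = protected_b;
                } while (read_seqcount_retry(&sc, seq));
        }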