Avoid memory barrier in read_seqcount() through load acquire
author    Christoph Lameter (Ampere) <cl@gentwo.org>
          Tue, 13 Aug 2024 18:09:01 +0000 (11:09 -0700)
committer Christoph Lameter <cl@gentwo.org>
          Tue, 24 Sep 2024 19:04:30 +0000 (12:04 -0700)
Some architectures support load acquire, which can save us a memory
barrier and some cycles.

A typical sequence

do {
	seq = read_seqcount_begin(&s);
	<something>
} while (read_seqcount_retry(&s, seq));

requires 13 cycles on ARM64 for an empty loop, because two read memory
barriers are needed: one for each of the seqcount_*() functions.

Replace the first read barrier with a load acquire of
the seqcount which saves one barrier.

On ARM64 doing so reduces the cycle count from 13 to 8.

This is a general improvement for the ARM64 architecture and not
specific to a certain processor. The cycle count here was
obtained on a Neoverse N1 (Ampere Altra).

Further optimization is possible through the use of the
smp_cond_load_acquire() logic, which gives an ARM CPU a chance to enter
a power-saving state while waiting for the cacheline to change, avoiding
busy loops and thereby saving power.

The ARM documentation states that load acquire is more effective
than a load plus barrier. In general that tends to be true on all
compute platforms that support both.

See (as quoted by Linus Torvalds):
   https://developer.arm.com/documentation/102336/0100/Load-Acquire-and-Store-Release-instructions

 "Weaker ordering requirements that are imposed by Load-Acquire and
  Store-Release instructions allow for micro-architectural
  optimizations, which could reduce some of the performance impacts that
  are otherwise imposed by an explicit memory barrier.

  If the ordering requirement is satisfied using either a Load-Acquire
  or Store-Release, then it would be preferable to use these
  instructions instead of a DMB"

This patch benefited significantly from Thomas Gleixner's knowledge of
the seqlock internals.

Signed-off-by: Christoph Lameter (Ampere) <cl@gentwo.org>
drivers/gpu/drm/i915/gt/intel_tlb.h
include/linux/seqlock.h

index 337327af92ac43db4c1b21b125d99dd20db8a9e1..81998c4cd4fbcc012df579c96b05370a19162b95 100644 (file)
@@ -18,7 +18,7 @@ void intel_gt_fini_tlb(struct intel_gt *gt);
 
 static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
 {
-       return seqprop_sequence(&gt->tlb.seqno);
+       return seqprop_sequence(&gt->tlb.seqno, false);
 }
 
 static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
index fffeb754880fca84672e89cd8dfdaa973a6e7a47..76faed7ebf066e7afb4506a9fe86c92a719855b2 100644 (file)
@@ -132,6 +132,17 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
 #define seqcount_rwlock_init(s, lock)          seqcount_LOCKNAME_init(s, lock, rwlock)
 #define seqcount_mutex_init(s, lock)           seqcount_LOCKNAME_init(s, lock, mutex)
 
+static __always_inline unsigned __seqprop_load_sequence(const seqcount_t *s, bool acquire)
+{
+       if (!acquire)
+               return READ_ONCE(s->sequence);
+
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+               return smp_cond_load_acquire((unsigned int *)&s->sequence, (VAL & 1) == 0);
+
+       return smp_load_acquire(&s->sequence);
+}
+
 /*
  * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
  * seqprop_LOCKNAME_*()        - Property accessors for seqcount_LOCKNAME_t
@@ -155,9 +166,10 @@ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
 }                                                                      \
                                                                        \
 static __always_inline unsigned                                                \
-__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)      \
+__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s,      \
+                               bool acquire)                           \
 {                                                                      \
-       unsigned seq = smp_load_acquire(&s->seqcount.sequence);         \
+       unsigned seq = __seqprop_load_sequence(&s->seqcount, acquire);  \
                                                                        \
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))                             \
                return seq;                                             \
@@ -170,7 +182,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)   \
                 * Re-read the sequence counter since the (possibly     \
                 * preempted) writer made progress.                     \
                 */                                                     \
-               seq = smp_load_acquire(&s->seqcount.sequence);          \
+               seq = __seqprop_load_sequence(&s->seqcount, acquire);   \
        }                                                               \
                                                                        \
        return seq;                                                     \
@@ -206,9 +218,9 @@ static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
        return s;
 }
 
-static inline unsigned __seqprop_sequence(const seqcount_t *s)
+static inline unsigned __seqprop_sequence(const seqcount_t *s, bool acquire)
 {
-       return smp_load_acquire(&s->sequence);
+       return __seqprop_load_sequence(s, acquire);
 }
 
 static inline bool __seqprop_preemptible(const seqcount_t *s)
@@ -258,34 +270,65 @@ SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
 
 #define seqprop_ptr(s)                 __seqprop(s, ptr)(s)
 #define seqprop_const_ptr(s)           __seqprop(s, const_ptr)(s)
-#define seqprop_sequence(s)            __seqprop(s, sequence)(s)
+#define seqprop_sequence(s, a)         __seqprop(s, sequence)(s, a)
 #define seqprop_preemptible(s)         __seqprop(s, preemptible)(s)
 #define seqprop_assert(s)              __seqprop(s, assert)(s)
 
 /**
- * __read_seqcount_begin() - begin a seqcount_t read section
- * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * read_seqcount_begin_cond_acquire() - begin a seqcount_t read section
+ * @s:      Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @acquire: If true, the sequence count is read with smp_load_acquire()
+ *          if the architecture provides it and it is enabled.
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-#define __read_seqcount_begin(s)                                       \
+#define read_seqcount_begin_cond_acquire(s, acquire)                   \
 ({                                                                     \
        unsigned __seq;                                                 \
                                                                        \
-       while ((__seq = seqprop_sequence(s)) & 1)                       \
-               cpu_relax();                                            \
+       if (acquire && !IS_ENABLED(CONFIG_PREEMPT_RT)) {                \
+               __seq = seqprop_sequence(s, acquire);                   \
+       } else {                                                        \
+               while ((__seq = seqprop_sequence(s, acquire)) & 1)      \
+                       cpu_relax();                                    \
+       }                                                               \
                                                                        \
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);                    \
        __seq;                                                          \
 })
 
+/**
+ * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * __read_seqcount_begin is like read_seqcount_begin, but it neither
+ * provides an smp_rmb() barrier nor does it use smp_load_acquire() on
+ * architectures which provide it.
+ *
+ * Callers should ensure that smp_rmb() or equivalent ordering is provided
+ * before actually loading any of the variables that are to be protected in
+ * this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ *
+ * Return: count to be passed to read_seqcount_retry()
+ */
+#define __read_seqcount_begin(s)                                       \
+       read_seqcount_begin_cond_acquire(s, false)
+
 /**
  * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
+#define raw_read_seqcount_begin(s)                                     \
+({                                                                     \
+       unsigned _seq = read_seqcount_begin_cond_acquire(s, true);      \
+                                                                       \
+       _seq;                                                           \
+})
 
 /**
  * read_seqcount_begin() - begin a seqcount_t read critical section
@@ -312,7 +355,7 @@ SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
  */
 #define raw_read_seqcount(s)                                           \
 ({                                                                     \
-       unsigned __seq = seqprop_sequence(s);                           \
+       unsigned __seq = seqprop_sequence(s, true);                     \
                                                                        \
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);                    \
        __seq;                                                          \