Mirror of https://github.com/tbsdtv/linux_media.git (synced 2025-07-23 12:43:29 +02:00)
seqlock/latch: Provide raw_read_seqcount_latch_retry()
The read side of seqcount_latch consists of:

	do {
		seq = raw_read_seqcount_latch(&latch->seq);
		...
	} while (read_seqcount_latch_retry(&latch->seq, seq));

which is asymmetric in the raw_ department, and sure enough, read_seqcount_latch_retry() includes (explicit) instrumentation where raw_read_seqcount_latch() does not.

This inconsistency becomes a problem when trying to use it from noinstr code. As such, fix it by renaming and re-implementing raw_read_seqcount_latch_retry() without the instrumentation.

Specifically, the instrumentation in question is kcsan_atomic_next(0) in do___read_seqcount_retry(). Losing this annotation is not a problem because raw_read_seqcount_latch() does not pass through kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Michael Kelley <mikelley@microsoft.com>  # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.233598176@infradead.org
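As an illustration of the pattern described above, here is a minimal sketch written against the renamed helpers. The type and function names (latched_u64, latched_u64_write(), latched_u64_read()) are hypothetical; the shape follows the seqcount_latch usage documented in seqlock.h and the printk latched_seq reader updated by this patch.

#include <linux/seqlock.h>

/* Hypothetical example type: a latch counter plus two copies of the data. */
struct latched_u64 {
	seqcount_latch_t	latch;
	u64			val[2];
};

/*
 * Writer side: bump the counter around each copy so readers always have one
 * stable copy to look at; the lowest counter bit selects which one.
 */
static void latched_u64_write(struct latched_u64 *l, u64 val)
{
	raw_write_seqcount_latch(&l->latch);
	l->val[0] = val;
	raw_write_seqcount_latch(&l->latch);
	l->val[1] = val;
}

/* Reader side: after this patch both ends of the loop are raw_ and uninstrumented. */
static __always_inline u64 latched_u64_read(struct latched_u64 *l)
{
	unsigned int seq;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&l->latch);
		val = l->val[seq & 1];
	} while (raw_read_seqcount_latch_retry(&l->latch, seq));

	return val;
}

Because neither helper now emits explicit instrumentation, a reader of this shape can be placed in noinstr code, which is the stated motivation for the rename.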
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
 	do {
 		seq = raw_read_seqcount_latch(&root->seq);
 		node = __lt_find(key, root, seq & 1, ops->comp);
-	} while (read_seqcount_latch_retry(&root->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&root->seq, seq));
 
 	return node;
 }
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -671,9 +671,9 @@ typedef struct {
  *
  * Return: sequence counter raw value. Use the lowest bit as an index for
  * picking which data copy to read. The full counter must then be checked
- * with read_seqcount_latch_retry().
+ * with raw_read_seqcount_latch_retry().
  */
-static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 {
 	/*
 	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
@@ -683,16 +683,17 @@ static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 }
 
 /**
- * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
  * @s: Pointer to seqcount_latch_t
  * @start: count, from raw_read_seqcount_latch()
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int
-read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
 {
-	return read_seqcount_retry(&s->seqcount, start);
+	smp_rmb();
+	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
 }
 
 /**
@@ -752,7 +753,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
  *		entry = data_query(latch->data[idx], ...);
  *
  *		// This includes needed smp_rmb()
- *	} while (read_seqcount_latch_retry(&latch->seq, seq));
+ *	} while (raw_read_seqcount_latch_retry(&latch->seq, seq));
  *
  *	return entry;
  * }
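For context on what the rename drops, the following is an approximate reconstruction (not a verbatim copy of seqlock.h) of the path the old read_seqcount_latch_retry() went through; the commit message pins the offending annotation to kcsan_atomic_next(0) in do___read_seqcount_retry().

/* Approximate shape of the old, instrumented retry path. */
static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);		/* the explicit KCSAN instrumentation */
	return unlikely(READ_ONCE(s->sequence) != start);
}

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return do___read_seqcount_retry(s, start);
}

The new raw_read_seqcount_latch_retry() open-codes only the smp_rmb() and the READ_ONCE() comparison, so no KCSAN call is emitted and the helper is usable from noinstr code; per the commit message, nothing is lost because raw_read_seqcount_latch() never passed through kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX) in the first place.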
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -528,7 +528,7 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls)
 		seq = raw_read_seqcount_latch(&ls->latch);
 		idx = seq & 0x1;
 		val = ls->val[idx];
-	} while (read_seqcount_latch_retry(&ls->latch, seq));
+	} while (raw_read_seqcount_latch_retry(&ls->latch, seq));
 
 	return val;
 }
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -77,7 +77,7 @@ notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 
 notrace int sched_clock_read_retry(unsigned int seq)
 {
-	return read_seqcount_latch_retry(&cd.seq, seq);
+	return raw_read_seqcount_latch_retry(&cd.seq, seq);
 }
 
 unsigned long long notrace sched_clock(void)
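A hedged sketch of how a caller might consume the sched_clock read API shown in this hunk; the function below is illustrative, and the clock_read_data member access is an assumption about include/linux/sched_clock.h rather than part of the patch.

#include <linux/sched_clock.h>

/* Illustrative caller: sample the raw clock under the latch retry loop. */
static u64 example_read_cycles(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 cyc;

	do {
		rd = sched_clock_read_begin(&seq);
		cyc = rd->read_sched_clock();	/* assumed clock_read_data member */
	} while (sched_clock_read_retry(seq));

	return cyc;
}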
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -450,7 +450,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base);
 		now += fast_tk_get_delta_ns(tkr);
-	} while (read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 
 	return now;
 }
@@ -566,7 +566,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 		basem = ktime_to_ns(tkr->base);
 		baser = ktime_to_ns(tkr->base_real);
 		delta = fast_tk_get_delta_ns(tkr);
-	} while (read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 
 	if (mono)
 		*mono = basem + delta;
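The two timekeeping hunks above sit on the NMI-safe fast path. A hedged usage note follows: the accessor named in the sketch (ktime_get_mono_fast_ns()) is part of the timekeeping API, while the surrounding function is hypothetical.

#include <linux/timekeeping.h>

/*
 * Hypothetical consumer: code that may run in NMI context uses the lockless
 * fast accessor built on __ktime_get_fast_ns() instead of ktime_get(), whose
 * retry loop could spin indefinitely if the NMI interrupted a timekeeper
 * update.
 */
static u64 example_nmi_timestamp(void)
{
	return ktime_get_mono_fast_ns();
}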
|