mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 04:33:26 +02:00
time/sched_clock: Provide sched_clock_noinstr()
With the intent to provide local_clock_noinstr(), a variant of local_clock() that's safe to be called from noinstr code (with the assumption that any such code will already be non-preemptible), prepare for things by providing a noinstr sched_clock_noinstr() function. Specifically, preempt_enable_*() calls out to schedule(), which upsets noinstr validation efforts. As such, pull out the preempt_{dis,en}able_notrace() requirements from the sched_clock_read() implementations by explicitly providing it in the sched_clock() function. This further requires said sched_clock_read() functions to be noinstr themselves, for ARCH_WANTS_NO_INSTR users. See the next few patches. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Michael Kelley <mikelley@microsoft.com> # Hyper-V Link: https://lore.kernel.org/r/20230519102715.302350330@infradead.org
This commit is contained in:
@@ -64,7 +64,7 @@ static struct clock_data cd ____cacheline_aligned = {
 	.actual_read_sched_clock = jiffy_sched_clock_read,
 };
 
-static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }
@@ -80,23 +80,33 @@ notrace int sched_clock_read_retry(unsigned int seq)
 	return raw_read_seqcount_latch_retry(&cd.seq, seq);
 }
 
-unsigned long long notrace sched_clock(void)
+unsigned long long noinstr sched_clock_noinstr(void)
 {
-	u64 cyc, res;
-	unsigned int seq;
 	struct clock_read_data *rd;
+	unsigned int seq;
+	u64 cyc, res;
 
 	do {
-		rd = sched_clock_read_begin(&seq);
+		seq = raw_read_seqcount_latch(&cd.seq);
+		rd = cd.read_data + (seq & 1);
 
 		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 		      rd->sched_clock_mask;
 		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
-	} while (sched_clock_read_retry(seq));
+	} while (raw_read_seqcount_latch_retry(&cd.seq, seq));
 
 	return res;
 }
 
+unsigned long long notrace sched_clock(void)
+{
+	unsigned long long ns;
+
+	preempt_disable_notrace();
+	ns = sched_clock_noinstr();
+	preempt_enable_notrace();
+
+	return ns;
+}
+
 /*
  * Updating the data required to read the clock.
  *
Reference in New Issue
Block a user