percpu_ref: replace pcpu_ prefix with percpu_

percpu_ref uses the pcpu_ prefix for internal symbols and percpu_ for
externally visible ones.  This is the same convention used in the
percpu allocator implementation.  It works fine there, but percpu_ref
doesn't have much internal-only stuff, and the scattered uses of the
pcpu_ prefix are more confusing than helpful.

This patch replaces all pcpu_ prefixes with percpu_.  This is a pure
rename and there is no functional change.  Note that PCPU_REF_DEAD is
renamed to __PERCPU_REF_DEAD to signify that the flag is internal.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Author: Tejun Heo <tj@kernel.org>
Date:   2014-09-24 13:31:48 -04:00
Commit: eecc16ba9a (parent: 6251f9976a)
2 changed files with 52 additions and 50 deletions


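For context, the externally visible percpu_-prefixed API that this rename leaves
untouched is used roughly as follows.  This is a minimal caller-side sketch, not
code from this patch; the my_object struct and its helpers are hypothetical names
for illustration:

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct my_object {			/* hypothetical container */
		struct percpu_ref ref;
	};

	static void my_object_release(struct percpu_ref *ref)
	{
		struct my_object *obj = container_of(ref, struct my_object, ref);

		percpu_ref_exit(&obj->ref);	/* free the percpu counters */
		kfree(obj);			/* final reference dropped */
	}

	static struct my_object *my_object_create(gfp_t gfp)
	{
		struct my_object *obj = kzalloc(sizeof(*obj), gfp);

		if (!obj)
			return NULL;
		/* count starts at 1, operating in the fast percpu mode */
		if (percpu_ref_init(&obj->ref, my_object_release, gfp)) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}

	static void my_object_use(struct my_object *obj)
	{
		percpu_ref_get(&obj->ref);	/* hot path: per-cpu increment */
		/* ... */
		percpu_ref_put(&obj->ref);	/* hot path: per-cpu decrement */
	}

	static void my_object_destroy(struct my_object *obj)
	{
		/* switch to atomic mode; release() runs once the count hits 0 */
		percpu_ref_kill(&obj->ref);
	}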
@@ -11,8 +11,8 @@
  * percpu counters will all sum to the correct value
  *
  * (More precisely: because moduler arithmatic is commutative the sum of all the
- * pcpu_count vars will be equal to what it would have been if all the gets and
- * puts were done to a single integer, even if some of the percpu integers
+ * percpu_count vars will be equal to what it would have been if all the gets
+ * and puts were done to a single integer, even if some of the percpu integers
  * overflow or underflow).
  *
  * The real trick to implementing percpu refcounts is shutdown. We can't detect
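The modular-arithmetic point in the comment above can be made concrete with a
tiny (non-kernel) example: each per-cpu counter may individually wrap, but their
unsigned sum still equals the true delta.

	unsigned long cpu0 = 0, cpu1 = 0;

	cpu0 -= 1;	/* a put executed on CPU0: wraps to ULONG_MAX */
	cpu1 += 1;	/* the matching get happened on CPU1 */

	/* cpu0 + cpu1 == 0: the wraparound cancels out in the total */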
@@ -29,11 +29,12 @@
  * atomic_long_t can't hit 0 before we've added up all the percpu refs.
  */
 
-#define PCPU_COUNT_BIAS		(1LU << (BITS_PER_LONG - 1))
+#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
 
-static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 {
-	return (unsigned long __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+	return (unsigned long __percpu *)
+		(ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
 }
 
 /**
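Two details in the hunk above are worth spelling out.  PERCPU_COUNT_BIAS keeps
the shared atomic counter large and positive while references are still tracked
in the percpu counters, and the DEAD flag is stored in the percpu pointer word
itself, which is why percpu_count_ptr() masks it off.  A sketch of the flag
handling, assuming the header defines __PERCPU_REF_DEAD as the low bit of the
(at least word-aligned) pointer:

	/* assumption: the header defines the flag as the low pointer bit */
	#define __PERCPU_REF_DEAD	(1LU << 0)

	bool dead = ref->percpu_count_ptr & __PERCPU_REF_DEAD;
	unsigned long __percpu *counters =
		(unsigned long __percpu *)(ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);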
@@ -51,10 +52,11 @@ static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 		    gfp_t gfp)
 {
-	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
 
-	ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned long, gfp);
-	if (!ref->pcpu_count_ptr)
+	ref->percpu_count_ptr =
+		(unsigned long)alloc_percpu_gfp(unsigned long, gfp);
+	if (!ref->percpu_count_ptr)
 		return -ENOMEM;
 
 	ref->release = release;
@@ -74,11 +76,11 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
  */
 void percpu_ref_exit(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 
-	if (pcpu_count) {
-		free_percpu(pcpu_count);
-		ref->pcpu_count_ptr = PCPU_REF_DEAD;
+	if (percpu_count) {
+		free_percpu(percpu_count);
+		ref->percpu_count_ptr = __PERCPU_REF_DEAD;
 	}
 }
 EXPORT_SYMBOL_GPL(percpu_ref_exit);
@@ -86,14 +88,14 @@ EXPORT_SYMBOL_GPL(percpu_ref_exit);
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	unsigned long count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		count += *per_cpu_ptr(pcpu_count, cpu);
+		count += *per_cpu_ptr(percpu_count, cpu);
 
-	pr_debug("global %ld pcpu %ld",
+	pr_debug("global %ld percpu %ld",
 		 atomic_long_read(&ref->count), (long)count);
 
 	/*
@@ -108,7 +110,7 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	 * reaching 0 before we add the percpu counts. But doing it at the same
 	 * time is equivalent and saves us atomic operations:
 	 */
-	atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);
+	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
 
 	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
 		  "percpu ref (%pf) <= 0 (%ld) after killed",
@@ -143,10 +145,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
 		  "%s called more than once on %pf!", __func__, ref->release);
 
-	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
 	ref->confirm_kill = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
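Taken together with percpu_ref_kill_rcu() above, the bias arithmetic works out
as follows (a worked example with illustrative numbers).  The RCU-sched grace
period guarantees that no CPU is still bumping a percpu counter by the time the
summation runs; a sketch of the reader side follows the last hunk below.

	/*
	 * init:     atomic count = 1 + PERCPU_COUNT_BIAS
	 * runtime:  gets/puts touch only the percpu counters; their
	 *           wrapping sum is the net delta D
	 * kill:     atomic_long_add(D - PERCPU_COUNT_BIAS, &ref->count)
	 *           => count = (1 + BIAS) + (D - BIAS) = 1 + D
	 *
	 * e.g. with two outstanding gets beyond the initial reference,
	 * D == 2 and the atomic count lands on 3, never having dipped
	 * to 0 while percpu counts were still unaccounted for.
	 */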
@@ -166,24 +168,24 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	int cpu;
 
-	BUG_ON(!pcpu_count);
+	BUG_ON(!percpu_count);
 	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
-	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
 
 	/*
 	 * Restore per-cpu operation. smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
-	 * that the zeroing is visible to all percpu accesses which can see
-	 * the following PCPU_REF_DEAD clearing.
+	 * smp_read_barrier_depends() in __percpu_ref_alive() and
+	 * guarantees that the zeroing is visible to all percpu accesses
+	 * which can see the following __PERCPU_REF_DEAD clearing.
 	 */
 	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(pcpu_count, cpu) = 0;
+		*per_cpu_ptr(percpu_count, cpu) = 0;
 
-	smp_store_release(&ref->pcpu_count_ptr,
-			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+	smp_store_release(&ref->percpu_count_ptr,
+			  ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
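The get/put fast paths live in include/linux/percpu-refcount.h and are not part
of this diff; the following is a rough paraphrase from memory, not a quote.  It
shows why an RCU-sched grace period is enough on the kill side (the fast path
runs under rcu_read_lock_sched()) and where the smp_store_release() above finds
its reader-side pairing:

	static inline bool __percpu_ref_alive(struct percpu_ref *ref,
					      unsigned long __percpu **percpu_countp)
	{
		unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);

		/* paired with smp_store_release() in percpu_ref_reinit() */
		smp_read_barrier_depends();

		if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
			return false;

		*percpu_countp = (unsigned long __percpu *)percpu_ptr;
		return true;
	}

	static inline void percpu_ref_get(struct percpu_ref *ref)
	{
		unsigned long __percpu *percpu_count;

		rcu_read_lock_sched();

		if (__percpu_ref_alive(ref, &percpu_count))
			this_cpu_inc(*percpu_count);	/* fast, per-cpu path */
		else
			atomic_long_inc(&ref->count);	/* dead: shared atomic */

		rcu_read_unlock_sched();
	}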