Merge tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Introduce cmpxchg128() -- aka. the demise of cmpxchg_double()

   The cmpxchg128() family of functions is basically & functionally the
   same as cmpxchg_double(), but with a saner interface.

   Instead of a 6-parameter horror that forced u128 - u64/u64-halves
   layout details on the interface and exposed users to complexity,
   fragility & bugs, use a natural 3-parameter interface with u128
   types.

 - Restructure the generated atomic headers, and add kerneldoc comments
   for all of the generic atomic{,64,_long}_t operations.

   The generated definitions are much cleaner now, and come with
   documentation.

 - Implement lock_set_cmp_fn() on lockdep, for defining an ordering when
   taking multiple locks of the same type.

   This gets rid of one use of lockdep_set_novalidate_class() in the
   bcache code.

 - Fix raw_cpu_generic_try_cmpxchg() bug due to an unintended variable
   shadowing generating garbage code on Clang on certain ARM builds.

* tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits)
  locking/atomic: scripts: fix ${atomic}_dec_if_positive() kerneldoc
  percpu: Fix self-assignment of __old in raw_cpu_generic_try_cmpxchg()
  locking/atomic: treewide: delete arch_atomic_*() kerneldoc
  locking/atomic: docs: Add atomic operations to the driver basic API documentation
  locking/atomic: scripts: generate kerneldoc comments
  docs: scripts: kernel-doc: accept bitwise negation like ~@var
  locking/atomic: scripts: simplify raw_atomic*() definitions
  locking/atomic: scripts: simplify raw_atomic_long*() definitions
  locking/atomic: scripts: split pfx/name/sfx/order
  locking/atomic: scripts: restructure fallback ifdeffery
  locking/atomic: scripts: build raw_atomic_long*() directly
  locking/atomic: treewide: use raw_atomic*_<op>()
  locking/atomic: scripts: add trivial raw_atomic*_<op>()
  locking/atomic: scripts: factor out order template generation
  locking/atomic: scripts: remove leftover "${mult}"
  locking/atomic: scripts: remove bogus order parameter
  locking/atomic: xtensa: add preprocessor symbols
  locking/atomic: x86: add preprocessor symbols
  locking/atomic: sparc: add preprocessor symbols
  locking/atomic: sh: add preprocessor symbols
  ...
This commit is contained in:
Linus Torvalds
2023-06-27 14:14:30 -07:00
136 changed files with 10107 additions and 4346 deletions

View File

@@ -36,9 +36,16 @@ meta_has_relaxed()
meta_in "$1" "BFIR"
}
#find_fallback_template(pfx, name, sfx, order)
find_fallback_template()
#meta_is_implicitly_relaxed(meta)
# True when the op's meta character is one of "vls" — presumably the
# load/store/void classes that have no ordering variants (TODO confirm
# against the meta legend in the ops table).
meta_is_implicitly_relaxed()
{
	local meta="$1"
	meta_in "${meta}" "vls"
}
#find_template(tmpltype, pfx, name, sfx, order)
find_template()
{
local tmpltype="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
@@ -52,8 +59,8 @@ find_fallback_template()
#
# Start at the most specific, and fall back to the most general. Once
# we find a specific fallback, don't bother looking for more.
for base in "${pfx}${name}${sfx}${order}" "${name}"; do
file="${ATOMICDIR}/fallbacks/${base}"
for base in "${pfx}${name}${sfx}${order}" "${pfx}${name}${sfx}" "${name}"; do
file="${ATOMICDIR}/${tmpltype}/${base}"
if [ -f "${file}" ]; then
printf "${file}"
@@ -62,6 +69,18 @@ find_fallback_template()
done
}
#find_fallback_template(pfx, name, sfx, order)
# Locate the fallback template for this op, if one exists.
find_fallback_template() { find_template "fallbacks" "$@"; }
#find_kerneldoc_template(pfx, name, sfx, order)
# Locate the kerneldoc template for this op, if one exists.
find_kerneldoc_template() { find_template "kerneldoc" "$@"; }
#gen_ret_type(meta, int)
gen_ret_type() {
local meta="$1"; shift
@@ -142,6 +161,91 @@ gen_args()
done
}
#gen_desc_return(meta)
# Print the kerneldoc "Return:" line matching the op's meta character.
# Meta characters without a match (e.g. boolean ops, which provide their
# own Return text in their templates) print nothing.
gen_desc_return()
{
	local meta="$1"; shift

	case "${meta}" in
	v)   printf "Return: Nothing." ;;
	F|f) printf "Return: The original value of @v." ;;
	R)   printf "Return: The updated value of @v." ;;
	l)   printf "Return: The value of @v." ;;
	esac
}
#gen_template_kerneldoc(template, class, meta, pfx, name, sfx, order, atomic, int, args...)
# Source a kerneldoc template file with every variable it may reference
# (ret, retstmt, params, args, desc_order, desc_noinstr, desc_return)
# prepared in scope. The template itself emits the comment via a heredoc.
gen_template_kerneldoc()
{
local template="$1"; shift
local class="$1"; shift
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local atomic="$1"; shift
local int="$1"; shift
local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "${int}")"
local retstmt="$(gen_ret_stmt "${meta}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local args="$(gen_args "$@")"
local desc_order=""
# NOTE(review): desc_instrumentation is declared but never assigned in
# this function; templates use desc_noinstr instead — confirm it is dead.
local desc_instrumentation=""
local desc_return=""
# Describe the ordering: an explicit "_acquire"/"_release"/"_relaxed"
# suffix wins; otherwise loads/stores are implicitly relaxed, and all
# remaining ops are fully ordered.
if [ ! -z "${order}" ]; then
desc_order="${order##_}"
elif meta_is_implicitly_relaxed "${meta}"; then
desc_order="relaxed"
else
desc_order="full"
fi
# An empty class means the raw_ variant; point readers at the other form.
# NOTE(review): desc_noinstr is assigned without a `local` declaration,
# so it leaks into the caller's scope — confirm this is intentional.
if [ -z "${class}" ]; then
desc_noinstr="Unsafe to use in noinstr code; use raw_${atomicname}() there."
else
desc_noinstr="Safe to use in noinstr code; prefer ${atomicname}() elsewhere."
fi
desc_return="$(gen_desc_return "${meta}")"
. ${template}
}
#gen_kerneldoc(class, meta, pfx, name, sfx, order, atomic, int, args...)
# Emit a kerneldoc comment for the given op, or a placeholder comment
# when no kerneldoc template exists for it.
gen_kerneldoc()
{
	local class="$1"; shift
	local meta="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift
	# Peek at (but do not consume) the atomic prefix: "$@" must still
	# carry the atomic/int arguments for gen_template_kerneldoc() below.
	# Without this assignment, ${atomic} was unset and atomicname lost
	# its "atomic"/"atomic64" prefix in the placeholder comment.
	local atomic="$1"

	local atomicname="${atomic}_${pfx}${name}${sfx}${order}"

	local tmpl="$(find_kerneldoc_template "${pfx}" "${name}" "${sfx}" "${order}")"
	if [ -z "${tmpl}" ]; then
		printf "/*\n"
		printf " * No kerneldoc available for ${class}${atomicname}\n"
		printf " */\n"
	else
		gen_template_kerneldoc "${tmpl}" "${class}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
	fi
}
#gen_proto_order_variants(meta, pfx, name, sfx, ...)
gen_proto_order_variants()
{

View File

@@ -27,7 +27,7 @@ and vF i v
andnot vF i v
or vF i v
xor vF i v
xchg I v i
xchg I v i:new
cmpxchg I v i:old i:new
try_cmpxchg B v p:old i:new
sub_and_test b i v

View File

@@ -1,9 +1,5 @@
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_acquire_fence();
return ret;
}
EOF

View File

@@ -1,15 +1,3 @@
cat <<EOF
/**
* arch_${atomic}_add_negative${order} - Add and test if negative
* @i: integer value to add
* @v: pointer of type ${atomic}_t
*
* Atomically adds @i to @v and returns true if the result is negative,
* or false when the result is greater than or equal to zero.
*/
static __always_inline bool
arch_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
{
return arch_${atomic}_add_return${order}(i, v) < 0;
}
return raw_${atomic}_add_return${order}(i, v) < 0;
EOF

View File

@@ -1,16 +1,3 @@
cat << EOF
/**
* arch_${atomic}_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
static __always_inline bool
arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
return arch_${atomic}_fetch_add_unless(v, a, u) != u;
}
return raw_${atomic}_fetch_add_unless(v, a, u) != u;
EOF

View File

@@ -1,7 +1,3 @@
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
}
${retstmt}raw_${atomic}_${pfx}and${sfx}${order}(~i, v);
EOF

View File

@@ -0,0 +1,3 @@
cat <<EOF
return raw_cmpxchg${order}(&v->counter, old, new);
EOF

View File

@@ -1,7 +1,3 @@
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
}
${retstmt}raw_${atomic}_${pfx}sub${sfx}${order}(1, v);
EOF

View File

@@ -1,15 +1,3 @@
cat <<EOF
/**
* arch_${atomic}_dec_and_test - decrement and test
* @v: pointer of type ${atomic}_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static __always_inline bool
arch_${atomic}_dec_and_test(${atomic}_t *v)
{
return arch_${atomic}_dec_return(v) == 0;
}
return raw_${atomic}_dec_return(v) == 0;
EOF

View File

@@ -1,15 +1,11 @@
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_dec_if_positive(${atomic}_t *v)
{
${int} dec, c = arch_${atomic}_read(v);
${int} dec, c = raw_${atomic}_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
} while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
} while (!raw_${atomic}_try_cmpxchg(v, &c, dec));
return dec;
}
EOF

View File

@@ -1,14 +1,10 @@
cat <<EOF
static __always_inline bool
arch_${atomic}_dec_unless_positive(${atomic}_t *v)
{
${int} c = arch_${atomic}_read(v);
${int} c = raw_${atomic}_read(v);
do {
if (unlikely(c > 0))
return false;
} while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
} while (!raw_${atomic}_try_cmpxchg(v, &c, c - 1));
return true;
}
EOF

View File

@@ -1,11 +1,7 @@
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}${name}${sfx}(${params})
{
${ret} ret;
__atomic_pre_full_fence();
ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_post_full_fence();
return ret;
}
EOF

View File

@@ -1,23 +1,10 @@
cat << EOF
/**
* arch_${atomic}_fetch_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
static __always_inline ${int}
arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
${int} c = arch_${atomic}_read(v);
${int} c = raw_${atomic}_read(v);
do {
if (unlikely(c == u))
break;
} while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
} while (!raw_${atomic}_try_cmpxchg(v, &c, c + a));
return c;
}
EOF

View File

@@ -1,7 +1,3 @@
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
}
${retstmt}raw_${atomic}_${pfx}add${sfx}${order}(1, v);
EOF

View File

@@ -1,15 +1,3 @@
cat <<EOF
/**
* arch_${atomic}_inc_and_test - increment and test
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static __always_inline bool
arch_${atomic}_inc_and_test(${atomic}_t *v)
{
return arch_${atomic}_inc_return(v) == 0;
}
return raw_${atomic}_inc_return(v) == 0;
EOF

View File

@@ -1,14 +1,3 @@
cat <<EOF
/**
* arch_${atomic}_inc_not_zero - increment unless the number is zero
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
static __always_inline bool
arch_${atomic}_inc_not_zero(${atomic}_t *v)
{
return arch_${atomic}_add_unless(v, 1, 0);
}
return raw_${atomic}_add_unless(v, 1, 0);
EOF

View File

@@ -1,14 +1,10 @@
cat <<EOF
static __always_inline bool
arch_${atomic}_inc_unless_negative(${atomic}_t *v)
{
${int} c = arch_${atomic}_read(v);
${int} c = raw_${atomic}_read(v);
do {
if (unlikely(c < 0))
return false;
} while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
} while (!raw_${atomic}_try_cmpxchg(v, &c, c + 1));
return true;
}
EOF

View File

@@ -1,16 +1,12 @@
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_read_acquire(const ${atomic}_t *v)
{
${int} ret;
if (__native_word(${atomic}_t)) {
ret = smp_load_acquire(&(v)->counter);
} else {
ret = arch_${atomic}_read(v);
ret = raw_${atomic}_read(v);
__atomic_acquire_fence();
}
return ret;
}
EOF

View File

@@ -1,8 +1,4 @@
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}${name}${sfx}_release(${params})
{
__atomic_release_fence();
${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
}
EOF

View File

@@ -1,12 +1,8 @@
cat <<EOF
static __always_inline void
arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
{
if (__native_word(${atomic}_t)) {
smp_store_release(&(v)->counter, i);
} else {
__atomic_release_fence();
arch_${atomic}_set(v, i);
raw_${atomic}_set(v, i);
}
}
EOF

View File

@@ -1,16 +1,3 @@
cat <<EOF
/**
* arch_${atomic}_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type ${atomic}_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static __always_inline bool
arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
return arch_${atomic}_sub_return(i, v) == 0;
}
return raw_${atomic}_sub_return(i, v) == 0;
EOF

View File

@@ -1,11 +1,7 @@
cat <<EOF
static __always_inline bool
arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
${int} r, o = *old;
r = arch_${atomic}_cmpxchg${order}(v, o, new);
r = raw_${atomic}_cmpxchg${order}(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
}
EOF

View File

@@ -0,0 +1,3 @@
cat <<EOF
return raw_xchg${order}(&v->counter, new);
EOF

View File

@@ -17,19 +17,26 @@ gen_template_fallback()
local atomic="$1"; shift
local int="$1"; shift
local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "${int}")"
local retstmt="$(gen_ret_stmt "${meta}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local args="$(gen_args "$@")"
if [ ! -z "${template}" ]; then
printf "#ifndef ${atomicname}\n"
. ${template}
printf "#define ${atomicname} ${atomicname}\n"
printf "#endif\n\n"
fi
. ${template}
}
#gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
# Emit an ordering fallback built from the op's _relaxed form. The
# template name is the order suffix without its leading '_'; a full
# (empty) order uses the "fence" template.
gen_order_fallback()
{
	local meta="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift

	local base="${order#_}"
	[ -n "${base}" ] || base="fence"

	gen_template_fallback "${ATOMICDIR}/fallbacks/${base}" \
		"${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
}
#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
@@ -45,17 +52,7 @@ gen_proto_fallback()
gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
}
#gen_basic_fallbacks(basename)
gen_basic_fallbacks()
{
local basename="$1"; shift
cat << EOF
#define ${basename}_acquire ${basename}
#define ${basename}_release ${basename}
#define ${basename}_relaxed ${basename}
EOF
}
#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, args...)
gen_proto_order_variant()
{
local meta="$1"; shift
@@ -63,13 +60,65 @@ gen_proto_order_variant()
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local atomic="$1"
local atomic="$1"; shift
local int="$1"; shift
local basename="arch_${atomic}_${pfx}${name}${sfx}"
local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
local basename="${atomic}_${pfx}${name}${sfx}"
printf "#define ${basename}${order} ${basename}${order}\n"
local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
local ret="$(gen_ret_type "${meta}" "${int}")"
local retstmt="$(gen_ret_stmt "${meta}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local args="$(gen_args "$@")"
gen_kerneldoc "raw_" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
printf "static __always_inline ${ret}\n"
printf "raw_${atomicname}(${params})\n"
printf "{\n"
# Where there is no possible fallback, this order variant is mandatory
# and must be provided by arch code. Add a comment to the header to
# make this obvious.
#
# Ideally we'd error on a missing definition, but arch code might
# define this order variant as a C function without a preprocessor
# symbol.
if [ -z ${template} ] && [ -z "${order}" ] && ! meta_has_relaxed "${meta}"; then
printf "\t${retstmt}arch_${atomicname}(${args});\n"
printf "}\n\n"
return
fi
printf "#if defined(arch_${atomicname})\n"
printf "\t${retstmt}arch_${atomicname}(${args});\n"
# Allow FULL/ACQUIRE/RELEASE ops to be defined in terms of RELAXED ops
if [ "${order}" != "_relaxed" ] && meta_has_relaxed "${meta}"; then
printf "#elif defined(arch_${basename}_relaxed)\n"
gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
fi
# Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops
if [ ! -z "${order}" ]; then
printf "#elif defined(arch_${basename})\n"
printf "\t${retstmt}arch_${basename}(${args});\n"
fi
printf "#else\n"
if [ ! -z "${template}" ]; then
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
else
printf "#error \"Unable to define raw_${atomicname}\"\n"
fi
printf "#endif\n"
printf "}\n\n"
}
#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
gen_proto_order_variants()
{
@@ -79,49 +128,30 @@ gen_proto_order_variants()
local sfx="$1"; shift
local atomic="$1"
local basename="arch_${atomic}_${pfx}${name}${sfx}"
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
# If we don't have relaxed atomics, then we don't bother with ordering fallbacks
# read_acquire and set_release need to be templated, though
if ! meta_has_relaxed "${meta}"; then
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
if meta_has_acquire "${meta}"; then
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
fi
if meta_has_release "${meta}"; then
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
fi
return
if meta_has_acquire "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
fi
printf "#ifndef ${basename}_relaxed\n"
if [ ! -z "${template}" ]; then
printf "#ifdef ${basename}\n"
if meta_has_release "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
fi
gen_basic_fallbacks "${basename}"
if [ ! -z "${template}" ]; then
printf "#endif /* ${basename} */\n\n"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
if meta_has_relaxed "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
fi
}
printf "#else /* ${basename}_relaxed */\n\n"
gen_template_fallback "${ATOMICDIR}/fallbacks/acquire" "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
gen_template_fallback "${ATOMICDIR}/fallbacks/release" "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
gen_template_fallback "${ATOMICDIR}/fallbacks/fence" "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
printf "#endif /* ${basename}_relaxed */\n\n"
#gen_basic_fallbacks(basename)
# Define every ordering variant of raw_${basename} directly in terms of
# the (fully ordered) arch_${basename} op.
gen_basic_fallbacks()
{
	local basename="$1"; shift

	for suffix in _acquire _release _relaxed; do
		printf '#define raw_%s%s arch_%s\n' "${basename}" "${suffix}" "${basename}"
	done
}
gen_order_fallbacks()
@@ -130,36 +160,65 @@ gen_order_fallbacks()
cat <<EOF
#ifndef ${xchg}_acquire
#define ${xchg}_acquire(...) \\
__atomic_op_acquire(${xchg}, __VA_ARGS__)
#define raw_${xchg}_relaxed arch_${xchg}_relaxed
#ifdef arch_${xchg}_acquire
#define raw_${xchg}_acquire arch_${xchg}_acquire
#else
#define raw_${xchg}_acquire(...) \\
__atomic_op_acquire(arch_${xchg}, __VA_ARGS__)
#endif
#ifndef ${xchg}_release
#define ${xchg}_release(...) \\
__atomic_op_release(${xchg}, __VA_ARGS__)
#ifdef arch_${xchg}_release
#define raw_${xchg}_release arch_${xchg}_release
#else
#define raw_${xchg}_release(...) \\
__atomic_op_release(arch_${xchg}, __VA_ARGS__)
#endif
#ifndef ${xchg}
#define ${xchg}(...) \\
__atomic_op_fence(${xchg}, __VA_ARGS__)
#ifdef arch_${xchg}
#define raw_${xchg} arch_${xchg}
#else
#define raw_${xchg}(...) \\
__atomic_op_fence(arch_${xchg}, __VA_ARGS__)
#endif
EOF
}
# Emit the preprocessor cascade defining raw_${xchg}${order}:
#  1. the arch op of the exact ordering, if the arch defines it;
#  2. else build it from arch_${xchg}_relaxed via __atomic_op${forder}()
#     (not applicable when generating the _relaxed variant itself);
#  3. else fall back to the fully ordered arch_${xchg} (only valid for
#     the weaker, non-empty orderings);
#  4. else expand to a call of an undeclared extern so any use fails at
#     link time with a descriptive symbol name.
gen_xchg_order_fallback()
{
local xchg="$1"; shift
local order="$1"; shift
# Ordering suffix for the __atomic_op*() helper; empty order => _fence.
local forder="${order:-_fence}"
printf "#if defined(arch_${xchg}${order})\n"
printf "#define raw_${xchg}${order} arch_${xchg}${order}\n"
if [ "${order}" != "_relaxed" ]; then
printf "#elif defined(arch_${xchg}_relaxed)\n"
printf "#define raw_${xchg}${order}(...) \\\\\n"
printf " __atomic_op${forder}(arch_${xchg}, __VA_ARGS__)\n"
fi
if [ ! -z "${order}" ]; then
printf "#elif defined(arch_${xchg})\n"
printf "#define raw_${xchg}${order} arch_${xchg}\n"
fi
printf "#else\n"
printf "extern void raw_${xchg}${order}_not_implemented(void);\n"
printf "#define raw_${xchg}${order}(...) raw_${xchg}${order}_not_implemented()\n"
printf "#endif\n\n"
}
gen_xchg_fallbacks()
{
local xchg="$1"; shift
printf "#ifndef ${xchg}_relaxed\n"
gen_basic_fallbacks ${xchg}
printf "#else /* ${xchg}_relaxed */\n"
gen_order_fallbacks ${xchg}
printf "#endif /* ${xchg}_relaxed */\n\n"
for order in "" "_acquire" "_release" "_relaxed"; do
gen_xchg_order_fallback "${xchg}" "${order}"
done
}
gen_try_cmpxchg_fallback()
@@ -168,40 +227,61 @@ gen_try_cmpxchg_fallback()
local order="$1"; shift;
cat <<EOF
#ifndef arch_try_${cmpxchg}${order}
#define arch_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
#define raw_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
({ \\
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
___r = arch_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
___r = raw_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
if (unlikely(___r != ___o)) \\
*___op = ___r; \\
likely(___r == ___o); \\
})
#endif /* arch_try_${cmpxchg}${order} */
EOF
}
# Emit the preprocessor cascade defining raw_try_${cmpxchg}${order},
# mirroring gen_xchg_order_fallback() but with a generic C fallback
# (built from raw_${cmpxchg}${order}) as the last resort instead of a
# link-time error, since try_cmpxchg can always be synthesized from
# plain cmpxchg.
gen_try_cmpxchg_order_fallback()
{
local cmpxchg="$1"; shift
local order="$1"; shift
# Ordering suffix for the __atomic_op*() helper; empty order => _fence.
local forder="${order:-_fence}"
printf "#if defined(arch_try_${cmpxchg}${order})\n"
printf "#define raw_try_${cmpxchg}${order} arch_try_${cmpxchg}${order}\n"
if [ "${order}" != "_relaxed" ]; then
printf "#elif defined(arch_try_${cmpxchg}_relaxed)\n"
printf "#define raw_try_${cmpxchg}${order}(...) \\\\\n"
printf " __atomic_op${forder}(arch_try_${cmpxchg}, __VA_ARGS__)\n"
fi
if [ ! -z "${order}" ]; then
printf "#elif defined(arch_try_${cmpxchg})\n"
printf "#define raw_try_${cmpxchg}${order} arch_try_${cmpxchg}\n"
fi
printf "#else\n"
gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
printf "#endif\n\n"
}
gen_try_cmpxchg_fallbacks()
{
local cmpxchg="$1"; shift;
printf "#ifndef arch_try_${cmpxchg}_relaxed\n"
printf "#ifdef arch_try_${cmpxchg}\n"
gen_basic_fallbacks "arch_try_${cmpxchg}"
printf "#endif /* arch_try_${cmpxchg} */\n\n"
for order in "" "_acquire" "_release" "_relaxed"; do
gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
gen_try_cmpxchg_order_fallback "${cmpxchg}" "${order}"
done
}
printf "#else /* arch_try_${cmpxchg}_relaxed */\n"
gen_cmpxchg_local_fallbacks()
{
local cmpxchg="$1"; shift
gen_order_fallbacks "arch_try_${cmpxchg}"
printf "#endif /* arch_try_${cmpxchg}_relaxed */\n\n"
printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n"
printf "#ifdef arch_try_${cmpxchg}\n"
printf "#define raw_try_${cmpxchg} arch_try_${cmpxchg}\n"
printf "#else\n"
gen_try_cmpxchg_fallback "${cmpxchg}" ""
printf "#endif\n\n"
}
cat << EOF
@@ -217,16 +297,20 @@ cat << EOF
EOF
for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do
for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128"; do
gen_xchg_fallbacks "${xchg}"
done
for cmpxchg in "cmpxchg" "cmpxchg64"; do
for cmpxchg in "cmpxchg" "cmpxchg64" "cmpxchg128"; do
gen_try_cmpxchg_fallbacks "${cmpxchg}"
done
for cmpxchg in "cmpxchg_local" "cmpxchg64_local"; do
gen_try_cmpxchg_fallback "${cmpxchg}" ""
for cmpxchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local"; do
gen_cmpxchg_local_fallbacks "${cmpxchg}" ""
done
for cmpxchg in "sync_cmpxchg"; do
printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n"
done
grep '^[a-z]' "$1" | while read name meta args; do

View File

@@ -68,12 +68,14 @@ gen_proto_order_variant()
local args="$(gen_args "$@")"
local retstmt="$(gen_ret_stmt "${meta}")"
gen_kerneldoc "" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
cat <<EOF
static __always_inline ${ret}
${atomicname}(${params})
{
${checks}
${retstmt}arch_${atomicname}(${args});
${retstmt}raw_${atomicname}(${args});
}
EOF
@@ -84,7 +86,6 @@ gen_xchg()
{
local xchg="$1"; shift
local order="$1"; shift
local mult="$1"; shift
kcsan_barrier=""
if [ "${xchg%_local}" = "${xchg}" ]; then
@@ -104,9 +105,9 @@ cat <<EOF
EOF
[ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
cat <<EOF
instrument_atomic_read_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
instrument_read_write(__ai_oldp, ${mult}sizeof(*__ai_oldp)); \\
arch_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \\
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \\
raw_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
})
EOF
@@ -119,8 +120,8 @@ cat <<EOF
EOF
[ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
cat <<EOF
instrument_atomic_read_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
arch_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \\
raw_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\
})
EOF
@@ -134,15 +135,10 @@ cat << EOF
// DO NOT MODIFY THIS FILE DIRECTLY
/*
* This file provides wrappers with KASAN instrumentation for atomic operations.
* To use this functionality an arch's atomic.h file needs to define all
* atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
* this file at the end. This file provides atomic_read() that forwards to
* arch_atomic_read() for actual atomic operation.
* Note: if an arch atomic operation is implemented by means of other atomic
* operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
* arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
* double instrumentation.
 * This file provides atomic operations with explicit instrumentation (e.g.
 * KASAN, KCSAN), which should be used unless it is necessary to avoid
 * instrumentation. Where it is necessary to avoid instrumentation, the
 * raw_atomic*() operations should be used.
*/
#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
#define _LINUX_ATOMIC_INSTRUMENTED_H
@@ -166,24 +162,18 @@ grep '^[a-z]' "$1" | while read name meta args; do
done
for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg" "try_cmpxchg64"; do
for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do
for order in "" "_acquire" "_release" "_relaxed"; do
gen_xchg "${xchg}" "${order}" ""
gen_xchg "${xchg}" "${order}"
printf "\n"
done
done
for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" ; do
gen_xchg "${xchg}" "" ""
for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local"; do
gen_xchg "${xchg}" ""
printf "\n"
done
gen_xchg "cmpxchg_double" "" "2 * "
printf "\n\n"
gen_xchg "cmpxchg_double_local" "" "2 * "
cat <<EOF
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */

View File

@@ -32,24 +32,34 @@ gen_args_cast()
done
}
#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
#gen_proto_order_variant(meta, pfx, name, sfx, order, arg...)
gen_proto_order_variant()
{
local meta="$1"; shift
local name="$1$2$3$4"; shift; shift; shift; shift
local atomic="$1"; shift
local int="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local atomicname="${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "long")"
local params="$(gen_params "long" "atomic_long" "$@")"
local argscast="$(gen_args_cast "${int}" "${atomic}" "$@")"
local argscast_32="$(gen_args_cast "int" "atomic" "$@")"
local argscast_64="$(gen_args_cast "s64" "atomic64" "$@")"
local retstmt="$(gen_ret_stmt "${meta}")"
gen_kerneldoc "raw_" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "atomic_long" "long" "$@"
cat <<EOF
static __always_inline ${ret}
arch_atomic_long_${name}(${params})
raw_atomic_long_${atomicname}(${params})
{
${retstmt}arch_${atomic}_${name}(${argscast});
#ifdef CONFIG_64BIT
${retstmt}raw_atomic64_${atomicname}(${argscast_64});
#else
${retstmt}raw_atomic_${atomicname}(${argscast_32});
#endif
}
EOF
@@ -79,24 +89,12 @@ typedef atomic_t atomic_long_t;
#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
#endif
#ifdef CONFIG_64BIT
EOF
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
gen_proto "${meta}" "${name}" ${args}
done
cat <<EOF
#else /* CONFIG_64BIT */
EOF
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic" "int" ${args}
done
cat <<EOF
#endif /* CONFIG_64BIT */
#endif /* _LINUX_ATOMIC_LONG_H */
EOF

View File

@@ -0,0 +1,13 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic add with ${desc_order} ordering
* @i: ${int} value to add
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v + @i) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* ${desc_return}
*/
EOF

View File

@@ -0,0 +1,13 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic add and test if negative with ${desc_order} ordering
* @i: ${int} value to add
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v + @i) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
EOF

View File

@@ -0,0 +1,18 @@
# Kerneldoc template shared by the add_unless() and fetch_add_unless()
# ops. The bare (un-prefixed) add_unless() form returns a boolean rather
# than the original value, so override desc_return for it here.
if [ -z "${pfx}" ]; then
desc_return="Return: @true if @v was updated, @false otherwise."
fi

cat <<EOF
/**
 * ${class}${atomicname}() - atomic add unless value with ${desc_order} ordering
 * @v: pointer to ${atomic}_t
 * @a: ${int} value to add
 * @u: ${int} value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with ${desc_order} ordering.
 *
 * ${desc_noinstr}
 *
 * ${desc_return}
 */
EOF

View File

@@ -0,0 +1,13 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic bitwise AND with ${desc_order} ordering
* @i: ${int} value
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v & @i) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* ${desc_return}
*/
EOF

View File

@@ -0,0 +1,13 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic bitwise AND NOT with ${desc_order} ordering
* @i: ${int} value
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v & ~@i) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* ${desc_return}
*/
EOF

View File

@@ -0,0 +1,14 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic compare and exchange with ${desc_order} ordering
* @v: pointer to ${atomic}_t
* @old: ${int} value to compare with
* @new: ${int} value to assign
*
* If (@v == @old), atomically updates @v to @new with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: The original value of @v.
*/
EOF

View File

@@ -0,0 +1,12 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic decrement with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v - 1) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* ${desc_return}
*/
EOF

View File

@@ -0,0 +1,12 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic decrement and test if zero with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v - 1) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
EOF

View File

@@ -0,0 +1,12 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic decrement if positive with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: The old value of (@v - 1), regardless of whether @v was updated.
*/
EOF

View File

@@ -0,0 +1,12 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic decrement unless positive with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: @true if @v was updated, @false otherwise.
*/
EOF

View File

@@ -0,0 +1,12 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic increment with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v + 1) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* ${desc_return}
*/
EOF

View File

@@ -0,0 +1,12 @@
# Kerneldoc template for ${class}${atomicname}() (increment-and-test).
# Placeholders expand from the caller's environment (unquoted heredoc).
cat <<EOF
/**
* ${class}${atomicname}() - atomic increment and test if zero with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v + 1) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
EOF

View File

@@ -0,0 +1,12 @@
# Kerneldoc template for ${class}${atomicname}(): increment only while the
# value is non-zero; returns whether the update happened.
cat <<EOF
/**
* ${class}${atomicname}() - atomic increment unless zero with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: @true if @v was updated, @false otherwise.
*/
EOF

View File

@@ -0,0 +1,12 @@
# Kerneldoc template for ${class}${atomicname}(): increment only while the
# value is non-negative (@v >= 0); returns whether the update happened.
cat <<EOF
/**
* ${class}${atomicname}() - atomic increment unless negative with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: @true if @v was updated, @false otherwise.
*/
EOF

View File

@@ -0,0 +1,13 @@
# Kerneldoc template for ${class}${atomicname}() (atomic bitwise OR).
# Placeholders expand from the caller's environment (unquoted heredoc).
cat <<EOF
/**
* ${class}${atomicname}() - atomic bitwise OR with ${desc_order} ordering
* @i: ${int} value
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v | @i) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* ${desc_return}
*/
EOF

View File

@@ -0,0 +1,12 @@
# Kerneldoc template for ${class}${atomicname}() (atomic load).
# Placeholders expand from the caller's environment (unquoted heredoc).
cat <<EOF
/**
* ${class}${atomicname}() - atomic load with ${desc_order} ordering
* @v: pointer to ${atomic}_t
*
* Atomically loads the value of @v with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: The value loaded from @v.
*/
EOF

View File

@@ -0,0 +1,13 @@
# Kerneldoc template for ${class}${atomicname}() (atomic store).
# Placeholders expand from the caller's environment (unquoted heredoc).
cat <<EOF
/**
* ${class}${atomicname}() - atomic set with ${desc_order} ordering
* @v: pointer to ${atomic}_t
* @i: ${int} value to assign
*
* Atomically sets @v to @i with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: Nothing.
*/
EOF

View File

@@ -0,0 +1,13 @@
# Kerneldoc template for ${class}${atomicname}() (atomic subtract).
# Placeholders expand from the caller's environment (unquoted heredoc).
cat <<EOF
/**
* ${class}${atomicname}() - atomic subtract with ${desc_order} ordering
* @i: ${int} value to subtract
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v - @i) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* ${desc_return}
*/
EOF

View File

@@ -0,0 +1,13 @@
# Kerneldoc template for ${class}${atomicname}() (subtract-and-test).
# Placeholders expand from the caller's environment (unquoted heredoc).
# Fix: @i was described as "value to add" — a copy-paste from the
# add_and_test template; this operation subtracts @i.
cat <<EOF
/**
* ${class}${atomicname}() - atomic subtract and test if zero with ${desc_order} ordering
* @i: ${int} value to subtract
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v - @i) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
EOF

View File

@@ -0,0 +1,15 @@
# Kerneldoc template for ${class}${atomicname}() (try-compare-and-exchange).
# Placeholders expand from the caller's environment (unquoted heredoc).
# Fix: spelling in emitted Return line — "occured" -> "occurred".
cat <<EOF
/**
* ${class}${atomicname}() - atomic compare and exchange with ${desc_order} ordering
* @v: pointer to ${atomic}_t
* @old: pointer to ${int} value to compare with
* @new: ${int} value to assign
*
* If (@v == @old), atomically updates @v to @new with ${desc_order} ordering.
* Otherwise, updates @old to the current value of @v.
*
* ${desc_noinstr}
*
* Return: @true if the exchange occurred, @false otherwise.
*/
EOF

View File

@@ -0,0 +1,13 @@
# Kerneldoc template for ${class}${atomicname}() (atomic exchange).
# Placeholders expand from the caller's environment (unquoted heredoc).
cat <<EOF
/**
* ${class}${atomicname}() - atomic exchange with ${desc_order} ordering
* @v: pointer to ${atomic}_t
* @new: ${int} value to assign
*
* Atomically updates @v to @new with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* Return: The original value of @v.
*/
EOF

View File

@@ -0,0 +1,13 @@
# Kerneldoc template for ${class}${atomicname}() (atomic bitwise XOR).
# Placeholders expand from the caller's environment (unquoted heredoc).
cat <<EOF
/**
* ${class}${atomicname}() - atomic bitwise XOR with ${desc_order} ordering
* @i: ${int} value
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v ^ @i) with ${desc_order} ordering.
*
* ${desc_noinstr}
*
* ${desc_return}
*/
EOF

View File

@@ -64,7 +64,7 @@ my $type_constant = '\b``([^\`]+)``\b';
# Regex fragments used to recognise kernel-doc markup in comment text.
my $type_constant2 = '\%([-_\w]+)';                           # %CONSTANT
my $type_func      = '(\w+)\(\)';                             # function()
my $type_param     = '\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';    # @param, @p.member, @p->member
# Fix: $type_param_ref was declared twice (stale pre-'~' version left in
# place), so the first declaration was dead and masked by the second.
# Keep the single declaration that also accepts bitwise negation, ~@var.
my $type_param_ref = '([\!~]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';
my $type_fp_param  = '\@(\w+)\(\)';      # Special RST handling for func ptr params
my $type_fp_param2 = '\@(\w+->\S+)\(\)'; # Special RST handling for structs with func ptr params
my $type_env       = '(\$\w+)';          # $ENV_VAR

View File

@@ -17,7 +17,11 @@ binutils)
echo 2.25.0
;;
gcc)
echo 5.1.0
if [ "$SRCARCH" = parisc ]; then
echo 11.0.0
else
echo 5.1.0
fi
;;
llvm)
if [ "$SRCARCH" = s390 ]; then