epoll: rename global epmutex
As of 4f04cbaf128 ("epoll: use refcount to reduce ep_mutex contention"), this
lock is now specific to nesting cases - inserting an epoll fd onto another
epoll fd. Rename the lock to be less generic.

Link: https://lkml.kernel.org/r/20230411234159.20421-1-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit d4cb626d6f
parent 5a10562bde
committed by Andrew Morton
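For context, the "nesting" case this lock now covers is an epoll instance that is itself registered as a watched fd inside another epoll instance. A minimal userspace sketch of that topology, using only the standard epoll(7) API (the descriptor names "outer"/"inner" are illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>

int main(void)
{
	/* Two independent epoll instances. */
	int outer = epoll_create1(0);
	int inner = epoll_create1(0);

	if (outer < 0 || inner < 0) {
		perror("epoll_create1");
		return EXIT_FAILURE;
	}

	/*
	 * Register the inner epoll fd inside the outer one. This is the
	 * EPOLL_CTL_ADD path on which the kernel takes the (renamed)
	 * epnested_mutex and walks the epoll tree to reject cycles and
	 * overly deep wakeup paths.
	 */
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = inner };
	if (epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev) < 0) {
		perror("epoll_ctl(EPOLL_CTL_ADD)");
		return EXIT_FAILURE;
	}

	puts("nested epoll fd registered");
	return EXIT_SUCCESS;
}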
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -43,7 +43,7 @@
  * LOCKING:
  * There are three level of locking required by epoll :
  *
- * 1) epmutex (mutex)
+ * 1) epnested_mutex (mutex)
  * 2) ep->mtx (mutex)
  * 3) ep->lock (rwlock)
  *
@@ -57,8 +57,8 @@
  * we need a lock that will allow us to sleep. This lock is a
  * mutex (ep->mtx). It is acquired during the event transfer loop,
  * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
- * The epmutex is acquired when inserting an epoll fd onto another epoll
- * fd. We do this so that we walk the epoll tree and ensure that this
+ * The epnested_mutex is acquired when inserting an epoll fd onto another
+ * epoll fd. We do this so that we walk the epoll tree and ensure that this
  * insertion does not create a cycle of epoll file descriptors, which
  * could lead to deadlock. We need a global mutex to prevent two
  * simultaneous inserts (A into B and B into A) from racing and
@@ -74,9 +74,9 @@
  * of epoll file descriptors, we use the current recursion depth as
  * the lockdep subkey.
  * It is possible to drop the "ep->mtx" and to use the global
- * mutex "epmutex" (together with "ep->lock") to have it working,
+ * mutex "epnested_mutex" (together with "ep->lock") to have it working,
  * but having "ep->mtx" will make the interface more scalable.
- * Events that require holding "epmutex" are very rare, while for
+ * Events that require holding "epnested_mutex" are very rare, while for
  * normal operations the epoll private "ep->mtx" will guarantee
  * a better scalability.
  */
@@ -248,7 +248,7 @@ struct ep_pqueue {
 static long max_user_watches __read_mostly;
 
 /* Used for cycles detection */
-static DEFINE_MUTEX(epmutex);
+static DEFINE_MUTEX(epnested_mutex);
 
 static u64 loop_check_gen = 0;
 
@@ -263,7 +263,7 @@ static struct kmem_cache *pwq_cache __read_mostly;
 
 /*
  * List of files with newly added links, where we may need to limit the number
- * of emanating paths. Protected by the epmutex.
+ * of emanating paths. Protected by the epnested_mutex.
  */
 struct epitems_head {
 	struct hlist_head epitems;
@@ -1337,7 +1337,7 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
  * is connected to n file sources. In this case each file source has 1 path
  * of length 1. Thus, the numbers below should be more than sufficient. These
  * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
- * and delete can't add additional paths. Protected by the epmutex.
+ * and delete can't add additional paths. Protected by the epnested_mutex.
  */
 static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
 static int path_count[PATH_ARR_SIZE];
@@ -2167,7 +2167,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
 	 * We do not need to take the global 'epumutex' on EPOLL_CTL_ADD when
 	 * the epoll file descriptor is attaching directly to a wakeup source,
 	 * unless the epoll file descriptor is nested. The purpose of taking the
-	 * 'epmutex' on add is to prevent complex toplogies such as loops and
+	 * 'epnested_mutex' on add is to prevent complex toplogies such as loops and
 	 * deep wakeup paths from forming in parallel through multiple
 	 * EPOLL_CTL_ADD operations.
 	 */
@@ -2178,7 +2178,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
 		if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
 		    is_file_epoll(tf.file)) {
 			mutex_unlock(&ep->mtx);
-			error = epoll_mutex_lock(&epmutex, 0, nonblock);
+			error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);
 			if (error)
 				goto error_tgt_fput;
 			loop_check_gen++;
@@ -2239,7 +2239,7 @@ error_tgt_fput:
 	if (full_check) {
 		clear_tfile_check_list();
 		loop_check_gen++;
-		mutex_unlock(&epmutex);
+		mutex_unlock(&epnested_mutex);
 	}
 
 	fdput(tf);
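As a usage note, the loop check serialized by this lock is visible from userspace: epoll_ctl(2) documents ELOOP for an EPOLL_CTL_ADD that would create a circular chain of epoll instances (or exceed the allowed nesting depth). A hedged sketch of the "A into B and B into A" case mentioned in the comment above (variable names are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>

int main(void)
{
	int a = epoll_create1(0);
	int b = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	if (a < 0 || b < 0) {
		perror("epoll_create1");
		return EXIT_FAILURE;
	}

	/* A watches B: a plain nested insert, allowed. */
	ev.data.fd = b;
	if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev) < 0)
		perror("add B into A");

	/* B watches A: would close a cycle; expected to fail with ELOOP. */
	ev.data.fd = a;
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) < 0 && errno == ELOOP)
		puts("cycle rejected with ELOOP, as expected");

	return EXIT_SUCCESS;
}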