mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 12:43:29 +02:00
block: remove unnecessary ioc nested locking
The legacy CFQ IO scheduler could call put_io_context() in its exit_icq() elevator callback. This led to a lockdep warning, which was fixed in commit d8c66c5d59
("block: fix lockdep warning on io_context release put_io_context()") by using a nested subclass for the ioc spinlock. However, with commit f382fb0bce
("block: remove legacy IO schedulers") the CFQ IO scheduler no longer exists. The BFQ IO scheduler also implements the exit_icq() elevator callback but does not call put_io_context(). The nested subclass for the ioc spinlock is no longer needed. Since it existed as an exception and no longer applies, remove the nested subclass usage. Signed-off-by: John Ogness <john.ogness@linutronix.de> Reviewed-by: Daniel Wagner <dwagner@suse.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
@@ -96,15 +96,7 @@ static void ioc_release_fn(struct work_struct *work)
|
|||||||
{
|
{
|
||||||
struct io_context *ioc = container_of(work, struct io_context,
|
struct io_context *ioc = container_of(work, struct io_context,
|
||||||
release_work);
|
release_work);
|
||||||
unsigned long flags;
|
spin_lock_irq(&ioc->lock);
|
||||||
|
|
||||||
/*
|
|
||||||
* Exiting icq may call into put_io_context() through elevator
|
|
||||||
* which will trigger lockdep warning. The ioc's are guaranteed to
|
|
||||||
* be different, use a different locking subclass here. Use
|
|
||||||
* irqsave variant as there's no spin_lock_irq_nested().
|
|
||||||
*/
|
|
||||||
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|
|
||||||
|
|
||||||
while (!hlist_empty(&ioc->icq_list)) {
|
while (!hlist_empty(&ioc->icq_list)) {
|
||||||
struct io_cq *icq = hlist_entry(ioc->icq_list.first,
|
struct io_cq *icq = hlist_entry(ioc->icq_list.first,
|
||||||
@@ -115,13 +107,13 @@ static void ioc_release_fn(struct work_struct *work)
|
|||||||
ioc_destroy_icq(icq);
|
ioc_destroy_icq(icq);
|
||||||
spin_unlock(&q->queue_lock);
|
spin_unlock(&q->queue_lock);
|
||||||
} else {
|
} else {
|
||||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
spin_unlock_irq(&ioc->lock);
|
||||||
cpu_relax();
|
cpu_relax();
|
||||||
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|
spin_lock_irq(&ioc->lock);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
spin_unlock_irq(&ioc->lock);
|
||||||
|
|
||||||
kmem_cache_free(iocontext_cachep, ioc);
|
kmem_cache_free(iocontext_cachep, ioc);
|
||||||
}
|
}
|
||||||
@@ -170,7 +162,6 @@ void put_io_context(struct io_context *ioc)
|
|||||||
*/
|
*/
|
||||||
void put_io_context_active(struct io_context *ioc)
|
void put_io_context_active(struct io_context *ioc)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
struct io_cq *icq;
|
struct io_cq *icq;
|
||||||
|
|
||||||
if (!atomic_dec_and_test(&ioc->active_ref)) {
|
if (!atomic_dec_and_test(&ioc->active_ref)) {
|
||||||
@@ -178,19 +169,14 @@ void put_io_context_active(struct io_context *ioc)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
spin_lock_irq(&ioc->lock);
|
||||||
* Need ioc lock to walk icq_list and q lock to exit icq. Perform
|
|
||||||
* reverse double locking. Read comment in ioc_release_fn() for
|
|
||||||
* explanation on the nested locking annotation.
|
|
||||||
*/
|
|
||||||
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|
|
||||||
hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
|
hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
|
||||||
if (icq->flags & ICQ_EXITED)
|
if (icq->flags & ICQ_EXITED)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
ioc_exit_icq(icq);
|
ioc_exit_icq(icq);
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
spin_unlock_irq(&ioc->lock);
|
||||||
|
|
||||||
put_io_context(ioc);
|
put_io_context(ioc);
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user