mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 04:33:26 +02:00
workqueue: Add pwq->stats[] and a monitoring script
Currently, the only way to peer into workqueue operations is through tracing. While possible, it isn't easy or convenient to monitor per-workqueue behaviors over time this way. Let's add pwq->stats[] that track relevant events and a drgn monitoring script - tools/workqueue/wq_monitor.py. It's arguable whether this needs to be configurable. However, it currently only has several counters and the runtime overhead shouldn't be noticeable given that they're on pwq's which are per-cpu on per-cpu workqueues and per-numa-node on unbound ones. Let's keep it simple for the time being. v2: Patch reordered to earlier with fewer fields. Field will be added back gradually. Help message improved. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Lai Jiangshan <jiangshanlai@gmail.com>
This commit is contained in:
@@ -199,6 +199,20 @@ struct worker_pool {
 	struct rcu_head		rcu;
 };
 
+/*
+ * Per-pool_workqueue statistics. These can be monitored using
+ * tools/workqueue/wq_monitor.py.
+ */
+enum pool_workqueue_stats {
+	PWQ_STAT_STARTED,	/* work items started execution */
+	PWQ_STAT_COMPLETED,	/* work items completed execution */
+	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
+	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
+	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */
+
+	PWQ_NR_STATS,
+};
+
 /*
  * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
  * of work_struct->data are used for flags and the remaining high bits
@@ -236,6 +250,8 @@ struct pool_workqueue {
 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
 
+	u64			stats[PWQ_NR_STATS];
+
 	/*
 	 * Release of unbound pwq is punted to system_wq. See put_pwq()
 	 * and pwq_unbound_release_workfn() for details. pool_workqueue
@@ -929,8 +945,10 @@ void wq_worker_sleeping(struct task_struct *task)
 	}
 
 	pool->nr_running--;
-	if (need_more_worker(pool))
+	if (need_more_worker(pool)) {
+		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
 		wake_up_worker(pool);
+	}
 	raw_spin_unlock_irq(&pool->lock);
 }
 
@@ -2165,6 +2183,7 @@ static void send_mayday(struct work_struct *work)
 		get_pwq(pwq);
 		list_add_tail(&pwq->mayday_node, &wq->maydays);
 		wake_up_process(wq->rescuer->task);
+		pwq->stats[PWQ_STAT_MAYDAY]++;
 	}
 }
 
@@ -2403,6 +2422,7 @@ __acquires(&pool->lock)
 	 * workqueues), so hiding them isn't a problem.
 	 */
 	lockdep_invariant_state(true);
+	pwq->stats[PWQ_STAT_STARTED]++;
 	trace_workqueue_execute_start(work);
 	worker->current_func(work);
 	/*
@@ -2410,6 +2430,7 @@ __acquires(&pool->lock)
 	 * point will only record its address.
 	 */
 	trace_workqueue_execute_end(work, worker->current_func);
+	pwq->stats[PWQ_STAT_COMPLETED]++;
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);
 
@@ -2653,6 +2674,7 @@ repeat:
 			if (first)
 				pool->watchdog_ts = jiffies;
 			move_linked_works(work, scheduled, &n);
+			pwq->stats[PWQ_STAT_RESCUED]++;
 		}
 		first = false;
 	}
Reference in New Issue
Block a user