Mirror of https://github.com/tbsdtv/linux_media.git (synced 2025-07-23 04:33:26 +02:00)
Merge branch 'for-6.5/block-late' into block-6.5
* for-6.5/block-late:
  blk-sysfs: add a new attr_group for blk_mq
  blk-iocost: move wbt_enable/disable_default() out of spinlock
  blk-wbt: cleanup rwb_enabled() and wbt_disabled()
  blk-wbt: remove dead code to handle wbt enable/disable with io inflight
  blk-wbt: don't create wbt sysfs entry if CONFIG_BLK_WBT is disabled
  blk-mq: fix two misuses on RQF_USE_SCHED
  blk-throttle: Fix io statistics for cgroup v1
  bcache: Fix bcache device claiming
  bcache: Alloc holder object before async registration
  raid10: avoid spin_lock from fastpath from raid10_unplug()
  md: fix 'delete_mutex' deadlock
  md: use mddev->external to select holder in export_rdev()
  md/raid1-10: fix casting from randomized structure in raid1_submit_write()
  md/raid10: fix the condition to call bio_end_io_acct()
block/blk-cgroup.c
@@ -2086,6 +2086,9 @@ void blk_cgroup_bio_start(struct bio *bio)
         struct blkg_iostat_set *bis;
         unsigned long flags;
 
+        if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
+                return;
+
         /* Root-level stats are sourced from system-wide IO stats */
         if (!cgroup_parent(blkcg->css.cgroup))
                 return;
@@ -2116,8 +2119,7 @@ void blk_cgroup_bio_start(struct bio *bio)
         }
 
         u64_stats_update_end_irqrestore(&bis->sync, flags);
-        if (cgroup_subsys_on_dfl(io_cgrp_subsys))
-                cgroup_rstat_updated(blkcg->css.cgroup, cpu);
+        cgroup_rstat_updated(blkcg->css.cgroup, cpu);
         put_cpu();
 }
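The two blk-cgroup.c hunks above belong to "blk-throttle: Fix io statistics for cgroup v1": on a cgroup v1 hierarchy the per-cpu rstat path is never consumed, so blk_cgroup_bio_start() now bails out up front, and the later cgroup_rstat_updated() call no longer needs its own hierarchy check. A minimal user-space sketch of that split (illustrative names only, not kernel code):

/* Model: v2 accounting goes through the rstat path in bio_start(), v1
 * accounting now happens on the throttle path, so bio_start() returns early. */
#include <stdbool.h>
#include <stdio.h>

static bool on_default_hierarchy;       /* stand-in for cgroup_subsys_on_dfl() */

static void bio_start(unsigned int bytes)
{
        if (!on_default_hierarchy)
                return;                 /* v1: handled by the throttle path below */
        printf("v2 rstat: +%u bytes\n", bytes);
}

static void should_throtl(unsigned int bytes)
{
        if (!on_default_hierarchy)
                printf("v1 tg rwstat: +%u bytes\n", bytes);
}

int main(void)
{
        on_default_hierarchy = true;
        bio_start(4096);
        should_throtl(4096);            /* no v1 accounting on v2 */

        on_default_hierarchy = false;
        bio_start(4096);                /* early return */
        should_throtl(4096);            /* counted exactly once, on the v1 path */
        return 0;
}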
block/blk-iocost.c
@@ -3301,11 +3301,9 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
                 blk_stat_enable_accounting(disk->queue);
                 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
                 ioc->enabled = true;
-                wbt_disable_default(disk);
         } else {
                 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
                 ioc->enabled = false;
-                wbt_enable_default(disk);
         }
 
         if (user) {
@@ -3318,6 +3316,11 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
         ioc_refresh_params(ioc, true);
         spin_unlock_irq(&ioc->lock);
 
+        if (enable)
+                wbt_disable_default(disk);
+        else
+                wbt_enable_default(disk);
+
         blk_mq_unquiesce_queue(disk->queue);
         blk_mq_unfreeze_queue(disk->queue);
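The two blk-iocost.c hunks move the wbt_enable_default()/wbt_disable_default() calls out of the ioc->lock critical section and replay them after spin_unlock_irq(), per "blk-iocost: move wbt_enable/disable_default() out of spinlock". A self-contained sketch of the underlying pattern, record the decision under the lock and do the work that may sleep afterwards, using pthread primitives and illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_spinlock_t ioc_lock;
static bool ioc_enabled;

static void apply_wbt_state(bool ioc_on)
{
        /* may sleep: must not run under ioc_lock */
        usleep(1000);
        printf("wbt %s\n", ioc_on ? "disabled (iocost active)" : "restored to default");
}

static void qos_write(bool enable)
{
        pthread_spin_lock(&ioc_lock);
        ioc_enabled = enable;           /* only non-sleeping updates under the lock */
        pthread_spin_unlock(&ioc_lock);

        apply_wbt_state(enable);        /* sleeping work happens after unlock */
}

int main(void)
{
        pthread_spin_init(&ioc_lock, PTHREAD_PROCESS_PRIVATE);
        qos_write(true);
        qos_write(false);
        pthread_spin_destroy(&ioc_lock);
        return 0;
}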
block/blk-mq.c
@@ -1280,7 +1280,11 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 
         if (!plug->multiple_queues && last && last->q != rq->q)
                 plug->multiple_queues = true;
-        if (!plug->has_elevator && (rq->rq_flags & RQF_USE_SCHED))
+        /*
+         * Any request allocated from sched tags can't be issued to
+         * ->queue_rqs() directly
+         */
+        if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
                 plug->has_elevator = true;
         rq->rq_next = NULL;
         rq_list_add(&plug->mq_list, rq);
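The comment added above explains the fix: a request allocated from scheduler tags cannot be fed to the driver's ->queue_rqs() batch path, so such a request must mark the plug as having an elevator. A stand-alone model of that decision (the struct and function names are simplified stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define RQF_SCHED_TAGS (1u << 0)        /* illustrative bit value */

struct req {
        unsigned int rq_flags;
        struct req *next;
};

struct plug {
        struct req *list;
        bool has_elevator;
};

static void plug_add(struct plug *p, struct req *rq)
{
        if (!p->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
                p->has_elevator = true; /* one sched-tag request disables batch issue */
        rq->next = p->list;
        p->list = rq;
}

static void plug_flush(struct plug *p)
{
        if (!p->has_elevator)
                printf("issue whole list via ->queue_rqs()\n");
        else
                printf("insert requests one by one through the scheduler\n");
}

int main(void)
{
        struct plug p = { 0 };
        struct req a = { 0 }, b = { .rq_flags = RQF_SCHED_TAGS };

        plug_add(&p, &a);
        plug_flush(&p);                 /* batch path */
        plug_add(&p, &b);
        plug_flush(&p);                 /* falls back to per-request insert */
        return 0;
}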
block/blk-sysfs.c
@@ -47,19 +47,6 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
         return count;
 }
 
-static ssize_t queue_var_store64(s64 *var, const char *page)
-{
-        int err;
-        s64 v;
-
-        err = kstrtos64(page, 10, &v);
-        if (err < 0)
-                return err;
-
-        *var = v;
-        return 0;
-}
-
 static ssize_t queue_requests_show(struct request_queue *q, char *page)
 {
         return queue_var_show(q->nr_requests, page);
@@ -451,61 +438,6 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
         return count;
 }
 
-static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-{
-        if (!wbt_rq_qos(q))
-                return -EINVAL;
-
-        if (wbt_disabled(q))
-                return sprintf(page, "0\n");
-
-        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
-}
-
-static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
-                                  size_t count)
-{
-        struct rq_qos *rqos;
-        ssize_t ret;
-        s64 val;
-
-        ret = queue_var_store64(&val, page);
-        if (ret < 0)
-                return ret;
-        if (val < -1)
-                return -EINVAL;
-
-        rqos = wbt_rq_qos(q);
-        if (!rqos) {
-                ret = wbt_init(q->disk);
-                if (ret)
-                        return ret;
-        }
-
-        if (val == -1)
-                val = wbt_default_latency_nsec(q);
-        else if (val >= 0)
-                val *= 1000ULL;
-
-        if (wbt_get_min_lat(q) == val)
-                return count;
-
-        /*
-         * Ensure that the queue is idled, in case the latency update
-         * ends up either enabling or disabling wbt completely. We can't
-         * have IO inflight if that happens.
-         */
-        blk_mq_freeze_queue(q);
-        blk_mq_quiesce_queue(q);
-
-        wbt_set_min_lat(q, val);
-
-        blk_mq_unquiesce_queue(q);
-        blk_mq_unfreeze_queue(q);
-
-        return count;
-}
-
 static ssize_t queue_wc_show(struct request_queue *q, char *page)
 {
         if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -598,7 +530,6 @@ QUEUE_RW_ENTRY(queue_wc, "write_cache");
 QUEUE_RO_ENTRY(queue_fua, "fua");
 QUEUE_RO_ENTRY(queue_dax, "dax");
 QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
-QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
 QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
 QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
@@ -617,8 +548,79 @@ QUEUE_RW_ENTRY(queue_iostats, "iostats");
 QUEUE_RW_ENTRY(queue_random, "add_random");
 QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
 
+#ifdef CONFIG_BLK_WBT
+static ssize_t queue_var_store64(s64 *var, const char *page)
+{
+        int err;
+        s64 v;
+
+        err = kstrtos64(page, 10, &v);
+        if (err < 0)
+                return err;
+
+        *var = v;
+        return 0;
+}
+
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+        if (!wbt_rq_qos(q))
+                return -EINVAL;
+
+        if (wbt_disabled(q))
+                return sprintf(page, "0\n");
+
+        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+                                  size_t count)
+{
+        struct rq_qos *rqos;
+        ssize_t ret;
+        s64 val;
+
+        ret = queue_var_store64(&val, page);
+        if (ret < 0)
+                return ret;
+        if (val < -1)
+                return -EINVAL;
+
+        rqos = wbt_rq_qos(q);
+        if (!rqos) {
+                ret = wbt_init(q->disk);
+                if (ret)
+                        return ret;
+        }
+
+        if (val == -1)
+                val = wbt_default_latency_nsec(q);
+        else if (val >= 0)
+                val *= 1000ULL;
+
+        if (wbt_get_min_lat(q) == val)
+                return count;
+
+        /*
+         * Ensure that the queue is idled, in case the latency update
+         * ends up either enabling or disabling wbt completely. We can't
+         * have IO inflight if that happens.
+         */
+        blk_mq_freeze_queue(q);
+        blk_mq_quiesce_queue(q);
+
+        wbt_set_min_lat(q, val);
+
+        blk_mq_unquiesce_queue(q);
+        blk_mq_unfreeze_queue(q);
+
+        return count;
+}
+
+QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
+#endif
+
 static struct attribute *queue_attrs[] = {
         &queue_requests_entry.attr,
         &queue_ra_entry.attr,
         &queue_max_hw_sectors_entry.attr,
         &queue_max_sectors_entry.attr,
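This hunk re-adds queue_var_store64() and the wbt_lat_usec show/store handlers under #ifdef CONFIG_BLK_WBT, so the sysfs file only exists when writeback throttling is built in. From user space the attribute is a plain text file under the queue directory; per the store routine above, values are in microseconds, 0 disables wbt and -1 restores the device default. A small example (the device name sda is an assumption, adjust the path for your system):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/block/sda/queue/wbt_lat_usec";
        char buf[32];
        int fd = open(path, O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* read the current latency target (microseconds, "0" when disabled) */
        ssize_t n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("current wbt_lat_usec: %s", buf);
        }

        /* ask for the built-in default again ("-1" maps to wbt_default_latency_nsec()) */
        lseek(fd, 0, SEEK_SET);
        if (write(fd, "-1", 2) != 2)
                perror("write");

        close(fd);
        return 0;
}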
@@ -626,7 +628,6 @@ static struct attribute *queue_attrs[] = {
         &queue_max_discard_segments_entry.attr,
         &queue_max_integrity_segments_entry.attr,
         &queue_max_segment_size_entry.attr,
-        &elv_iosched_entry.attr,
         &queue_hw_sector_size_entry.attr,
         &queue_logical_block_size_entry.attr,
         &queue_physical_block_size_entry.attr,
@@ -647,7 +648,6 @@ static struct attribute *queue_attrs[] = {
         &queue_max_open_zones_entry.attr,
         &queue_max_active_zones_entry.attr,
         &queue_nomerges_entry.attr,
-        &queue_rq_affinity_entry.attr,
         &queue_iostats_entry.attr,
         &queue_stable_writes_entry.attr,
         &queue_random_entry.attr,
@@ -655,9 +655,7 @@ static struct attribute *queue_attrs[] = {
         &queue_wc_entry.attr,
         &queue_fua_entry.attr,
         &queue_dax_entry.attr,
-        &queue_wb_lat_entry.attr,
         &queue_poll_delay_entry.attr,
-        &queue_io_timeout_entry.attr,
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
         &blk_throtl_sample_time_entry.attr,
 #endif
@@ -666,16 +664,23 @@ static struct attribute *queue_attrs[] = {
         NULL,
 };
 
+static struct attribute *blk_mq_queue_attrs[] = {
+        &queue_requests_entry.attr,
+        &elv_iosched_entry.attr,
+        &queue_rq_affinity_entry.attr,
+        &queue_io_timeout_entry.attr,
+#ifdef CONFIG_BLK_WBT
+        &queue_wb_lat_entry.attr,
+#endif
+        NULL,
+};
+
 static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                   int n)
 {
         struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
         struct request_queue *q = disk->queue;
 
-        if (attr == &queue_io_timeout_entry.attr &&
-            (!q->mq_ops || !q->mq_ops->timeout))
-                return 0;
-
         if ((attr == &queue_max_open_zones_entry.attr ||
              attr == &queue_max_active_zones_entry.attr) &&
             !blk_queue_is_zoned(q))
@@ -684,11 +689,30 @@ static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
         return attr->mode;
 }
 
+static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
+                                         struct attribute *attr, int n)
+{
+        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
+        struct request_queue *q = disk->queue;
+
+        if (!queue_is_mq(q))
+                return 0;
+
+        if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
+                return 0;
+
+        return attr->mode;
+}
+
 static struct attribute_group queue_attr_group = {
         .attrs = queue_attrs,
         .is_visible = queue_attr_visible,
 };
 
+static struct attribute_group blk_mq_queue_attr_group = {
+        .attrs = blk_mq_queue_attrs,
+        .is_visible = blk_mq_queue_attr_visible,
+};
+
 #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
@@ -733,6 +757,7 @@ static const struct sysfs_ops queue_sysfs_ops = {
 
 static const struct attribute_group *blk_queue_attr_groups[] = {
         &queue_attr_group,
+        &blk_mq_queue_attr_group,
         NULL
 };
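The three blk-sysfs.c hunks above introduce a second attribute group, blk_mq_queue_attrs, whose is_visible callback hides every entry on bio-based queues. The sketch below models how an is_visible callback filters an attribute table; the types and registration loop are simplified stand-ins for the kernel's sysfs machinery, not the real definitions:

#include <stdbool.h>
#include <stdio.h>

struct attribute { const char *name; unsigned int mode; };

struct attribute_group {
        struct attribute **attrs;
        unsigned int (*is_visible)(const struct attribute *attr);
};

static struct attribute io_timeout_attr = { "io_timeout", 0644 };
static struct attribute wb_lat_attr     = { "wbt_lat_usec", 0644 };
static struct attribute *mq_attrs[] = { &io_timeout_attr, &wb_lat_attr, NULL };

static bool queue_is_mq = false;        /* toggle to see the attributes appear */

static unsigned int mq_visible(const struct attribute *attr)
{
        if (!queue_is_mq)
                return 0;               /* hide every blk-mq attribute on bio-based queues */
        return attr->mode;
}

static const struct attribute_group mq_group = { mq_attrs, mq_visible };

static void register_group(const struct attribute_group *grp)
{
        for (struct attribute **a = grp->attrs; *a; a++) {
                unsigned int mode = grp->is_visible ? grp->is_visible(*a) : (*a)->mode;
                if (mode)
                        printf("created %s (mode %o)\n", (*a)->name, mode);
                else
                        printf("skipped %s\n", (*a)->name);
        }
}

int main(void)
{
        register_group(&mq_group);      /* all skipped */
        queue_is_mq = true;
        register_group(&mq_group);      /* all created */
        return 0;
}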
block/blk-throttle.c
@@ -2178,12 +2178,6 @@ bool __blk_throtl_bio(struct bio *bio)
         rcu_read_lock();
 
-        if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
-                blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
-                                bio->bi_iter.bi_size);
-                blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
-        }
-
         spin_lock_irq(&q->queue_lock);
 
         throtl_update_latency_buckets(td);
block/blk-throttle.h
@@ -185,6 +185,15 @@ static inline bool blk_should_throtl(struct bio *bio)
         struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
         int rw = bio_data_dir(bio);
 
+        if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
+                if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
+                        bio_set_flag(bio, BIO_CGROUP_ACCT);
+                        blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
+                                        bio->bi_iter.bi_size);
+                }
+                blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
+        }
+
         /* iops limit is always counted */
         if (tg->has_rules_iops[rw])
                 return true;
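The BIO_CGROUP_ACCT guard added above keeps the cgroup v1 byte counters correct when the same bio is accounted more than once, for example after a split: bytes are charged only on the first pass, while every pass still counts as an I/O. A tiny stand-alone model of that guard (illustrative names):

#include <stdbool.h>
#include <stdio.h>

struct bio { unsigned int size; bool cgroup_acct; };
static unsigned long long stat_bytes, stat_ios;

static void account(struct bio *bio)
{
        if (!bio->cgroup_acct) {
                bio->cgroup_acct = true;        /* bytes only once per bio */
                stat_bytes += bio->size;
        }
        stat_ios++;                             /* ios on every pass */
}

int main(void)
{
        struct bio b = { .size = 1u << 20 };

        account(&b);    /* first submission */
        account(&b);    /* e.g. re-accounted after a split */
        printf("bytes=%llu ios=%llu\n", stat_bytes, stat_ios);
        return 0;
}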
block/blk-wbt.c
@@ -146,7 +146,7 @@ enum {
 static inline bool rwb_enabled(struct rq_wb *rwb)
 {
         return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
-                      rwb->wb_normal != 0;
+                      rwb->enable_state != WBT_STATE_OFF_MANUAL;
 }
 
 static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
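rwb_enabled() now looks only at enable_state rather than at wb_normal. A compilable sketch of the state model behind it and behind the simplified wbt_disabled() later in this diff (the enum ordering and the two ON states are illustrative assumptions; only the two OFF states appear in the hunks):

#include <stdbool.h>
#include <stdio.h>

enum wbt_state {
        WBT_STATE_ON_DEFAULT,   /* enabled automatically */
        WBT_STATE_ON_MANUAL,    /* enabled via wbt_lat_usec */
        WBT_STATE_OFF_DEFAULT,  /* disabled automatically */
        WBT_STATE_OFF_MANUAL,   /* disabled via wbt_lat_usec = 0 */
};

struct rq_wb { enum wbt_state enable_state; };

static bool rwb_enabled(const struct rq_wb *rwb)
{
        return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
                      rwb->enable_state != WBT_STATE_OFF_MANUAL;
}

/* After the cleanup, wbt_disabled() is just the negation of rwb_enabled(). */
static bool wbt_disabled(const struct rq_wb *rwb)
{
        return !rwb || !rwb_enabled(rwb);
}

int main(void)
{
        struct rq_wb rwb = { WBT_STATE_ON_DEFAULT };
        printf("enabled=%d disabled=%d\n", rwb_enabled(&rwb), wbt_disabled(&rwb));
        rwb.enable_state = WBT_STATE_OFF_MANUAL;
        printf("enabled=%d disabled=%d\n", rwb_enabled(&rwb), wbt_disabled(&rwb));
        return 0;
}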
@@ -200,15 +200,6 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
         inflight = atomic_dec_return(&rqw->inflight);
 
-        /*
-         * wbt got disabled with IO in flight. Wake up any potential
-         * waiters, we don't have to do more than that.
-         */
-        if (unlikely(!rwb_enabled(rwb))) {
-                rwb_wake_all(rwb);
-                return;
-        }
-
         /*
          * For discards, our limit is always the background. For writes, if
          * the device does write back caching, drop further down before we
@@ -503,8 +494,7 @@ bool wbt_disabled(struct request_queue *q)
 {
         struct rq_qos *rqos = wbt_rq_qos(q);
 
-        return !rqos || RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT ||
-               RQWB(rqos)->enable_state == WBT_STATE_OFF_MANUAL;
+        return !rqos || !rwb_enabled(RQWB(rqos));
 }
 
 u64 wbt_get_min_lat(struct request_queue *q)
@@ -545,13 +535,6 @@ static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
 {
         unsigned int limit;
 
-        /*
-         * If we got disabled, just return UINT_MAX. This ensures that
-         * we'll properly inc a new IO, and dec+wakeup at the end.
-         */
-        if (!rwb_enabled(rwb))
-                return UINT_MAX;
-
         if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
                 return rwb->wb_background;
block/blk-wbt.h
@@ -18,10 +18,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
 
 #else
 
-static inline int wbt_init(struct gendisk *disk)
-{
-        return -EINVAL;
-}
 static inline void wbt_disable_default(struct gendisk *disk)
 {
 }
@@ -31,21 +27,6 @@ static inline void wbt_enable_default(struct gendisk *disk)
 static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
 {
 }
-static inline u64 wbt_get_min_lat(struct request_queue *q)
-{
-        return 0;
-}
-static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
-{
-}
-static inline u64 wbt_default_latency_nsec(struct request_queue *q)
-{
-        return 0;
-}
-static inline bool wbt_disabled(struct request_queue *q)
-{
-        return true;
-}
 
 #endif /* CONFIG_BLK_WBT */