block: revert 4f1e9630af ("blk-throtl: optimize IOPS throttle for large IO scenarios")

Revert commit 4f1e9630af ("blk-throtl: optimize IOPS throttle for large
IO scenarios"), since we have another, easier way to address this issue
and get a better IOPS throttling result.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220216044514.2903784-9-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
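The reverted patch made IOPS throttling see bio splits by accounting them out of band: blk_throtl_charge_bio_split() bumped per-cgroup atomic counters (io_split_cnt / last_io_split_cnt) at split time, and tg_may_dispatch() / throtl_downgrade_check() later folded those counts into the normal dispatch statistics, as the diff below shows. The "easier way" mentioned above is, judging from the rest of the linked series, to throttle split bios directly when an IOPS limit is set, which makes the extra counters unnecessary.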
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -640,8 +640,6 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 	tg->bytes_disp[rw] = 0;
 	tg->io_disp[rw] = 0;
 
-	atomic_set(&tg->io_split_cnt[rw], 0);
-
 	/*
 	 * Previous slice has expired. We must have trimmed it after last
 	 * bio dispatch. That means since start of last slice, we never used
@@ -665,8 +663,6 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
 	tg->slice_start[rw] = jiffies;
 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 
-	atomic_set(&tg->io_split_cnt[rw], 0);
-
 	throtl_log(&tg->service_queue,
 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
@@ -900,9 +896,6 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 			jiffies + tg->td->throtl_slice);
 	}
 
-	if (iops_limit != UINT_MAX)
-		tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
-
 	if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
 	    tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
 		if (wait)
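The hunk above removes the consumer side of the scheme: pending split counts were folded into io_disp with atomic_xchg, which reads and resets the side counter in one step, so concurrent producers are neither lost nor double-charged. Below is a minimal userspace C11 sketch of that produce/drain pattern; the struct and function names are hypothetical and this is an illustration of the pattern, not kernel code.

/*
 * Sketch of the drain pattern the reverted patch used in
 * tg_may_dispatch(): splits accumulate in a side counter and are
 * folded into the main dispatch count exactly once via an atomic
 * exchange, which also resets the side counter.
 */
#include <stdatomic.h>
#include <stdio.h>

struct grp {
	unsigned int io_disp;      /* IOs charged in the current slice */
	atomic_uint io_split_cnt;  /* splits counted since the last drain */
};

/* Producer side: called once per bio split. */
static void charge_split(struct grp *g)
{
	atomic_fetch_add(&g->io_split_cnt, 1);
}

/* Consumer side: fold pending split counts into io_disp and reset. */
static void drain_splits(struct grp *g)
{
	g->io_disp += atomic_exchange(&g->io_split_cnt, 0);
}

int main(void)
{
	struct grp g = { .io_disp = 0 };

	atomic_init(&g.io_split_cnt, 0);
	charge_split(&g);
	charge_split(&g);
	drain_splits(&g);
	printf("io_disp=%u split_cnt=%u\n",
	       g.io_disp, atomic_load(&g.io_split_cnt)); /* io_disp=2 split_cnt=0 */
	return 0;
}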
@@ -1927,14 +1920,12 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
 	}
 
 	if (tg->iops[READ][LIMIT_LOW]) {
-		tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
 		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
 		if (iops >= tg->iops[READ][LIMIT_LOW])
 			tg->last_low_overflow_time[READ] = now;
 	}
 
 	if (tg->iops[WRITE][LIMIT_LOW]) {
-		tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
 		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
 		if (iops >= tg->iops[WRITE][LIMIT_LOW])
 			tg->last_low_overflow_time[WRITE] = now;
@@ -2053,25 +2044,6 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 	}
 #endif
 
-void blk_throtl_charge_bio_split(struct bio *bio)
-{
-	struct blkcg_gq *blkg = bio->bi_blkg;
-	struct throtl_grp *parent = blkg_to_tg(blkg);
-	struct throtl_service_queue *parent_sq;
-	bool rw = bio_data_dir(bio);
-
-	do {
-		if (!parent->has_rules[rw])
-			break;
-
-		atomic_inc(&parent->io_split_cnt[rw]);
-		atomic_inc(&parent->last_io_split_cnt[rw]);
-
-		parent_sq = parent->service_queue.parent_sq;
-		parent = sq_to_tg(parent_sq);
-	} while (parent);
-}
-
 bool __blk_throtl_bio(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
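The removed helper was the producer side: it walked the cgroup hierarchy bottom-up, charging the split to every throttle group with rules for the bio's direction and stopping at the first group without rules. A compacted userspace illustration of that walk follows, with one counter instead of the kernel's two (io_split_cnt and last_io_split_cnt) and hypothetical type names; it sketches the pattern under those simplifications, not the kernel implementation.

/*
 * Simplified sketch of the hierarchy walk in the removed
 * blk_throtl_charge_bio_split(): charge every ancestor that has
 * throttling rules for this direction, walking up via the parent
 * pointer until the root is passed or a rule-less group is hit.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct tgrp {
	struct tgrp *parent;        /* NULL at the root */
	bool has_rules[2];          /* per direction: 0 = READ, 1 = WRITE */
	atomic_uint io_split_cnt[2];
};

static void charge_bio_split(struct tgrp *tg, int rw)
{
	do {
		if (!tg->has_rules[rw])
			break;
		atomic_fetch_add(&tg->io_split_cnt[rw], 1);
		tg = tg->parent;
	} while (tg);
}

int main(void)
{
	struct tgrp root  = { .parent = NULL,  .has_rules = { true, true } };
	struct tgrp child = { .parent = &root, .has_rules = { true, false } };

	charge_bio_split(&child, 0); /* READ: charged to child and root */
	charge_bio_split(&child, 1); /* WRITE: child has no rule, nothing charged */
	return 0;
}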
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -138,9 +138,6 @@ struct throtl_grp {
 	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
 	unsigned long bio_cnt_reset_time;
 
-	atomic_t io_split_cnt[2];
-	atomic_t last_io_split_cnt[2];
-
 	struct blkg_rwstat stat_bytes;
 	struct blkg_rwstat stat_ios;
 };
@@ -164,13 +161,11 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
 static inline void blk_throtl_register_queue(struct request_queue *q) { }
-static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
 static inline bool blk_throtl_bio(struct bio *bio) { return false; }
 #else /* CONFIG_BLK_DEV_THROTTLING */
 int blk_throtl_init(struct request_queue *q);
 void blk_throtl_exit(struct request_queue *q);
 void blk_throtl_register_queue(struct request_queue *q);
-void blk_throtl_charge_bio_split(struct bio *bio);
 bool __blk_throtl_bio(struct bio *bio);
 static inline bool blk_throtl_bio(struct bio *bio)
 {