Merge tag 'block-5.12-2021-02-27' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe:
 "A few stragglers (and one due to me missing it originally), and fixes
  for changes in this merge window mostly. In particular:

   - blktrace cleanups (Chaitanya, Greg)

   - Kill dead blk_pm_* functions (Bart)

   - Fixes for the bio alloc changes (Christoph)

   - Fix for the partition changes (Christoph, Ming)

   - Fix for turning off iopoll with polled IO inflight (Jeffle)

   - nbd disconnect fix (Josef)

   - loop fsync error fix (Mauricio)

   - kyber update depth fix (Yang)

   - max_sectors alignment fix (Mikulas)

   - Add bio_max_segs helper (Matthew)"

* tag 'block-5.12-2021-02-27' of git://git.kernel.dk/linux-block: (21 commits)
  block: Add bio_max_segs
  blktrace: fix documentation for blk_fill_rw()
  block: memory allocations in bounce_clone_bio must not fail
  block: remove the gfp_mask argument to bounce_clone_bio
  block: fix bounce_clone_bio for passthrough bios
  block-crypto-fallback: use a bio_set for splitting bios
  block: fix logging on capacity change
  blk-settings: align max_sectors on "logical_block_size" boundary
  block: reopen the device in blkdev_reread_part
  block: don't skip empty device in in disk_uevent
  blktrace: remove debugfs file dentries from struct blk_trace
  nbd: handle device refs for DESTROY_ON_DISCONNECT properly
  kyber: introduce kyber_depth_updated()
  loop: fix I/O error on fsync() in detached loop devices
  block: fix potential IO hang when turning off io_poll
  block: get rid of the trace rq insert wrapper
  blktrace: fix blk_rq_merge documentation
  blktrace: fix blk_rq_issue documentation
  blktrace: add blk_fill_rwbs documentation comment
  block: remove superfluous param in blk_fill_rwbs()
  ...
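For context, the first commit in the list replaces open-coded clamps of a bio's segment count with a helper; the blk-map.c hunk below shows one converted call site. A minimal userspace sketch of what bio_max_segs() does, assuming the 5.12-era BIO_MAX_PAGES limit of 256 (the value is an assumption here, not taken from this diff):

#include <stdio.h>

/* Assumed value: BIO_MAX_PAGES was 256 segments in this release. */
#define BIO_MAX_PAGES 256

/* Clamp a computed segment count to the per-bio maximum, replacing the
 * old "if (nr_pages > BIO_MAX_PAGES) nr_pages = BIO_MAX_PAGES" pattern. */
static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return nr_segs < BIO_MAX_PAGES ? nr_segs : BIO_MAX_PAGES;
}

int main(void)
{
	printf("%u\n", bio_max_segs(17));   /* prints 17  */
	printf("%u\n", bio_max_segs(4096)); /* prints 256 */
	return 0;
}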
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -125,6 +125,8 @@
 #include <linux/delay.h>
 #include <linux/backing-dev.h>
 
+#include <trace/events/block.h>
+
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
@@ -5621,7 +5623,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	spin_unlock_irq(&bfqd->lock);
 
-	blk_mq_sched_request_inserted(rq);
+	trace_block_rq_insert(rq);
 
 	spin_lock_irq(&bfqd->lock);
 	bfqq = bfq_init_rq(rq);
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -59,6 +59,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
 
 DEFINE_IDA(blk_queue_ida);
 
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -80,6 +80,7 @@ static struct blk_crypto_keyslot {
 static struct blk_keyslot_manager blk_crypto_ksm;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
+static struct bio_set crypto_bio_split;
 
 /*
  * This is the key we set when evicting a keyslot. This *should* be the all 0's
@@ -224,7 +225,8 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
 	if (num_sectors < bio_sectors(bio)) {
 		struct bio *split_bio;
 
-		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
+		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
+				      &crypto_bio_split);
 		if (!split_bio) {
 			bio->bi_status = BLK_STS_RESOURCE;
 			return false;
@@ -538,9 +540,13 @@ static int blk_crypto_fallback_init(void)
 
 	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
 
-	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+	err = bioset_init(&crypto_bio_split, 64, 0, 0);
 	if (err)
 		goto out;
 
+	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+	if (err)
+		goto fail_free_bioset;
 	err = -ENOMEM;
 
 	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
@@ -591,6 +597,8 @@ fail_free_wq:
 	destroy_workqueue(blk_crypto_wq);
 fail_free_ksm:
 	blk_ksm_destroy(&blk_crypto_ksm);
+fail_free_bioset:
+	bioset_exit(&crypto_bio_split);
 out:
 	return err;
 }
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -150,9 +150,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 	bmd->is_our_pages = !map_data;
 	bmd->is_null_mapped = (map_data && map_data->null_mapped);
 
-	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
-	if (nr_pages > BIO_MAX_PAGES)
-		nr_pages = BIO_MAX_PAGES;
+	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
 
 	ret = -ENOMEM;
 	bio = bio_kmalloc(gfp_mask, nr_pages);
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -384,12 +384,6 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
 
-void blk_mq_sched_request_inserted(struct request *rq)
-{
-	trace_block_rq_insert(rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
-
 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 				       bool has_sched,
 				       struct request *rq)
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -7,7 +7,6 @@
 
 void blk_mq_sched_assign_ioc(struct request *rq);
 
-void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
--- a/block/blk-pm.h
+++ b/block/blk-pm.h
@@ -21,31 +21,6 @@ static inline void blk_pm_mark_last_busy(struct request *rq)
 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		pm_runtime_mark_last_busy(rq->q->dev);
 }
-
-static inline void blk_pm_requeue_request(struct request *rq)
-{
-	lockdep_assert_held(&rq->q->queue_lock);
-
-	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
-		rq->q->nr_pending--;
-}
-
-static inline void blk_pm_add_request(struct request_queue *q,
-				      struct request *rq)
-{
-	lockdep_assert_held(&q->queue_lock);
-
-	if (q->dev && !(rq->rq_flags & RQF_PM))
-		q->nr_pending++;
-}
-
-static inline void blk_pm_put_request(struct request *rq)
-{
-	lockdep_assert_held(&rq->q->queue_lock);
-
-	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
-		--rq->q->nr_pending;
-}
 #else
 static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
 {
@@ -55,19 +30,6 @@ static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
 static inline void blk_pm_mark_last_busy(struct request *rq)
 {
 }
-
-static inline void blk_pm_requeue_request(struct request *rq)
-{
-}
-
-static inline void blk_pm_add_request(struct request_queue *q,
-				      struct request *rq)
-{
-}
-
-static inline void blk_pm_put_request(struct request *rq)
-{
-}
 #endif
 
 #endif /* _BLOCK_BLK_PM_H_ */
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -504,6 +504,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 }
 EXPORT_SYMBOL(blk_queue_io_opt);
 
+static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
+{
+	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
+	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
+		sectors = PAGE_SIZE >> SECTOR_SHIFT;
+	return sectors;
+}
+
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:	the stacking driver limits (top device)
@@ -630,6 +638,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		ret = -1;
 	}
 
+	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
+	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
+	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
+
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
 		alignment = queue_limit_discard_alignment(b, start);
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -434,10 +434,13 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	if (ret < 0)
 		return ret;
 
-	if (poll_on)
+	if (poll_on) {
 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
-	else
+	} else {
+		blk_mq_freeze_queue(q);
 		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+		blk_mq_unfreeze_queue(q);
+	}
 
 	return ret;
 }
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -214,8 +214,7 @@ static void bounce_end_io_read_isa(struct bio *bio)
 	__bounce_end_io_read(bio, &isa_page_pool);
 }
 
-static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
-		struct bio_set *bs)
+static struct bio *bounce_clone_bio(struct bio *bio_src)
 {
 	struct bvec_iter iter;
 	struct bio_vec bv;
@@ -242,10 +241,12 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
 	 *    asking for trouble and would force extra work on
 	 *    __bio_clone_fast() anyways.
 	 */
-
-	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
-	if (!bio)
-		return NULL;
+	if (bio_is_passthrough(bio_src))
+		bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
+				  bio_segments(bio_src));
+	else
+		bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
+				       &bounce_bio_set);
 	bio->bi_bdev = bio_src->bi_bdev;
 	if (bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
@@ -269,11 +270,11 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
 		break;
 	}
 
-	if (bio_crypt_clone(bio, bio_src, gfp_mask) < 0)
+	if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0)
 		goto err_put;
 
 	if (bio_integrity(bio_src) &&
-	    bio_integrity_clone(bio, bio_src, gfp_mask) < 0)
+	    bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0)
 		goto err_put;
 
 	bio_clone_blkg_association(bio, bio_src);
@@ -296,7 +297,6 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	unsigned i = 0;
 	bool bounce = false;
 	int sectors = 0;
-	bool passthrough = bio_is_passthrough(*bio_orig);
 
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_PAGES)
@@ -307,14 +307,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bounce)
 		return;
 
-	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
+	if (!bio_is_passthrough(*bio_orig) &&
+	    sectors < bio_sectors(*bio_orig)) {
 		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
 		bio_chain(bio, *bio_orig);
 		submit_bio_noacct(*bio_orig);
 		*bio_orig = bio;
 	}
-	bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
-			&bounce_bio_set);
+	bio = bounce_clone_bio(*bio_orig);
 
 	/*
 	 * Bvec table can't be updated by bio_for_each_segment_all(),
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -74,7 +74,7 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
 		return false;
 
 	pr_info("%s: detected capacity change from %lld to %lld\n",
-		disk->disk_name, size, capacity);
+		disk->disk_name, capacity, size);
 
 	/*
 	 * Historically we did not send a uevent for changes to/from an empty
@@ -476,7 +476,7 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
 	struct disk_part_iter piter;
 	struct block_device *part;
 
-	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY_PART0);
 	while ((part = disk_part_iter_next(&piter)))
 		kobject_uevent(bdev_kobj(part), action);
 	disk_part_iter_exit(&piter);
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -81,20 +81,27 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
 }
 #endif
 
-static int blkdev_reread_part(struct block_device *bdev)
+static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
 {
-	int ret;
+	struct block_device *tmp;
 
 	if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
 		return -EINVAL;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	mutex_lock(&bdev->bd_mutex);
-	ret = bdev_disk_changed(bdev, false);
-	mutex_unlock(&bdev->bd_mutex);
+	/*
+	 * Reopen the device to revalidate the driver state and force a
+	 * partition rescan.
+	 */
+	mode &= ~FMODE_EXCL;
+	set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
 
-	return ret;
+	tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+	blkdev_put(tmp, mode);
+	return 0;
 }
 
 static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
@@ -498,7 +505,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
 		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKRRPART:
-		return blkdev_reread_part(bdev);
+		return blkdev_reread_part(bdev, mode);
 	case BLKTRACESTART:
 	case BLKTRACESTOP:
 	case BLKTRACETEARDOWN:
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -13,6 +13,8 @@
 #include <linux/module.h>
 #include <linux/sbitmap.h>
 
+#include <trace/events/block.h>
+
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
@@ -353,19 +355,9 @@ static void kyber_timer_fn(struct timer_list *t)
 	}
 }
 
-static unsigned int kyber_sched_tags_shift(struct request_queue *q)
-{
-	/*
-	 * All of the hardware queues have the same depth, so we can just grab
-	 * the shift of the first one.
-	 */
-	return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
-}
-
 static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
 	struct kyber_queue_data *kqd;
-	unsigned int shift;
 	int ret = -ENOMEM;
 	int i;
 
@@ -400,9 +392,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 		kqd->latency_targets[i] = kyber_latency_targets[i];
 	}
 
-	shift = kyber_sched_tags_shift(q);
-	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
-
 	return kqd;
 
 err_buckets:
@@ -458,9 +447,19 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
 		INIT_LIST_HEAD(&kcq->rq_list[i]);
 }
 
-static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+	struct blk_mq_tags *tags = hctx->sched_tags;
+	unsigned int shift = tags->bitmap_tags->sb.shift;
+
+	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+
+	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
+}
+
+static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
 	struct kyber_hctx_data *khd;
 	int i;
 
@@ -502,8 +501,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 	khd->batching = 0;
 
 	hctx->sched_data = khd;
-	sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
-					kqd->async_depth);
+	kyber_depth_updated(hctx);
 
 	return 0;
@@ -602,7 +600,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 			list_move_tail(&rq->queuelist, head);
 			sbitmap_set_bit(&khd->kcq_map[sched_domain],
 					rq->mq_ctx->index_hw[hctx->type]);
-			blk_mq_sched_request_inserted(rq);
+			trace_block_rq_insert(rq);
 			spin_unlock(&kcq->lock);
 		}
 	}
@@ -1022,6 +1020,7 @@ static struct elevator_type kyber_sched = {
 		.completed_request = kyber_completed_request,
 		.dispatch_request = kyber_dispatch_request,
 		.has_work = kyber_has_work,
+		.depth_updated = kyber_depth_updated,
 	},
 #ifdef CONFIG_BLK_DEBUG_FS
 	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -18,6 +18,8 @@
 #include <linux/rbtree.h>
 #include <linux/sbitmap.h>
 
+#include <trace/events/block.h>
+
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
@@ -496,7 +498,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	if (blk_mq_sched_try_insert_merge(q, rq))
 		return;
 
-	blk_mq_sched_request_inserted(rq);
+	trace_block_rq_insert(rq);
 
 	if (at_head || blk_rq_is_passthrough(rq)) {
 		if (at_head)