Merge tag 'block-5.10-2020-10-12' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - Series of merge handling cleanups (Baolin, Christoph)

 - Series of blk-throttle fixes and cleanups (Baolin)

 - Series cleaning up BDI, separating the block device from the
   backing_dev_info (Christoph)

 - Removal of bdget() as a generic API (Christoph)

 - Removal of blkdev_get() as a generic API (Christoph)

 - Cleanup of is-partition checks (Christoph)

 - Series reworking disk revalidation (Christoph)

 - Series cleaning up bio flags (Christoph)

 - bio crypt fixes (Eric)

 - IO stats inflight tweak (Gabriel)

 - blk-mq tags fixes (Hannes)

 - Buffer invalidation fixes (Jan)

 - Allow soft limits for zone append (Johannes)

 - Shared tag set improvements (John, Kashyap)

 - Allow IOPRIO_CLASS_RT for CAP_SYS_NICE (Khazhismel)

 - DM no-wait support (Mike, Konstantin)

 - Request allocation improvements (Ming)

 - Allow md/dm/bcache to use IO stat helpers (Song)

 - Series improving blk-iocost (Tejun)

 - Various cleanups (Geert, Damien, Danny, Julia, Tetsuo, Tian, Wang,
   Xianting, Yang, Yufen, yangerkun)

* tag 'block-5.10-2020-10-12' of git://git.kernel.dk/linux-block: (191 commits)
  block: fix uapi blkzoned.h comments
  blk-mq: move cancel of hctx->run_work to the front of blk_exit_queue
  blk-mq: get rid of the dead flush handle code path
  block: get rid of unnecessary local variable
  block: fix comment and add lockdep assert
  blk-mq: use helper function to test hw stopped
  block: use helper function to test queue register
  block: remove redundant mq check
  block: invoke blk_mq_exit_sched no matter whether have .exit_sched
  percpu_ref: don't refer to ref->data if it isn't allocated
  block: ratelimit handle_bad_sector() message
  blk-throttle: Re-use the throtl_set_slice_end()
  blk-throttle: Open code __throtl_de/enqueue_tg()
  blk-throttle: Move service tree validation out of the throtl_rb_first()
  blk-throttle: Move the list operation after list validation
  blk-throttle: Fix IO hang for a corner case
  blk-throttle: Avoid tracking latency if low limit is invalid
  blk-throttle: Avoid getting the current time if tg->last_finish_time is 0
  blk-throttle: Remove a meaningless parameter for throtl_downgrade_state()
  block: Remove redundant 'return' statement
  ...
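One item above that is easy to exercise from userspace is the IOPRIO_CLASS_RT change: a task holding CAP_SYS_NICE (not only CAP_SYS_ADMIN) may now request the real-time I/O scheduling class. A minimal, hedged sketch of such a request follows; the priority level is arbitrary and the ioprio constants are restated locally rather than pulled from a uapi header.

/* Hedged userspace sketch: request the RT I/O scheduling class for the
 * current process. With this release, CAP_SYS_NICE is sufficient; before,
 * CAP_SYS_ADMIN was required. Constants mirror the ioprio uapi values. */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_RT         1
#define IOPRIO_WHO_PROCESS      1
#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_PRIO_VALUE(cls, data)  (((cls) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
        /* pid 0 means "the calling process"; 4 is an arbitrary RT level (0-7) */
        if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
                    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4)) < 0)
                perror("ioprio_set");
        return 0;
}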
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -24,6 +24,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/scatterlist.h>
 #include <linux/blkzoned.h>
+#include <linux/pm.h>
 
 struct module;
 struct scsi_ioctl_command;
@@ -398,6 +399,8 @@ struct request_queue {
         struct request *last_merge;
         struct elevator_queue *elevator;
 
+        struct percpu_ref q_usage_counter;
+
         struct blk_queue_stats *stats;
         struct rq_qos *rq_qos;
 
@@ -460,7 +463,7 @@ struct request_queue {
 
 #ifdef CONFIG_PM
         struct device *dev;
-        int rpm_status;
+        enum rpm_status rpm_status;
         unsigned int nr_pending;
 #endif
 
@@ -486,6 +489,8 @@ struct request_queue {
         struct timer_list timeout;
         struct work_struct timeout_work;
 
+        atomic_t nr_active_requests_shared_sbitmap;
+
         struct list_head icq_list;
 #ifdef CONFIG_BLK_CGROUP
         DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
@@ -568,7 +573,6 @@ struct request_queue {
          * percpu_ref_kill() and percpu_ref_reinit().
          */
         struct mutex mq_freeze_lock;
-        struct percpu_ref q_usage_counter;
 
         struct blk_mq_tag_set *tag_set;
         struct list_head tag_set_list;
@@ -605,6 +609,7 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
 #define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
 #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
 #define QUEUE_FLAG_WC 17 /* Write back caching */
 #define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
@@ -617,9 +622,12 @@ struct request_queue {
 #define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
 #define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
 #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
+#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
+#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
 
 #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
-                               (1 << QUEUE_FLAG_SAME_COMP))
+                               (1 << QUEUE_FLAG_SAME_COMP) | \
+                               (1 << QUEUE_FLAG_NOWAIT))
 
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
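QUEUE_FLAG_HCTX_ACTIVE above is bookkeeping for the shared tag set work called out in the merge message. As a hedged sketch (the ops pointer and queue sizes are invented, not from this diff), a blk-mq driver whose hardware exposes a single tag space across all of its queues would opt in roughly like this:

/* Hedged sketch: opt a tag set into the shared-sbitmap scheme so all hctxs
 * draw from one tag pool; the core then uses QUEUE_FLAG_HCTX_ACTIVE and
 * nr_active_requests_shared_sbitmap to keep the sharing fair. */
#include <linux/blk-mq.h>
#include <linux/string.h>

static int my_driver_init_tag_set(struct blk_mq_tag_set *set,
                                  const struct blk_mq_ops *ops)
{
        memset(set, 0, sizeof(*set));
        set->ops = ops;
        set->nr_hw_queues = 16;
        set->queue_depth = 1024;        /* one pool shared by all 16 queues */
        set->numa_node = NUMA_NO_NODE;
        set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_TAG_HCTX_SHARED;
        return blk_mq_alloc_tag_set(set);
}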
@@ -633,6 +641,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_noxmerges(q) \
         test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_stable_writes(q) \
+        test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
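The new QUEUE_FLAG_STABLE_WRITES / blk_queue_stable_writes() pair comes out of the BDI cleanup series and carries the stable-pages requirement on the queue itself. A hedged sketch of the driver side, with an illustrative device name: a driver that computes checksums or protection information over in-flight pages sets the flag when it configures its queue.

#include <linux/blkdev.h>

/* Hedged sketch: a device whose integrity/checksum handling requires pages
 * to stay untouched while under writeback advertises that via the new flag.
 * "my_dev" is illustrative, not from this patch. */
static void my_dev_setup_queue(struct request_queue *q)
{
        blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);

        if (blk_queue_stable_writes(q))
                pr_info("my_dev: stable page writes enforced\n");
}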
@@ -659,6 +669,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
 #define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
+#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
 
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);
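blk_queue_nowait() is what the submission path consults when a bio carries REQ_NOWAIT; the merge message's "DM no-wait support" item is about propagating this capability through stacked devices. A hedged sketch of the consumer side (the function name is invented; the real check lives in the core bio submission code):

#include <linux/blkdev.h>

/* Hedged sketch: fail a REQ_NOWAIT bio up front instead of blocking when the
 * underlying queue has not advertised QUEUE_FLAG_NOWAIT. */
static blk_status_t my_check_nowait(struct request_queue *q, struct bio *bio)
{
        if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
                return BLK_STS_NOTSUPP;
        return BLK_STS_OK;
}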
@@ -1061,11 +1072,17 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 static inline unsigned int blk_max_size_offset(struct request_queue *q,
                                                sector_t offset)
 {
-        if (!q->limits.chunk_sectors)
+        unsigned int chunk_sectors = q->limits.chunk_sectors;
+
+        if (!chunk_sectors)
                 return q->limits.max_sectors;
 
-        return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
-                        (offset & (q->limits.chunk_sectors - 1))));
+        if (likely(is_power_of_2(chunk_sectors)))
+                chunk_sectors -= offset & (chunk_sectors - 1);
+        else
+                chunk_sectors -= sector_div(offset, chunk_sectors);
+
+        return min(q->limits.max_sectors, chunk_sectors);
 }
 
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
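The rewrite of blk_max_size_offset() above exists so that chunk_sectors no longer has to be a power of two: the distance to the next chunk boundary is taken with a mask when it can be, and with sector_div() otherwise. A small standalone sketch of the same arithmetic, with made-up numbers:

/* Standalone sketch of the boundary math in the new blk_max_size_offset():
 * how many sectors remain before the next chunk_sectors boundary.
 * The offsets and chunk sizes below are made-up examples. */
#include <stdio.h>

static unsigned int sectors_to_chunk_boundary(unsigned long long offset,
                                              unsigned int chunk_sectors)
{
        if ((chunk_sectors & (chunk_sectors - 1)) == 0) /* power of two: mask */
                return chunk_sectors - (offset & (chunk_sectors - 1));
        return chunk_sectors - (offset % chunk_sectors); /* otherwise: divide */
}

int main(void)
{
        /* 256-sector chunks: 1000 % 256 = 232, so 24 sectors to the boundary */
        printf("%u\n", sectors_to_chunk_boundary(1000, 256));
        /* 192-sector chunks: 1000 % 192 = 40, so 152 sectors to the boundary */
        printf("%u\n", sectors_to_chunk_boundary(1000, 192));
        return 0;
}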
@@ -1132,6 +1149,7 @@ extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
                                        unsigned int alignment);
+void blk_queue_update_readahead(struct request_queue *q);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
@@ -1341,6 +1359,11 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
 
+static inline bool bdev_is_partition(struct block_device *bdev)
+{
+        return bdev->bd_partno;
+}
+
 enum blk_default_limits {
         BLK_MAX_SEGMENTS = 128,
         BLK_SAFE_MAX_SECTORS = 255,
@@ -1386,7 +1409,10 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
 
 static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
 {
-        return q->limits.max_zone_append_sectors;
+
+        const struct queue_limits *l = &q->limits;
+
+        return min(l->max_zone_append_sectors, l->max_sectors);
 }
 
 static inline unsigned queue_logical_block_size(const struct request_queue *q)
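queue_max_zone_append_sectors() now reports the configured zone-append limit soft-capped by max_sectors ("Allow soft limits for zone append" in the merge message). A hedged sketch of what that means for a zoned driver; the queue pointer and numbers are illustrative:

#include <linux/blkdev.h>

/* Hedged sketch: however large a zone-append limit the driver requests,
 * readers of the limit now see it clamped to q->limits.max_sectors. */
static void my_zoned_dev_set_limits(struct request_queue *q)
{
        blk_queue_max_zone_append_sectors(q, 4096);     /* driver's own cap */
        pr_info("effective zone append limit: %u sectors\n",
                queue_max_zone_append_sectors(q));      /* min(4096, max_sectors) */
}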
@@ -1457,10 +1483,9 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
 
         if (q->limits.misaligned)
                 return -1;
-
-        if (bdev != bdev->bd_contains)
-                return bdev->bd_part->alignment_offset;
-
+        if (bdev_is_partition(bdev))
+                return queue_limit_alignment_offset(&q->limits,
+                                bdev->bd_part->start_sect);
         return q->limits.alignment_offset;
 }
 
@@ -1499,9 +1524,9 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
 {
         struct request_queue *q = bdev_get_queue(bdev);
 
-        if (bdev != bdev->bd_contains)
-                return bdev->bd_part->discard_alignment;
-
+        if (bdev_is_partition(bdev))
+                return queue_limit_discard_alignment(&q->limits,
+                                bdev->bd_part->start_sect);
         return q->limits.discard_alignment;
 }
 
@@ -1644,10 +1669,6 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
                                    struct scatterlist *);
 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
-                                   struct request *);
-extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
-                                    struct bio *);
 
 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
@@ -1775,18 +1796,6 @@ static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
 {
         return 0;
 }
-static inline bool blk_integrity_merge_rq(struct request_queue *rq,
-                                          struct request *r1,
-                                          struct request *r2)
-{
-        return true;
-}
-static inline bool blk_integrity_merge_bio(struct request_queue *rq,
-                                           struct request *r,
-                                           struct bio *b)
-{
-        return true;
-}
 
 static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
                                                    unsigned int sectors)
@@ -1932,6 +1941,11 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
                       unsigned long start_time);
 
+unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
+                                 struct bio *bio);
+void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+                      unsigned long start_time);
+
 /**
  * bio_start_io_acct - start I/O accounting for bio based drivers
  * @bio: bio to start account for
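part_start_io_acct()/part_end_io_acct() are the per-partition counterparts to the disk_*_io_acct() helpers, added so bio-based drivers ("Allow md/dm/bcache to use IO stat helpers" above) can account I/O against the partition a bio targets. A hedged sketch of how a bio-based driver might bracket its processing with them; the driver-side names are invented, and note that bios still carry bi_disk in this release:

#include <linux/blkdev.h>

/* Hedged sketch: the driver's real work would replace this stub. */
static void my_process_bio(struct bio *bio)
{
        /* driver-specific handling of the bio goes here */
}

static void my_submit_bio(struct bio *bio)
{
        struct hd_struct *part;
        unsigned long start_time;

        start_time = part_start_io_acct(bio->bi_disk, &part, bio);
        my_process_bio(bio);
        part_end_io_acct(part, bio, start_time);
}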
@@ -1969,7 +1983,6 @@ void blkdev_show(struct seq_file *seqf, off_t offset);
 #define BLKDEV_MAJOR_MAX 0
 #endif
 
-int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
 struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
                                         void *holder);
 struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
@@ -1980,17 +1993,24 @@ void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
 void blkdev_put(struct block_device *bdev, fmode_t mode);
 
 struct block_device *I_BDEV(struct inode *inode);
-struct block_device *bdget(dev_t);
+struct block_device *bdget_part(struct hd_struct *part);
 struct block_device *bdgrab(struct block_device *bdev);
 void bdput(struct block_device *);
 
 #ifdef CONFIG_BLOCK
 void invalidate_bdev(struct block_device *bdev);
+int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
+                loff_t lend);
 int sync_blockdev(struct block_device *bdev);
 #else
 static inline void invalidate_bdev(struct block_device *bdev)
 {
 }
+static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
+                                      loff_t lstart, loff_t lend)
+{
+        return 0;
+}
 static inline int sync_blockdev(struct block_device *bdev)
 {
         return 0;
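truncate_bdev_range() gives block-device callers a single helper for dropping the page cache over a byte range before punching or zeroing it, the pattern fallocate-style paths need. A hedged sketch of such a caller; everything other than the two block-layer calls is invented:

#include <linux/blkdev.h>

/* Hedged sketch: invalidate cached pages over [start, end] and then zero the
 * corresponding sectors, so stale page-cache data cannot resurface later. */
static int my_zero_range(struct block_device *bdev, fmode_t mode,
                         loff_t start, loff_t end)
{
        int error;

        error = truncate_bdev_range(bdev, mode, start, end);
        if (error)
                return error;

        return blkdev_issue_zeroout(bdev, start >> 9, (end - start + 1) >> 9,
                                    GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
}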