author | Jens Axboe <axboe@kernel.dk> | 2019-02-10 01:40:24 +0300
committer | Jens Axboe <axboe@kernel.dk> | 2019-02-10 01:40:24 +0300
commit | d11a3998985b351aaab6bbdc23bc884bd5e815c8 (patch)
tree | 92340a95a76fe5e482824a5c5a4698f65a45e066 /include/linux/blkdev.h
parent | dc7292a5bcb4c878b076fca2ac3fc22f81b8f8df (diff)
download | linux-d11a3998985b351aaab6bbdc23bc884bd5e815c8.tar.xz
block: kill QUEUE_FLAG_FLUSH_NQ
We have various helpers for setting and clearing this flag, and also
a helper to check whether the queue supports queueable flushes.
But nobody uses them anymore, so kill it with fire.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
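For context, the hunks below only show the declarations being deleted from blkdev.h. The sketch that follows is illustrative, not part of the patch: the example_* function names are hypothetical, and it assumes the removed setter mapped its bool argument onto QUEUE_FLAG_FLUSH_NQ (queueable == false meaning the flag is set), consistent with the removed inline check.

/* Illustration only: how the removed helpers were shaped before this patch.
 * The example_* wrappers are hypothetical.
 */
#include <linux/blkdev.h>

static void example_mark_flush_not_queueable(struct request_queue *q)
{
	/* Removed setter: queueable == false set QUEUE_FLAG_FLUSH_NQ. */
	blk_queue_flush_queueable(q, false);
}

static bool example_flush_is_queueable(struct request_queue *q)
{
	/* Removed inline helper: !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags). */
	return queue_flush_queueable(q);
}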
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 7
1 file changed, 0 insertions, 7 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 338604dff7d0..24ccab51085f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -592,7 +592,6 @@ struct request_queue {
 #define QUEUE_FLAG_POLL		19	/* IO polling enabled if set */
 #define QUEUE_FLAG_WC		20	/* Write back caching */
 #define QUEUE_FLAG_FUA		21	/* device supports FUA writes */
-#define QUEUE_FLAG_FLUSH_NQ	22	/* flush not queueuable */
 #define QUEUE_FLAG_DAX		23	/* device supports DAX */
 #define QUEUE_FLAG_STATS	24	/* track IO start and completion times */
 #define QUEUE_FLAG_POLL_STATS	25	/* collecting stats for hybrid polling */
@@ -1069,7 +1068,6 @@ extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 
 /*
@@ -1446,11 +1444,6 @@ static inline unsigned int block_size(struct block_device *bdev)
 	return bdev->bd_block_size;
 }
 
-static inline bool queue_flush_queueable(struct request_queue *q)
-{
-	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
-}
-
 typedef struct {struct page *v;} Sector;
 
 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
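With QUEUE_FLAG_FLUSH_NQ gone, the flush/cache capabilities a driver still advertises go through blk_queue_write_cache(), whose prototype survives in the hunk above. A minimal sketch (the example_* function name is hypothetical):

/* Hypothetical driver hook: advertise a volatile write cache plus FUA support
 * via the remaining interface declared in blkdev.h above.
 */
#include <linux/blkdev.h>

static void example_setup_write_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, true);	/* enabled = true, fua = true */
}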