author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 04:55:15 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 04:55:15 +0400
commit     355bbd8cb82e60a592f6cd86ce6dbe5677615cf4
tree       23678e50ad4687f1656edc972388ee8014e7b89d /include
parent     39695224bd84dc4be29abad93a0ec232a16fc519
parent     746cd1e7e4a555ddaee53b19a46e05c9c61eaf09
download   linux-355bbd8cb82e60a592f6cd86ce6dbe5677615cf4.tar.xz
Merge branch 'for-2.6.32' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.32' of git://git.kernel.dk/linux-2.6-block: (29 commits)
block: use blkdev_issue_discard in blk_ioctl_discard
Make DISCARD_BARRIER and DISCARD_NOBARRIER writes instead of reads
block: don't assume device has a request list backing in nr_requests store
block: Optimal I/O limit wrapper
cfq: choose a new next_req when a request is dispatched
Seperate read and write statistics of in_flight requests
aoe: end barrier bios with EOPNOTSUPP
block: trace bio queueing trial only when it occurs
block: enable rq CPU completion affinity by default
cfq: fix the log message after dispatched a request
block: use printk_once
cciss: memory leak in cciss_init_one()
splice: update mtime and atime on files
block: make blk_iopoll_prep_sched() follow normal 0/1 return convention
cfq-iosched: get rid of must_alloc flag
block: use interrupts disabled version of raise_softirq_irqoff()
block: fix comment in blk-iopoll.c
block: adjust default budget for blk-iopoll
block: fix long lines in block/blk-iopoll.c
block: add blk-iopoll, a NAPI like approach for block devices
...
Diffstat (limited to 'include')

-rw-r--r--  include/linux/bio.h        | 69
-rw-r--r--  include/linux/blk-iopoll.h | 48
-rw-r--r--  include/linux/blkdev.h     | 44
-rw-r--r--  include/linux/fs.h         |  4
-rw-r--r--  include/linux/genhd.h      | 21
-rw-r--r--  include/linux/interrupt.h  |  1

6 files changed, 131 insertions, 56 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2892b710771c..5be93f18d842 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -142,56 +142,51 @@ struct bio {
  *
  * bit 0 -- data direction
  *	If not set, bio is a read from device. If set, it's a write to device.
- * bit 1 -- rw-ahead when set
- * bit 2 -- barrier
+ * bit 1 -- fail fast device errors
+ * bit 2 -- fail fast transport errors
+ * bit 3 -- fail fast driver errors
+ * bit 4 -- rw-ahead when set
+ * bit 5 -- barrier
  *	Insert a serialization point in the IO queue, forcing previously
  *	submitted IO to be completed before this one is issued.
- * bit 3 -- synchronous I/O hint.
- * bit 4 -- Unplug the device immediately after submitting this bio.
- * bit 5 -- metadata request
+ * bit 6 -- synchronous I/O hint.
+ * bit 7 -- Unplug the device immediately after submitting this bio.
+ * bit 8 -- metadata request
  *	Used for tracing to differentiate metadata and data IO. May also
  *	get some preferential treatment in the IO scheduler
- * bit 6 -- discard sectors
+ * bit 9 -- discard sectors
  *	Informs the lower level device that this range of sectors is no longer
  *	used by the file system and may thus be freed by the device. Used
  *	for flash based storage.
- * bit 7 -- fail fast device errors
- * bit 8 -- fail fast transport errors
- * bit 9 -- fail fast driver errors
  *	Don't want driver retries for any fast fail whatever the reason.
  * bit 10 -- Tell the IO scheduler not to wait for more requests after this
  *	one has been submitted, even if it is a SYNC request.
  */
-#define BIO_RW			0	/* Must match RW in req flags (blkdev.h) */
-#define BIO_RW_AHEAD		1	/* Must match FAILFAST in req flags */
-#define BIO_RW_BARRIER		2
-#define BIO_RW_SYNCIO		3
-#define BIO_RW_UNPLUG		4
-#define BIO_RW_META		5
-#define BIO_RW_DISCARD		6
-#define BIO_RW_FAILFAST_DEV		7
-#define BIO_RW_FAILFAST_TRANSPORT	8
-#define BIO_RW_FAILFAST_DRIVER		9
-#define BIO_RW_NOIDLE		10
-
-#define bio_rw_flagged(bio, flag)	((bio)->bi_rw & (1 << (flag)))
+enum bio_rw_flags {
+	BIO_RW,
+	BIO_RW_FAILFAST_DEV,
+	BIO_RW_FAILFAST_TRANSPORT,
+	BIO_RW_FAILFAST_DRIVER,
+	/* above flags must match REQ_* */
+	BIO_RW_AHEAD,
+	BIO_RW_BARRIER,
+	BIO_RW_SYNCIO,
+	BIO_RW_UNPLUG,
+	BIO_RW_META,
+	BIO_RW_DISCARD,
+	BIO_RW_NOIDLE,
+};
 
 /*
- * Old defines, these should eventually be replaced by direct usage of
- * bio_rw_flagged()
+ * First four bits must match between bio->bi_rw and rq->cmd_flags, make
+ * that explicit here.
  */
-#define bio_barrier(bio)	bio_rw_flagged(bio, BIO_RW_BARRIER)
-#define bio_sync(bio)		bio_rw_flagged(bio, BIO_RW_SYNCIO)
-#define bio_unplug(bio)		bio_rw_flagged(bio, BIO_RW_UNPLUG)
-#define bio_failfast_dev(bio)	bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
-#define bio_failfast_transport(bio)	\
-	bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
-#define bio_failfast_driver(bio)	\
-	bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
-#define bio_rw_ahead(bio)	bio_rw_flagged(bio, BIO_RW_AHEAD)
-#define bio_rw_meta(bio)	bio_rw_flagged(bio, BIO_RW_META)
-#define bio_discard(bio)	bio_rw_flagged(bio, BIO_RW_DISCARD)
-#define bio_noidle(bio)		bio_rw_flagged(bio, BIO_RW_NOIDLE)
+#define BIO_RW_RQ_MASK		0xf
+
+static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
+{
+	return (bio->bi_rw & (1 << flag)) != 0;
+}
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
@@ -216,7 +211,7 @@ struct bio {
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
+#define bio_empty_barrier(bio)	(bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
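With the old bio_barrier()/bio_sync() style wrappers removed, callers are expected to test bi_rw bits through bio_rw_flagged() and the new enum bio_rw_flags directly. The snippet below is a minimal illustrative sketch of such a call site after this change; my_handle_bio() is a hypothetical function, not part of this merge.

#include <linux/bio.h>

/* Hypothetical call site: classify an incoming bio using bio_rw_flagged()
 * instead of the removed bio_barrier()/bio_sync()/bio_discard() wrappers. */
static void my_handle_bio(struct bio *bio)
{
	bool is_barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
	bool is_discard = bio_rw_flagged(bio, BIO_RW_DISCARD);
	bool is_sync    = bio_rw_flagged(bio, BIO_RW_SYNCIO);

	if (is_barrier && !bio_has_data(bio) && !is_discard) {
		/* empty barrier, the same test bio_empty_barrier() performs */
	} else if (is_sync) {
		/* synchronous I/O hint: complete as soon as possible */
	}
}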
diff --git a/include/linux/blk-iopoll.h b/include/linux/blk-iopoll.h
new file mode 100644
index 000000000000..308734d3d4a2
--- /dev/null
+++ b/include/linux/blk-iopoll.h
@@ -0,0 +1,48 @@
+#ifndef BLK_IOPOLL_H
+#define BLK_IOPOLL_H
+
+struct blk_iopoll;
+typedef int (blk_iopoll_fn)(struct blk_iopoll *, int);
+
+struct blk_iopoll {
+	struct list_head list;
+	unsigned long state;
+	unsigned long data;
+	int weight;
+	int max;
+	blk_iopoll_fn *poll;
+};
+
+enum {
+	IOPOLL_F_SCHED		= 0,
+	IOPOLL_F_DISABLE	= 1,
+};
+
+/*
+ * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating
+ * that we were the first to acquire this iop for scheduling. If this iop
+ * is currently disabled, return "failure".
+ */
+static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop)
+{
+	if (!test_bit(IOPOLL_F_DISABLE, &iop->state))
+		return test_and_set_bit(IOPOLL_F_SCHED, &iop->state);
+
+	return 1;
+}
+
+static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop)
+{
+	return test_bit(IOPOLL_F_DISABLE, &iop->state);
+}
+
+extern void blk_iopoll_sched(struct blk_iopoll *);
+extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *);
+extern void blk_iopoll_complete(struct blk_iopoll *);
+extern void __blk_iopoll_complete(struct blk_iopoll *);
+extern void blk_iopoll_enable(struct blk_iopoll *);
+extern void blk_iopoll_disable(struct blk_iopoll *);
+
+extern int blk_iopoll_enabled;
+
+#endif
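The header above only declares the interface; the commit list describes blk-iopoll as "a NAPI like approach for block devices", so a plausible driver-side usage is sketched below. Everything prefixed my_ (the controller struct, my_complete_one(), the weight of 32) is a hypothetical stand-in for real driver state, and the poll-callback semantics (consume up to the budget, call blk_iopoll_complete() when finishing early) are assumed from the NAPI analogy rather than spelled out in this header.

#include <linux/blk-iopoll.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

struct my_ctrlr {
	struct blk_iopoll iopoll;
};

/* Hypothetical: reap one hardware completion, return false when idle.
 * Stubbed out here; a real driver would walk its completion queue. */
static bool my_complete_one(struct my_ctrlr *c)
{
	return false;
}

/* Poll callback: process up to 'budget' completions from softirq context. */
static int my_iopoll(struct blk_iopoll *iop, int budget)
{
	struct my_ctrlr *c = container_of(iop, struct my_ctrlr, iopoll);
	int done = 0;

	while (done < budget && my_complete_one(c))
		done++;

	if (done < budget)	/* out of work: stop polling, re-arm the IRQ */
		blk_iopoll_complete(iop);

	return done;
}

/* IRQ handler: defer completion processing to the new iopoll softirq. */
static irqreturn_t my_irq(int irq, void *data)
{
	struct my_ctrlr *c = data;

	if (blk_iopoll_sched_prep(&c->iopoll) == 0)
		blk_iopoll_sched(&c->iopoll);

	return IRQ_HANDLED;
}

/* Setup, e.g. in the probe path:
 *	blk_iopoll_init(&c->iopoll, 32, my_iopoll);
 *	blk_iopoll_enable(&c->iopoll);
 */

Per the header comment, blk_iopoll_sched_prep() returns 0 only when this context won the race to set IOPOLL_F_SCHED and polling is not disabled, which is why the scheduling call is gated on a zero return.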
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 69103e053c92..e23a86cae5ac 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -86,13 +86,14 @@ enum {
 };
 
 /*
- * request type modified bits. first two bits match BIO_RW* bits, important
+ * request type modified bits. first four bits match BIO_RW* bits, important
  */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
 	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+	/* above flags must match BIO_RW_* */
 	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
@@ -114,6 +115,7 @@ enum rq_flag_bits {
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
 	__REQ_IO_STAT,		/* account I/O stat */
+	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -142,6 +144,10 @@ enum rq_flag_bits {
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
 #define REQ_NOIDLE	(1 << __REQ_NOIDLE)
 #define REQ_IO_STAT	(1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE	(1 << __REQ_MIXED_MERGE)
+
+#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
+				 REQ_FAILFAST_DRIVER)
 
 #define BLK_MAX_CDB	16
 
@@ -453,10 +459,12 @@ struct request_queue
 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
+#define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
-				 (1 << QUEUE_FLAG_STACKABLE))
+				 (1 << QUEUE_FLAG_STACKABLE) |		\
+				 (1 << QUEUE_FLAG_SAME_COMP))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -575,6 +583,7 @@ enum {
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
+#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
@@ -828,11 +837,13 @@ static inline void blk_run_address_space(struct address_space *mapping)
 }
 
 /*
- * blk_rq_pos()		: the current sector
- * blk_rq_bytes()	: bytes left in the entire request
- * blk_rq_cur_bytes()	: bytes left in the current segment
- * blk_rq_sectors()	: sectors left in the entire request
- * blk_rq_cur_sectors()	: sectors left in the current segment
+ * blk_rq_pos()			: the current sector
+ * blk_rq_bytes()		: bytes left in the entire request
+ * blk_rq_cur_bytes()		: bytes left in the current segment
+ * blk_rq_err_bytes()		: bytes left till the next error boundary
+ * blk_rq_sectors()		: sectors left in the entire request
+ * blk_rq_cur_sectors()		: sectors left in the current segment
+ * blk_rq_err_sectors()		: sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -849,6 +860,8 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
 	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
 }
 
+extern unsigned int blk_rq_err_bytes(const struct request *rq);
+
 static inline unsigned int blk_rq_sectors(const struct request *rq)
 {
 	return blk_rq_bytes(rq) >> 9;
@@ -859,6 +872,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
+static inline unsigned int blk_rq_err_sectors(const struct request *rq)
+{
+	return blk_rq_err_bytes(rq) >> 9;
+}
+
 /*
  * Request issue related functions.
  */
@@ -885,10 +903,12 @@ extern bool blk_end_request(struct request *rq, int error,
 			    unsigned int nr_bytes);
 extern void blk_end_request_all(struct request *rq, int error);
 extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool blk_end_request_err(struct request *rq, int error);
 extern bool __blk_end_request(struct request *rq, int error,
 			      unsigned int nr_bytes);
 extern void __blk_end_request_all(struct request *rq, int error);
 extern bool __blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request_err(struct request *rq, int error);
 
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
@@ -915,6 +935,7 @@ extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -977,15 +998,18 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 }
 
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
-extern int blkdev_issue_discard(struct block_device *,
-				sector_t sector, sector_t nr_sects, gfp_t);
+#define DISCARD_FL_WAIT		0x01	/* wait for completion */
+#define DISCARD_FL_BARRIER	0x02	/* issue DISCARD_BARRIER request */
+extern int blkdev_issue_discard(struct block_device *, sector_t sector,
+		sector_t nr_sects, gfp_t, int flags);
 
 static inline int sb_issue_discard(struct super_block *sb,
 				   sector_t block, sector_t nr_blocks)
 {
 	block <<= (sb->s_blocksize_bits - 9);
 	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
+				    DISCARD_FL_BARRIER);
}
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
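blkdev_issue_discard() now takes a flags argument, with the DISCARD_FL_* bits selecting the behaviour; sb_issue_discard() above shows the barrier-only case. Below is a hedged sketch of a caller that also wants to wait for completion; the helper name and the GFP_NOFS choice are illustrative assumptions, not taken from this merge.

#include <linux/blkdev.h>

/* Hypothetical helper: discard a sector range, issue it as a barrier
 * discard and block until it completes. */
static int my_trim_range(struct block_device *bdev, sector_t start,
			 sector_t nr_sects)
{
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS,
				    DISCARD_FL_WAIT | DISCARD_FL_BARRIER);
}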
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 37f53216998a..b21cf6b9c80b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -161,8 +161,8 @@ struct inodes_stat_t {
  * These aren't really reads or writes, they pass down information about
  * parts of device that are now unused by the file system.
  */
-#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
-#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
+#define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD))
+#define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER))
 
 #define SEL_IN		1
 #define SEL_OUT		2
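As the "Make DISCARD_BARRIER and DISCARD_NOBARRIER writes instead of reads" commit in the list above indicates, the DISCARD_* masks now carry WRITE, so a discard is classified as a write by bio_data_dir() and the request layer. A minimal sketch under that assumption; my_submit_discard() is a hypothetical wrapper, not part of this merge.

#include <linux/fs.h>
#include <linux/bio.h>

/* Hypothetical wrapper: after this change, both variants set the WRITE
 * bit, so the resulting bio is accounted and routed as a write. */
static void my_submit_discard(struct bio *bio, bool barrier)
{
	submit_bio(barrier ? DISCARD_BARRIER : DISCARD_NOBARRIER, bio);
}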
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 45fc320a53c6..44263cb27121 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -98,7 +98,7 @@ struct hd_struct {
 	int make_it_fail;
 #endif
 	unsigned long stamp;
-	int in_flight;
+	int in_flight[2];
 #ifdef	CONFIG_SMP
 	struct disk_stats *dkstats;
 #else
@@ -322,18 +322,23 @@ static inline void free_part_stats(struct hd_struct *part)
 #define part_stat_sub(cpu, gendiskp, field, subnd)			\
 	part_stat_add(cpu, gendiskp, field, -subnd)
 
-static inline void part_inc_in_flight(struct hd_struct *part)
+static inline void part_inc_in_flight(struct hd_struct *part, int rw)
 {
-	part->in_flight++;
+	part->in_flight[rw]++;
 	if (part->partno)
-		part_to_disk(part)->part0.in_flight++;
+		part_to_disk(part)->part0.in_flight[rw]++;
 }
 
-static inline void part_dec_in_flight(struct hd_struct *part)
+static inline void part_dec_in_flight(struct hd_struct *part, int rw)
 {
-	part->in_flight--;
+	part->in_flight[rw]--;
 	if (part->partno)
-		part_to_disk(part)->part0.in_flight--;
+		part_to_disk(part)->part0.in_flight[rw]--;
+}
+
+static inline int part_in_flight(struct hd_struct *part)
+{
+	return part->in_flight[0] + part->in_flight[1];
 }
 
 /* block/blk-core.c */
@@ -546,6 +551,8 @@ extern ssize_t part_size_show(struct device *dev,
 			      struct device_attribute *attr, char *buf);
 extern ssize_t part_stat_show(struct device *dev,
 			      struct device_attribute *attr, char *buf);
+extern ssize_t part_inflight_show(struct device *dev,
+			struct device_attribute *attr, char *buf);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 extern ssize_t part_fail_show(struct device *dev,
 			      struct device_attribute *attr, char *buf);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 1ac57e522a1f..8e9e151f811e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -348,6 +348,7 @@ enum
 	NET_TX_SOFTIRQ,
 	NET_RX_SOFTIRQ,
 	BLOCK_SOFTIRQ,
+	BLOCK_IOPOLL_SOFTIRQ,
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
 	HRTIMER_SOFTIRQ,
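The genhd.h change above splits the in-flight counter by direction, so accounting code passes the request direction and sums both halves to recover the old combined view. A short sketch under that assumption; the my_account_* helpers are hypothetical, while rq_data_dir() is the existing 0-for-read/1-for-write macro from blkdev.h.

#include <linux/genhd.h>
#include <linux/blkdev.h>

/* Hypothetical accounting hooks around request start and completion. */
static void my_account_start(struct hd_struct *part, struct request *rq)
{
	part_inc_in_flight(part, rq_data_dir(rq));
}

static void my_account_done(struct hd_struct *part, struct request *rq)
{
	part_dec_in_flight(part, rq_data_dir(rq));
}

/* Combined count, e.g. what the new part_inflight_show() attribute exposes. */
static int my_busy(struct hd_struct *part)
{
	return part_in_flight(part);
}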