author     Linus Torvalds <torvalds@linux-foundation.org>  2012-08-01 20:02:41 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-08-01 20:02:41 +0400
commit     8cf1a3fce0b95050b63d451c9d561da0da2aa4d6
tree       0dc7f93474c3be601a5893900db1418dfd60ba5d  /include/linux/blkdev.h
parent     fcff06c438b60f415af5983efe92811d6aa02ad1
parent     80799fbb7d10c30df78015b3fa21f7ffcfc0eb2c
Merge branch 'for-3.6/core' of git://git.kernel.dk/linux-block
Pull core block IO bits from Jens Axboe:
 "The most complicated part of this is the request allocation rework by
  Tejun, which has been queued up for a long time and has been in
  for-next ditto as well.

  There are a few commits from yesterday and today, mostly trivial and
  obvious fixes.  So I'm pretty confident that it is sound.  It's also
  smaller than usual."

* 'for-3.6/core' of git://git.kernel.dk/linux-block:
  block: remove dead func declaration
  block: add partition resize function to blkpg ioctl
  block: uninitialized ioc->nr_tasks triggers WARN_ON
  block: do not artificially constrain max_sectors for stacking drivers
  blkcg: implement per-blkg request allocation
  block: prepare for multiple request_lists
  block: add q->nr_rqs[] and move q->rq.elvpriv to q->nr_rqs_elvpriv
  blkcg: inline bio_blkcg() and friends
  block: allocate io_context upfront
  block: refactor get_request[_wait]()
  block: drop custom queue draining used by scsi_transport_{iscsi|fc}
  mempool: add @gfp_mask to mempool_create_node()
  blkcg: make root blkcg allocation use %GFP_KERNEL
  blkcg: __blkg_lookup_create() doesn't need radix preload
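At a glance, the core change in this merge: each blkcg/queue pair gets its
own request pool instead of one pool per queue. A minimal sketch of the blkg
side, assuming struct blkcg_gq embeds a request_list as the series'
"blkcg: implement per-blkg request allocation" describes (that member lives
in the blk-cgroup headers and is not shown on this page, so its shape here
is illustrative):

	/* Sketch only: the blkg side of per-blkg request allocation.
	 * The rl member is assumed from the series; it is not part of
	 * the blkdev.h diff below. */
	struct blkcg_gq {
		/* ... other blkg state ... */
		struct request_list rl;	/* this blkg's own request pool;
					 * rl.blkg points back here */
	};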
Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 53
 1 file changed, 32 insertions(+), 21 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 07954b05b86c..3816ce8a08fc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -46,16 +46,23 @@ struct blkcg_gq;
struct request;
typedef void (rq_end_io_fn)(struct request *, int);
+#define BLK_RL_SYNCFULL (1U << 0)
+#define BLK_RL_ASYNCFULL (1U << 1)
+
struct request_list {
+ struct request_queue *q; /* the queue this rl belongs to */
+#ifdef CONFIG_BLK_CGROUP
+ struct blkcg_gq *blkg; /* blkg this request pool belongs to */
+#endif
/*
* count[], starved[], and wait[] are indexed by
* BLK_RW_SYNC/BLK_RW_ASYNC
*/
- int count[2];
- int starved[2];
- int elvpriv;
- mempool_t *rq_pool;
- wait_queue_head_t wait[2];
+ int count[2];
+ int starved[2];
+ mempool_t *rq_pool;
+ wait_queue_head_t wait[2];
+ unsigned int flags;
};
/*
@@ -138,6 +145,7 @@ struct request {
struct hd_struct *part;
unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
+ struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
unsigned long long io_start_time_ns; /* when passed to hardware */
#endif
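The new rl back-pointer records which request_list a request was allocated
from, so the free path can credit the right pool. A hedged sketch of that
lookup follows; the helper name is illustrative, and only the fields visible
in this diff plus the request's existing rq->q pointer are assumed:

	/* Illustrative only: map a request back to its pool. */
	static inline struct request_list *rq_to_rl_sketch(struct request *rq)
	{
	#ifdef CONFIG_BLK_CGROUP
		/* rq->rl only exists with blkcg, per the #ifdef above */
		if (rq->rl)
			return rq->rl;
	#endif
		return &rq->q->root_rl;	/* everything else uses the root pool */
	}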
@@ -282,11 +290,16 @@ struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
+ int nr_rqs[2]; /* # allocated [a]sync rqs */
+ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
/*
- * the queue request freelist, one for reads and one for writes
+ * If blkcg is not used, @q->root_rl serves all requests. If blkcg
+ * is used, root blkg allocates from @q->root_rl and all other
+ * blkgs from their own blkg->rl. Which one to use should be
+ * determined using bio_request_list().
*/
- struct request_list rq;
+ struct request_list root_rl;
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
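A rough sketch of the selection the comment above describes; the actual
bio_request_list() helper lives outside this header, and both blkg->rl and
q->root_blkg are assumed fields from the series, used here only for
illustration:

	/* Illustrative only: pick a request_list for an incoming request. */
	static inline struct request_list *
	bio_request_list_sketch(struct request_queue *q, struct blkcg_gq *blkg)
	{
	#ifdef CONFIG_BLK_CGROUP
		/* blkg->rl and q->root_blkg are assumed, not in this diff */
		if (blkg && blkg != q->root_blkg)
			return &blkg->rl;	/* per-blkg pool */
	#endif
		return &q->root_rl;		/* root blkg or !CONFIG_BLK_CGROUP */
	}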
@@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq)
return rw_is_sync(rq->cmd_flags);
}
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
- if (sync)
- return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
- return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ return rl->flags & flag;
}
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
- if (sync)
- queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
- else
- queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ rl->flags |= flag;
}
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
- if (sync)
- queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
- else
- queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ rl->flags &= ~flag;
}
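These helpers replace the queue-wide QUEUE_FLAG_SYNCFULL/ASYNCFULL bits with
per-request-list state, so fullness can be tracked for each blkg's pool
independently. A hedged usage sketch, using only the helpers and fields
defined above (the threshold policy is a stand-in, not the real block-layer
logic):

	/* Illustrative only: mark a direction full when its allocation
	 * count crosses a limit, and clear it once the pool drains. */
	static void rl_update_full_sketch(struct request_list *rl, bool sync,
					  int limit)
	{
		/* count[] is indexed by BLK_RW_SYNC/BLK_RW_ASYNC; a bool
		 * sync promotes to the matching 0/1 index */
		if (rl->count[sync] >= limit)
			blk_set_rl_full(rl, sync);
		else
			blk_clear_rl_full(rl, sync);

		/* allocators would check blk_rl_full() and sleep on
		 * rl->wait[sync] until requests are freed back */
	}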