author		Mike Christie <michaelc@cs.wisc.edu>	2005-11-11 14:30:24 +0300
committer	James Bottomley <jejb@mulgrave.(none)>	2005-12-15 06:00:50 +0300
commit		6e39b69e7ea9205c5f80aeac3ef999ab8fb1a4cc (patch)
tree		abf6bf248970a249cc15e0c3df75ae42833be084
parent		9e1fe9314cb5649b2dc73690f2cd8d0068e633d9 (diff)
download	linux-6e39b69e7ea9205c5f80aeac3ef999ab8fb1a4cc.tar.xz
[SCSI] export blk layer functions needed for blk_execute_rq_nowait
To send async requests we need these two functions exported.
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
-rw-r--r--	block/ll_rw_blk.c	6
-rw-r--r--	include/linux/blkdev.h	5
2 files changed, 10 insertions, 1 deletion
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 99c9ca6d5992..c525b5a2b598 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2306,6 +2306,8 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	generic_unplug_device(q);
 }
 
+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @q: queue to insert the request in
@@ -2444,7 +2446,7 @@ void disk_round_stats(struct gendisk *disk)
 /*
  * queue lock must be held
  */
-static void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(request_queue_t *q, struct request *req)
 {
 	struct request_list *rl = req->rl;
 
@@ -2473,6 +2475,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	}
 }
 
+EXPORT_SYMBOL_GPL(__blk_put_request);
+
 void blk_put_request(struct request *req)
 {
 	unsigned long flags;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a33a31e71bbc..9a68716dcf75 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -558,6 +558,7 @@ extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
+extern void __blk_put_request(request_queue_t *, struct request *);
 extern void blk_end_sync_rq(struct request *rq);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
@@ -579,6 +580,10 @@ extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned
 extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
+extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+				  struct request *, int,
+				  void (*done)(struct request *));
+
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;
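For context, a rough sketch of how a block-layer client might use the two newly exported symbols together. Everything below (my_submit_async, my_async_done, the REQ_BLOCK_PC setup, the 30-second timeout) is hypothetical illustration, not part of this patch; it also assumes the done() callback is invoked by the block layer's completion path with the queue lock already held, which is why it frees the request with __blk_put_request() rather than blk_put_request():

#include <linux/blkdev.h>
#include <linux/string.h>

/*
 * Completion callback for the async request.  Assumed to run with the
 * queue lock held, so the lock-free __blk_put_request() is used instead
 * of blk_put_request().
 */
static void my_async_done(struct request *rq)
{
	/* rq->errors tells the caller whether the command succeeded */
	__blk_put_request(rq->q, rq);
}

/* Hypothetical helper: queue a raw command without waiting for it. */
static int my_submit_async(request_queue_t *q, struct gendisk *disk,
			   unsigned char *cmd, unsigned int cmd_len)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->flags |= REQ_BLOCK_PC;	/* raw command, no data transfer here */
	memcpy(rq->cmd, cmd, cmd_len);
	rq->cmd_len = cmd_len;
	rq->timeout = 30 * HZ;

	/* returns immediately; my_async_done() runs on completion */
	blk_execute_rq_nowait(q, disk, rq, 0, my_async_done);
	return 0;
}

The two exports pair up in this pattern: blk_execute_rq_nowait() lets the submitter return without sleeping on the request, and __blk_put_request() lets the completion callback drop the request without re-acquiring a queue lock it may already be running under.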