author	Mike Snitzer <snitzer@redhat.com>	2010-08-12 07:14:04 +0400
committer	Alasdair G Kergon <agk@redhat.com>	2010-08-12 07:14:04 +0400
commit	57cba5d3658d9fdc019c6af14a2d80aefa651e56 (patch)
tree	4905a162b6785e1a1228b8870d8011cf9035147a	/drivers/md/dm.c
parent	26803b9f06d365122fae82e7554a66ef8278e0bb (diff)
download	linux-57cba5d3658d9fdc019c6af14a2d80aefa651e56.tar.xz
dm: rename map_info flush_request to target_request_nr
'target_request_nr' is a more generic name that reflects the fact that it will be used for both flush and discard support.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
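For background, a target that sets ti->num_flush_requests greater than one receives one cloned empty-barrier bio per request number, and its map function can read the renamed target_request_nr field from map_info to decide which underlying device each clone should be sent to. The following is a minimal sketch of such a map function for this kernel era, in the style of dm-stripe; the my_multidev_* names, the context struct, and MY_MAX_DEVS are illustrative assumptions, not code from this patch.

#include <linux/bio.h>
#include <linux/device-mapper.h>

#define MY_MAX_DEVS 16			/* arbitrary illustrative limit */

/* Hypothetical per-target context; not part of this patch. */
struct my_multidev_ctx {
	unsigned nr_devs;		/* the ctr set ti->num_flush_requests to this */
	struct dm_dev *dev[MY_MAX_DEVS];
};

static int my_multidev_map(struct dm_target *ti, struct bio *bio,
			   union map_info *map_context)
{
	struct my_multidev_ctx *ctx = ti->private;

	if (unlikely(bio_empty_barrier(bio))) {
		/* One clone per device: target_request_nr selects which one. */
		unsigned nr = map_context->target_request_nr;

		BUG_ON(nr >= ctx->nr_devs);
		bio->bi_bdev = ctx->dev[nr]->bdev;
		return DM_MAPIO_REMAPPED;
	}

	/* Normal I/O mapping would go here. */
	bio->bi_bdev = ctx->dev[0]->bdev;
	return DM_MAPIO_REMAPPED;
}

The __clone_and_map_empty_barrier() hunk below then issues exactly ti->num_flush_requests such clones per target, one for each request_nr.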
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5ae0a05b4811..0d4710175885 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1183,12 +1183,12 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 }
 
 static void __flush_target(struct clone_info *ci, struct dm_target *ti,
-			   unsigned flush_nr)
+			   unsigned request_nr)
 {
 	struct dm_target_io *tio = alloc_tio(ci, ti);
 	struct bio *clone;
 
-	tio->info.flush_request = flush_nr;
+	tio->info.target_request_nr = request_nr;
 
 	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
 	__bio_clone(clone, ci->bio);
@@ -1199,13 +1199,13 @@ static void __flush_target(struct clone_info *ci, struct dm_target *ti,
 static int __clone_and_map_empty_barrier(struct clone_info *ci)
 {
-	unsigned target_nr = 0, flush_nr;
+	unsigned target_nr = 0, request_nr;
 	struct dm_target *ti;
 
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		for (flush_nr = 0; flush_nr < ti->num_flush_requests;
-		     flush_nr++)
-			__flush_target(ci, ti, flush_nr);
+		for (request_nr = 0; request_nr < ti->num_flush_requests;
+		     request_nr++)
+			__flush_target(ci, ti, request_nr);
 
 	ci->sector_count = 0;
@@ -2424,11 +2424,11 @@ static void dm_queue_flush(struct mapped_device *md)
 	queue_work(md->wq, &md->work);
 }
 
-static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
+static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	tio->info.flush_request = flush_nr;
+	tio->info.target_request_nr = request_nr;
 }
 
/* Issue barrier requests to targets and wait for their completion. */
@@ -2446,7 +2446,7 @@ static int dm_rq_barrier(struct mapped_device *md)
 		ti = dm_table_get_target(map, i);
 		for (j = 0; j < ti->num_flush_requests; j++) {
 			clone = clone_rq(md->flush_request, md, GFP_NOIO);
-			dm_rq_set_flush_nr(clone, j);
+			dm_rq_set_target_request_nr(clone, j);
 			atomic_inc(&md->pending[rq_data_dir(clone)]);
 			map_request(ti, clone, md);
 		}