Diffstat (limited to 'block')
-rw-r--r--  block/bio.c                  | 14
-rw-r--r--  block/blk-core.c             | 32
-rw-r--r--  block/blk-crypto-fallback.c  |  2
-rw-r--r--  block/blk-crypto.c           |  2
-rw-r--r--  block/blk-merge.c            |  2
-rw-r--r--  block/blk-throttle.c         |  4
-rw-r--r--  block/bounce.c               |  2
7 files changed, 28 insertions(+), 30 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index fc1299f9d86a..ef91782fd668 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -358,7 +358,7 @@ static void bio_alloc_rescue(struct work_struct *work)
if (!bio)
break;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@ -416,19 +416,19 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
* submit the previously allocated bio for IO before attempting to allocate
* a new one. Failure to do so can cause deadlocks under memory pressure.
*
- * Note that when running under generic_make_request() (i.e. any block
+ * Note that when running under submit_bio_noacct() (i.e. any block
* driver), bios are not submitted until after you return - see the code in
- * generic_make_request() that converts recursion into iteration, to prevent
+ * submit_bio_noacct() that converts recursion into iteration, to prevent
* stack overflows.
*
* This would normally mean allocating multiple bios under
- * generic_make_request() would be susceptible to deadlocks, but we have
+ * submit_bio_noacct() would be susceptible to deadlocks, but we have
* deadlock avoidance code that resubmits any blocked bios from a rescuer
* thread.
*
* However, we do not guarantee forward progress for allocations from other
* mempools. Doing multiple allocations from the same mempool under
- * generic_make_request() should be avoided - instead, use bio_set's front_pad
+ * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
* for per bio allocations.
*
* RETURNS:
@@ -457,14 +457,14 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
nr_iovecs > 0))
return NULL;
/*
- * generic_make_request() converts recursion to iteration; this
+ * submit_bio_noacct() converts recursion to iteration; this
* means if we're running beneath it, any bios we allocate and
* submit will not be submitted (and thus freed) until after we
* return.
*
* This exposes us to a potential deadlock if we allocate
* multiple bios from the same bio_set() while running
- * underneath generic_make_request(). If we were to allocate
+ * underneath submit_bio_noacct(). If we were to allocate
* multiple bios (say a stacking block driver that was splitting
* bios), we would deadlock if we exhausted the mempool's
* reserve.
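The allocation rule documented above is easiest to see in code. The following sketch is not part of this patch (the function name and max_sectors limit are invented): a stacking driver splits an oversized bio using its own bio_set, submitting each piece with submit_bio_noacct() before allocating the next, so at most one mempool allocation is ever outstanding beneath submit_bio_noacct():

/* Illustrative sketch only: honours the one-allocation-at-a-time rule. */
static void example_split_and_resubmit(struct bio *bio, struct bio_set *bs,
				       unsigned int max_sectors)
{
	while (bio_sectors(bio) > max_sectors) {
		/* bio_split() allocates the child bio from bs's mempool */
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

		bio_chain(split, bio);	  /* parent completes after child */
		submit_bio_noacct(split); /* submit before allocating again */
	}
	submit_bio_noacct(bio);
}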
diff --git a/block/blk-core.c b/block/blk-core.c
index cb07a726dd71..ff9a88d2d244 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -956,8 +956,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
return BLK_STS_OK;
}
-static noinline_for_stack bool
-generic_make_request_checks(struct bio *bio)
+static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
struct request_queue *q = bio->bi_disk->queue;
blk_status_t status = BLK_STS_IOERR;
@@ -985,9 +984,8 @@ generic_make_request_checks(struct bio *bio)
}
/*
- * Filter flush bio's early so that make_request based
- * drivers without flush support don't have to worry
- * about them.
+ * Filter flush bios early so that bio-based drivers without flush
+ * support don't have to worry about them.
*/
if (op_is_flush(bio->bi_opf) &&
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
@@ -1072,7 +1070,7 @@ end_io:
return false;
}
-static blk_qc_t do_make_request(struct bio *bio)
+static blk_qc_t __submit_bio(struct bio *bio)
{
struct gendisk *disk = bio->bi_disk;
blk_qc_t ret = BLK_QC_T_NONE;
@@ -1087,7 +1085,7 @@ static blk_qc_t do_make_request(struct bio *bio)
}
/**
- * generic_make_request - re-submit a bio to the block device layer for I/O
+ * submit_bio_noacct - re-submit a bio to the block device layer for I/O
* @bio: The bio describing the location in memory and on the device.
*
* This is a version of submit_bio() that shall only be used for I/O that is
@@ -1095,7 +1093,7 @@ static blk_qc_t do_make_request(struct bio *bio)
* systems and other upper level users of the block layer should use
* submit_bio() instead.
*/
-blk_qc_t generic_make_request(struct bio *bio)
+blk_qc_t submit_bio_noacct(struct bio *bio)
{
/*
* bio_list_on_stack[0] contains bios submitted by the current
@@ -1106,7 +1104,7 @@ blk_qc_t generic_make_request(struct bio *bio)
struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;
- if (!generic_make_request_checks(bio))
+ if (!submit_bio_checks(bio))
goto out;
/*
@@ -1114,7 +1112,7 @@ blk_qc_t generic_make_request(struct bio *bio)
* stack usage with stacked devices could be a problem. So use
* current->bio_list to keep a list of requests submitted by a
* ->submit_bio method. current->bio_list is also used as a
- * flag to say if generic_make_request is currently active in this
+ * flag to say if submit_bio_noacct is currently active in this
* task or not. If it is NULL, then no make_request is active. If
* it is non-NULL, then a make_request is active, and new requests
* should be added at the tail
@@ -1132,7 +1130,7 @@ blk_qc_t generic_make_request(struct bio *bio)
* we assign bio_list to a pointer to the bio_list_on_stack,
* thus initialising the bio_list of new bios to be
* added. ->submit_bio() may indeed add some more bios
- * through a recursive call to generic_make_request. If it
+ * through a recursive call to submit_bio_noacct. If it
* did, we find a non-NULL value in bio_list and re-enter the loop
* from the top. In this case we really did just take the bio
* of the top of the list (no pretending) and so remove it from
@@ -1150,7 +1148,7 @@ blk_qc_t generic_make_request(struct bio *bio)
/* Create a fresh bio_list for all subordinate requests */
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
- ret = do_make_request(bio);
+ ret = __submit_bio(bio);
/* sort new bios into those for a lower level
* and those for the same level
@@ -1174,13 +1172,13 @@ blk_qc_t generic_make_request(struct bio *bio)
out:
return ret;
}
-EXPORT_SYMBOL(generic_make_request);
+EXPORT_SYMBOL(submit_bio_noacct);
/**
* direct_make_request - hand a buffer directly to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
*
- * This function behaves like generic_make_request(), but does not protect
+ * This function behaves like submit_bio_noacct(), but does not protect
* against recursion. Must only be used if the called driver is known
* to be blk-mq based.
*/
@@ -1192,7 +1190,7 @@ blk_qc_t direct_make_request(struct bio *bio)
bio_io_error(bio);
return BLK_QC_T_NONE;
}
- if (!generic_make_request_checks(bio))
+ if (!submit_bio_checks(bio))
return BLK_QC_T_NONE;
if (unlikely(bio_queue_enter(bio)))
return BLK_QC_T_NONE;
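As the comment above notes, direct_make_request() omits the recursion protection, so it is only safe when the lower device is known to be blk-mq based. A hypothetical caller (names invented for illustration) could look like this:

/* Hypothetical: remap a bio to a lower device known to be blk-mq based. */
static blk_qc_t example_remap_and_dispatch(struct bio *bio,
					   struct block_device *lower_bdev,
					   sector_t offset)
{
	bio_set_dev(bio, lower_bdev);
	bio->bi_iter.bi_sector += offset;
	return direct_make_request(bio);
}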
@@ -1263,13 +1261,13 @@ blk_qc_t submit_bio(struct bio *bio)
blk_qc_t ret;
psi_memstall_enter(&pflags);
- ret = generic_make_request(bio);
+ ret = submit_bio_noacct(bio);
psi_memstall_leave(&pflags);
return ret;
}
- return generic_make_request(bio);
+ return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
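The recursion-to-iteration scheme the comments in this file describe reduces to the sketch below. This is a simplification, not the real function: the actual submit_bio_noacct() additionally sorts newly generated bios into same-level and lower-level lists using the two-element bio_list_on_stack array.

/* Simplified sketch of the iteration scheme used by submit_bio_noacct(). */
blk_qc_t sketch_submit_bio_noacct(struct bio *bio)
{
	struct bio_list on_stack;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (current->bio_list) {
		/* Nested submission: queue the bio instead of recursing. */
		bio_list_add(current->bio_list, bio);
		return BLK_QC_T_NONE;
	}

	bio_list_init(&on_stack);
	current->bio_list = &on_stack;
	do {
		/* ->submit_bio() may add more bios to current->bio_list. */
		ret = __submit_bio(bio);
	} while ((bio = bio_list_pop(&on_stack)));
	current->bio_list = NULL;

	return ret;
}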
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 6e49688a2d80..c162b754efbd 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -228,7 +228,7 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
return false;
}
bio_chain(split_bio, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
*bio_ptr = split_bio;
}
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 6533c9b36ab8..2d5e60023b08 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -239,7 +239,7 @@ void __blk_crypto_free_request(struct request *rq)
* kernel crypto API. When the crypto API fallback is used for encryption,
* blk-crypto may choose to split the bio into 2 - the first one that will
* continue to be processed and the second one that will be resubmitted via
- * generic_make_request. A bounce bio will be allocated to encrypt the contents
+ * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
* of the aforementioned "first one", and *bio_ptr will be updated to this
* bounce bio.
*
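The split described here follows the same chain-and-resubmit idiom seen elsewhere in this patch. Roughly (hypothetical name, simplified from the fallback's blk_crypto_split_bio_if_needed(); fs_bio_set stands in for the fallback's private bio_set):

/* Simplified shape of the split-in-two flow described above. */
static bool example_split_for_fallback(struct bio **bio_ptr,
				       unsigned int max_sectors)
{
	struct bio *bio = *bio_ptr;

	if (bio_sectors(bio) > max_sectors) {
		struct bio *first = bio_split(bio, max_sectors, GFP_NOIO,
					      &fs_bio_set);

		if (!first)
			return false;
		bio_chain(first, bio);
		submit_bio_noacct(bio);	/* resubmit the remainder */
		*bio_ptr = first;	/* keep processing the head */
	}
	return true;
}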
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 20fa22906041..5196dc145270 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -338,7 +338,7 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
bio_chain(split, *bio);
trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
- generic_make_request(*bio);
+ submit_bio_noacct(*bio);
*bio = split;
}
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index ad37043297ed..fee3325edf27 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1339,8 +1339,8 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
if (!bio_list_empty(&bio_list_on_stack)) {
blk_start_plug(&plug);
- while((bio = bio_list_pop(&bio_list_on_stack)))
- generic_make_request(bio);
+ while ((bio = bio_list_pop(&bio_list_on_stack)))
+ submit_bio_noacct(bio);
blk_finish_plug(&plug);
}
}
diff --git a/block/bounce.c b/block/bounce.c
index c3aaed070124..431be88a0240 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -309,7 +309,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
if (!passthrough && sectors < bio_sectors(*bio_orig)) {
bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
bio_chain(bio, *bio_orig);
- generic_make_request(*bio_orig);
+ submit_bio_noacct(*bio_orig);
*bio_orig = bio;
}
bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :