-rw-r--r--  fs/fuse/file.c          |   5
-rw-r--r--  fs/iomap/bio.c          | 135
-rw-r--r--  fs/iomap/buffered-io.c  |   8
-rw-r--r--  fs/iomap/direct-io.c    |  15
-rw-r--r--  fs/iomap/internal.h     |  14
-rw-r--r--  fs/iomap/ioend.c        |  30
-rw-r--r--  fs/ntfs3/inode.c        |  57
-rw-r--r--  fs/xfs/xfs_aops.c       |  47
-rw-r--r--  fs/xfs/xfs_iomap.c      |   9
-rw-r--r--  include/linux/iomap.h   |  20
10 files changed, 224 insertions(+), 116 deletions(-)
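
The interface change that runs through every hunk below: ->submit_read() now also receives the iomap_iter, struct iomap_read_ops gains an optional bio_set, and iomap_bio_read_folio_range() is exported so filesystems can reuse it instead of carrying private copies. A minimal sketch of how a filesystem wires this up after the patch (the example_* names are hypothetical; fuse, ntfs3 and xfs below carry the real versions):

#include <linux/bio.h>
#include <linux/iomap.h>

/* Hypothetical end_io: finish each folio range, then drop the bio. */
static void example_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

/* submit_read now sees the iomap_iter as well as the per-read context. */
static void example_submit_read(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx)
{
	struct bio *bio = ctx->read_ctx;	/* built by read_folio_range */

	bio->bi_end_io = example_read_end_io;
	submit_bio(bio);
}

static const struct iomap_read_ops example_read_ops = {
	.read_folio_range	= iomap_bio_read_folio_range, /* now exported */
	.submit_read		= example_submit_read,
	/* .bio_set is optional; fs_bio_set is used when it is NULL */
};

When .bio_set is left NULL, bios come from fs_bio_set, as iomap_read_bio_set() in fs/iomap/bio.c below shows.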
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b1bb7153cb78..a9c836d7f586 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -947,7 +947,8 @@ static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
return ret;
}
-static void fuse_iomap_read_submit(struct iomap_read_folio_ctx *ctx)
+static void fuse_iomap_submit_read(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx)
{
struct fuse_fill_read_data *data = ctx->read_ctx;
@@ -958,7 +959,7 @@ static void fuse_iomap_read_submit(struct iomap_read_folio_ctx *ctx)
static const struct iomap_read_ops fuse_iomap_read_ops = {
.read_folio_range = fuse_iomap_read_folio_range_async,
- .submit_read = fuse_iomap_read_submit,
+ .submit_read = fuse_iomap_submit_read,
};
static int fuse_read_folio(struct file *file, struct folio *folio)
diff --git a/fs/iomap/bio.c b/fs/iomap/bio.c
index fc045f2e4c45..f989ffcaac96 100644
--- a/fs/iomap/bio.c
+++ b/fs/iomap/bio.c
@@ -3,74 +3,111 @@
* Copyright (C) 2010 Red Hat, Inc.
* Copyright (C) 2016-2023 Christoph Hellwig.
*/
+#include <linux/bio-integrity.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include "internal.h"
#include "trace.h"
-static void iomap_read_end_io(struct bio *bio)
+static u32 __iomap_read_end_io(struct bio *bio, int error)
{
- int error = blk_status_to_errno(bio->bi_status);
struct folio_iter fi;
+ u32 folio_count = 0;
- bio_for_each_folio_all(fi, bio)
+ bio_for_each_folio_all(fi, bio) {
iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+ folio_count++;
+ }
+ if (bio_integrity(bio))
+ fs_bio_integrity_free(bio);
bio_put(bio);
+ return folio_count;
+}
+
+static void iomap_read_end_io(struct bio *bio)
+{
+ __iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
}
-static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend)
+{
+ return __iomap_read_end_io(&ioend->io_bio, ioend->io_error);
+}
+
+static void iomap_bio_submit_read(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx)
{
struct bio *bio = ctx->read_ctx;
- if (bio)
- submit_bio(bio);
+ if (iter->iomap.flags & IOMAP_F_INTEGRITY)
+ fs_bio_integrity_alloc(bio);
+ submit_bio(bio);
+}
+
+static struct bio_set *iomap_read_bio_set(struct iomap_read_folio_ctx *ctx)
+{
+ if (ctx->ops && ctx->ops->bio_set)
+ return ctx->ops->bio_set;
+ return &fs_bio_set;
+}
+
+static void iomap_read_alloc_bio(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t plen)
+{
+ const struct iomap *iomap = &iter->iomap;
+ unsigned int nr_vecs = DIV_ROUND_UP(iomap_length(iter), PAGE_SIZE);
+ struct bio_set *bio_set = iomap_read_bio_set(ctx);
+ struct folio *folio = ctx->cur_folio;
+ gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+ gfp_t orig_gfp = gfp;
+ struct bio *bio;
+
+ /* Submit the existing range if there was one. */
+ if (ctx->read_ctx)
+ ctx->ops->submit_read(iter, ctx);
+
+ /* Same as readahead_gfp_mask: */
+ if (ctx->rac)
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+
+ /*
+ * If the bio_alloc fails, try it again for a single page to avoid
+ * having to deal with partial page reads. This emulates what
+ * do_mpage_read_folio does.
+ */
+ bio = bio_alloc_bioset(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
+ gfp, bio_set);
+ if (!bio)
+ bio = bio_alloc_bioset(iomap->bdev, 1, REQ_OP_READ, orig_gfp,
+ bio_set);
+ if (ctx->rac)
+ bio->bi_opf |= REQ_RAHEAD;
+ bio->bi_iter.bi_sector = iomap_sector(iomap, iter->pos);
+ bio->bi_end_io = iomap_read_end_io;
+ bio_add_folio_nofail(bio, folio, plen,
+ offset_in_folio(folio, iter->pos));
+ ctx->read_ctx = bio;
+ ctx->read_ctx_file_offset = iter->pos;
}
-static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+int iomap_bio_read_folio_range(const struct iomap_iter *iter,
struct iomap_read_folio_ctx *ctx, size_t plen)
{
struct folio *folio = ctx->cur_folio;
- const struct iomap *iomap = &iter->iomap;
- loff_t pos = iter->pos;
- size_t poff = offset_in_folio(folio, pos);
- loff_t length = iomap_length(iter);
- sector_t sector;
struct bio *bio = ctx->read_ctx;
- sector = iomap_sector(iomap, pos);
- if (!bio || bio_end_sector(bio) != sector ||
- !bio_add_folio(bio, folio, plen, poff)) {
- gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
- gfp_t orig_gfp = gfp;
- unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
-
- if (bio)
- submit_bio(bio);
-
- if (ctx->rac) /* same as readahead_gfp_mask */
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
- bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
- gfp);
- /*
- * If the bio_alloc fails, try it again for a single page to
- * avoid having to deal with partial page reads. This emulates
- * what do_mpage_read_folio does.
- */
- if (!bio)
- bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
- if (ctx->rac)
- bio->bi_opf |= REQ_RAHEAD;
- bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = iomap_read_end_io;
- bio_add_folio_nofail(bio, folio, plen, poff);
- ctx->read_ctx = bio;
- }
+ if (!bio ||
+ bio_end_sector(bio) != iomap_sector(&iter->iomap, iter->pos) ||
+ bio->bi_iter.bi_size > iomap_max_bio_size(&iter->iomap) - plen ||
+ !bio_add_folio(bio, folio, plen, offset_in_folio(folio, iter->pos)))
+ iomap_read_alloc_bio(iter, ctx, plen);
return 0;
}
+EXPORT_SYMBOL_GPL(iomap_bio_read_folio_range);
const struct iomap_read_ops iomap_bio_read_ops = {
- .read_folio_range = iomap_bio_read_folio_range,
- .submit_read = iomap_bio_submit_read,
+ .read_folio_range = iomap_bio_read_folio_range,
+ .submit_read = iomap_bio_submit_read,
};
EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
@@ -78,11 +115,21 @@ int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
struct folio *folio, loff_t pos, size_t len)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ sector_t sector = iomap_sector(srcmap, pos);
struct bio_vec bvec;
struct bio bio;
+ int error;
bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
- bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
+ bio.bi_iter.bi_sector = sector;
bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
- return submit_bio_wait(&bio);
+ if (srcmap->flags & IOMAP_F_INTEGRITY)
+ fs_bio_integrity_alloc(&bio);
+ error = submit_bio_wait(&bio);
+ if (srcmap->flags & IOMAP_F_INTEGRITY) {
+ if (!error)
+ error = fs_bio_integrity_verify(&bio, sector, len);
+ fs_bio_integrity_free(&bio);
+ }
+ return error;
}
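
Note the shape of the new cap check in iomap_bio_read_folio_range() above: it tests bio->bi_iter.bi_size > iomap_max_bio_size(...) - plen rather than bi_size + plen > max, so the left-hand side can never wrap; iomap_can_add_to_ioend() in fs/iomap/ioend.c below uses the same form. A runnable userspace sketch of the pattern (illustrative only, not from the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * would_exceed(): true if adding "add" bytes to "cur" would cross "cap".
 * Written as cur > cap - add so the comparison cannot overflow even when
 * cur is close to UINT32_MAX.
 */
static int would_exceed(uint32_t cur, uint32_t add, uint32_t cap)
{
	assert(add <= cap);
	return cur > cap - add;
}

int main(void)
{
	printf("%d\n", would_exceed(UINT32_MAX - 8, 16, UINT32_MAX)); /* 1 */
	printf("%d\n", would_exceed(4096, 4096, 8192));               /* 0 */
	return 0;
}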
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 00f0efaf12b2..3cf93ab2e38a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -596,8 +596,8 @@ void iomap_read_folio(const struct iomap_ops *ops,
iter.status = iomap_read_folio_iter(&iter, ctx,
&bytes_submitted);
- if (ctx->ops->submit_read)
- ctx->ops->submit_read(ctx);
+ if (ctx->read_ctx && ctx->ops->submit_read)
+ ctx->ops->submit_read(&iter, ctx);
if (ctx->cur_folio)
iomap_read_end(ctx->cur_folio, bytes_submitted);
@@ -663,8 +663,8 @@ void iomap_readahead(const struct iomap_ops *ops,
iter.status = iomap_readahead_iter(&iter, ctx,
&cur_bytes_submitted);
- if (ctx->ops->submit_read)
- ctx->ops->submit_read(ctx);
+ if (ctx->read_ctx && ctx->ops->submit_read)
+ ctx->ops->submit_read(&iter, ctx);
if (ctx->cur_folio)
iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 2cb0c0f43215..c24d94349ca5 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -3,6 +3,7 @@
* Copyright (C) 2010 Red Hat, Inc.
* Copyright (c) 2016-2025 Christoph Hellwig.
*/
+#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
@@ -240,6 +241,9 @@ static void __iomap_dio_bio_end_io(struct bio *bio, bool inline_completion)
{
struct iomap_dio *dio = bio->bi_private;
+ if (bio_integrity(bio))
+ fs_bio_integrity_free(bio);
+
if (dio->flags & IOMAP_DIO_BOUNCE) {
bio_iov_iter_unbounce(bio, !!dio->error,
dio->flags & IOMAP_DIO_USER_BACKED);
@@ -350,8 +354,10 @@ static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
+
if (dio->flags & IOMAP_DIO_BOUNCE)
- ret = bio_iov_iter_bounce(bio, dio->submit.iter, BIO_MAX_SIZE);
+ ret = bio_iov_iter_bounce(bio, dio->submit.iter,
+ iomap_max_bio_size(&iter->iomap));
else
ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
alignment - 1);
@@ -368,6 +374,13 @@ static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
goto out_put_bio;
}
+ if (iter->iomap.flags & IOMAP_F_INTEGRITY) {
+ if (dio->flags & IOMAP_DIO_WRITE)
+ fs_bio_integrity_generate(bio);
+ else
+ fs_bio_integrity_alloc(bio);
+ }
+
if (dio->flags & IOMAP_DIO_WRITE)
task_io_account_write(ret);
else if ((dio->flags & IOMAP_DIO_USER_BACKED) &&
diff --git a/fs/iomap/internal.h b/fs/iomap/internal.h
index 3a4e4aad2bd1..74e898b196dc 100644
--- a/fs/iomap/internal.h
+++ b/fs/iomap/internal.h
@@ -4,6 +4,20 @@
#define IOEND_BATCH_SIZE 4096
+/*
+ * Normally we can build bios as big as the data structure supports.
+ *
+ * But for integrity-protected I/O we need to respect the maximum size of
+ * a single contiguous allocation for the integrity buffer.
+ */
+static inline size_t iomap_max_bio_size(const struct iomap *iomap)
+{
+ if (iomap->flags & IOMAP_F_INTEGRITY)
+ return max_integrity_io_size(bdev_limits(iomap->bdev));
+ return BIO_MAX_SIZE;
+}
+
+u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend);
u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
#ifdef CONFIG_BLOCK
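
The fs_bio_integrity_* helpers used throughout this series (they come from an earlier patch in the series, not from mainline) follow one fixed life cycle: fs_bio_integrity_generate() before submitting a write, fs_bio_integrity_alloc() before submitting a read, fs_bio_integrity_verify() on read completion, and fs_bio_integrity_free() before the final bio_put(). A condensed sketch of the synchronous read case, mirroring iomap_bio_read_folio_range_sync() above:

#include <linux/bio-integrity.h>
#include <linux/bio.h>

/* Sketch only: read "len" bytes at "sector" and verify the returned PI. */
static int example_read_and_verify(struct bio *bio, sector_t sector,
		size_t len)
{
	int error;

	fs_bio_integrity_alloc(bio);	/* buffer for device-returned PI */
	error = submit_bio_wait(bio);
	if (!error)
		error = fs_bio_integrity_verify(bio, sector, len);
	fs_bio_integrity_free(bio);	/* always release the PI buffer */
	return error;
}

Writes are the mirror image: fs_bio_integrity_generate() fills the buffer before submit_bio(), and completion only needs the free, as the ioend.c hunks below show.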
diff --git a/fs/iomap/ioend.c b/fs/iomap/ioend.c
index 60546fa14dfe..7c034b6a583e 100644
--- a/fs/iomap/ioend.c
+++ b/fs/iomap/ioend.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2016-2025 Christoph Hellwig.
*/
+#include <linux/bio-integrity.h>
#include <linux/iomap.h>
#include <linux/list_sort.h>
#include <linux/pagemap.h>
@@ -37,7 +38,7 @@ EXPORT_SYMBOL_GPL(iomap_init_ioend);
* state, release holds on bios, and finally free up memory. Do not use the
* ioend after this.
*/
-static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
+static u32 iomap_finish_ioend_buffered_write(struct iomap_ioend *ioend)
{
struct inode *inode = ioend->io_inode;
struct bio *bio = &ioend->io_bio;
@@ -65,6 +66,8 @@ static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
folio_count++;
}
+ if (bio_integrity(bio))
+ fs_bio_integrity_free(bio);
bio_put(bio); /* frees the ioend */
return folio_count;
}
@@ -87,7 +90,7 @@ iomap_fail_ioends(
while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
io_list))) {
list_del_init(&ioend->io_list);
- iomap_finish_ioend_buffered(ioend);
+ iomap_finish_ioend_buffered_write(ioend);
cond_resched();
}
}
@@ -120,7 +123,7 @@ static void ioend_writeback_end_bio(struct bio *bio)
return;
}
- iomap_finish_ioend_buffered(ioend);
+ iomap_finish_ioend_buffered_write(ioend);
}
/*
@@ -144,6 +147,8 @@ int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
return error;
}
+ if (wpc->iomap.flags & IOMAP_F_INTEGRITY)
+ fs_bio_integrity_generate(&ioend->io_bio);
submit_bio(&ioend->io_bio);
return 0;
}
@@ -165,10 +170,13 @@ static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
}
static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
- u16 ioend_flags)
+ unsigned int map_len, u16 ioend_flags)
{
struct iomap_ioend *ioend = wpc->wb_ctx;
+ if (ioend->io_bio.bi_iter.bi_size >
+ iomap_max_bio_size(&wpc->iomap) - map_len)
+ return false;
if (ioend_flags & IOMAP_IOEND_BOUNDARY)
return false;
if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
@@ -234,7 +242,7 @@ ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
ioend_flags |= IOMAP_IOEND_BOUNDARY;
- if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
+ if (!ioend || !iomap_can_add_to_ioend(wpc, pos, map_len, ioend_flags)) {
new_ioend:
if (ioend) {
error = wpc->ops->writeback_submit(wpc, 0);
@@ -311,9 +319,19 @@ static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
if (!atomic_dec_and_test(&ioend->io_remaining))
return 0;
+
+ if (!ioend->io_error &&
+ bio_integrity(&ioend->io_bio) &&
+ bio_op(&ioend->io_bio) == REQ_OP_READ) {
+ ioend->io_error = fs_bio_integrity_verify(&ioend->io_bio,
+ ioend->io_sector, ioend->io_size);
+ }
+
if (ioend->io_flags & IOMAP_IOEND_DIRECT)
return iomap_finish_ioend_direct(ioend);
- return iomap_finish_ioend_buffered(ioend);
+ if (bio_op(&ioend->io_bio) == REQ_OP_READ)
+ return iomap_finish_ioend_buffered_read(ioend);
+ return iomap_finish_ioend_buffered_write(ioend);
}
/*
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 6e65066ebcc1..60af9f8e0366 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -605,63 +605,18 @@ static void ntfs_iomap_read_end_io(struct bio *bio)
bio_put(bio);
}
-/*
- * Copied from iomap/bio.c.
- */
-static int ntfs_iomap_bio_read_folio_range(const struct iomap_iter *iter,
- struct iomap_read_folio_ctx *ctx,
- size_t plen)
-{
- struct folio *folio = ctx->cur_folio;
- const struct iomap *iomap = &iter->iomap;
- loff_t pos = iter->pos;
- size_t poff = offset_in_folio(folio, pos);
- loff_t length = iomap_length(iter);
- sector_t sector;
- struct bio *bio = ctx->read_ctx;
-
- sector = iomap_sector(iomap, pos);
- if (!bio || bio_end_sector(bio) != sector ||
- !bio_add_folio(bio, folio, plen, poff)) {
- gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
- gfp_t orig_gfp = gfp;
- unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
-
- if (bio)
- submit_bio(bio);
-
- if (ctx->rac) /* same as readahead_gfp_mask */
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
- bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
- gfp);
- /*
- * If the bio_alloc fails, try it again for a single page to
- * avoid having to deal with partial page reads. This emulates
- * what do_mpage_read_folio does.
- */
- if (!bio)
- bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
- if (ctx->rac)
- bio->bi_opf |= REQ_RAHEAD;
- bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = ntfs_iomap_read_end_io;
- bio_add_folio_nofail(bio, folio, plen, poff);
- ctx->read_ctx = bio;
- }
- return 0;
-}
-
-static void ntfs_iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+static void ntfs_iomap_bio_submit_read(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx)
{
struct bio *bio = ctx->read_ctx;
- if (bio)
- submit_bio(bio);
+ bio->bi_end_io = ntfs_iomap_read_end_io;
+ submit_bio(bio);
}
static const struct iomap_read_ops ntfs_iomap_bio_read_ops = {
- .read_folio_range = ntfs_iomap_bio_read_folio_range,
- .submit_read = ntfs_iomap_bio_submit_read,
+ .read_folio_range = iomap_bio_read_folio_range,
+ .submit_read = ntfs_iomap_bio_submit_read,
};
static int ntfs_read_folio(struct file *file, struct folio *folio)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 76678814f46f..f279055fcea0 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -22,6 +22,7 @@
#include "xfs_icache.h"
#include "xfs_zone_alloc.h"
#include "xfs_rtgroup.h"
+#include <linux/bio-integrity.h>
struct xfs_writepage_ctx {
struct iomap_writepage_ctx ctx;
@@ -661,6 +662,8 @@ xfs_zoned_writeback_submit(
bio_endio(&ioend->io_bio);
return error;
}
+ if (wpc->iomap.flags & IOMAP_F_INTEGRITY)
+ fs_bio_integrity_generate(&ioend->io_bio);
xfs_zone_alloc_and_submit(ioend, &XFS_ZWPC(wpc)->open_zone);
return 0;
}
@@ -741,12 +744,45 @@ xfs_vm_bmap(
return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}
+static void
+xfs_bio_submit_read(
+ const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx)
+{
+ struct bio *bio = ctx->read_ctx;
+
+ /* defer read completions to the ioend workqueue */
+ iomap_init_ioend(iter->inode, bio, ctx->read_ctx_file_offset, 0);
+ bio->bi_end_io = xfs_end_bio;
+ submit_bio(bio);
+}
+
+static const struct iomap_read_ops xfs_iomap_read_ops = {
+ .read_folio_range = iomap_bio_read_folio_range,
+ .submit_read = xfs_bio_submit_read,
+ .bio_set = &iomap_ioend_bioset,
+};
+
+static inline const struct iomap_read_ops *
+xfs_get_iomap_read_ops(
+ const struct address_space *mapping)
+{
+ struct xfs_inode *ip = XFS_I(mapping->host);
+
+ if (bdev_has_integrity_csum(xfs_inode_buftarg(ip)->bt_bdev))
+ return &xfs_iomap_read_ops;
+ return &iomap_bio_read_ops;
+}
+
STATIC int
xfs_vm_read_folio(
- struct file *unused,
- struct folio *folio)
+ struct file *file,
+ struct folio *folio)
{
- iomap_bio_read_folio(folio, &xfs_read_iomap_ops);
+ struct iomap_read_folio_ctx ctx = { .cur_folio = folio };
+
+ ctx.ops = xfs_get_iomap_read_ops(folio->mapping);
+ iomap_read_folio(&xfs_read_iomap_ops, &ctx, NULL);
return 0;
}
@@ -754,7 +790,10 @@ STATIC void
xfs_vm_readahead(
struct readahead_control *rac)
{
- iomap_bio_readahead(rac, &xfs_read_iomap_ops);
+ struct iomap_read_folio_ctx ctx = { .rac = rac };
+
+ ctx.ops = xfs_get_iomap_read_ops(rac->mapping);
+ iomap_readahead(&xfs_read_iomap_ops, &ctx, NULL);
}
static int
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index be86d43044df..9c2f12d5fec9 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -143,11 +143,14 @@ xfs_bmbt_to_iomap(
}
iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
- if (mapping_flags & IOMAP_DAX)
+ iomap->flags = iomap_flags;
+ if (mapping_flags & IOMAP_DAX) {
iomap->dax_dev = target->bt_daxdev;
- else
+ } else {
iomap->bdev = target->bt_bdev;
- iomap->flags = iomap_flags;
+ if (bdev_has_integrity_csum(iomap->bdev))
+ iomap->flags |= IOMAP_F_INTEGRITY;
+ }
/*
* If the inode is dirty for datasync purposes, let iomap know so it
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 99b7209dabd7..531f9ebdeeae 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -65,6 +65,8 @@ struct vm_fault;
*
* IOMAP_F_ATOMIC_BIO indicates that (write) I/O will be issued as an atomic
* bio, i.e. set REQ_ATOMIC.
+ *
+ * IOMAP_F_INTEGRITY indicates that the filesystem handles integrity metadata.
*/
#define IOMAP_F_NEW (1U << 0)
#define IOMAP_F_DIRTY (1U << 1)
@@ -79,6 +81,11 @@ struct vm_fault;
#define IOMAP_F_BOUNDARY (1U << 6)
#define IOMAP_F_ANON_WRITE (1U << 7)
#define IOMAP_F_ATOMIC_BIO (1U << 8)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+#define IOMAP_F_INTEGRITY (1U << 9)
+#else
+#define IOMAP_F_INTEGRITY 0
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
* Flag reserved for file system specific usage
@@ -493,6 +500,7 @@ struct iomap_read_folio_ctx {
struct folio *cur_folio;
struct readahead_control *rac;
void *read_ctx;
+ loff_t read_ctx_file_offset;
};
struct iomap_read_ops {
@@ -512,7 +520,14 @@ struct iomap_read_ops {
*
* This is optional.
*/
- void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+ void (*submit_read)(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx);
+
+ /*
+ * Optional: lets the filesystem specify its own bio_set, so that new
+ * bios are allocated from it.
+ */
+ struct bio_set *bio_set;
};
/*
@@ -598,6 +613,9 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
extern struct bio_set iomap_ioend_bioset;
#ifdef CONFIG_BLOCK
+int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t plen);
+
extern const struct iomap_read_ops iomap_bio_read_ops;
static inline void iomap_bio_read_folio(struct folio *folio,