diff options
| -rw-r--r-- | fs/xfs/xfs_zone_gc.c | 19 |
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index 30bcc415eaeb..e7b33d5a8b3d 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -706,7 +706,6 @@ xfs_zone_gc_start_chunk(
 	struct xfs_inode	*ip;
 	struct bio		*bio;
 	xfs_daddr_t		daddr;
-	unsigned int		len;
 	bool			is_seq;
 
 	if (!xfs_zone_gc_can_start_chunk(data))
@@ -722,15 +721,16 @@ xfs_zone_gc_start_chunk(
 		return false;
 	}
 
-	len = XFS_FSB_TO_B(mp, irec.rm_blockcount);
-	bio = bio_alloc_bioset(bdev,
-			min(howmany(len, XFS_GC_BUF_SIZE) + 1, XFS_GC_NR_BUFS),
-			REQ_OP_READ, GFP_NOFS, &data->bio_set);
-
+	/*
+	 * Scratch allocation can wrap around to the same buffer again,
+	 * provision an extra bvec for that case.
+	 */
+	bio = bio_alloc_bioset(bdev, XFS_GC_NR_BUFS + 1, REQ_OP_READ, GFP_NOFS,
+			&data->bio_set);
 	chunk = container_of(bio, struct xfs_gc_bio, bio);
 	chunk->ip = ip;
 	chunk->offset = XFS_FSB_TO_B(mp, irec.rm_offset);
-	chunk->len = len;
+	chunk->len = XFS_FSB_TO_B(mp, irec.rm_blockcount);
 	chunk->old_startblock =
 		xfs_rgbno_to_rtb(iter->victim_rtg, irec.rm_startblock);
 	chunk->new_daddr = daddr;
@@ -744,8 +744,9 @@ xfs_zone_gc_start_chunk(
 	bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);
 	bio->bi_end_io = xfs_zone_gc_end_io;
 	xfs_zone_gc_add_data(chunk);
-	data->scratch_head = (data->scratch_head + len) % data->scratch_size;
-	data->scratch_available -= len;
+	data->scratch_head =
+		(data->scratch_head + chunk->len) % data->scratch_size;
+	data->scratch_available -= chunk->len;
 	XFS_STATS_INC(mp, xs_gc_read_calls);
