| author | Max Gurtovoy <maxg@mellanox.com> | 2018-07-30 00:15:32 +0300 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2018-07-30 17:27:02 +0300 |
| commit | 10c41ddd61323b27b447bc8e18296ac6c06107ad (patch) | |
| tree | 116e298295cfafda21080e0c83b501862753fcf1 /block/t10-pi.c | |
| parent | ddd0bc756983dc4d19000a4fe021b4c7f9d59aab (diff) | |
| download | linux-10c41ddd61323b27b447bc8e18296ac6c06107ad.tar.xz | |
block: move dif_prepare/dif_complete functions to block layer
Currently these functions are implemented in the SCSI layer, but they
belong in the block layer, since T10-PI is a generic data integrity
feature that is used by the NVMe protocol as well. Also, use the tuple
size from the integrity profile, since it may vary between integrity
types.
Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
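For context, here is a minimal caller-side sketch of what this move enables: a protocol driver can call the now-exported block-layer helpers instead of carrying a private dif_prepare/dif_complete. This is an illustrative assumption, not code from this patch; the `example_*` symbols are invented, while `t10_pi_prepare()`, `t10_pi_complete()`, `blk_integrity_rq()`, and `T10_PI_TYPE1_PROTECTION` are the real interfaces involved.

```c
/*
 * Hypothetical caller sketch (not part of this patch). The example_*
 * names are invented for illustration only.
 */
#include <linux/blkdev.h>
#include <linux/t10-pi.h>

static blk_status_t example_hw_submit(struct request *rq); /* hypothetical */

static blk_status_t example_queue_rq(struct request *rq)
{
	/* Remap virtual ref tags to physical ones before the hand-off. */
	if (blk_integrity_rq(rq))
		t10_pi_prepare(rq, T10_PI_TYPE1_PROTECTION);

	return example_hw_submit(rq);
}

static void example_complete_rq(struct request *rq, unsigned int intervals)
{
	/* Restore the virtual ref tags the block layer expects to see. */
	if (blk_integrity_rq(rq))
		t10_pi_complete(rq, T10_PI_TYPE1_PROTECTION, intervals);
}
```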
Diffstat (limited to 'block/t10-pi.c')
| -rw-r--r-- | block/t10-pi.c | 110 |
1 file changed, 110 insertions, 0 deletions
```diff
diff --git a/block/t10-pi.c b/block/t10-pi.c
index a98db384048f..62aed77d0bb9 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -184,3 +184,113 @@ const struct blk_integrity_profile t10_pi_type3_ip = {
 	.verify_fn		= t10_pi_type3_verify_ip,
 };
 EXPORT_SYMBOL(t10_pi_type3_ip);
+
+/**
+ * t10_pi_prepare - prepare PI prior submitting request to device
+ * @rq:              request with PI that should be prepared
+ * @protection_type: PI type (Type 1/Type 2/Type 3)
+ *
+ * For Type 1/Type 2, the virtual start sector is the one that was
+ * originally submitted by the block layer for the ref_tag usage. Due to
+ * partitioning, MD/DM cloning, etc. the actual physical start sector is
+ * likely to be different. Remap protection information to match the
+ * physical LBA.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+void t10_pi_prepare(struct request *rq, u8 protection_type)
+{
+	const int tuple_sz = rq->q->integrity.tuple_size;
+	u32 ref_tag = t10_pi_ref_tag(rq);
+	struct bio *bio;
+
+	if (protection_type == T10_PI_TYPE3_PROTECTION)
+		return;
+
+	__rq_for_each_bio(bio, rq) {
+		struct bio_integrity_payload *bip = bio_integrity(bio);
+		u32 virt = bip_get_seed(bip) & 0xffffffff;
+		struct bio_vec iv;
+		struct bvec_iter iter;
+
+		/* Already remapped? */
+		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
+			break;
+
+		bip_for_each_vec(iv, bip, iter) {
+			void *p, *pmap;
+			unsigned int j;
+
+			pmap = kmap_atomic(iv.bv_page);
+			p = pmap + iv.bv_offset;
+			for (j = 0; j < iv.bv_len; j += tuple_sz) {
+				struct t10_pi_tuple *pi = p;
+
+				if (be32_to_cpu(pi->ref_tag) == virt)
+					pi->ref_tag = cpu_to_be32(ref_tag);
+				virt++;
+				ref_tag++;
+				p += tuple_sz;
+			}
+
+			kunmap_atomic(pmap);
+		}
+
+		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
+	}
+}
+EXPORT_SYMBOL(t10_pi_prepare);
+
+/**
+ * t10_pi_complete - prepare PI prior returning request to the block layer
+ * @rq:              request with PI that should be prepared
+ * @protection_type: PI type (Type 1/Type 2/Type 3)
+ * @intervals:       total elements to prepare
+ *
+ * For Type 1/Type 2, the virtual start sector is the one that was
+ * originally submitted by the block layer for the ref_tag usage. Due to
+ * partitioning, MD/DM cloning, etc. the actual physical start sector is
+ * likely to be different. Since the physical start sector was submitted
+ * to the device, we should remap it back to virtual values expected by the
+ * block layer.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+void t10_pi_complete(struct request *rq, u8 protection_type,
+		     unsigned int intervals)
+{
+	const int tuple_sz = rq->q->integrity.tuple_size;
+	u32 ref_tag = t10_pi_ref_tag(rq);
+	struct bio *bio;
+
+	if (protection_type == T10_PI_TYPE3_PROTECTION)
+		return;
+
+	__rq_for_each_bio(bio, rq) {
+		struct bio_integrity_payload *bip = bio_integrity(bio);
+		u32 virt = bip_get_seed(bip) & 0xffffffff;
+		struct bio_vec iv;
+		struct bvec_iter iter;
+
+		bip_for_each_vec(iv, bip, iter) {
+			void *p, *pmap;
+			unsigned int j;
+
+			pmap = kmap_atomic(iv.bv_page);
+			p = pmap + iv.bv_offset;
+			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
+				struct t10_pi_tuple *pi = p;
+
+				if (be32_to_cpu(pi->ref_tag) == ref_tag)
+					pi->ref_tag = cpu_to_be32(virt);
+				virt++;
+				ref_tag++;
+				intervals--;
+				p += tuple_sz;
+			}
+
+			kunmap_atomic(pmap);
+		}
+	}
+}
+EXPORT_SYMBOL(t10_pi_complete);
```
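The remapping loop is easier to follow outside diff form, so here is a small self-contained userspace sketch of the same arithmetic. It is a toy under stated assumptions: the `toy_*` names and the 8-byte tuple layout are invented for illustration, and `htonl`/`ntohl` stand in for the kernel's `cpu_to_be32`/`be32_to_cpu`.

```c
/*
 * Toy model of the t10_pi_prepare() loop (userspace C, not kernel code):
 * walk fixed-size PI tuples and rewrite any ref tag that still matches
 * the virtual sector numbering to the physical one.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl/ntohl in place of cpu_to_be32/be32_to_cpu */

struct toy_pi_tuple {
	uint16_t guard_tag;
	uint16_t app_tag;
	uint32_t ref_tag;	/* big-endian on disk, as in struct t10_pi_tuple */
};

/* Mirror of the inner loop for one contiguous run of n tuples. */
static void toy_prepare(struct toy_pi_tuple *pi, size_t n,
			uint32_t virt, uint32_t phys)
{
	for (size_t i = 0; i < n; i++, virt++, phys++) {
		if (ntohl(pi[i].ref_tag) == virt)
			pi[i].ref_tag = htonl(phys);
	}
}

int main(void)
{
	struct toy_pi_tuple pi[4];

	/* Bio seeded at virtual sector 100; partition starts 2048 sectors in. */
	for (uint32_t i = 0; i < 4; i++)
		pi[i].ref_tag = htonl(100 + i);

	toy_prepare(pi, 4, 100, 2148);

	for (int i = 0; i < 4; i++)
		printf("tuple %d ref_tag %u\n", i, ntohl(pi[i].ref_tag));
	return 0;
}
```

Running this rewrites ref tags 100..103 to 2148..2151, mirroring what `t10_pi_prepare()` does for a bio seeded at virtual sector 100 on a partition offset by 2048 sectors; `t10_pi_complete()` is the inverse walk, bounded by `intervals` so that partially completed requests are only remapped as far as the device actually got.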