/*
 * NOTE(review): this span is a garbled extraction — "staticvoid" and
 * "unsignedint" are fused tokens (compile errors), and the for-loop body
 * opened on the last line below is never closed in this chunk.  The code
 * that actually fills in each PI tuple (guard/app/ref tags) is missing;
 * restore the full function body from upstream before building.
 */
/* * Type 1 and Type 2 protection use the same format: 16 bit guard tag, * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref * tag.
 */ staticvoid t10_pi_generate(struct blk_integrity_iter *iter, struct blk_integrity *bi)
{
u8 offset = bi->pi_offset; unsignedint i;
/* Walk the data buffer one protection interval at a time; 'pi' points at
 * the t10_pi_tuple located pi_offset bytes into the protection buffer.
 * (Advancement of iter->prot_buf per interval is not visible here —
 * presumably done in the missing loop body; confirm against upstream.) */
for (i = 0 ; i < iter->data_size ; i += iter->interval) { struct t10_pi_tuple *pi = iter->prot_buf + offset;
/*
 * NOTE(review): incomplete extraction — the request/bio iteration lines
 * (declarations of 'p', 'virt', 'j', 'iv', 'bip' and the __rq_for_each_bio /
 * bip_for_each_vec loop headers) are missing between the variable
 * declarations and the kmap below, so the closing braces near the end close
 * scopes that are never opened in this chunk.  "staticvoid" and "constint"
 * are fused tokens.  Restore the missing lines from upstream before building.
 */
/** * t10_pi_type1_prepare - prepare PI prior submitting request to device * @rq: request with PI that should be prepared * * For Type 1/Type 2, the virtual start sector is the one that was * originally submitted by the block layer for the ref_tag usage. Due to * partitioning, MD/DM cloning, etc. the actual physical start sector is * likely to be different. Remap protection information to match the * physical LBA.
 */ staticvoid t10_pi_type1_prepare(struct request *rq)
{ struct blk_integrity *bi = &rq->q->limits.integrity; constint tuple_sz = bi->metadata_size;
u32 ref_tag = t10_pi_ref_tag(rq);
u8 offset = bi->pi_offset; struct bio *bio;
/* Map each metadata vector and walk it one tuple at a time, remapping any
 * ref_tag that still holds the virtual sector number to the physical one. */
p = bvec_kmap_local(&iv); for (j = 0; j < iv.bv_len; j += tuple_sz) { struct t10_pi_tuple *pi = p + offset;
/* Only rewrite tuples that match the expected virtual sector — tuples
 * already remapped (or belonging to a partial interval) are left alone. */
if (be32_to_cpu(pi->ref_tag) == virt)
pi->ref_tag = cpu_to_be32(ref_tag);
virt++;
ref_tag++;
p += tuple_sz;
}
kunmap_local(p);
}
/* Mark this bip as remapped so completion knows to restore virtual tags. */
bip->bip_flags |= BIP_MAPPED_INTEGRITY;
}
}
/*
 * NOTE(review): this span fuses two different functions.  The kernel-doc and
 * signature below belong to t10_pi_type1_complete(), but everything from the
 * "At the moment verify is called ..." comment onward belongs to the
 * integrity *verify* path of a different function: 'iter', 'saved_iter',
 * 'bip', 'bv', and 'bviter' have no visible declarations, the body that
 * restores virtual ref_tags is missing, and the switch's result handling
 * after kunmap_local() is cut off.  "staticvoid" and "constint" are fused
 * tokens.  Restore both functions in full from upstream before building.
 */
/** * t10_pi_type1_complete - prepare PI prior returning request to the blk layer * @rq: request with PI that should be prepared * @nr_bytes: total bytes to prepare * * For Type 1/Type 2, the virtual start sector is the one that was * originally submitted by the block layer for the ref_tag usage. Due to * partitioning, MD/DM cloning, etc. the actual physical start sector is * likely to be different. Since the physical start sector was submitted * to the device, we should remap it back to virtual values expected by the * block layer.
 */ staticvoid t10_pi_type1_complete(struct request *rq, unsignedint nr_bytes)
{ struct blk_integrity *bi = &rq->q->limits.integrity; unsigned intervals = nr_bytes >> bi->interval_exp; constint tuple_sz = bi->metadata_size;
u32 ref_tag = t10_pi_ref_tag(rq);
u8 offset = bi->pi_offset; struct bio *bio;
/* * At the moment verify is called bi_iter has been advanced during split * and completion, so use the copy created during submission here.
 */
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
iter.interval = 1 << bi->interval_exp;
iter.seed = saved_iter->bi_sector;
iter.prot_buf = bvec_virt(bip->bip_vec);
/* Verify each data segment against its protection buffer, dispatching on
 * the checksum type configured for this queue. */
__bio_for_each_segment(bv, bio, bviter, *saved_iter) { void *kaddr = bvec_kmap_local(&bv);
blk_status_t ret = BLK_STS_OK;
iter.data_buf = kaddr;
iter.data_size = bv.bv_len; switch (bi->csum_type) { case BLK_INTEGRITY_CSUM_CRC64:
ret = ext_pi_crc64_verify(&iter, bi); break; case BLK_INTEGRITY_CSUM_CRC: case BLK_INTEGRITY_CSUM_IP:
ret = t10_pi_verify(&iter, bi); break; default: break;
}
/* NOTE(review): 'ret' is unused past this point in the visible span —
 * the error-propagation code after kunmap_local() is missing. */
kunmap_local(kaddr);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.