/** * bio_copy_from_iter - copy all pages from iov_iter to bio * @bio: The &struct bio which describes the I/O as destination * @iter: iov_iter as source * * Copy all pages from iov_iter to bio. * Returns 0 on success, or error on failure.
*/ staticint bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{ struct bio_vec *bvec; struct bvec_iter_all iter_all;
ret = copy_page_from_iter(bvec->bv_page,
bvec->bv_offset,
bvec->bv_len,
iter);
if (!iov_iter_count(iter)) break;
if (ret < bvec->bv_len) return -EFAULT;
}
return 0;
}
/** * bio_copy_to_iter - copy all pages from bio to iov_iter * @bio: The &struct bio which describes the I/O as source * @iter: iov_iter as destination * * Copy all pages from bio to iov_iter. * Returns 0 on success, or error on failure.
*/ staticint bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{ struct bio_vec *bvec; struct bvec_iter_all iter_all;
ret = copy_page_to_iter(bvec->bv_page,
bvec->bv_offset,
bvec->bv_len,
&iter);
if (!iov_iter_count(&iter)) break;
if (ret < bvec->bv_len) return -EFAULT;
}
return 0;
}
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}
/*
 * Bounce-buffer a user iov_iter into freshly allocated (or caller-provided
 * map_data) pages and append the resulting bio to @rq.  Used when the user
 * memory cannot be mapped directly (alignment/limits mismatch).
 */
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	/*
	 * NOTE(review): the allocation/page loop below was dropped from the
	 * extracted source (the original text referenced 'bio', 'iter2' and
	 * the 'cleanup' label without introducing them).  Restored from
	 * upstream blk-map.c — verify against the target kernel tree.
	 */
	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;
		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}
			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);
			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_page(bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE &&
	    (!map_data || !map_data->null_mapped)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else if (map_data && map_data->from_user) {
		struct iov_iter iter2 = *iter;

		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
		iter2.data_source = ITER_SOURCE;
		ret = bio_copy_from_iter(bio, &iter2);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}
/*
 * Release a bio obtained from blk_rq_map_bio_alloc(), undoing whichever of
 * the two allocation schemes was used.
 */
static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		/* came from the bio set / alloc cache */
		bio_put(bio);
	} else {
		/* plain bio_kmalloc()ed bio */
		bio_uninit(bio);
		kfree(bio);
	}
}
staticstruct bio *blk_rq_map_bio_alloc(struct request *rq, unsignedint nr_vecs, gfp_t gfp_mask)
{ struct bio *bio;
if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
&fs_bio_set); if (!bio) return NULL;
} else {
bio = bio_kmalloc(nr_vecs, gfp_mask); if (!bio) return NULL;
bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
} return bio;
}
/*
 * Pin the user pages described by @iter and append them to @rq as a bio
 * (zero-copy path).  On failure the pages are released and the bio freed.
 */
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (!bio)
		return -ENOMEM;
	ret = bio_iov_iter_get_pages(bio, iter);
	if (ret)
		goto out_put;
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_release;
	return 0;

	/*
	 * NOTE(review): the error labels below were missing from the
	 * extracted source (they are jumped to above); restored from
	 * upstream blk-map.c — verify against the target kernel tree.
	 */
out_release:
	bio_release_pages(bio, false);
out_put:
	blk_mq_map_bio_put(bio);
	return ret;
}
/*
 * After a device-to-memory transfer into a vmalloc'ed buffer, invalidate
 * the kernel vmap range so the CPU observes the data the device wrote.
 * bi_private holds the original vmalloc address on this path.
 */
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		/* total transfer length across all segments */
		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}
/* Completion handler for bio_map_kern() bios: flush vmap range and free. */
static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}
/*
 * NOTE(review): this span is corrupted extraction output.  The first three
 * lines are the truncated head of bio_map_kern() (its body is missing), and
 * the bio_for_each_segment_all() loop below belongs to a different helper —
 * it matches the tail of upstream bio_copy_kern_endio_read(), which copies
 * bio data back into the kernel buffer after a read.  'bvec', 'iter_all'
 * and 'p' are never declared here, and 'staticstruct'/'unsignedint' are
 * fused keywords from the extraction.  Restore both functions from upstream
 * blk-map.c rather than patching this in place.
 */
staticstruct bio *bio_map_kern(void *data, unsignedint len, enum req_op op,
		gfp_t gfp_mask)
{ unsignedint nr_vecs = bio_add_max_vecs(data, len); struct bio *bio;
	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}
	bio_copy_kern_endio(bio);
}
/** * bio_copy_kern - copy kernel address into bio * @data: pointer to buffer to copy * @len: length in bytes * @op: bio/request operation * @gfp_mask: allocation flags for bio and page allocation * * copy the kernel address into a bio suitable for io to a block * device. Returns an error pointer in case of error.
 */ staticstruct bio *bio_copy_kern(void *data, unsignedint len, enum req_op op,
		gfp_t gfp_mask)
{ unsignedlong kaddr = (unsignedlong)data; unsignedlong end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsignedlong start = kaddr >> PAGE_SHIFT; struct bio *bio; void *p = data; int nr_pages = 0;
/*
 * NOTE(review): the entire body of bio_copy_kern() after these declarations
 * (bio allocation, page-by-page copy loop, end_io setup, return) was lost
 * in extraction, and 'staticstruct'/'unsignedlong' are fused keywords.
 * Restore the function body from upstream blk-map.c.
 */
/* * Append a bio to a passthrough request. Only works if the bio can be merged * into the request based on the driver constraints.
*/ int blk_rq_append_bio(struct request *rq, struct bio *bio)
{ conststruct queue_limits *lim = &rq->q->limits; unsignedint max_bytes = lim->max_hw_sectors << SECTOR_SHIFT; unsignedint nr_segs = 0; int ret;
/* check that the data layout matches the hardware restrictions */
ret = bio_split_io_at(bio, lim, &nr_segs, max_bytes, 0); if (ret) { /* if we would have to split the bio, copy instead */ if (ret > 0)
ret = -EREMOTEIO; return ret;
}
/* Prepare bio for passthrough IO given ITER_BVEC iter */ staticint blk_rq_map_user_bvec(struct request *rq, conststruct iov_iter *iter)
{ unsignedint max_bytes = rq->q->limits.max_hw_sectors << SECTOR_SHIFT; struct bio *bio; int ret;
if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes) return -EINVAL;
/* reuse the bvecs from the iterator instead of allocating new ones */
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL); if (!bio) return -ENOMEM;
bio_iov_bvec_set(bio, iter);
ret = blk_rq_append_bio(rq, bio); if (ret)
blk_mq_map_bio_put(bio); return ret;
}
/** * blk_rq_map_user_iov - map user data to a request, for passthrough requests * @q: request queue where request should be inserted * @rq: request to map data to * @map_data: pointer to the rq_map_data holding pages (if necessary) * @iter: iovec iterator * @gfp_mask: memory allocation flags * * Description: * Data will be mapped directly for zero copy I/O, if possible. Otherwise * a kernel bounce buffer is used. * * A matching blk_rq_unmap_user() must be issued at the end of I/O, while * still in process context.
 */ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, conststruct iov_iter *iter, gfp_t gfp_mask)
{ bool copy = false, map_bvec = false; unsignedlong align = blk_lim_dma_alignment_and_pad(&q->limits); struct bio *bio = NULL; struct iov_iter i; int ret = -EINVAL;
/*
 * NOTE(review): the logic that decides 'copy' and 'map_bvec' (iterator-type
 * and 'align' checks against the iov) was lost in extraction between the
 * declarations above and the branch below — 'map_bvec' is tested while
 * still false.  Also 'conststruct'/'unsignedlong' are fused keywords.
 * Restore from upstream blk-map.c.
 */
	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter); if (!ret) return 0; if (ret != -EREMOTEIO) goto fail; /* fall back to copying the data on limits mismatches */
		copy = true;
	}
	i = *iter; do { if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask); else
			ret = bio_map_user_iov(rq, &i, gfp_mask); if (ret) { if (ret == -EREMOTEIO)
				ret = -EINVAL; goto unmap_rq;
		} if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));
/*
 * NOTE(review): the tail (return 0; unmap_rq:/fail: error labels and
 * EXPORT_SYMBOL) is also missing — the 'unmap_rq' and 'fail' labels are
 * jumped to above but never defined in this extraction.
 */
/*
 * NOTE(review): orphaned interior fragment — by its use of import_iovec(),
 * 'ubuf'/'iov_count'/'buf_len' and 'check_iter_count' this matches the
 * middle of upstream blk_rq_map_user_io().  Its enclosing function header
 * and tail were lost in extraction; restore the whole function from
 * upstream blk-map.c.
 */
ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
		UIO_FASTIOV, &iov, &iter); if (ret < 0) return ret;
	if (iov_count) { /* SG_IO howto says that the shorter of the two wins */
		iov_iter_truncate(&iter, buf_len); if (check_iter_count && !iov_iter_count(&iter)) {
			kfree(iov); return -EINVAL;
		}
	}
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	while (bio) {
		struct bio *next = bio->bi_next;
		int err;

		if (bio->bi_private) {
			/* bounce-buffered mapping: copy back and free pages */
			err = bio_uncopy_user(bio);
			if (err && !ret)
				ret = err;
		} else {
			/* direct mapping: drop the pinned page references */
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		if (bio_integrity(bio))
			bio_integrity_unmap_user(bio);

		blk_mq_map_bio_put(bio);
		bio = next;
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
		gfp_t gfp_mask)
{
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(rq->q) << SECTOR_SHIFT))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	/* misaligned or on-stack buffers must be bounced, not mapped */
	if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
		bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask);
	else
		bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
/*
 * NOTE(review): the following text is residue from the web page that hosted
 * this source (a German measurement/disclaimer footer), not part of
 * blk-map.c.  Preserved here as a comment so the file remains valid C:
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.28 Sekunden (vorverarbeitet)
 * [Processing took 0.28 seconds (preprocessed)]
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 * [The information on this web page was compiled carefully to the best of
 * our knowledge; however, neither completeness, correctness, nor quality of
 * the provided information is guaranteed.]
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 * [Note: the syntax highlighting and the measurement are still experimental.]
 */