/*
 * Generic iterator over a sequence of memory segments: user iovecs,
 * kernel kvecs, bio_vecs, folio queues, an xarray range, or a single
 * user buffer (selected by @iter_type).
 */
struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;	/* true if data flows from the iterator */
	size_t iov_offset;	/* byte offset into the current segment */
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				const struct folio_queue *folioq;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;	/* bytes remaining in the iterator */
		};
	};
	union {
		unsigned long nr_segs;	/* segments left (vec-backed types) */
		u8 folioq_slot;		/* current slot in the folio queue */
		loff_t xarray_start;	/* base file offset of the xarray */
	};
};
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated. Because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
/* No arch flushcache support: fall back to the nocache variant. */
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's upper limit for such. Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignement is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
/*
 * Return the number of pages covered by the iterator, limited to @maxpages
 * pages and to at most @max_bytes of data.  The iterator is temporarily
 * truncated for the query and re-expanded afterwards, so its count is
 * unchanged on return.
 *
 * NOTE(review): the body of this function was truncated in the extracted
 * source; restored from the canonical kernel implementation — verify
 * against the original file.
 */
static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}
/** * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained * @iter: The iterator * * Examine the iterator and indicate by returning true or false as to how, if * at all, pages extracted from the iterator will be retained by the extraction * function. * * %true indicates that the pages will have a pin placed in them that the * caller must unpin. This is must be done for DMA/async DIO to force fork() * to forcibly copy a page for the child (the parent must retain the original * page). * * %false indicates that no measures are taken and that it's up to the caller * to retain the pages.
*/ staticinlinebool iov_iter_extract_will_pin(conststruct iov_iter *iter)
{ return user_backed_iter(iter);
}
/*
 * [Extraction artifact — website boilerplate appended by the scraping tool,
 *  not part of this header. Original German text, translated:
 *  "The information on this website has been carefully compiled to the best
 *   of our knowledge. However, neither completeness, correctness, nor quality
 *   of the provided information is guaranteed.
 *   Note: the colored syntax highlighting and the measurement are still
 *   experimental."
 *  This trailer should be removed when restoring the original source file.]
 */