/** * __sg_next - return the next scatterlist entry in a list * @sg: The current sg entry * * Description: * If the entry is the last, return NULL; otherwise, step to the next * element in the array (@sg@+1). If that's a chain pointer, follow it; * otherwise just return the pointer to the current element.
**/ staticinlinestruct scatterlist *__sg_next(struct scatterlist *sg)
{ return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp: page pointer (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 *
 * The loop condition doubles as the page lookup: a zero pfn marks the end
 * of the table. The step clause advances one page at a time within the
 * current sg entry and hops to the next entry once @curr reaches @max.
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
/** * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist * @sg: The scatterlist * * Return: An unsigned int with segment sizes logically or'ed together. * A caller can use this information to determine what hardware page table * entry sizes can be used to map the memory represented by the scatterlist.
*/ staticinlineunsignedint i915_sg_dma_sizes(struct scatterlist *sg)
{ unsignedint page_sizes;
/* * For Xen PV guests pages aren't contiguous in DMA (machine) address * space. The DMA API takes care of that both in dma_alloc_* (by * calling into the hypervisor to make the pages contiguous) and in * dma_map_* (by bounce buffering). But i915 abuses ignores the * coherency aspects of the DMA API and thus can't cope with bounce * buffering actually happening, so add a hack here to force small * allocations and mappings when running in PV mode on Xen. * * Note this will still break if bounce buffering is required for other * reasons, like confidential computing hypervisors or PCIe root ports * with addressing limitations.
*/ if (xen_pv_domain())
max = PAGE_SIZE; return round_down(max, PAGE_SIZE);
}
bool i915_sg_trim(struct sg_table *orig_st);
/**
 * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
 */
struct i915_refct_sgt_ops {
	/**
	 * @release: Free the memory of the struct i915_refct_sgt,
	 * called from kref_put() when the refcount drops to zero.
	 */
	void (*release)(struct kref *ref);
};
/**
 * struct i915_refct_sgt - A refcounted scatter-gather table
 * @kref: struct kref for refcounting
 * @table: struct sg_table holding the scatter-gather table itself. Note that
 * @table->sgl = NULL can be used to determine whether a scatter-gather table
 * is present or not.
 * @size: The size in bytes of the underlying memory buffer
 * @ops: The operations structure.
 */
struct i915_refct_sgt {
	struct kref kref;
	struct sg_table table;
	size_t size;
	const struct i915_refct_sgt_ops *ops;
};
/**
 * i915_refct_sgt_put - Put a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to put. May be NULL, in which case
 * this is a no-op.
 */
static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
{
	if (rsgt)
		kref_put(&rsgt->kref, rsgt->ops->release);
}
/** * i915_refct_sgt_get - Get a refcounted sg-table * @rsgt: the struct i915_refct_sgt to get.
*/ staticinlinestruct i915_refct_sgt *
i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
{
kref_get(&rsgt->kref); return rsgt;
}
/**
 * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom
 * operations structure
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: Size in bytes of the underlying memory buffer.
 * @ops: A customized operations structure in case the refcounted sg-list
 * is embedded into another structure.
 */
static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
					 size_t size,
					 const struct i915_refct_sgt_ops *ops)
{
	kref_init(&rsgt->kref);
	/* NULL sgl marks "no table attached yet"; see struct docs. */
	rsgt->table.sgl = NULL;
	rsgt->size = size;
	rsgt->ops = ops;
}
/*
 * NOTE(review): The German text below is a website disclaimer that was
 * pulled in as an extraction artifact; it is not part of this header.
 * It is preserved here inside a comment so the file remains valid C.
 * Translation: "The information on this website was carefully compiled
 * to the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the syntax colouring and the measurement are still experimental."
 *
 * Original text:
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */