staticinlinestruct xe_bo *xe_bo_get(struct xe_bo *bo)
{ if (bo)
drm_gem_object_get(&bo->ttm.base);
return bo;
}
void xe_bo_put(struct xe_bo *bo);
/**
 * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an
 * xe bo
 * @bo: The bo for which we want to obtain a refcount.
 *
 * There is a short window between where the bo's GEM object refcount reaches
 * zero and where we put the final ttm_bo reference. Code in the eviction- and
 * shrinking path should therefore attempt to grab a gem object reference before
 * trying to use members outside of the base class ttm object. This function is
 * intended for that purpose. On successful return, this function must be paired
 * with an xe_bo_put().
 *
 * Return: @bo on success, NULL on failure.
 */
static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo)
{
	if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
		return NULL;

	return bo;
}
/* Detach @bo from any TTM LRU bulk move it belongs to. NULL-safe. */
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
	if (bo)
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
}
/* Assert that @bo's dma-resv lock is held. NULL-safe. */
static inline void xe_bo_assert_held(struct xe_bo *bo)
{
	if (bo)
		dma_resv_assert_held(bo->ttm.base.resv);
}
int xe_bo_lock(struct xe_bo *bo, bool intr);
void xe_bo_unlock(struct xe_bo *bo);
/*
 * Unlock @bo's dma-resv unless it is shared with the bo's vm, in which
 * case the vm's resv lock is (and must stay) held by the caller instead.
 * NULL-safe.
 */
static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
{
	if (bo) {
		/* A vm-owned bo must be using the vm's shared resv object. */
		XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
		if (bo->vm)
			xe_vm_assert_held(bo->vm);
		else
			dma_resv_unlock(bo->ttm.base.resv);
	}
}
int xe_bo_pin_external(struct xe_bo *bo, bool in_place); int xe_bo_pin(struct xe_bo *bo); void xe_bo_unpin_external(struct xe_bo *bo); void xe_bo_unpin(struct xe_bo *bo); int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type); int xe_bo_evict(struct xe_bo *bo);
int xe_bo_evict_pinned(struct xe_bo *bo); int xe_bo_notifier_prepare_pinned(struct xe_bo *bo); int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo); int xe_bo_restore_pinned(struct xe_bo *bo);
/** * xe_bo_put_deferred() - Put a buffer object with delayed final freeing * @bo: The bo to put. * @deferred: List to which to add the buffer object if we cannot put, or * NULL if the function is to put unconditionally. * * Since the final freeing of an object includes both sleeping and (!) * memory allocation in the dma_resv individualization, it's not ok * to put an object from atomic context nor from within a held lock * tainted by reclaim. In such situations we want to defer the final * freeing until we've exited the restricting context, or in the worst * case to a workqueue. * This function either puts the object if possible without the refcount * reaching zero, or adds it to the @deferred list if that was not possible. * The caller needs to follow up with a call to xe_bo_put_commit() to actually * put the bo iff this function returns true. It's safe to always * follow up with a call to xe_bo_put_commit(). * TODO: It's TTM that is the villain here. Perhaps TTM should add an * interface like this. * * Return: true if @bo was the first object put on the @freed list, * false otherwise.
*/ staticinlinebool
xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
{ if (!deferred) {
xe_bo_put(bo); returnfalse;
}
if (!kref_put(&bo->ttm.base.refcount, __xe_bo_release_dummy)) returnfalse;
/**
 * xe_bo_put_async() - Put BO async
 * @bo: The bo to put.
 *
 * Put BO async, the final put is deferred to a worker to exit an IRQ context.
 */
static inline void
xe_bo_put_async(struct xe_bo *bo)
{
	struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;

	/* Only the put that drops the last reference schedules the worker. */
	if (xe_bo_put_deferred(bo, &bo_device->async_list))
		schedule_work(&bo_device->async_free);
}
void xe_bo_dev_init(struct xe_bo_dev *bo_device);
void xe_bo_dev_fini(struct xe_bo_dev *bo_device);
struct sg_table *xe_bo_sg(struct xe_bo *bo);
/*
 * xe_sg_segment_size() - Provides upper limit for sg segment size.
 * @dev: device pointer
 *
 * Returns the maximum segment size for the 'struct scatterlist'
 * elements.
 */
static inline unsigned int xe_sg_segment_size(struct device *dev)
{
	struct scatterlist __maybe_unused sg;
	/* Largest value representable in sg.length (an unsigned int). */
	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;

	max = min_t(size_t, max, dma_max_mapping_size(dev));

	/*
	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
	 * cross dma segment boundary. It does so by padding some sg elements.
	 * This can cause overflow, ending up with sg->length being set to 0.
	 * Avoid this by ensuring maximum segment size is half of 'max'
	 * rounded down to PAGE_SIZE.
	 */
	return round_down(max / 2, PAGE_SIZE);
}
/**
 * struct xe_bo_shrink_flags - flags governing the shrink behaviour.
 * @purge: Only purging allowed. Don't shrink if bo not purgeable.
 * @writeback: Attempt to immediately move content to swap.
 */
struct xe_bo_shrink_flags {
	u32 purge : 1;
	u32 writeback : 1;
};
/** * xe_bo_is_mem_type - Whether the bo currently resides in the given * TTM memory type * @bo: The bo to check. * @mem_type: The TTM memory type. * * Return: true iff the bo resides in @mem_type, false otherwise.
*/ staticinlinebool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
{
xe_bo_assert_held(bo); return bo->ttm.resource->mem_type == mem_type;
} #endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.