/**
 * DOC: Local Memory Translation Table
 *
 * The Local Memory Translation Table (LMTT) provides additional abstraction
 * when a Virtual Function (VF) is accessing device Local Memory (VRAM).
 *
 * The Root LMTT Page Directory contains one entry for each VF. Entries are
 * indexed by the function number (1-based; index 0 is unused).
 *
 * See `Two-Level LMTT Structure`_ and `Multi-Level LMTT Structure`_.
 */
/**
 * xe_lmtt_init - LMTT software initialization.
 * @lmtt: the &xe_lmtt to initialize
 *
 * The LMTT initialization requires two steps.
 *
 * The xe_lmtt_init() checks if LMTT is required on current device and selects
 * and initialize proper variant of the LMTT Root Directory. Currently supported
 * variants are `Two-Level LMTT Structure`_ and `Multi-Level LMTT Structure`_.
 *
 * In next step xe_lmtt_init_hw() will register this directory on the hardware.
 *
 * Notes:
 * The LMTT allocations are managed and will be implicitly released on driver unload.
 * This function shall be called only once and only when running as a PF driver.
 * Any LMTT initialization failure should block VFs enabling.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_lmtt_init(struct xe_lmtt *lmtt)
{
	struct xe_device *xe = lmtt_to_xe(lmtt);
	int err;
	/*
	 * NOTE(review): the remainder of this function body is missing from
	 * this chunk (the source appears truncated here) — restore it from
	 * the original file before building.
	 */
/**
 * xe_lmtt_init_hw - Perform LMTT hardware initialization.
 * @lmtt: the &xe_lmtt to initialize
 *
 * Second step of the LMTT initialization: registers the LMTT Root Directory
 * prepared in xe_lmtt_init() with the hardware.
 *
 * This function shall be called after every hardware reset.
 * This function shall be called only when running as a PF driver.
 */
void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
{
	/* Nothing to register unless a Root Directory was allocated. */
	if (lmtt->pd)
		lmtt_setup_dir_ptr(lmtt);
}
/*
 * lmtt_invalidate_hw - Request a full TLB invalidation on every GT of this
 * tile and wait for all of them to complete.
 *
 * Returns 0 on success or the first error reported by any of the
 * per-GT invalidation requests.
 */
static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
	struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE];
	struct xe_gt_tlb_invalidation_fence *fence = fences;
	struct xe_tile *tile = lmtt_to_tile(lmtt);
	struct xe_gt *gt;
	int result = 0;
	int err;
	u8 id;

	for_each_gt_on_tile(gt, tile, id) {
		xe_gt_tlb_invalidation_fence_init(gt, fence, true);
		err = xe_gt_tlb_invalidation_all(gt, fence);
		/* keep the first error, but still issue requests to other GTs */
		result = result ?: err;
		fence++;
	}

	/*
	 * It is fine to wait for all fences, even for those which covers the
	 * invalidation request that failed, as such fence should be already
	 * marked as signaled.
	 */
	fence = fences;
	for_each_gt_on_tile(gt, tile, id)
		xe_gt_tlb_invalidation_fence_wait(fence++);

	return result;
}
/**
 * xe_lmtt_invalidate_hw - Invalidate LMTT hardware.
 * @lmtt: the &xe_lmtt to invalidate
 *
 * Send requests to all GuCs on this tile to invalidate all TLBs.
 *
 * This function should be called only when running as a PF driver.
 */
void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
	struct xe_device *xe = lmtt_to_xe(lmtt);
	int err;
	/*
	 * NOTE(review): the remainder of this function body is missing from
	 * this chunk (the source appears truncated here) — presumably it calls
	 * lmtt_invalidate_hw() and reports @err; restore from the original
	 * file before building.
	 */
/**
 * xe_lmtt_prepare_pages - Create VF's LMTT Page Tables.
 * @lmtt: the &xe_lmtt to update
 * @vfid: the VF identifier (1-based)
 * @range: top range of LMEM offset to be supported
 *
 * This function creates empty LMTT page tables for given VF to support
 * up to maximum #range LMEM offset. The LMTT page tables created by this
 * function must be released using xe_lmtt_drop_pages() function.
 *
 * Notes:
 * This function shall be called only after successful LMTT initialization.
 * See xe_lmtt_init().
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range)
{
	lmtt_assert(lmtt, lmtt->pd);
	lmtt_assert(lmtt, vfid);

	return lmtt_alloc_range(lmtt, vfid, 0, range);
}
/**
 * xe_lmtt_populate_pages - Update VF's LMTT Page Table Entries.
 * @lmtt: the &xe_lmtt to update
 * @vfid: the VF identifier (1-based)
 * @bo: the buffer object with LMEM allocation to be mapped
 * @offset: the offset at which #bo should be mapped
 *
 * This function updates VF's LMTT entries to use given buffer object as a backstore.
 *
 * Notes:
 * This function shall be called only after successful preparation of the
 * VF's LMTT Page Tables. See xe_lmtt_prepare().
 *
 * Return: 0 on success or a negative error code on failure.
 */
/*
 * NOTE(review): "unsignedint" below is a mangled "unsigned int" — this
 * will not compile as-is; fix the fused token before building.
 */
int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsignedint vfid, struct xe_bo *bo, u64 offset)
{
	lmtt_assert(lmtt, lmtt->pd);
	lmtt_assert(lmtt, vfid);
	/*
	 * NOTE(review): the rest of this function (the statement that actually
	 * populates the entries and returns, plus the closing brace) is
	 * missing from this chunk — restore from the original file.
	 */
/** * xe_lmtt_drop_pages - Remove VF's LMTT Pages. * @lmtt: the &xe_lmtt to update * @vfid: the VF identifier (1-based) * * This function removes all LMTT Page Tables prepared by xe_lmtt_prepare_pages(). * * This function shall be called only after successful LMTT initialization. * See xe_lmtt_init().
*/ void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsignedint vfid)
{
lmtt_assert(lmtt, lmtt->pd);
lmtt_assert(lmtt, vfid);
lmtt_drop_pages(lmtt, vfid);
}
/**
 * xe_lmtt_estimate_pt_size - Estimate size of LMTT PT allocations.
 * @lmtt: the &xe_lmtt
 * @size: the size of the LMEM to be mapped over LMTT (including any offset)
 *
 * This function shall be called only by PF.
 *
 * Return: size of the PT allocation(s) needed to support given LMEM size.
 */
u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size)
{
	unsigned int level = 0;
	u64 pt_size;
	/*
	 * NOTE(review): the remainder of this function body is missing from
	 * this chunk (the source is truncated here and followed by unrelated
	 * text) — restore the level-walking computation from the original
	 * file before building.
	 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.