// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
/*
 * Different methods for tracking dirty:
 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
 * accesses in the VM mkwrite() callback
 */
enum vmw_bo_dirty_method {
VMW_BO_DIRTY_PAGETABLE,
VMW_BO_DIRTY_MKWRITE,
};
/*
 * A scan that finds no dirtied pages triggers a transition to the
 * _MKWRITE method; similarly, a certain percentage of dirty pages
 * triggers a transition to the _PAGETABLE method. How many such
 * triggers should we wait for before changing method?
 */
#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2

/* Percentage of dirty pages that triggers a transition to the _PAGETABLE method */
#define VMW_DIRTY_PERCENTAGE 10
/**
 * struct vmw_bo_dirty - Dirty information for buffer objects
 * @start: First currently dirty bit
 * @end: Last currently dirty bit + 1
 * @method: The currently used dirty method
 * @change_count: Number of consecutive method change triggers
 * @ref_count: Reference count for this structure
 * @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
 * @bitmap: A bitmap where each bit represents a page. A set bit means a
 * dirty page.
 */
struct vmw_bo_dirty {
	unsigned long start;
	unsigned long end;
	enum vmw_bo_dirty_method method;
	unsigned int change_count;
	unsigned int ref_count;
	unsigned long bitmap_size;
	unsigned long bitmap[];
};
/**
 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
 * @vbo: The buffer object to scan
 *
 * Scans the pagetable for dirty bits. Clears those bits and modifies the
 * dirty structure with the results. This function may change the
 * dirty-tracking method.
 */
static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
	pgoff_t num_marked;
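	/*
	 * Scan body: a minimal sketch assuming the mapping_dirty_helpers
	 * API (clean_record_shared_mapping_range(), wp_shared_mapping_range()).
	 */
	num_marked = clean_record_shared_mapping_range(mapping,
						       offset, dirty->bitmap_size,
						       offset, &dirty->bitmap[0],
						       &dirty->start, &dirty->end);
	if (num_marked == 0)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	/* Enough consecutive empty scans: switch to write-protect + mkwrite(). */
	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		dirty->change_count = 0;
		dirty->method = VMW_BO_DIRTY_MKWRITE;
		wp_shared_mapping_range(mapping, offset, dirty->bitmap_size);
		clean_record_shared_mapping_range(mapping,
						  offset, dirty->bitmap_size,
						  offset, &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}
}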
/**
 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
 * @vbo: The buffer object to scan
 *
 * Write-protect pages written to so that consecutive write accesses will
 * trigger a call to mkwrite.
 *
 * This function may change the dirty-tracking method.
 */
static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
	pgoff_t num_marked;
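	/*
	 * A minimal sketch of the mkwrite-side scan: write-protect the
	 * currently dirty range and count how much of the object it covers.
	 */
	if (dirty->end <= dirty->start)
		return;

	num_marked = wp_shared_mapping_range(mapping, dirty->start + offset,
					     dirty->end - dirty->start);

	if (100UL * num_marked / dirty->bitmap_size > VMW_DIRTY_PERCENTAGE)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	/* Consistently high dirty ratio: pagetable scanning is cheaper. */
	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		pgoff_t start = 0;
		pgoff_t end = dirty->bitmap_size;

		dirty->method = VMW_BO_DIRTY_PAGETABLE;
		clean_record_shared_mapping_range(mapping, offset, end, offset,
						  &dirty->bitmap[0], &start,
						  &end);
		dirty->change_count = 0;
	}
}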
/**
 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
 * tracking structure
 * @vbo: The buffer object to scan
 *
 * This function may change the dirty tracking method.
 */
void vmw_bo_dirty_scan(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
		vmw_bo_dirty_scan_pagetable(vbo);
	else
		vmw_bo_dirty_scan_mkwrite(vbo);
}
/**
 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
 * an unmap_mapping_range operation.
 * @vbo: The buffer object.
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * If we're using the _PAGETABLE scan method, we may leak dirty pages
 * when calling unmap_mapping_range(). This function makes sure we pick
 * up all dirty pages.
 */
static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo,
				   pgoff_t start, pgoff_t end)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;

	if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
		return;
wp_shared_mapping_range(mapping, start + offset, end - start);
clean_record_shared_mapping_range(mapping, start + offset,
end - start, offset,
&dirty->bitmap[0], &dirty->start,
&dirty->end);
}
/**
 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
 * @vbo: The buffer object.
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
 */
void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
			pgoff_t start, pgoff_t end)
{
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
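	/*
	 * Pick up any remaining dirty bits first, then zap the ptes; note
	 * that unmap_shared_mapping_range() works in byte offsets.
	 */
	vmw_bo_dirty_pre_unmap(vbo, start, end);
	unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
				   (loff_t) (end - start) << PAGE_SHIFT);
}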
/**
 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
 * @vbo: The buffer object
 *
 * This function registers a dirty-tracking user to a buffer object.
 * A user can be for example a resource or a vma in a special user-space
 * mapping.
 *
 * Return: Zero on success, -ENOMEM on memory allocation failure.
 */
int vmw_bo_dirty_add(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);
	size_t size;
	int ret;

	if (dirty) {
		dirty->ref_count++;
		return 0;
	}

	size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
	dirty = kvzalloc(size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}
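	/*
	 * Initialization sketched from the comments above: small objects
	 * default to pagetable scanning, larger ones to write-protect
	 * tracking via mkwrite().
	 */
	dirty->bitmap_size = num_pages;
	dirty->start = dirty->bitmap_size;
	dirty->end = 0;
	dirty->ref_count = 1;
	if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
		dirty->method = VMW_BO_DIRTY_PAGETABLE;
	} else {
		struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
		pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);

		dirty->method = VMW_BO_DIRTY_MKWRITE;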
/* Write-protect and then pick up already dirty bits */
wp_shared_mapping_range(mapping, offset, num_pages);
clean_record_shared_mapping_range(mapping, offset, num_pages,
offset,
&dirty->bitmap[0],
&dirty->start, &dirty->end);
}
vbo->dirty = dirty;
return 0;
out_no_dirty:
	return ret;
}
/**
 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
 * @vbo: The buffer object
 *
 * This function releases a dirty-tracking user from a buffer object.
 * If the reference count reaches zero, then the dirty-tracking object is
 * freed and the pointer to it cleared.
 */
void vmw_bo_dirty_release(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
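	/* Drop one user; free the tracking structure on the last release. */
	if (dirty && --dirty->ref_count == 0) {
		kvfree(dirty);
		vbo->dirty = NULL;
	}
}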
/**
 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will pick up all dirty ranges affecting the resource from
 * its backing mob, and call vmw_resource_dirty_update() once for each
 * range. The transferred ranges will be cleared from the backing mob's
 * dirty tracking.
 */
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
	struct vmw_bo *vbo = res->guest_memory_bo;
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t start, cur, end;
	unsigned long res_start = res->guest_memory_offset;
	unsigned long res_end = res->guest_memory_offset +
		res->guest_memory_size;

	/* The resource offsets are in bytes; the dirty bitmap works in pages. */
	WARN_ON_ONCE(res_start & ~PAGE_MASK);
	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	/* Walk the intersection of the resource range and the dirty range. */
	cur = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	while (cur < res_end) {
		unsigned long num;

		start = find_next_bit(&dirty->bitmap[0], res_end, cur);
		if (start >= res_end)
			break;
end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
cur = end + 1;
num = end - start;
bitmap_clear(&dirty->bitmap[0], start, num);
vmw_resource_dirty_update(res, start, end);
}
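}

/*
 * Fault handling below. The function prologue here is a minimal
 * reconstruction: it assumes the TTM fault helpers (ttm_bo_vm_reserve(),
 * ttm_bo_vm_fault_reserved()) and the vmwgfx to_vmw_bo() conversion.
 */
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
		vma->vm_private_data;
	vm_fault_t ret;
	unsigned long page_offset;
	unsigned int save_flags;
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);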
	/*
	 * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
	 * So make sure the TTM helpers are aware.
	 */
save_flags = vmf->flags;
vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
ret = ttm_bo_vm_reserve(bo, vmf);
	vmf->flags = save_flags;
	if (ret)
		return ret;

	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
	if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
}
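	/*
	 * Record the write in the dirty bitmap. A sketch of the mkwrite-side
	 * bookkeeping: mark the page and grow the tracked [start, end) range.
	 */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
	    !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
		struct vmw_bo_dirty *dirty = vbo->dirty;

		__set_bit(page_offset, &dirty->bitmap[0]);
		dirty->start = min(dirty->start, page_offset);
		dirty->end = max(dirty->end, page_offset + 1);
	}

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
		vma->vm_private_data;
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
	unsigned long page_offset;
	pgoff_t num_prefault;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
		TTM_BO_VM_NUM_PREFAULT;

	/*
	 * Bounds check sketched in; upstream additionally clamps
	 * num_prefault to the resources' clean regions at this point.
	 */
	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
	if (page_offset >= PFN_UP(bo->resource->size)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}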
	/*
	 * If we don't track dirty using the MKWRITE method, make sure
	 * the page protection is write-enabled so we don't get
	 * a lot of unnecessary write faults.
	 */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	else
		prot = vm_get_page_prot(vma->vm_flags);

	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;
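out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}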