// SPDX-License-Identifier: GPL-2.0-only
/*
 * psb GEM interface
 *
 * Copyright (c) 2011, Intel Corporation.
 *
 * Authors: Alan Cox
 *
 * TODO:
 * - we need to work out if the MMU is relevant (eg for
 *   accelerated operations on a GEM object)
 */
/**
 * psb_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	u32 handle;
	int ret;
	/*
	 * NOTE(review): the body of this function is missing from this chunk —
	 * after these local declarations the text jumps straight into the
	 * kernel-doc of the next function, and this function is never closed.
	 * Presumably the pitch/size computation, object allocation, and
	 * drm_gem_handle_create() call were lost in extraction; restore them
	 * from the upstream driver source — this file cannot compile as-is.
	 */
/** * psb_gem_fault - pagefault handler for GEM objects * @vmf: fault detail * * Invoked when a fault occurs on an mmap of a GEM managed area. GEM * does most of the work for us including the actual map/unmap calls * but we need to do the actual page work. * * This code eventually needs to handle faulting objects in and out * of the GTT and repacking it when we run out of space. We can put * that off for now and for our simple uses * * The VMA was set up by GEM. In doing so it also ensured that the * vma->vm_private_data points to the GEM object that is backing this * mapping.
*/ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{ struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj; struct psb_gem_object *pobj; int err;
vm_fault_t ret; unsignedlong pfn;
pgoff_t page_offset; struct drm_device *dev; struct drm_psb_private *dev_priv;
/* Make sure we don't parallel update on a fault, nor move or remove
something from beneath our feet */
mutex_lock(&dev_priv->mmap_mutex);
/* For now the mmap pins the object and it stays pinned. As things
stand that will do us no harm */ if (pobj->mmapping == 0) {
err = psb_gem_pin(pobj); if (err < 0) {
dev_err(dev->dev, "gma500: pin failed: %d\n", err);
ret = vmf_error(err); goto fail;
}
pobj->mmapping = 1;
}
/* Page relative to the VMA start - we must calculate this ourselves
because vmf->pgoff is the fake GEM offset */
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/* CPU view of the page, don't go via the GART for CPU writes */ if (pobj->stolen)
pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT; else
pfn = page_to_pfn(pobj->pages[page_offset]);
ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
mutex_unlock(&dev_priv->mmap_mutex);
return ret;
}
/*
 * Memory management
 */
/* Insert vram stolen pages into the GTT. */
/*
 * NOTE(review): this block is a corrupted extraction and does not compile as
 * shown:
 *  - "staticvoid" and "unsignedint" are fused keywords ("static void",
 *    "unsigned int");
 *  - the loop below reads r, pobj, restored, size and total, none of which
 *    are declared here, while pfn_base, i, num_pages and pte are declared but
 *    never used — the declarations of one function appear to have been fused
 *    with the resource-walk loop of another (presumably a GTT-restore helper;
 *    confirm against the upstream driver source);
 *  - the function's closing brace is missing.
 * Restore both functions from upstream before this file can build.
 */
staticvoid psb_gem_mm_populate_stolen(struct drm_psb_private *pdev)
{
	struct drm_device *dev = &pdev->dev;	/* drm_psb_private embeds the DRM device as .dev */
	unsignedint pfn_base;
	unsignedint i, num_pages;
	uint32_t pte;

	/* Walk the sibling chain of GTT resources, re-inserting the pages of
	   every object that still has backing pages. */
	while (r) {
		/*
		 * TODO: GTT restoration needs a refactoring, so that we don't have to touch
		 * struct psb_gem_object here. The type represents a GEM object and is
		 * not related to the GTT itself.
		 */
		pobj = container_of(r, struct psb_gem_object, resource);
		if (pobj->pages) {
			psb_gtt_insert_pages(pdev, &pobj->resource, pobj->pages);
			size += resource_size(&pobj->resource);	/* bytes restored */
			++restored;
		}
		r = r->sibling;	/* next GTT range */
		++total;
	}
/*
 * NOTE(review): the following trailing text is German website boilerplate that
 * was accidentally appended to this source file during extraction — it is not
 * part of the driver. English translation, preserved for reference:
 * "The information on this website has been carefully compiled to the best of
 * our knowledge. However, neither completeness, correctness, nor quality of
 * the information provided is guaranteed. Note: the colored syntax
 * highlighting and the measurement are still experimental."
 */