/* * Copyright (C) 2008 Ben Skeggs. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *
*/
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50) return 0;
if (nvbo->no_share && uvmm &&
drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv) return -EPERM;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); if (ret) return ret;
ret = pm_runtime_get_sync(dev); if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev); goto out;
}
/* only create a VMA on binding */ if (!nouveau_cli_uvmm(cli))
ret = nouveau_vma_new(nvbo, vmm, &vma); else
ret = 0;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out:
ttm_bo_unreserve(&nvbo->bo); return ret;
}
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size); if (ret) {
drm_gem_object_release(&nvbo->bo.base);
kfree(nvbo); return ret;
}
if (resv)
dma_resv_lock(resv, NULL);
ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
if (resv)
dma_resv_unlock(resv);
if (ret) return ret;
/* we restrict allowed domains on nv50+ to only the types * that were requested at creation time. not possibly on * earlier chips without busting the ABI.
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART; if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
if (nvbo->no_share) {
nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
drm_gem_object_get(nvbo->r_obj);
}
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	/* If uvmm wasn't initialized until now disable it completely to prevent
	 * userspace from mixing up UAPIs.
	 */
	nouveau_cli_disable_uvmm_noinit(cli);

	/* Allocate the backing buffer object per the userspace request. */
	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	/* Publish a GEM handle for the new object, then fill in the info
	 * struct returned to userspace; tear the handle down again if that
	 * last step fails.
	 */
	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (!ret) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&nvbo->bo.base);
	return ret;
}
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); if (unlikely(ret)) { if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validate_init\n"); return ret;
}
ret = validate_list(chan, &op->list, pbbo); if (unlikely(ret < 0)) { if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
validate_fini(op, chan, NULL, NULL); return ret;
} elseif (ret > 0) {
*apply_relocs = true;
}
return 0;
}
staticint
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, struct drm_nouveau_gem_pushbuf *req, struct drm_nouveau_gem_pushbuf_reloc *reloc, struct drm_nouveau_gem_pushbuf_bo *bo)
{ int ret = 0; unsigned i;
for (i = 0; i < req->nr_relocs; i++) { struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; struct drm_nouveau_gem_pushbuf_bo *b; struct nouveau_bo *nvbo;
uint32_t data; long lret;
if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL; break;
}
b = &bo[r->bo_index]; if (b->presumed.valid) continue;
if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL; break;
}
nvbo = (void *)(unsignedlong)bo[r->reloc_bo_index].user_priv;
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.base.size)) {
NV_PRINTK(err, cli, "reloc outside of bo\n");
ret = -EINVAL; break;
}
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
&nvbo->kmap); if (ret) {
NV_PRINTK(err, cli, "failed kmap for reloc\n"); break;
}
nvbo->validate_mapped = true;
}
if (r->flags & NOUVEAU_GEM_RELOC_LOW)
data = b->presumed.offset + r->data; else if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
data = (b->presumed.offset + r->data) >> 32; else
data = r->data;
if (r->flags & NOUVEAU_GEM_RELOC_OR) { if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
data |= r->tor; else
data |= r->vor;
}
lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
DMA_RESV_USAGE_BOOKKEEP, false, 15 * HZ); if (!lret)
ret = -EBUSY; elseif (lret > 0)
ret = 0; else
ret = lret;
push = u_memcpya(req->push, req->nr_push, sizeof(*push)); if (IS_ERR(push)) return nouveau_abi16_put(abi16, PTR_ERR(push));
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); if (IS_ERR(bo)) {
u_free(push); return nouveau_abi16_put(abi16, PTR_ERR(bo));
}
/* Ensure all push buffers are on validate list */ for (i = 0; i < req->nr_push; i++) { if (push[i].bo_index >= req->nr_buffers) {
NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
ret = -EINVAL; goto out_prevalid;
}
}
/* Validate buffer list */
revalidate:
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
req->nr_buffers, &op, &do_reloc); if (ret) { if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validate: %d\n", ret); goto out_prevalid;
}
/* Apply any relocations that are required */ if (do_reloc) { if (!reloc) {
validate_fini(&op, chan, NULL, bo);
reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc); goto out_prevalid;
}
goto revalidate;
}
ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo); if (ret) {
NV_PRINTK(err, cli, "reloc apply: %d\n", ret); goto out;
}
}
if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
ret = nvif_chan_gpfifo_wait(&chan->chan, req->nr_push + 1, 16); if (ret) {
NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret); goto out;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.