// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
staticbool separate_gpu_kms;
MODULE_PARM_DESC(separate_gpu_drm, "Use separate DRM device for the GPU (0=single DRM device for both GPU and display (default), 1=two DRM devices)");
module_param(separate_gpu_kms, bool, 0400);
/* * Shutdown the hw if we're far enough along where things might be on. * If we run this too early, we'll end up panicking in any variety of * places. Since we don't register the drm device until late in * msm_drm_init, drm_dev->registered is used as an indicator that the * shutdown will be successful.
*/ if (ddev->registered) {
drm_dev_unregister(ddev); if (priv->kms)
msm_drm_kms_unregister(dev);
}
if (priv->kms_init) {
ret = drmm_mode_config_init(ddev); if (ret) goto err_put_dev;
}
dma_set_max_seg_size(dev, UINT_MAX);
/* Bind all our sub-components: */ if (gpu_ops)
ret = gpu_ops->bind(dev, dev, NULL); else
ret = component_bind_all(dev, ddev); if (ret) goto err_put_dev;
ret = msm_gem_shrinker_init(ddev); if (ret) goto err_msm_uninit;
if (priv->kms_init) {
ret = msm_drm_kms_init(dev, drv); if (ret) goto err_msm_uninit;
}
ret = drm_dev_register(ddev, 0); if (ret) goto err_msm_uninit;
ret = msm_debugfs_late_init(ddev); if (ret) goto err_msm_uninit;
/**
 * msm_context_vm - lazily create the context's VM
 *
 * @dev: the drm device
 * @ctx: the context
 *
 * The VM is lazily created, so that userspace has a chance to opt-in to having
 * a userspace managed VM before the VM is created.
 *
 * Note that this does not return a reference to the VM.  Once the VM is created,
 * it exists for the lifetime of the context.
 */
struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	/* Once ctx->vm is created it is valid for the lifetime of the context: */
	if (ctx->vm)
		return ctx->vm;

	mutex_lock(&init_lock);
	/* Re-check under the lock: another thread may have created it. */
	if (!ctx->vm) {
		ctx->vm = msm_gpu_create_private_vm(
			priv->gpu, current, !ctx->userspace_managed_vm);
	}
	/*
	 * NOTE(review): original text was truncated here; restored the
	 * unlock + return so the init_lock is not leaked.
	 */
	mutex_unlock(&init_lock);

	return ctx->vm;
}
staticint msm_open(struct drm_device *dev, struct drm_file *file)
{ /* For now, load gpu on open.. to avoid the requirement of having * firmware in the initrd.
*/
load_gpu(dev);
/* for now, we just have 3d pipe.. eventually this would need to * be more clever to dispatch to appropriate gpu module:
*/ if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0)) return -EINVAL;
/* * Uncached CPU mappings are deprecated, as of: * * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)") * * So promote them to WC.
*/ if (flags & MSM_BO_UNCACHED) {
flags &= ~MSM_BO_CACHED;
flags |= MSM_BO_WC;
}
if (should_fail(&fail_gem_alloc, args->size)) return -ENOMEM;
if (msm_context_is_vmbind(ctx)) return UERR(EINVAL, dev, "VM_BIND is enabled");
if (should_fail(&fail_gem_iova, obj->size)) return -ENOMEM;
/* * Don't pin the memory here - just get an address so that userspace can * be productive
*/ return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
}
if (msm_context_is_vmbind(ctx)) return UERR(EINVAL, dev, "VM_BIND is enabled");
/* Only supported if per-process address space is supported: */ if (priv->gpu->vm == vm) return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size)) return -ENOMEM;
/* Impose a moderate upper bound on metadata size: */ if (metadata_size > 128) { return -EOVERFLOW;
}
/* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
buf = memdup_user(metadata, metadata_size); if (IS_ERR(buf)) return PTR_ERR(buf);
ret = msm_gem_lock_interruptible(obj); if (ret) goto out;
new_metadata =
krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL); if (!new_metadata) {
ret = -ENOMEM; goto out;
}
if (!metadata) { /* * Querying the size is inherently racey, but * EXT_external_objects expects the app to confirm * via device and driver UUIDs that the exporter and * importer versions match. All we can do from the * kernel side is check the length under obj lock * when userspace tries to retrieve the metadata
*/
*metadata_size = msm_obj->metadata_size; return 0;
}
ret = msm_gem_lock_interruptible(obj); if (ret) return ret;
/* Avoid copy_to_user() under gem obj lock: */
len = msm_obj->metadata_size;
buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);
msm_gem_unlock(obj);
if (*metadata_size < len) {
ret = -ETOOSMALL;
} elseif (copy_to_user(metadata, buf, len)) {
ret = -EFAULT;
} else {
*metadata_size = len;
}
kfree(buf);
return 0;
}
staticint msm_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
{ struct drm_msm_gem_info *args = data; struct drm_gem_object *obj; struct msm_gem_object *msm_obj; int i, ret = 0;
if (args->pad) return -EINVAL;
switch (args->info) { case MSM_INFO_GET_OFFSET: case MSM_INFO_GET_IOVA: case MSM_INFO_SET_IOVA: case MSM_INFO_GET_FLAGS: /* value returned as immediate, not pointer, so len==0: */ if (args->len) return -EINVAL; break; case MSM_INFO_SET_NAME: case MSM_INFO_GET_NAME: case MSM_INFO_SET_METADATA: case MSM_INFO_GET_METADATA: break; default: return -EINVAL;
}
obj = drm_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT;
msm_obj = to_msm_bo(obj);
switch (args->info) { case MSM_INFO_GET_OFFSET:
args->value = msm_gem_mmap_offset(obj); break; case MSM_INFO_GET_IOVA:
ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value); break; case MSM_INFO_SET_IOVA:
ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value); break; case MSM_INFO_GET_FLAGS: if (drm_gem_is_imported(obj)) {
ret = -EINVAL; break;
} /* Hide internal kernel-only flags: */
args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
ret = 0; break; case MSM_INFO_SET_NAME: /* length check should leave room for terminating null: */ if (args->len >= sizeof(msm_obj->name)) {
ret = -EINVAL; break;
} if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
args->len)) {
msm_obj->name[0] = '\0';
ret = -EFAULT; break;
}
msm_obj->name[args->len] = '\0'; for (i = 0; i < args->len; i++) { if (!isprint(msm_obj->name[i])) {
msm_obj->name[i] = '\0'; break;
}
} break; case MSM_INFO_GET_NAME: if (args->value && (args->len < strlen(msm_obj->name))) {
ret = -ETOOSMALL; break;
}
args->len = strlen(msm_obj->name); if (args->value) { if (copy_to_user(u64_to_user_ptr(args->value),
msm_obj->name, args->len))
ret = -EFAULT;
} break; case MSM_INFO_SET_METADATA:
ret = msm_ioctl_gem_info_set_metadata(
obj, u64_to_user_ptr(args->value), args->len); break; case MSM_INFO_GET_METADATA:
ret = msm_ioctl_gem_info_get_metadata(
obj, u64_to_user_ptr(args->value), &args->len); break;
}
if (fence_after(fence_id, queue->last_fence)) {
DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
fence_id, queue->last_fence); return -EINVAL;
}
/* * Map submitqueue scoped "seqno" (which is actually an idr key) * back to underlying dma-fence * * The fence is removed from the fence_idr when the submit is * retired, so if the fence is not found it means there is nothing * to wait for
*/
spin_lock(&queue->idr_lock);
fence = idr_find(&queue->fence_idr, fence_id); if (fence)
fence = dma_fence_get_rcu(fence);
spin_unlock(&queue->idr_lock);
if (!fence) return 0;
if (flags & MSM_WAIT_FENCE_BOOST)
dma_fence_set_deadline(fence, ktime_get());
ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout)); if (ret == 0) {
ret = -ETIMEDOUT;
} elseif (ret != -ERESTARTSYS) {
ret = 0;
}
/* * Identify what components need to be added by parsing what remote-endpoints * our MDP output ports are connected to. In the case of LVDS on MDP4, there * is no external component that we need to add since LVDS is within MDP4 * itself.
*/ staticint add_mdp_components(struct device *master_dev, struct component_match **matchptr)
{ struct device_node *np = master_dev->of_node; struct device_node *ep_node;
ret = of_graph_parse_endpoint(ep_node, &ep); if (ret) {
DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
of_node_put(ep_node); return ret;
}
/* * The LCDC/LVDS port on MDP4 is a speacial case where the * remote-endpoint isn't a component that we need to add
*/ if (of_device_is_compatible(np, "qcom,mdp4") &&
ep.port == 0) continue;
/* * It's okay if some of the ports don't have a remote endpoint * specified. It just means that the port isn't connected to * any external interface.
*/
intf = of_graph_get_remote_port_parent(ep_node); if (!intf) continue;
if (of_device_is_available(intf))
drm_of_component_match_add(master_dev, matchptr,
component_compare_of, intf);
of_node_put(intf);
}
return 0;
}
#if !IS_REACHABLE(CONFIG_DRM_MSM_MDP5) || !IS_REACHABLE(CONFIG_DRM_MSM_DPU) bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
{ /* If just a single driver is enabled, use it no matter what */ returntrue;
} #else
staticbool prefer_mdp5 = true;
MODULE_PARM_DESC(prefer_mdp5, "Select whether MDP5 or DPU driver should be preferred");
module_param(prefer_mdp5, bool, 0444);
/* list all platforms supported by both mdp5 and dpu drivers */ staticconstchar *const msm_mdp5_dpu_migration[] = { "qcom,msm8917-mdp5", "qcom,msm8937-mdp5", "qcom,msm8953-mdp5", "qcom,msm8996-mdp5", "qcom,sdm630-mdp5", "qcom,sdm660-mdp5",
NULL,
};
bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
{ /* If it is not an MDP5 device, do not try MDP5 driver */ if (!of_device_is_compatible(dev->of_node, "qcom,mdp5")) return dpu_driver;
/* If it is not in the migration list, use MDP5 */ if (!of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migration)) return !dpu_driver;
/* * We don't know what's the best binding to link the gpu with the drm device. * Fow now, we just hunt for all the possible gpus that we support, and add them * as components.
*/ staticconststruct of_device_id msm_gpu_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
{ .compatible = "amd,imageon" },
{ .compatible = "qcom,kgsl-3d0" },
{ },
};
/* Add mdp components if we have KMS. */ if (kms_init) {
ret = add_mdp_components(master_dev, &match); if (ret) return ret;
}
if (!msm_gpu_no_components()) {
ret = add_gpu_components(master_dev, &match); if (ret) return ret;
}
/* on all devices that I am aware of, iommu's which can map * any address the cpu can see are used:
*/
ret = dma_set_mask_and_coherent(master_dev, ~0); if (ret) return ret;
ret = component_master_add_with_match(master_dev, &msm_drm_ops, match); if (ret) return ret;
return 0;
}
int msm_gpu_probe(struct platform_device *pdev, conststruct component_ops *ops)
{ struct msm_drm_private *priv; int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM;
platform_set_drvdata(pdev, priv);
/* on all devices that I am aware of, iommu's which can map * any address the cpu can see are used:
*/
ret = dma_set_mask_and_coherent(&pdev->dev, ~0); if (ret) return ret;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.