/* Check if the ACE-Lite coherency protocol is actually supported by the GPU. * ACE protocol has never been supported for command stream frontend GPUs.
*/ if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
GPU_COHERENCY_PROT_BIT(ACE_LITE))) return 0;
drm_err(&ptdev->base, "Coherency not supported by the device"); return -ENOTSUPP;
}
/**
 * panthor_device_unplug() - Cleanly shut the GPU down before its resources go away.
 * @ptdev: Device to unplug.
 *
 * This function can be called from two different paths: the reset work
 * and the platform device remove callback. drm_dev_unplug() doesn't
 * deal with concurrent callers, so we have to protect drm_dev_unplug()
 * calls with our own lock, and bail out if the device is already
 * unplugged.
 */
void panthor_device_unplug(struct panthor_device *ptdev)
{
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us, release the lock and wait for the unplug
		 * operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point get rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released,
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	/* Now, try to cleanly shutdown the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);

	/* Report the unplug operation as done, so concurrent callers blocked
	 * on wait_for_completion(&ptdev->unplug.done) above can make progress.
	 * Without this the completion is never signaled and they deadlock.
	 */
	complete_all(&ptdev->unplug.done);
}
/**
 * panthor_device_reset_work() - Reset-work handler.
 * @work: Embedded &struct work_struct (ptdev->reset.work).
 *
 * Performs a soft reset of the GPU: quiesces the scheduler/FW/MMU,
 * soft-resets and re-powers the hardware, then restores the FW. If the
 * MCU fails to come back, the device is unplugged and marked unusable.
 *
 * NOTE: the original had "staticvoid" (missing space), which is a
 * compile error; fixed to "static void".
 */
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	/* If the device is entering suspend, we don't reset. A slow reset will
	 * be forced at resume time instead.
	 */
	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return;

	/* Bail out if the device was unplugged while the work was queued. */
	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);

	/* Clear the pending flag before unblocking the scheduler so a new
	 * reset request queued after this point isn't lost.
	 */
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
p = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!p) return -ENOMEM;
ptdev->pm.dummy_latest_flush = p;
dummy_page_virt = page_address(p);
ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
ptdev->pm.dummy_latest_flush); if (ret) return ret;
/* * Set the dummy page holding the latest flush to 1. This will cause the * flush to be avoided as we know it isn't necessary if the submission * happens while the dummy page is mapped. Zero cannot be used because * that means 'always flush'.
*/
*dummy_page_virt = 1;
INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0); if (!ptdev->reset.wq) return -ENOMEM;
ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL); if (ret) return ret;
ret = panthor_clk_init(ptdev); if (ret) return ret;
ret = panthor_devfreq_init(ptdev); if (ret) return ret;
ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
0, &res); if (IS_ERR(ptdev->iomem)) return PTR_ERR(ptdev->iomem);
ptdev->phys_addr = res->start;
ret = devm_pm_runtime_enable(ptdev->base.dev); if (ret) return ret;
ret = pm_runtime_resume_and_get(ptdev->base.dev); if (ret) return ret;
/* If PM is disabled, we need to call panthor_device_resume() manually. */ if (!IS_ENABLED(CONFIG_PM)) {
ret = panthor_device_resume(ptdev->base.dev); if (ret) return ret;
}
ret = panthor_gpu_init(ptdev); if (ret) goto err_rpm_put;
ret = panthor_gpu_coherency_init(ptdev); if (ret) goto err_unplug_gpu;
ret = panthor_mmu_init(ptdev); if (ret) goto err_unplug_gpu;
ret = panthor_fw_init(ptdev); if (ret) goto err_unplug_mmu;
ret = panthor_sched_init(ptdev); if (ret) goto err_unplug_fw;
ret = clk_prepare_enable(ptdev->clks.core); if (ret) goto err_set_suspended;
ret = clk_prepare_enable(ptdev->clks.stacks); if (ret) goto err_disable_core_clk;
ret = clk_prepare_enable(ptdev->clks.coregroup); if (ret) goto err_disable_stacks_clk;
panthor_devfreq_resume(ptdev);
if (panthor_device_is_initialized(ptdev) &&
drm_dev_enter(&ptdev->base, &cookie)) { /* If there was a reset pending at the time we suspended the * device, we force a slow reset.
*/ if (atomic_read(&ptdev->reset.pending)) {
ptdev->reset.fast = false;
atomic_set(&ptdev->reset.pending, 0);
}
ret = panthor_device_resume_hw_components(ptdev); if (ret && ptdev->reset.fast) {
drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
ptdev->reset.fast = false;
ret = panthor_device_resume_hw_components(ptdev);
}
if (!ret)
panthor_sched_resume(ptdev);
drm_dev_exit(cookie);
if (ret) goto err_suspend_devfreq;
}
/* Clear all IOMEM mappings pointing to this device after we've * resumed. This way the fake mappings pointing to the dummy pages * are removed and the real iomem mapping will be restored on next * access.
*/
mutex_lock(&ptdev->pm.mmio_lock);
unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
mutex_unlock(&ptdev->pm.mmio_lock); return 0;
int panthor_device_suspend(struct device *dev)
{ struct panthor_device *ptdev = dev_get_drvdata(dev); int cookie;
if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) return -EINVAL;
/* Clear all IOMEM mappings pointing to this device before we * shutdown the power-domain and clocks. Failing to do that results * in external aborts when the process accesses the iomem region. * We change the state and call unmap_mapping_range() with the * mmio_lock held to make sure the vm_fault handler won't set up * invalid mappings.
*/
mutex_lock(&ptdev->pm.mmio_lock);
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
mutex_unlock(&ptdev->pm.mmio_lock);
if (panthor_device_is_initialized(ptdev) &&
drm_dev_enter(&ptdev->base, &cookie)) {
cancel_work_sync(&ptdev->reset.work);
/* We prepare everything as if we were resetting the GPU. * The end of the reset will happen in the resume path though.
*/
panthor_sched_suspend(ptdev);
panthor_fw_suspend(ptdev);
panthor_mmu_suspend(ptdev);
panthor_gpu_suspend(ptdev);
drm_dev_exit(cookie);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.