/**
 * enum panthor_device_pm_state - Power-management state of the device.
 */
enum panthor_device_pm_state {
	/** @PANTHOR_DEVICE_PM_STATE_SUSPENDED: Device is suspended. */
	PANTHOR_DEVICE_PM_STATE_SUSPENDED = 0,

	/** @PANTHOR_DEVICE_PM_STATE_RESUMING: Device is being resumed. */
	PANTHOR_DEVICE_PM_STATE_RESUMING,

	/** @PANTHOR_DEVICE_PM_STATE_ACTIVE: Device is active. */
	PANTHOR_DEVICE_PM_STATE_ACTIVE,

	/** @PANTHOR_DEVICE_PM_STATE_SUSPENDING: Device is being suspended. */
	PANTHOR_DEVICE_PM_STATE_SUSPENDING,
};
/**
 * struct panthor_irq - IRQ data
 *
 * Used to automate IRQ handling for the 3 different IRQs we have in this
 * driver.
 */
struct panthor_irq {
	/** @ptdev: Panthor device */
	struct panthor_device *ptdev;

	/** @irq: IRQ number. */
	int irq;

	/** @mask: Current mask being applied to xxx_INT_MASK. */
	u32 mask;

	/** @suspended: Set to true when the IRQ is suspended. */
	atomic_t suspended;
};
/*
 * NOTE(review): this span appears to be a garbled extraction. The body of
 * enum panthor_device_profiling_flags is truncated after its first value,
 * and the opening declaration of the enclosing structure that owns the
 * @reset, @pm, @current_frequency, @fast_rate and @gems members below
 * (presumably struct panthor_device) is missing — confirm against the
 * original header before relying on this chunk.
 */
/** * enum panthor_device_profiling_mode - Profiling state
*/ enum panthor_device_profiling_flags { /** @PANTHOR_DEVICE_PROFILING_DISABLED: Profiling is disabled. */
PANTHOR_DEVICE_PROFILING_DISABLED = 0,
/** @pending: Set to true if a reset is pending. */
atomic_t pending;
/** * @fast: True if the post_reset logic can proceed with a fast reset. * * A fast reset is just a reset where the driver doesn't reload the FW sections. * * Any time the firmware is properly suspended, a fast reset can take place. * On the other hand, if the halt operation failed, the driver will reload * all FW sections to make sure we start from a fresh state.
*/ bool fast;
} reset;
/** @pm: Power management related data. */ struct { /** @state: Power state. */
atomic_t state;
/** * @mmio_lock: Lock protecting MMIO userspace CPU mappings. * * This is needed to ensure we map the dummy IO pages when * the device is being suspended, and the real IO pages when * the device is being resumed. We can't just do with the * state atomicity to deal with this race.
*/ struct mutex mmio_lock;
/** * @dummy_latest_flush: Dummy LATEST_FLUSH page. * * Used to replace the real LATEST_FLUSH page when the GPU * is suspended.
*/ struct page *dummy_latest_flush;
/** @recovery_needed: True when a resume attempt failed. */
atomic_t recovery_needed;
} pm;
/** @current_frequency: Device clock frequency at present. Set by DVFS*/ unsignedlong current_frequency;
/** @fast_rate: Maximum device clock frequency. Set by DVFS */ unsignedlong fast_rate;
#ifdef CONFIG_DEBUG_FS /** @gems: Device-wide list of GEM objects owned by at least one file. */ struct { /** @gems.lock: Protects the device-wide list of GEM objects. */ struct mutex lock;
/** @node: Used to keep track of all the device's DRM objects */ struct list_head node;
} gems; #endif
};
/*
 * NOTE(review): the members below appear to be the tail of a structure
 * (presumably struct panthor_file) whose opening declaration is not
 * visible in this chunk — confirm against the original header.
 */
/** @user_mmio: User MMIO related fields. */ struct { /** * @offset: Offset used for user MMIO mappings. * * This offset should not be used to check the type of mapping * except in panthor_mmap(). After that point, MMIO mapping * offsets have been adjusted to match * DRM_PANTHOR_USER_MMIO_OFFSET and that macro should be used * instead. * Make sure this rule is followed at all times, because * userspace is in control of the offset, and can change the * value behind our back. Otherwise it can lead to erroneous * branching happening in kernel space.
*/
u64 offset;
} user_mmio;
/** @vms: VM pool attached to this file. */ struct panthor_vm_pool *vms;
/** @groups: Scheduling group pool attached to this file. */ struct panthor_group_pool *groups;
/** @stats: cycle and timestamp measures for job execution. */ struct panthor_gpu_usage stats;
};
int panthor_device_init(struct panthor_device *ptdev); void panthor_device_unplug(struct panthor_device *ptdev);
/** * panthor_device_reset_is_pending() - Checks if a reset is pending. * * Return: true if a reset is pending, false otherwise.
*/ staticinlinebool panthor_device_reset_is_pending(struct panthor_device *ptdev)
{ return atomic_read(&ptdev->reset.pending) != 0;
}
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma);
int panthor_device_resume(struct device *dev); int panthor_device_suspend(struct device *dev);
/**
 * panthor_device_resume_and_get() - Runtime-resume the device and take a
 * PM reference on it.
 * @ptdev: Device to resume.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static inline int panthor_device_resume_and_get(struct panthor_device *ptdev)
{
	int ret = pm_runtime_resume_and_get(ptdev->base.dev);

	/* If the resume failed, we need to clear the runtime_error, which
	 * can be done by forcing the RPM state to suspended. If multiple
	 * threads called panthor_device_resume_and_get(), we only want
	 * one of them to update the state, hence the cmpxchg. Note that a
	 * thread might enter panthor_device_resume_and_get() and call
	 * pm_runtime_resume_and_get() after another thread had attempted
	 * to resume and failed. This means we will end up with an error
	 * without even attempting a resume ourselves. The only risk here
	 * is to report an error when the second resume attempt might have
	 * succeeded. Given resume errors are not expected, this is probably
	 * something we can live with.
	 */
	if (ret && atomic_cmpxchg(&ptdev->pm.recovery_needed, 1, 0) == 1)
		pm_runtime_set_suspended(ptdev->base.dev);

	return ret;
}
/** * panthor_exception_is_fault() - Checks if an exception is a fault. * * Return: true if the exception is a fault, false otherwise.
*/ staticinlinebool
panthor_exception_is_fault(u32 exception_code)
{ return exception_code > DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT;
}
/*
 * NOTE(review): trailing non-code text below is extraction residue (a German
 * website disclaimer), not part of the original header — wrapped in a comment
 * so the file remains valid C; remove once confirmed:
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */