/** * pvr_fw_process() - Process firmware image, allocate FW memory and create boot * arguments * @pvr_dev: Device pointer. * * Returns: * * 0 on success, or * * Any error returned by pvr_fw_object_create_and_map_offset(), or * * Any error returned by pvr_fw_object_create_and_map().
*/ staticint
/*
 * NOTE(review): this block is extraction-garbled. "staticint" (above) and
 * "conststruct" (below) have lost their separating whitespace and must be
 * restored to "static int" / "const struct". More seriously, the body jumps
 * from the has_fixed_data_addr branch straight into error-unwind labels
 * (err_free_fw_core_code_obj etc.) -- the code that allocates the data/core
 * sections and copies the firmware image appears to be missing entirely.
 * Restore from the original source before building; the comments added below
 * only annotate what is visible here.
 */
pvr_fw_process(struct pvr_device *pvr_dev)
{ struct drm_device *drm_dev = from_pvr_device(pvr_dev); struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem; const u8 *fw = pvr_dev->fw_dev.firmware->data; conststruct pvr_fw_layout_entry *private_data;
u8 *fw_code_ptr;
u8 *fw_data_ptr;
u8 *fw_core_code_ptr;
u8 *fw_core_data_ptr; int err;
/* Compute per-section allocation sizes from the FW layout table. */
layout_get_sizes(pvr_dev);
/* A valid image must carry a private-data layout entry; reject it otherwise. */
private_data = pvr_fw_find_private_data(pvr_dev); if (!private_data) return -EINVAL;
/* NOTE(review): "fw" (firmware image data) is never read in the visible code,
 * further evidence the section-copy logic was lost in extraction.
 */
/* Allocate and map memory for firmware sections. */
/* * Code allocation must be at the start of the firmware heap, otherwise * firmware processor will be unable to boot. * * This has the useful side-effect that for every other object in the * driver, a firmware address of 0 is invalid.
*/
fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0, fw_mem->code_alloc_size,
PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
NULL, NULL, &fw_mem->code_obj); if (IS_ERR(fw_code_ptr)) {
drm_err(drm_dev, "Unable to allocate FW code memory\n"); return PTR_ERR(fw_code_ptr);
}
if (pvr_dev->fw_dev.defs->has_fixed_data_addr) {
/* Reduce the layout base address to an offset within the FW heap. */
u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
/*
 * NOTE(review): everything below is error-unwind code. fw_core_data_ptr,
 * fw_core_code_ptr and fw_data_ptr are read before any visible assignment
 * and "err" is never used, confirming the middle of the function is gone.
 * Unwind order (core_data -> core_code -> data -> code) mirrors reverse
 * allocation order.
 */
if (fw_core_data_ptr)
pvr_fw_object_vunmap(fw_mem->core_data_obj); if (fw_mem->core_data_obj)
pvr_fw_object_destroy(fw_mem->core_data_obj);
err_free_fw_core_code_obj: if (fw_core_code_ptr)
pvr_fw_object_vunmap(fw_mem->core_code_obj); if (fw_mem->core_code_obj)
pvr_fw_object_destroy(fw_mem->core_code_obj);
err_free_fw_data_obj: if (fw_data_ptr)
pvr_fw_object_vunmap(fw_mem->data_obj);
pvr_fw_object_destroy(fw_mem->data_obj);
err_free_fw_code_obj: if (fw_code_ptr)
pvr_fw_object_vunmap(fw_mem->code_obj);
pvr_fw_object_destroy(fw_mem->code_obj);
if (fw_mem->core_code_obj)
pvr_fw_object_destroy(fw_mem->core_code_obj); if (fw_mem->core_data_obj)
pvr_fw_object_destroy(fw_mem->core_data_obj);
pvr_fw_object_destroy(fw_mem->code_obj);
pvr_fw_object_destroy(fw_mem->data_obj);
}
/**
 * pvr_wait_for_fw_boot() - Wait for firmware to finish booting
 * @pvr_dev: Target PowerVR device.
 *
 * Busy-waits on the FWIF sysinit "firmware_started" flag, which the firmware
 * processor sets once its boot sequence completes.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%ETIMEDOUT if firmware fails to boot within timeout.
 */
int
pvr_wait_for_fw_boot(struct pvr_device *pvr_dev)
{
	ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) {
		if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
			return 0;
	}

	/*
	 * Check the flag one final time after the deadline has expired. If
	 * this thread was scheduled out for the whole timeout window the
	 * firmware may well have booted without the loop body ever running,
	 * and reporting -ETIMEDOUT in that case would be a false failure.
	 */
	if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
		return 0;

	return -ETIMEDOUT;
}
/* * pvr_fw_heap_info_init() - Calculate size and masks for FW heap * @pvr_dev: Target PowerVR device. * @log2_size: Log2 of raw heap size. * @reserved_size: Size of reserved area of heap, in bytes. May be zero.
*/ void
pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size)
{ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
/*
 * NOTE(review): function body is truncated by the extraction -- only the
 * fw_dev lookup survives, there is no closing brace before the next
 * definition, and neither @log2_size nor @reserved_size is used in the
 * visible code. The size/mask computation implied by the header comment
 * must be restored from the original source.
 */
/**
 * pvr_fw_validate_init_device_info() - Validate firmware and initialise device
 * information
 * @pvr_dev: Target PowerVR device.
 *
 * This function must be called before querying device information.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if firmware validation fails.
 */
int
pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
{
	int err = pvr_fw_validate(pvr_dev);

	/* Only query device info from an image that validated cleanly. */
	return err ? err : pvr_fw_get_device_info(pvr_dev);
}
/** * pvr_fw_init() - Initialise and boot firmware * @pvr_dev: Target PowerVR device * * On successful completion of the function the PowerVR device will be * initialised and ready to use. * * Returns: * * 0 on success, * * -%EINVAL on invalid firmware image, * * -%ENOMEM on out of memory, or * * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout.
*/ int
/*
 * NOTE(review): this block is a garbled merge of two functions.
 * "staticconststruct" below has lost its whitespace ("static const struct"),
 * and while the fw_defs processor dispatch table plausibly belongs to
 * pvr_fw_init(), the statements that follow (pvr_ccb_fini, pvr_kccb_fini,
 * pvr_fw_cleanup, defs->fini) are teardown logic -- almost certainly the
 * body of a separate pvr_fw_fini() -- and reference an "fw_dev" variable
 * that is never declared here. The real init body (processor-type lookup,
 * heap setup, boot) is missing and must be restored from the original
 * source.
 */
pvr_fw_init(struct pvr_device *pvr_dev)
{ staticconststruct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = {
[PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta,
[PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips,
[PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv,
};
/* * Ensure FWCCB worker has finished executing before destroying FWCCB. The IRQ handler has * been unregistered at this point so no new work should be being submitted.
*/
pvr_ccb_fini(&pvr_dev->fwccb);
pvr_kccb_fini(pvr_dev);
pvr_fw_cleanup(pvr_dev);
/* Processor-specific teardown hook is optional. */
if (fw_dev->defs->fini)
fw_dev->defs->fini(pvr_dev);
}
/**
 * pvr_fw_mts_schedule() - Schedule work via an MTS kick
 * @pvr_dev: Target PowerVR device
 * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_*
 */
void
pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val)
{
	/* Ensure memory is flushed before kicking MTS. */
	wmb();

	/*
	 * The actual MTS kick. Without this register write @val was unused
	 * and the barriers above/below had nothing to order against.
	 * ROGUE_CR_MTS_SCHEDULE / pvr_cr_write32() come from the driver's
	 * register definitions (pvr_rogue_cr_defs.h / pvr_device.h).
	 */
	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, val);

	/* Ensure the MTS kick goes through before continuing. */
	mb();
}
/** * pvr_fw_structure_cleanup() - Send FW cleanup request for an object * @pvr_dev: Target PowerVR device. * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type. * @fw_obj: Pointer to FW object containing object to cleanup. * @offset: Offset within FW object of object to cleanup. * * Returns: * * 0 on success, * * -EBUSY if object is busy, * * -ETIMEDOUT on timeout, or * * -EIO if device is lost.
*/ int
/*
 * NOTE(review): the function prologue is missing from this extraction:
 * "cleanup_req" (used in every switch case) is never declared or pointed at
 * the KCCB command payload, "idx" is passed to drm_dev_exit() without a
 * matching drm_dev_enter(), and the err_up_read label implies a
 * down_read(&pvr_dev->reset_sem) that is not visible. The KCCB command
 * header setup for "cmd" is also absent. Restore from the original source.
 */
pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
u32 offset)
{ struct rogue_fwif_kccb_cmd cmd; int slot_nr; int idx; int err;
u32 rtn;
/* Translate (fw_obj, offset) into the FW address field for this cleanup type. */
switch (type) { case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT:
pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
&cleanup_req->cleanup_data.context_fw_addr); break; case ROGUE_FWIF_CLEANUP_HWRTDATA:
pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
&cleanup_req->cleanup_data.hwrt_data_fw_addr); break; case ROGUE_FWIF_CLEANUP_FREELIST:
pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
&cleanup_req->cleanup_data.freelist_fw_addr); break; default:
err = -EINVAL; goto err_drm_dev_exit;
}
/* Submit the cleanup command and wait (up to 1s) for the FW to process it. */
err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr); if (err) goto err_drm_dev_exit;
err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn); if (err) goto err_drm_dev_exit;
/* FW reports the object still in use; caller may retry later. */
if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
err = -EBUSY;
err_drm_dev_exit:
drm_dev_exit(idx);
err_up_read:
up_read(&pvr_dev->reset_sem);
return err;
}
/** * pvr_fw_object_fw_map() - Map a FW object in firmware address space * @pvr_dev: Device pointer. * @fw_obj: FW object to map. * @dev_addr: Desired address in device space, if a specific address is * required. 0 otherwise. * * Returns: * * 0 on success, or * * -%EINVAL if @fw_obj is already mapped but has no references, or * * Any error returned by DRM.
*/ staticint
/*
 * NOTE(review): "staticint" above has lost its whitespace ("static int"),
 * and the function tail is truncated: the err_unlock and err_remove_node
 * labels targeted by the gotos below, the success "return 0", and the
 * closing brace are all missing from this extraction.
 */
pvr_fw_object_fw_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj, u64 dev_addr)
{ struct pvr_gem_object *pvr_obj = fw_obj->gem; struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj); struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
int err;
/* fw_mm_lock protects the drm_mm allocator for the FW address space. */
spin_lock(&fw_dev->fw_mm_lock);
/* Reject double-mapping: the object already owns a node in the FW mm. */
if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
err = -EINVAL; goto err_unlock;
}
if (!dev_addr) { /* * Allocate from the main heap only (firmware heap minus * config space).
*/
err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node,
gem_obj->size, 0, 0,
fw_dev->fw_heap_info.gpu_addr,
fw_dev->fw_heap_info.gpu_addr +
fw_dev->fw_heap_info.size, 0); if (err) goto err_unlock;
} else {
/* Caller requested a fixed device address: reserve that exact range. */
fw_obj->fw_mm_node.start = dev_addr;
fw_obj->fw_mm_node.size = gem_obj->size;
err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node); if (err) goto err_unlock;
}
spin_unlock(&fw_dev->fw_mm_lock);
/* Map object on GPU. */
err = fw_dev->defs->vm_map(pvr_dev, fw_obj); if (err) goto err_remove_node;
/** * pvr_fw_object_create() - Create a FW object and map to firmware * @pvr_dev: PowerVR device pointer. * @size: Size of object, in bytes. * @flags: Options which affect both this operation and future mapping * operations performed on the returned object. Must be a combination of * DRM_PVR_BO_* and/or PVR_BO_* flags. * @init: Initialisation callback. * @init_priv: Private pointer to pass to initialisation callback. * @fw_obj_out: Pointer to location to store created object pointer. * * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently, * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS * set. * * Returns: * * 0 on success, or * * Any error returned by pvr_fw_object_create_common().
*/ int
pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags, void (*init)(void *cpu_ptr, void *priv), void *init_priv, struct pvr_fw_object **fw_obj_out)
{ void *cpu_ptr;
/*
 * NOTE(review): function body is truncated by the extraction -- only the
 * cpu_ptr declaration survives and there is no closing brace before the
 * next definition. Per the header comment it should delegate to
 * pvr_fw_object_create_common(); restore from the original source.
 */
/**
 * pvr_fw_object_create_and_map() - Create a FW object and map to firmware and CPU
 * @pvr_dev: PowerVR device pointer.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
 * mapping.
 *
 * Returns:
 *  * Pointer to CPU mapping of newly created object, or
 *  * Any error returned by pvr_fw_object_create(), or
 *  * Any error returned by pvr_fw_object_vmap().
 */
void *
pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags,
			     void (*init)(void *cpu_ptr, void *priv),
			     void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	/*
	 * A dev_offset of zero lets the common helper place the object
	 * anywhere in the main FW heap rather than at a fixed address.
	 */
	const u32 any_offset = 0;

	return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, any_offset,
						   init, init_priv, fw_obj_out);
}
/** * pvr_fw_object_create_and_map_offset() - Create a FW object and map to * firmware at the provided offset and to the CPU. * @pvr_dev: PowerVR device pointer. * @dev_offset: Base address of desired FW mapping, offset from start of FW heap. * @size: Size of object, in bytes. * @flags: Options which affect both this operation and future mapping * operations performed on the returned object. Must be a combination of * DRM_PVR_BO_* and/or PVR_BO_* flags. * @init: Initialisation callback. * @init_priv: Private pointer to pass to initialisation callback. * @fw_obj_out: Pointer to location to store created object pointer. * * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently, * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS * set. * * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU * mapping. * * Returns: * * Pointer to CPU mapping of newly created object, or * * Any error returned by pvr_fw_object_create(), or * * Any error returned by pvr_fw_object_vmap().
*/ void *
pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev,
u32 dev_offset, size_t size, u64 flags, void (*init)(void *cpu_ptr, void *priv), void *init_priv, struct pvr_fw_object **fw_obj_out)
{
/* Translate the heap-relative offset into an absolute FW device address. */
u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset;
/*
 * NOTE(review): function body is truncated by the extraction -- only the
 * dev_addr computation survives and there is no closing brace before the
 * next definition. It presumably forwards dev_addr to
 * pvr_fw_object_create_and_map_common(); restore from the original source.
 */
/* * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW * structures * @pvr_dev: Device pointer * * If this function returns an error then the caller must regard the device as lost. * * Returns: * * 0 on success, or * * Any error returned by pvr_fw_init_dev_structures() or pvr_fw_reset_all().
*/ int
pvr_fw_hard_reset(struct pvr_device *pvr_dev)
{ struct list_head *pos; int err;
/* Reset all FW objects */
mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
/*
 * NOTE(review): function body is truncated by the extraction -- it ends
 * immediately after taking fw_objs.lock, with "pos" and "err" unused, no
 * list walk, no unlock, and no closing brace. Restore the remainder
 * (object re-init loop, pvr_fw_init_dev_structures()/pvr_fw_reset_all()
 * calls) from the original source.
 */
/*
 * NOTE(review): the following German website disclaimer is extraction
 * residue, not source code. It is preserved here inside a comment (typo
 * "bereit gestellten" corrected to "bereitgestellten") so the file remains
 * parseable, but it should simply be removed from this file.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */