u32 data __aligned(8);
u32 reserved; struct rogue_fwif_file_info_buf fault_buf;
} __aligned(8);
/* GPU power states as reported by the firmware to the host. */
enum rogue_fwif_pow_state {
	/* Idle and ready to full power down. */
	ROGUE_FWIF_POW_OFF,
	/* Running HW commands. */
	ROGUE_FWIF_POW_ON,
	/* Forced idle. */
	ROGUE_FWIF_POW_FORCED_IDLE,
	/* Idle, waiting for the host handshake. */
	ROGUE_FWIF_POW_IDLE,
};
/*
 * Firmware HWR states.
 *
 * NOTE: in the original text several #define directives were collapsed onto a
 * single physical line; everything after the first directive then became part
 * of that macro's replacement text and the later macros were never defined.
 * Each directive must be on its own line.
 */
/* The HW state is ok or locked up */
#define ROGUE_FWIF_HWR_HARDWARE_OK BIT(0)
/* Tells if a HWR reset is in progress */
#define ROGUE_FWIF_HWR_RESET_IN_PROGRESS BIT(1)
/* A DM unrelated lockup has been detected */
#define ROGUE_FWIF_HWR_GENERAL_LOCKUP BIT(3)
/* At least one DM is running without being close to a lockup */
#define ROGUE_FWIF_HWR_DM_RUNNING_OK BIT(4)
/* At least one DM is close to lockup */
#define ROGUE_FWIF_HWR_DM_STALLING BIT(5)
/* The FW has faulted and needs to restart */
#define ROGUE_FWIF_HWR_FW_FAULT BIT(6)
/* The FW has requested the host to restart it */
#define ROGUE_FWIF_HWR_RESTART_REQUESTED BIT(7)

#define ROGUE_FWIF_PHR_STATE_SHIFT (8U)
/* The FW has requested the host to restart it, per PHR configuration */
#define ROGUE_FWIF_PHR_RESTART_REQUESTED ((1) << ROGUE_FWIF_PHR_STATE_SHIFT)
/* A PHR triggered GPU reset has just finished */
#define ROGUE_FWIF_PHR_RESTART_FINISHED ((2) << ROGUE_FWIF_PHR_STATE_SHIFT)
#define ROGUE_FWIF_PHR_RESTART_MASK \
	(ROGUE_FWIF_PHR_RESTART_REQUESTED | ROGUE_FWIF_PHR_RESTART_FINISHED)
/*
 * Firmware per-DM HWR states.
 *
 * NOTE: in the original text all of these #define directives were collapsed
 * onto one physical line, so only the first one was actually defined; every
 * directive must be on its own line.
 */
/* DM is working if all flags are cleared */
#define ROGUE_FWIF_DM_STATE_WORKING (0)
/* DM is idle and ready for HWR */
#define ROGUE_FWIF_DM_STATE_READY_FOR_HWR BIT(0)
/* DM need to skip to next cmd before resuming processing */
#define ROGUE_FWIF_DM_STATE_NEEDS_SKIP BIT(2)
/* DM need partial render cleanup before resuming processing */
#define ROGUE_FWIF_DM_STATE_NEEDS_PR_CLEANUP BIT(3)
/* DM need to increment Recovery Count once fully recovered */
#define ROGUE_FWIF_DM_STATE_NEEDS_TRACE_CLEAR BIT(4)
/* DM was identified as locking up and causing HWR */
#define ROGUE_FWIF_DM_STATE_GUILTY_LOCKUP BIT(5)
/* DM was innocently affected by another lockup which caused HWR */
#define ROGUE_FWIF_DM_STATE_INNOCENT_LOCKUP BIT(6)
/* DM was identified as over-running and causing HWR */
#define ROGUE_FWIF_DM_STATE_GUILTY_OVERRUNING BIT(7)
/* DM was innocently affected by another DM over-running which caused HWR */
#define ROGUE_FWIF_DM_STATE_INNOCENT_OVERRUNING BIT(8)
/* DM was forced into HWR as it delayed more important workloads */
#define ROGUE_FWIF_DM_STATE_HARD_CONTEXT_SWITCH BIT(9)
/* DM was forced into HWR due to an uncorrected GPU ECC error */
#define ROGUE_FWIF_DM_STATE_GPU_ECC_HWR BIT(10)
/* Firmware's connection state. */
enum rogue_fwif_connection_fw_state {
	/* Firmware is offline. */
	ROGUE_FW_CONNECTION_FW_OFFLINE = 0,
	/* Firmware is initialised. */
	ROGUE_FW_CONNECTION_FW_READY,
	/* Firmware connection is fully established. */
	ROGUE_FW_CONNECTION_FW_ACTIVE,
	/* Firmware is clearing up connection data. */
	ROGUE_FW_CONNECTION_FW_OFFLOADING,
	ROGUE_FW_CONNECTION_FW_STATE_COUNT
};
/* OS' connection state. */
enum rogue_fwif_connection_os_state {
	/* OS is offline. */
	ROGUE_FW_CONNECTION_OS_OFFLINE = 0,
	/* OS's KM driver is setup and waiting. */
	ROGUE_FW_CONNECTION_OS_READY,
	/* OS connection is fully established. */
	ROGUE_FW_CONNECTION_OS_ACTIVE,
	ROGUE_FW_CONNECTION_OS_STATE_COUNT
};
/* Firmware trace control data, shared with the host driver. */
struct rogue_fwif_tracebuf {
	u32 log_type;
	struct rogue_fwif_tracebuf_space tracebuf[MAX_THREAD_NUM];
	/*
	 * Member initialised only when sTraceBuf is actually allocated (in
	 * ROGUETraceBufferInitOnDemandResources).
	 */
	u32 tracebuf_size_in_dwords;
	/* Compatibility and other flags. */
	u32 tracebuf_flags;
} __aligned(8);
/* firmware system data shared with the Host driver */ struct rogue_fwif_sysdata { /* Configuration flags from host */
u32 config_flags; /* Extended configuration flags from host */
u32 config_flags_ext; enum rogue_fwif_pow_state pow_state;
u32 hw_perf_ridx;
u32 hw_perf_widx;
u32 hw_perf_wrap_count; /* Constant after setup, needed in FW */
u32 hw_perf_size; /* The number of times the FW drops a packet due to buffer full */
u32 hw_perf_drop_count;
/* * ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid * when FW is built with ROGUE_HWPERF_UTILIZATION & * ROGUE_HWPERF_DROP_TRACKING defined in rogue_fw_hwperf.c
*/ /* Buffer utilisation, high watermark of bytes in use */
u32 hw_perf_ut; /* The ordinal of the first packet the FW dropped */
u32 first_drop_ordinal; /* The ordinal of the last packet the FW dropped */
u32 last_drop_ordinal; /* State flags for each Operating System mirrored from Fw coremem */ struct rogue_fwif_os_runtime_flags
os_runtime_flags_mirror[ROGUE_FW_MAX_NUM_OS];
#ifdefined(SUPPORT_ROGUE_FW_STATS_FRAMEWORK) # define ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE (8) # define ROGUE_FWIF_STATS_FRAMEWORK_MAX \
(2048 * ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE)
u32 fw_stats_buf[ROGUE_FWIF_STATS_FRAMEWORK_MAX] __aligned(8); #endif
u32 hwr_state_flags;
u32 hwr_recovery_flags[PVR_FWIF_DM_MAX]; /* Compatibility and other flags */
u32 fw_sys_data_flags; /* Identify whether MC config is P-P or P-S */
u32 mc_config;
} __aligned(8);
/* per-os firmware shared data */ struct rogue_fwif_osdata { /* Configuration flags from an OS */
u32 fw_os_config_flags; /* Markers to signal that the host should perform a full sync check */
u32 fw_sync_check_mark;
u32 host_sync_check_mark;
/*
 * HWR log buffer sizing.
 *
 * NOTE: these #define directives were collapsed onto shared physical lines in
 * the original text, which left all but the first of each run undefined; each
 * directive must be on its own line.
 */
/* Number of first HWR logs recorded (never overwritten by newer logs) */
#define ROGUE_FWIF_HWINFO_MAX_FIRST 8U
/* Number of latest HWR logs (older logs are overwritten by newer logs) */
#define ROGUE_FWIF_HWINFO_MAX_LAST 8U
/* Total number of HWR logs stored in a buffer */
#define ROGUE_FWIF_HWINFO_MAX \
	(ROGUE_FWIF_HWINFO_MAX_FIRST + ROGUE_FWIF_HWINFO_MAX_LAST)
/* Index of the last log in the HWR log buffer */
#define ROGUE_FWIF_HWINFO_LAST_INDEX (ROGUE_FWIF_HWINFO_MAX - 1U)

/* Flag definitions affecting only workloads submitted by a particular OS */
#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN BIT(0)
#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN BIT(1)
#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN BIT(2)
#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN BIT(3)
/* Describes one (possibly indirect) register for the firmware. */
struct rogue_fw_register_list {
	/* Register number. */
	u16 reg_num;
	/* Indirect register number (or 0 if not used). */
	u16 indirect_reg_num;
	/* Start value for indirect register. */
	u16 indirect_start_val;
	/* End value for indirect register. */
	u16 indirect_end_val;
};
/*
 * Doubly-linked list node in firmware-shared memory.
 * NOTE(review): p/n are presumably prev/next node FW addresses — confirm
 * against the firmware-side definition.
 */
struct rogue_fwif_dllist_node {
	u32 p;
	u32 n;
};
/*
 * This number is used to represent an invalid page catalogue physical address.
 */
#define ROGUE_FWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU

/* This number is used to represent an unallocated page catalog base register. */
#define ROGUE_FW_BIF_INVALID_PCSET 0xFFFFFFFFU
/* Firmware memory context. */
struct rogue_fwif_fwmemcontext {
	/* Device physical address of the context's page catalogue. */
	aligned_u64 pc_dev_paddr;
	/*
	 * Associated page catalog base register
	 * (ROGUE_FW_BIF_INVALID_PCSET == unallocated).
	 */
	u32 page_cat_base_reg_set;
	/* Breakpoint address. */
	u32 breakpoint_addr;
	/* Breakpoint handler address. */
	u32 bp_handler_addr;
	/* DM and enable control for BP. */
	u32 breakpoint_ctl;
	/* Compatibility and other flags. */
	u32 fw_mem_ctx_flags;
	u32 padding;
} __aligned(8);
/*
 * FW-accessible TA state which must be written out to memory on context store.
 */
struct rogue_fwif_geom_ctx_state_per_geom {
	/* To store in mid-TA. */
	aligned_u64 geom_reg_vdm_call_stack_pointer;
	/* Initial value (in case it is 'lost' due to a lock-up). */
	aligned_u64 geom_reg_vdm_call_stack_pointer_init;
	u32 geom_reg_vbs_so_prim[4];
	u16 geom_current_idx;
	u16 padding[3];
} __aligned(8);
struct rogue_fwif_geom_ctx_state {
	/*
	 * FW-accessible TA state which must be written out to memory on
	 * context store, one entry per geometry core.
	 */
	struct rogue_fwif_geom_ctx_state_per_geom geom_core[ROGUE_NUM_GEOM_CORES_MAX];
} __aligned(8);
/*
 * FW-accessible ISP state which must be written out to memory on context store.
 */
struct rogue_fwif_frag_ctx_state {
	u32 frag_reg_pm_deallocated_mask_status;
	u32 frag_reg_dm_pds_mtilefree_status;
	/* Compatibility and other flags. */
	u32 ctx_state_flags;
	/*
	 * frag_reg_isp_store should be the last element of the structure as
	 * this is an array whose size is determined at runtime after detecting
	 * the ROGUE core.
	 */
	u32 frag_reg_isp_store[];
} __aligned(8);
struct rogue_fwif_compute_ctx_state {
	/* Target buffer and other flags. */
	u32 ctx_state_flags;
};
/* Common firmware context, shared between the host and the firmware. */
struct rogue_fwif_fwcommoncontext {
	/* CCB details for this firmware context. */
	u32 ccbctl_fw_addr; /* CCB control */
	u32 ccb_fw_addr; /* CCB base */
	struct rogue_fwif_dma_addr ccb_meta_dma_addr;

	/* Context suspend state. */
	/* geom/frag context suspend state, read/written by FW. */
	u32 context_state_addr __aligned(8);

	/* Flags e.g. for context switching. */
	u32 fw_com_ctx_flags;
	u32 priority;
	u32 priority_seq_num;

	/* Framework state: register updates for Framework. */
	u32 rf_cmd_addr __aligned(8);

	/* Statistic updates waiting to be passed back to the host... */
	/* True when some stats are pending. */
	bool stats_pending __aligned(4);
	/* Number of stores on this context since last update. */
	s32 stats_num_stores;
	/* Number of OOMs on this context since last update. */
	s32 stats_num_out_of_memory;
	/* Number of PRs on this context since last update. */
	s32 stats_num_partial_renders;
	/* Data Master type. */
	u32 dm;
	/* Device Virtual Address of the signal the context is waiting on. */
	aligned_u64 wait_signal_address;
	/* List entry for the wait-signal list. */
	struct rogue_fwif_dllist_node wait_signal_node __aligned(8);
	/* List entry for the buffer stalled list. */
	struct rogue_fwif_dllist_node buf_stalled_node __aligned(8);
	/* Address of the circular buffer queue pointers. */
	aligned_u64 cbuf_queue_ctrl_addr;

	aligned_u64 robustness_address;
	/* Max HWR deadline limit in ms. */
	u32 max_deadline_ms;
	/* Following HWR circular buffer read-offset needs resetting. */
	bool read_offset_needs_reset;

	/* List entry for the waiting list. */
	struct rogue_fwif_dllist_node waiting_node __aligned(8);
	/* List entry for the run list. */
	struct rogue_fwif_dllist_node run_node __aligned(8);
	/* UFO that last failed (or NULL). */
	struct rogue_fwif_ufo last_failed_ufo;

	/* Memory context. */
	u32 fw_mem_context_fw_addr;

	/* References to the host side originators. */
	/* The Server Common Context. */
	u32 server_common_context_id;
	/* Associated process ID. */
	u32 pid;

	/* True when Geom DM OOM is not allowed. */
	bool geom_oom_disabled __aligned(4);
} __aligned(8);
/* Kernel CCB control for ROGUE. */
struct rogue_fwif_ccb_ctl {
	/* Write offset into array of commands (MUST be aligned to 16 bytes!). */
	u32 write_offset;
	/* Padding to ensure read and write offsets are in separate cache lines. */
	u8 padding[128 - sizeof(u32)];
	/* Read offset into array of commands. */
	u32 read_offset;
	/* Offset wrapping mask (total capacity of the CCB - 1). */
	u32 wrap_mask;
	/* Size of each command in bytes. */
	u32 cmd_size;
	u32 padding2;
} __aligned(8);
/*
 * MMU cache invalidation flags.
 *
 * NOTE: in the original text several of these #define directives shared a
 * physical line, so the later ones (FLAGS_TLB, FLAGS_CTX_ALL) were swallowed
 * into the preceding macro's replacement text and never defined; each
 * directive must be on its own line.
 */
/*
 * can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT
 * bit from BIF_CTRL reg
 */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10)
/* BIF_CTRL_INVAL_TLB1_EN */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB \
	(ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8)
/* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800)
/* indicates FW should interrupt the host */
#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U)
struct rogue_fwif_power_request {
	/* Type of power request. */
	enum rogue_fwif_power_type pow_type;
	union {
		/* Number of active Dusts. */
		u32 num_of_dusts;
		/* If the operation is mandatory. */
		bool forced __aligned(4);
		/*
		 * Type of Request. Consolidating Force Idle, Cancel Forced
		 * Idle, Host Timeout.
		 */
		enum rogue_fwif_power_force_idle_type pow_request_type;
	} power_req_data;
};
struct rogue_fwif_slcflushinvaldata {
	/* Context to fence on (only useful when bDMContext == TRUE). */
	u32 context_fw_addr;
	/* Invalidate the cache as well as flushing. */
	bool inval __aligned(4);
	/* The data to flush/invalidate belongs to a specific DM context. */
	bool dm_context __aligned(4);
	/* Optional address of range (only useful when bDMContext == FALSE). */
	aligned_u64 address;
	/* Optional size of range (only useful when bDMContext == FALSE). */
	aligned_u64 size;
};
struct rogue_fwif_hwperf_ctrl {
	/* Control operation code. */
	enum rogue_fwif_hwperf_update_config opcode;
	/* Mask of events to toggle. */
	aligned_u64 mask;
};
struct rogue_fwif_hwperf_config_enable_blks {
	/* Number of ROGUE_HWPERF_CONFIG_MUX_CNTBLK in the array. */
	u32 num_blocks;
	/* Address of the ROGUE_HWPERF_CONFIG_MUX_CNTBLK array. */
	u32 block_configs_fw_addr;
};
struct rogue_fwif_hwperf_config_da_blks {
	/* Number of ROGUE_HWPERF_CONFIG_CNTBLK in the array. */
	u32 num_blocks;
	/* Address of the ROGUE_HWPERF_CONFIG_CNTBLK array. */
	u32 block_configs_fw_addr;
};
struct rogue_fwif_hwperf_ctrl_blks {
	bool enable;
	/* Number of block IDs in the array. */
	u32 num_blocks;
	/* Array of ROGUE_HWPERF_CNTBLK_ID values. */
	u16 block_ids[ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX];
};
struct rogue_fwif_freelist_gs_data {
	/* Freelist FW address. */
	u32 freelist_fw_addr;
	/* Amount of the Freelist change. */
	u32 delta_pages;
	/* New amount of pages on the freelist (including ready pages). */
	u32 new_pages;
	/* Number of ready pages to be held in reserve until OOM. */
	u32 ready_pages;
};
struct rogue_fwif_write_offset_update_data {
	/*
	 * Context that may need to be resumed following a write offset update.
	 */
	u32 context_fw_addr;
} __aligned(8);
enum rogue_fwif_kccb_cmd_type { /* Common commands */
ROGUE_FWIF_KCCB_CMD_KICK = 101U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
ROGUE_FWIF_KCCB_CMD_MMUCACHE = 102U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
ROGUE_FWIF_KCCB_CMD_BP = 103U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* SLC flush and invalidation request */
ROGUE_FWIF_KCCB_CMD_SLCFLUSHINVAL = 105U |
ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* * Requests cleanup of a FW resource (type specified in the command * data)
*/
ROGUE_FWIF_KCCB_CMD_CLEANUP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Power request */
ROGUE_FWIF_KCCB_CMD_POW = 107U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Backing for on-demand ZS-Buffer done */
ROGUE_FWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE =
108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Unbacking for on-demand ZS-Buffer done */
ROGUE_FWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE =
109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Freelist Grow done */
ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE =
110U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Freelists Reconstruction done */
ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE =
112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* * Informs the firmware that the host has added more data to a CDM2 * Circular Buffer
*/
ROGUE_FWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE =
114U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Health check request */
ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK = 115U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Forcing signalling of all unmet UFOs for a given CCB offset */
ROGUE_FWIF_KCCB_CMD_FORCE_UPDATE = 116U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
/* There is a geometry and a fragment command in this single kick */
ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK = 117U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Informs the FW that a Guest OS has come online / offline. */
ROGUE_FWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
/* Commands only permitted to the native or host OS */
ROGUE_FWIF_KCCB_CMD_REGCONFIG = 200U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
/* Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
ROGUE_FWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
/* * Ask the firmware to update its cached ui32LogType value from the (shared) * tracebuf control structure
*/
ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Set a maximum frequency/OPP point */
ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* * Changes the relative scheduling priority for a particular OSid. It can * only be serviced for the Host DDK
*/
ROGUE_FWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Set or clear firmware state flags */
ROGUE_FWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
/* Set a minimum frequency/OPP point */
ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, /* Configure Periodic Hardware Reset behaviour */
ROGUE_FWIF_KCCB_CMD_PHR_CFG = 213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
/* Kernel CCB command packet */ struct rogue_fwif_kccb_cmd { /* Command type */ enum rogue_fwif_kccb_cmd_type cmd_type; /* Compatibility and other flags */
u32 kccb_flags;
/* * NOTE: Make sure that uCmdData is the last member of this struct * This is to calculate actual command size for device mem copy. * (Refer ROGUEGetCmdMemCopySize())
*/ union { /* Data for Kick command */ struct rogue_fwif_kccb_cmd_kick_data cmd_kick_data; /* Data for combined geom/frag Kick command */ struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data
combined_geom_frag_cmd_kick_data; /* Data for MMU cache command */ struct rogue_fwif_mmucachedata mmu_cache_data; /* Data for Breakpoint Commands */ struct rogue_fwif_bpdata bp_data; /* Data for SLC Flush/Inval commands */ struct rogue_fwif_slcflushinvaldata slc_flush_inval_data; /* Data for cleanup commands */ struct rogue_fwif_cleanup_request cleanup_data; /* Data for power request commands */ struct rogue_fwif_power_request pow_data; /* Data for HWPerf control command */ struct rogue_fwif_hwperf_ctrl hw_perf_ctrl; /* * Data for HWPerf configure, clear and enable performance * counter block command
*/ struct rogue_fwif_hwperf_config_enable_blks
hw_perf_cfg_enable_blks; /* * Data for HWPerf enable or disable performance counter block * commands
*/ struct rogue_fwif_hwperf_ctrl_blks hw_perf_ctrl_blks; /* Data for HWPerf configure the custom counters to read */ struct rogue_fwif_hwperf_select_custom_cntrs
hw_perf_select_cstm_cntrs; /* Data for HWPerf configure Directly Addressable blocks */ struct rogue_fwif_hwperf_config_da_blks hw_perf_cfg_da_blks; /* Data for core clock speed change */ struct rogue_fwif_coreclkspeedchange_data
core_clk_speed_change_data; /* Feedback for Z/S Buffer backing/unbacking */ struct rogue_fwif_zsbuffer_backing_data zs_buffer_backing_data; /* Feedback for Freelist grow/shrink */ struct rogue_fwif_freelist_gs_data free_list_gs_data; /* Feedback for Freelists reconstruction*/ struct rogue_fwif_freelists_reconstruction_data
free_lists_reconstruction_data; /* Data for custom register configuration */ struct rogue_fwif_regconfig_data reg_config_data; /* Data for informing the FW about the write offset update */ struct rogue_fwif_write_offset_update_data
write_offset_update_data; /* Data for setting the max frequency/OPP */ struct rogue_fwif_pdvfs_max_freq_data pdvfs_max_freq_data; /* Data for setting the min frequency/OPP */ struct rogue_fwif_pdvfs_min_freq_data pdvfs_min_freq_data; /* Data for updating the Guest Online states */ struct rogue_fwif_os_state_change_data cmd_os_online_state_data; /* Dev address for TBI buffer allocated on demand */
u32 tbi_buffer_fw_addr; /* Data for dumping of register ranges */ struct rogue_fwif_counter_dump_data counter_dump_config_data; /* Data for signalling all unmet fences for a given CCB */ struct rogue_fwif_kccb_cmd_force_update_data force_update_data;
} cmd_data __aligned(8);
} __aligned(8);
/*
 * Context reset flag bits.
 *
 * NOTE: both #define directives shared one physical line in the original
 * text, so the second was swallowed into the first macro's replacement text
 * and never defined; each directive must be on its own line.
 */
/* 1 if a page fault happened */
#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF BIT(0)
/* 1 if applicable to all contexts */
#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS BIT(1)
struct rogue_fwif_fwccb_cmd_context_reset_data {
	/* Context affected by the reset. */
	u32 server_common_context_id;
	/* Reason for reset. */
	enum rogue_context_reset_reason reset_reason;
	/* Data Master affected by the reset. */
	u32 dm;
	/* Job ref running at the time of reset. */
	u32 reset_job_ref;
	/* ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield. */
	u32 flags;
	/* At what page catalog address. */
	aligned_u64 pc_address;
	/* Page fault address (only when applicable). */
	aligned_u64 fault_address;
};
/* Notifies host of a FW pagefault */
ROGUE_FWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION =
112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
};
/*
 * Selects which render-context statistic PVRSRVStatsUpdateRenderContextStats
 * should increment.
 */
enum rogue_fwif_fwccb_cmd_update_stats_type {
	/* Increase the ui32TotalNumPartialRenders stat. */
	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS = 1,
	/* Increase the ui32TotalNumOutOfMemory stat. */
	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY,
	/* Increase the ui32NumGeomStores stat. */
	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_GEOM_STORES,
	/* Increase the ui32NumFragStores stat. */
	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_FRAG_STORES,
	/* Increase the ui32NumCDMStores stat. */
	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES,
	/* Increase the ui32NumTDMStores stat. */
	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES
};
struct rogue_fwif_fwccb_cmd_update_stats_data {
	/* Element to update. */
	enum rogue_fwif_fwccb_cmd_update_stats_type element_to_update;
	/* The pid of the process whose stats are being updated. */
	u32 pid_owner;
	/* Adjustment to be made to the statistic. */
	s32 adjustment_value;
};
/*
 ******************************************************************************
 * Workload estimation Firmware CCB command structure for ROGUE
 ******************************************************************************
 */
struct rogue_fwif_workest_fwccb_cmd {
	/* Index for return data array. */
	u16 return_data_index;
	/* The cycles the workload took on the hardware. */
	u32 cycles_taken;
};
/*
 ******************************************************************************
 * Client CCB commands for ROGUE
 ******************************************************************************
 */

/*
 * Required memory alignment for 64-bit variables accessible by Meta
 * (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared
 * between the host and meta that contains 64-bit variables has to maintain
 * this alignment).
 */
#define ROGUE_FWIF_FWALLOC_ALIGN sizeof(u64)
/*
 * Client CCB command types.
 *
 * NOTE: several of these #define directives shared one physical line in the
 * original text, so only the first macro of each run was actually defined;
 * each directive must be on its own line.
 */
/* Leave a gap between CCB specific commands and generic commands */
#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE (212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_UPDATE (213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_RMW_UPDATE \
	(214U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR (215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_PRIORITY (216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
/*
 * Pre and Post timestamp commands are supposed to sandwich the DM cmd. The
 * padding code with the CCB wrap upsets the FW if we don't have the task type
 * bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
 */
#define ROGUE_FWIF_CCB_CMD_TYPE_POST_TIMESTAMP \
	(217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_UPDATE \
	(218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE \
	(219U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
struct rogue_fwif_workest_kick_data {
	/* Index for the KM Workload estimation return data array. */
	u16 return_data_index __aligned(8);
	/* Predicted time taken to do the work in cycles. */
	u32 cycles_prediction __aligned(8);
	/* Deadline for the workload. */
	aligned_u64 deadline;
};
struct rogue_fwif_ccb_cmd_header {
	u32 cmd_type;
	u32 cmd_size;
	/*
	 * External job reference - provided by client and used in debug for
	 * tracking submitted work.
	 */
	u32 ext_job_ref;
	/*
	 * Internal job reference - generated by services and used in debug for
	 * tracking submitted work.
	 */
	u32 int_job_ref;
	/* Workload Estimation - Workload Estimation Data. */
	struct rogue_fwif_workest_kick_data work_est_kick_data __aligned(8);
};
/*
 ******************************************************************************
 * Client CCB commands which are only required by the kernel
 ******************************************************************************
 */
struct rogue_fwif_cmd_priority {
	s32 priority;
};
/*
 ******************************************************************************
 * Signature and Checksums Buffer
 ******************************************************************************
 */
struct rogue_fwif_sigbuf_ctl {
	/* Ptr to Signature Buffer memory. */
	u32 buffer_fw_addr;
	/* Amount of space left for storing regs in the buffer. */
	u32 left_size_in_regs;
} __aligned(8);
struct rogue_fwif_counter_dump_ctl {
	/* Ptr to counter dump buffer. */
	u32 buffer_fw_addr;
	/* Amount of space for storing in the buffer. */
	u32 size_in_dwords;
} __aligned(8);
struct rogue_fwif_firmware_gcov_ctl {
	/* Ptr to firmware gcov buffer. */
	u32 buffer_fw_addr;
	/* Amount of space for storing in the buffer. */
	u32 size;
} __aligned(8);
/*
 * WARNING: Whenever the layout of ROGUE_FWIF_COMPCHECKS_BVNC changes, the
 * following define should be increased by 1 to indicate to the compatibility
 * logic that layout has changed.
 */
#define ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION 3
struct rogue_fwif_compchecks_bvnc {
	/* WARNING: This field must be defined as first one in this structure. */
	u32 layout_version;
	aligned_u64 bvnc;
} __aligned(8);
/*
 ******************************************************************************
 * Updated configuration post FW data init.
 ******************************************************************************
 */
struct rogue_fwif_runtime_cfg {
	/* APM latency in ms before signalling IDLE to the host. */
	u32 active_pm_latency_ms;
	/* Compatibility and other flags. */
	u32 runtime_cfg_flags;
	/*
	 * If set, APM latency does not reset to system default each GPU power
	 * transition.
	 */
	bool active_pm_latency_persistant __aligned(4);
	/* Core clock speed, currently only used to calculate timer ticks. */
	u32 core_clock_speed;
	/* Last number of dusts change requested by the host. */
	u32 default_dusts_num_init;
	/* Periodic Hardware Reset configuration values. */
	u32 phr_mode;
	/* New number of milliseconds C/S is allowed to last. */
	u32 hcs_deadline_ms;
	/* The watchdog period in microseconds. */
	u32 wdg_period_us;
	/* Array of priorities per OS. */
	u32 osid_priority[ROGUE_FW_MAX_NUM_OS];
	/* On-demand allocated HWPerf buffer address, to be passed to the FW. */
	u32 hwperf_buf_fw_addr;
	bool padding __aligned(4);
};
/* ***************************************************************************** * Control data for ROGUE *****************************************************************************
*/
enum rogue_fwif_gpio_val_mode {
	/* No GPIO validation. */
	ROGUE_FWIF_GPIO_VAL_OFF = 0,
	/*
	 * Simple test case that initiates by sending data via the GPIO and then
	 * sends back any data received over the GPIO.
	 */
	ROGUE_FWIF_GPIO_VAL_GENERAL = 1,
	/*
	 * More complex test case that writes and reads data across the entire
	 * GPIO AP address range.
	 */
	ROGUE_FWIF_GPIO_VAL_AP = 2,
	/* Validates the GPIO Testbench. */
	ROGUE_FWIF_GPIO_VAL_TESTBENCH = 5,
	/* Send and then receive each byte in the range 0-255. */
	ROGUE_FWIF_GPIO_VAL_LOOPBACK = 6,
	/* Send and then receive each power-of-2 byte in the range 0-255. */
	ROGUE_FWIF_GPIO_VAL_LOOPBACK_LITE = 7,
	ROGUE_FWIF_GPIO_VAL_LAST
};
/*
 * Kernel CCB return slot responses. Usage of bit-fields instead of bare
 * integers allows FW to possibly pack-in several responses for each single kCCB
 * command.
 *
 * NOTE: in the original text all four #define directives were collapsed onto
 * one physical line, leaving only the first actually defined; each directive
 * must be on its own line.
 */
/* Command executed (return status from FW) */
#define ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED BIT(0)
/* A cleanup was requested but resource busy */
#define ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY BIT(1)
/* Poll failed in FW for a HW operation to complete */
#define ROGUE_FWIF_KCCB_RTN_SLOT_POLL_FAILURE BIT(2)
/* Reset value of a kCCB return slot (set by host) */
#define ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U
/* * FW Pointer to memory containing core clock rate in Hz. * Firmware (PDVFS) updates the memory when running on non primary FW * thread to communicate to host driver.
*/
u32 core_clock_rate_fw_addr;
/* * Utility variable used to convert CR timer deltas to OS timer deltas * (nS), where the deltas are relative to the timestamps above: * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below
*/
aligned_u64 cr_delta_to_os_delta_kns;
/* * The following macros are used to help converting FW timestamps to the Host * time domain. On the FW the ROGUE_CR_TIMER counter is used to keep track of * time; it increments by 1 every 256 GPU clock ticks, so the general * formula to perform the conversion is: * * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, * otherwise if (scale == 10^6) then deltaOS is in uS ] * * deltaCR * 256 256 * scale * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] * GPUclockspeed GPUclockspeed * * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) * to get some better accuracy and to avoid returning 0 in the integer * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. * This is the same as keeping K as a decimal number. * * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies * (deltaCR * K is more or less a constant), and it's relative to the base * OS timestamp sampled as a part of the timer correlation data. * This base is refreshed on GPU power-on, DVFS transition and periodic * frequency calibration (executed every few seconds if the FW is doing * some work), so as long as the GPU is doing something and one of these * events is triggered then deltaCR * K will not overflow and deltaOS will be * correct.
*/
/*
 * The OS timestamps computed by the FW are approximations of the real time,
 * which means they could be slightly behind or ahead the real timer on the
 * Host. In some cases we can perform subtractions between FW approximated
 * timestamps and real OS timestamps, so we need a form of protection against
 * negative results if for instance the FW one is a bit ahead of time.
 *
 * NOTE: both macro arguments are evaluated more than once — avoid passing
 * expressions with side effects.
 */
#define ROGUE_FWIF_GPU_UTIL_GET_PERIOD(newtime, oldtime) \
	(((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U)
/* * The timer correlation array must be big enough to ensure old entries won't be * overwritten before all the HWPerf events linked to those entries are * processed by the MISR. The update frequency of this array depends on how fast
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.