/*
 * NOTE(review): this appears to be the tail of the HFI queue-read helper;
 * the enclosing function header is not visible in this chunk.
 */

/*
 * If we are to assume that the GMU firmware is in fact a rational actor
 * and is programmed to not send us a larger response than we expect
 * then we can also assume that if the header size is unexpectedly large
 * that it is due to memory corruption and/or hardware failure. In this
 * case the only reasonable course of action is to BUG() to help harden
 * the failure.
 */
BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

/* Copy the payload out of the ring buffer, wrapping at the queue end */
for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
	data[i] = queue->data[index];
	index = (index + 1) % header->size;
}
/*
 * NOTE(review): this appears to be the tail of the HFI queue-write helper;
 * the enclosing function header is not visible in this chunk.
 */

/* Non-legacy GMUs expect writes aligned to a 4-dword boundary */
if (!gmu->legacy)
	index = ALIGN(index, 4) % header->size;

/* Copy the message into the ring buffer, wrapping at the queue end */
for (i = 0; i < dwords; i++) {
	queue->data[index] = data[i];
	index = (index + 1) % header->size;
}

/* Cookify any non used data at the end of the write buffer */
if (!gmu->legacy) {
	for (; index % 4; index = (index + 1) % header->size)
		queue->data[index] = 0xfafafafa;
}
/*
 * NOTE(review): interior of the HFI ack-wait logic; the enclosing function
 * header and the end of the loop are not visible in this chunk.
 */

/* Wait for the GMU to signal that a response has arrived */
ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
if (ret)
	return ret;

for (;;) {
	struct a6xx_hfi_msg_response resp;

	/* Get the next packet */
	ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
		sizeof(resp) >> 2);

	/*
	 * If the queue is empty, there may have been previous missed
	 * responses that preceded the response to our packet. Wait
	 * further before we give up.
	 */
	if (!ret) {
		ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
		if (ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");
			return ret;
		}
		continue;
	}

	/* Skip responses whose sequence number does not match ours */
	if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
		DRM_DEV_ERROR(gmu->dev,
			"Unexpected message id %d on the response queue\n",
			HFI_HEADER_SEQNUM(resp.ret_header));
		continue;
	}

	/* A non-zero error field means the firmware rejected the message */
	if (resp.error) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d returned error %d\n",
			a6xx_hfi_msg_id[id], seqnum, resp.error);
		return -EINVAL;
	}

	/* All is well, copy over the buffer */
	if (payload && payload_size)
		memcpy(payload, resp.payload,
			min_t(u32, payload_size, sizeof(resp.payload)));
/*
 * NOTE(review): interior of a bandwidth-table build function; the enclosing
 * function header is not visible in this chunk.
 */

/* Resolve each named DDR BCM via the command DB; stop at the first hole */
for (i = 0; i < GMU_MAX_BCMS; i++) {
	if (!info->bcms[i].name)
		break;
	msg->ddr_cmds_addrs[i] = cmd_db_read_addr(info->bcms[i].name);
}
msg->ddr_cmds_num = i;

/* Copy the per-level DDR bandwidth votes into the message */
for (i = 0; i < gmu->nr_gpu_bws; ++i)
	for (j = 0; j < msg->ddr_cmds_num; j++)
		msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
msg->bw_level_num = gmu->nr_gpu_bws;

/* Compute the wait bitmask with each BCM having the commit bit */
msg->ddr_wait_bitmask = 0;
for (j = 0; j < msg->ddr_cmds_num; j++)
	if (msg->ddr_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
		msg->ddr_wait_bitmask |= BIT(j);

/*
 * These are the CX (CNOC) votes - these are used by the GMU
 * The 'CN0' BCM is used on all targets, and votes are basically
 * 'off' and 'on' states with first bit to enable the path.
 */

/* Compute the wait bitmask with each BCM having the commit bit */
msg->cnoc_wait_bitmask = 0;
for (j = 0; j < msg->cnoc_cmds_num; j++)
	if (msg->cnoc_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
		msg->cnoc_wait_bitmask |= BIT(j);
}
/* Build the fixed bus (bandwidth) vote table for the a618 GMU */
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	/* NOTE(review): ddr/cnoc command data appears truncated in this chunk */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;
}
/* Build the fixed bus (bandwidth) vote table for the a640 GMU */
static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	/* NOTE(review): ddr/cnoc command data appears truncated in this chunk */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x01;
}
/* Build the fixed bus (bandwidth) vote table for the a650 GMU */
static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	/* NOTE(review): ddr/cnoc command data appears truncated in this chunk */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;
}
/* Build the fixed bus (bandwidth) vote table for the a690 GMU */
static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	/* NOTE(review): ddr/cnoc command data appears truncated in this chunk */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;
}
/* Build the fixed bus (bandwidth) vote table for the a660 GMU */
static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	/* NOTE(review): ddr/cnoc command data appears truncated in this chunk */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;
}
/* Build the fixed bus (bandwidth) vote table for the a663 GMU */
static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	/* NOTE(review): ddr/cnoc command data appears truncated in this chunk */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;
}
/* Build the fixed bus (bandwidth) vote table for the Adreno 7c3 GMU */
static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	/* NOTE(review): ddr/cnoc command data appears truncated in this chunk */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;
}
/* Default (a630) bandwidth table - a single "off" level */
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;
}
/*
 * Run the legacy (v1 firmware) HFI startup sequence: init, version
 * exchange, perf and bandwidth tables, then the final "test" message.
 * Returns 0 on success or a negative error code.
 */
static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to get exchange version numbers per the sequence but at this
	 * point the kernel driver doesn't need to know the exact version of
	 * the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until next
	 * boot
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}
/*
 * Bring up the HFI interface: legacy firmware takes the v1 handshake,
 * otherwise send the perf/bandwidth tables, enable ACD, and start the
 * core firmware. Returns 0 on success or a negative error code.
 */
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_enable_acd(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	/*
	 * Downstream driver sends this in its "a6xx_hw_init" equivalent,
	 * but seems to be no harm in sending it here
	 */
	ret = a6xx_hfi_send_start(gmu);
	if (ret)
		return ret;

	return 0;
}
/* Tear down the HFI queues, warning about any unconsumed messages */
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		/* Skip queues that were never set up */
		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

/*
 * NOTE(review): the lines below belong to a different function (HFI table
 * initialization); the end of a6xx_hfi_stop and the header of that function
 * appear to have been lost from this chunk.
 */
	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);
/*
 * NOTE(review): extraneous scraped web-page text, not part of the driver
 * (translated from German): "The information on this website was carefully
 * compiled to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */