/* Wait for bit to be self cleared */ return cdns_set_wait(cdns, offset, value, 0);
}
/* * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL * need to be confirmed with a write to MCP_CONFIG_UPDATE
*/ staticint cdns_config_update(struct sdw_cdns *cdns)
{ int ret;
if (sdw_cdns_is_clock_stop(cdns)) {
dev_err(cdns->dev, "Cannot program MCP_CONFIG_UPDATE in ClockStopMode\n"); return -EINVAL;
}
ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
CDNS_MCP_CONFIG_UPDATE_BIT); if (ret < 0)
dev_err(cdns->dev, "Config update timedout\n");
/**
 * sdw_cdns_config_update_set_wait() - wait until configuration update bit is self-cleared
 * @cdns: Cadence instance
 *
 * Return: zero once CDNS_MCP_CONFIG_UPDATE_BIT has self-cleared, or a
 * negative error code on timeout.
 */
int sdw_cdns_config_update_set_wait(struct sdw_cdns *cdns)
{
	int ret;

	/* the hardware recommendation is to wait at least 300us */
	ret = cdns_set_wait(cdns, CDNS_MCP_CONFIG_UPDATE,
			    CDNS_MCP_CONFIG_UPDATE_BIT, 0);

	return ret;
}
EXPORT_SYMBOL(sdw_cdns_config_update_set_wait);
staticint cdns_reg_show(struct seq_file *s, void *data)
{ struct sdw_cdns *cdns = s->private;
ssize_t ret; int num_ports; int i, j;
char *buf __free(kfree) = kzalloc(RD_BUF, GFP_KERNEL); if (!buf) return -ENOMEM;
ret = scnprintf(buf, RD_BUF, "Register Value\n");
ret += scnprintf(buf + ret, RD_BUF - ret, "\nMCP Registers\n"); /* 8 MCP registers */ for (i = CDNS_MCP_CONFIG; i <= CDNS_MCP_PHYCTRL; i += sizeof(u32))
ret += cdns_sprintf(cdns, buf, ret, i);
ret += scnprintf(buf + ret, RD_BUF - ret, "\nStatus & Intr Registers\n"); /* 13 Status & Intr registers (offsets 0x70 and 0x74 not defined) */ for (i = CDNS_MCP_STAT; i <= CDNS_MCP_FIFOSTAT; i += sizeof(u32))
ret += cdns_sprintf(cdns, buf, ret, i);
ret += scnprintf(buf + ret, RD_BUF - ret, "\nSSP & Clk ctrl Registers\n");
ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_SSP_CTRL0);
ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_SSP_CTRL1);
ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_CLK_CTRL0);
ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_CLK_CTRL1);
for (i = 0; i < num_ports; i++) {
ret += scnprintf(buf + ret, RD_BUF - ret, "\nDP-%d\n", i); for (j = CDNS_DPN_B0_CONFIG(i);
j < CDNS_DPN_B0_ASYNC_CTRL(i); j += sizeof(u32))
ret += cdns_sprintf(cdns, buf, ret, j);
}
ret += scnprintf(buf + ret, RD_BUF - ret, "\nDPn B1 Registers\n"); for (i = 0; i < num_ports; i++) {
ret += scnprintf(buf + ret, RD_BUF - ret, "\nDP-%d\n", i);
for (j = CDNS_DPN_B1_CONFIG(i);
j < CDNS_DPN_B1_ASYNC_CTRL(i); j += sizeof(u32))
ret += cdns_sprintf(cdns, buf, ret, j);
}
ret += scnprintf(buf + ret, RD_BUF - ret, "\nDPn Control Registers\n"); for (i = 0; i < num_ports; i++)
ret += cdns_sprintf(cdns, buf, ret,
CDNS_PORTCTRL + i * CDNS_PORT_OFFSET);
/* * Resume Master device. If this results in a bus reset, the * Slave devices will re-attach and be re-enumerated.
*/
ret = pm_runtime_resume_and_get(bus->dev); if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(cdns->dev, "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret); return ret;
}
/* * wait long enough for Slave(s) to be in steady state. This * does not need to be super precise.
*/
msleep(200);
/* * Take the bus lock here to make sure that any bus transactions * will be queued while we inject a parity error on a dummy read
*/
mutex_lock(&bus->bus_lock);
/* program hardware to inject parity error */
cdns_ip_updatel(cdns, CDNS_IP_MCP_CMDCTRL,
CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR,
CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR);
/* commit changes */
ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, CDNS_MCP_CONFIG_UPDATE_BIT); if (ret < 0) goto unlock;
/* do a broadcast dummy read to avoid bus clashes */
ret = sdw_bread_no_pm_unlocked(&cdns->bus, 0xf, SDW_SCP_DEVID_0);
dev_info(cdns->dev, "parity error injection, read: %d\n", ret);
/* program hardware to disable parity error */
cdns_ip_updatel(cdns, CDNS_IP_MCP_CMDCTRL,
CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR,
0);
/* commit changes */
ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, CDNS_MCP_CONFIG_UPDATE_BIT); if (ret < 0) goto unlock;
/* Userspace changed the hardware state behind the kernel's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
unlock: /* Continue bus operation with parity error injection disabled */
mutex_unlock(&bus->bus_lock);
/* * allow Master device to enter pm_runtime suspend. This may * also result in Slave devices suspending.
*/
pm_runtime_mark_last_busy(bus->dev);
pm_runtime_put_autosuspend(bus->dev);
/* * IO Calls
*/ staticenum sdw_command_response
cdns_fill_msg_resp(struct sdw_cdns *cdns, struct sdw_msg *msg, int count, int offset)
{ int nack = 0, no_ack = 0; int i;
/* check message response */ for (i = 0; i < count; i++) { if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
no_ack = 1;
dev_vdbg(cdns->dev, "Msg Ack not received, cmd %d\n", i);
} if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
nack = 1;
dev_err_ratelimited(cdns->dev, "Msg NACK received, cmd %d\n", i);
}
}
if (nack) {
dev_err_ratelimited(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num); return SDW_CMD_FAIL;
}
if (no_ack) {
dev_dbg_ratelimited(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num); return SDW_CMD_IGNORED;
}
if (msg->flags == SDW_MSG_FLAG_READ) { /* fill response */ for (i = 0; i < count; i++)
msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA,
cdns->response_buf[i]);
}
return SDW_CMD_OK;
}
staticvoid cdns_read_response(struct sdw_cdns *cdns)
{
u32 num_resp, cmd_base; int i;
/* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */
BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_CMD_LEN + 2);
for (i = 0; i < num_resp; i++) {
cdns->response_buf[i] = cdns_ip_readl(cdns, cmd_base);
cmd_base += CDNS_MCP_CMD_WORD_LEN;
}
}
staticenum sdw_command_response
_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd, int offset, int count, bool defer)
{ unsignedlong time;
u32 base, i, data;
u16 addr;
/* Program the watermark level for RX FIFO */ if (cdns->msg_count != count) {
cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, count);
cdns->msg_count = count;
}
base = CDNS_IP_MCP_CMD_BASE;
addr = msg->addr + offset;
for (i = 0; i < count; i++) {
data = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
data |= FIELD_PREP(CDNS_MCP_CMD_COMMAND, cmd);
data |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, addr);
addr++;
if (msg->flags == SDW_MSG_FLAG_WRITE)
data |= msg->buf[i + offset];
data |= FIELD_PREP(CDNS_MCP_CMD_SSP_TAG, msg->ssp_sync);
cdns_ip_writel(cdns, base, data);
base += CDNS_MCP_CMD_WORD_LEN;
}
if (defer) return SDW_CMD_OK;
/* wait for timeout or response */
time = wait_for_completion_timeout(&cdns->tx_complete,
msecs_to_jiffies(CDNS_TX_TIMEOUT)); if (!time) {
dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
cmd, msg->dev_num, msg->addr, msg->len);
msg->len = 0;
/* Drain anything in the RX_FIFO */
cdns_read_response(cdns);
base = CDNS_IP_MCP_CMD_BASE;
cdns_ip_writel(cdns, base, data[0]);
base += CDNS_MCP_CMD_WORD_LEN;
cdns_ip_writel(cdns, base, data[1]);
time = wait_for_completion_timeout(&cdns->tx_complete,
msecs_to_jiffies(CDNS_TX_TIMEOUT)); if (!time) {
dev_err(cdns->dev, "SCP Msg trf timed out\n");
msg->len = 0; return SDW_CMD_TIMEOUT;
}
/* check response the writes */ for (i = 0; i < 2; i++) { if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
no_ack = 1;
dev_err(cdns->dev, "Program SCP Ack not received\n"); if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
nack = 1;
dev_err(cdns->dev, "Program SCP NACK received\n");
}
}
}
/* For NACK, NO ack, don't return err if we are in Broadcast mode */ if (nack) {
dev_err_ratelimited(cdns->dev, "SCP_addrpage NACKed for Slave %d\n", msg->dev_num); return SDW_CMD_FAIL;
}
if (no_ack) {
dev_dbg_ratelimited(cdns->dev, "SCP_addrpage ignored for Slave %d\n", msg->dev_num); return SDW_CMD_IGNORED;
}
return SDW_CMD_OK;
}
staticint cdns_prep_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int *cmd)
{ int ret;
if (msg->page) {
ret = cdns_program_scp_addr(cdns, msg); if (ret) {
msg->len = 0; return ret;
}
}
switch (msg->flags) { case SDW_MSG_FLAG_READ:
*cmd = CDNS_MCP_CMD_READ; break;
case SDW_MSG_FLAG_WRITE:
*cmd = CDNS_MCP_CMD_WRITE; break;
ret = cdns_prep_msg(cdns, msg, &cmd); if (ret) return SDW_CMD_FAIL_OTHER;
for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
CDNS_MCP_CMD_LEN, false); if (ret != SDW_CMD_OK) return ret;
}
if (!(msg->len % CDNS_MCP_CMD_LEN)) return SDW_CMD_OK;
/* * check that there was a single reported Slave status and when * there is not use the latest status extracted from PING commands
*/ if (set_status != 1) {
val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
val >>= (i * 2);
switch (val & 0x3) { case 0:
status[i] = SDW_SLAVE_UNATTACHED; break; case 1:
status[i] = SDW_SLAVE_ATTACHED; break; case 2:
status[i] = SDW_SLAVE_ALERT; break; case 3: default:
status[i] = SDW_SLAVE_RESERVED; break;
}
}
}
if (is_slave) { int ret;
mutex_lock(&cdns->status_update_lock);
ret = sdw_handle_slave_status(&cdns->bus, status);
mutex_unlock(&cdns->status_update_lock); return ret;
}
if (int_status & CDNS_MCP_INT_PARITY) { /* Parity error detected by Master */
dev_err_ratelimited(cdns->dev, "Parity error\n");
}
if (int_status & CDNS_MCP_INT_CTRL_CLASH) { /* Slave is driving bit slot during control word */
dev_err_ratelimited(cdns->dev, "Bus clash for control word\n");
}
if (int_status & CDNS_MCP_INT_DATA_CLASH) { /* * Multiple slaves trying to drive bit slot, or issue with * ownership of data bits or Slave gone bonkers
*/
dev_err_ratelimited(cdns->dev, "Bus clash for data word\n");
}
/* just log which ports report an error */
port_intstat = cdns_readl(cdns, CDNS_MCP_PORT_INTSTAT);
dev_err_ratelimited(cdns->dev, "DP interrupt: PortIntStat %8x\n",
port_intstat);
/* clear status w/ write1 */
cdns_writel(cdns, CDNS_MCP_PORT_INTSTAT, port_intstat);
}
if (int_status & CDNS_MCP_INT_SLAVE_MASK) { /* Mask the Slave interrupt and wake thread */
cdns_updatel(cdns, CDNS_MCP_INTMASK,
CDNS_MCP_INT_SLAVE_MASK, 0);
int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
/* * Deal with possible race condition between interrupt * handling and disabling interrupts on suspend. * * If the master is in the process of disabling * interrupts, don't schedule a workqueue
*/ if (cdns->interrupt_enabled)
schedule_work(&cdns->work);
}
for (i = 0; i <= SDW_MAX_DEVICES; i++) {
status[i] = val & 0x3; if (status[i])
dev_dbg(cdns->dev, "Peripheral %d status: %d\n", i, status[i]);
val >>= 2;
}
mutex_lock(&cdns->status_update_lock);
ret = sdw_handle_slave_status(&cdns->bus, status);
mutex_unlock(&cdns->status_update_lock); if (ret < 0)
dev_err(cdns->dev, "%s: sdw_handle_slave_status failed: %d\n", __func__, ret);
}
/** * cdns_update_slave_status_work - update slave status in a work since we will need to handle * other interrupts eg. CDNS_MCP_INT_RX_WL during the update slave * process. * @work: cdns worker thread
*/ staticvoid cdns_update_slave_status_work(struct work_struct *work)
{ struct sdw_cdns *cdns =
container_of(work, struct sdw_cdns, work);
u32 slave0, slave1;
u64 slave_intstat;
u32 device0_status; int retry_count = 0;
/* * Clear main interrupt first so we don't lose any assertions * that happen during this function.
*/
cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
/* * Clear the bits before handling so we don't lose any * bits that re-assert.
*/
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
/* combine the two status */
slave_intstat = ((u64)slave1 << 32) | slave0;
dev_dbg_ratelimited(cdns->dev, "Slave status change: 0x%llx\n", slave_intstat);
/* * When there is more than one peripheral per link, it's * possible that a deviceB becomes attached after we deal with * the attachment of deviceA. Since the hardware does a * logical AND, the attachment of the second device does not * change the status seen by the driver. * * In that case, clearing the registers above would result in * the deviceB never being detected - until a change of status * is observed on the bus. * * To avoid this race condition, re-check if any device0 needs * attention with PING commands. There is no need to check for * ALERTS since they are not allowed until a non-zero * device_number is assigned. * * Do not clear the INTSTAT0/1. While looping to enumerate devices on * #0 there could be status changes on other devices - these must * be kept in the INTSTAT so they can be handled when all #0 devices * have been handled.
*/
/* the following bits should be cleared immediately */ if (ip_mcp_control & CDNS_IP_MCP_CONTROL_SW_RST)
dev_err(cdns->dev, "%s failed: IP_MCP_CONTROL_SW_RST is not cleared\n", string);
mcp_control = cdns_readl(cdns, CDNS_MCP_CONTROL);
/* the following bits should be cleared immediately */ if (mcp_control & CDNS_MCP_CONTROL_CMD_RST)
dev_err(cdns->dev, "%s failed: MCP_CONTROL_CMD_RST is not cleared\n", string); if (mcp_control & CDNS_MCP_CONTROL_SOFT_RST)
dev_err(cdns->dev, "%s failed: MCP_CONTROL_SOFT_RST is not cleared\n", string); if (mcp_control & CDNS_MCP_CONTROL_CLK_STOP_CLR)
dev_err(cdns->dev, "%s failed: MCP_CONTROL_CLK_STOP_CLR is not cleared\n", string);
mcp_config_update = cdns_readl(cdns, CDNS_MCP_CONFIG_UPDATE); if (mcp_config_update & CDNS_MCP_CONFIG_UPDATE_BIT)
dev_err(cdns->dev, "%s failed: MCP_CONFIG_UPDATE_BIT is not cleared\n", string);
i = 0; while (mcp_control & CDNS_MCP_CONTROL_HW_RST) { if (i == reset_iterations) {
dev_err(cdns->dev, "%s failed: MCP_CONTROL_HW_RST is not cleared\n", string); break;
}
dev_dbg(cdns->dev, "%s: MCP_CONTROL_HW_RST is not cleared at iteration %d\n", string, i);
i++;
/* * Complete any on-going status updates before updating masks, * and cancel queued status updates. * * There could be a race with a new interrupt thrown before * the 3 mask updates below are complete, so in the interrupt * we use the 'interrupt_enabled' status to prevent new work * from being queued.
*/ if (!state)
cancel_work_sync(&cdns->work);
/* Set frame shape base on the actual bus frequency. */
prop->default_col = bus->params.curr_dr_freq /
prop->default_frame_rate / prop->default_row;
/* * Frame shape changes after initialization have to be done * with the bank switch mechanism
*/
val = cdns_set_initial_frame_shape(prop->default_row,
prop->default_col);
cdns_writel(cdns, CDNS_MCP_FRAME_SHAPE_INIT, val);
/* Set SSP interval to default value */
ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ;
cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval);
cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval);
return 0;
}
/** * sdw_cdns_soft_reset() - Cadence soft-reset * @cdns: Cadence instance
*/ int sdw_cdns_soft_reset(struct sdw_cdns *cdns)
{ int ret;
/* use port params if there is no loopback, otherwise use source as is */ if (!override) {
u32p_replace_bits(&dpn_config, p_params->bps - 1, CDNS_DPN_CONFIG_WL);
u32p_replace_bits(&dpn_config, p_params->flow_mode, CDNS_DPN_CONFIG_PORT_FLOW);
u32p_replace_bits(&dpn_config, p_params->data_mode, CDNS_DPN_CONFIG_PORT_DAT);
}
staticint cdns_transport_params(struct sdw_bus *bus, struct sdw_transport_params *t_params, enum sdw_reg_bank bank)
{ struct sdw_cdns *cdns = bus_to_cdns(bus); int dpn_config; int dpn_config_off_source; int dpn_config_off_target; int dpn_hctrl; int dpn_hctrl_off_source; int dpn_hctrl_off_target; int dpn_offsetctrl; int dpn_offsetctrl_off_source; int dpn_offsetctrl_off_target; int dpn_samplectrl; int dpn_samplectrl_off_source; int dpn_samplectrl_off_target; int source_num = t_params->port_num; int target_num = t_params->port_num; bool override = false;
/* Check suspend status */ if (sdw_cdns_is_clock_stop(cdns)) {
dev_dbg(cdns->dev, "Clock is already stopped\n"); return 0;
}
/* * Before entering clock stop we mask the Slave * interrupts. This helps avoid having to deal with e.g. a * Slave becoming UNATTACHED while the clock is being stopped
*/
cdns_enable_slave_interrupts(cdns, false);
/* * For specific platforms, it is required to be able to put * master into a state in which it ignores wake-up trials * in clock stop state
*/ if (block_wake)
cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL,
CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP,
CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP);
/* commit changes */
ret = cdns_config_update(cdns); if (ret < 0) {
dev_err(cdns->dev, "%s: config_update failed\n", __func__); return ret;
}
/* Prepare slaves for clock stop */ if (slave_present) {
ret = sdw_bus_prep_clk_stop(&cdns->bus); if (ret < 0 && ret != -ENODATA) {
dev_err(cdns->dev, "prepare clock stop failed %d\n", ret); return ret;
}
}
/* * Enter clock stop mode and only report errors if there are * Slave devices present (ALERT or ATTACHED)
*/
ret = sdw_bus_clk_stop(&cdns->bus); if (ret < 0 && slave_present && ret != -ENODATA) {
dev_err(cdns->dev, "bus clock stop failed %d\n", ret); return ret;
}
ret = cdns_set_wait(cdns, CDNS_MCP_STAT,
CDNS_MCP_STAT_CLK_STOP,
CDNS_MCP_STAT_CLK_STOP); if (ret < 0)
dev_err(cdns->dev, "Clock stop failed %d\n", ret);
return ret;
}
EXPORT_SYMBOL(sdw_cdns_clock_stop);
/** * sdw_cdns_clock_restart: Cadence PM clock restart configuration routine * * @cdns: Cadence instance * @bus_reset: context may be lost while in low power modes and the bus * may require a Severe Reset and re-enumeration after a wake.
*/ int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset)
{ int ret;
/* unmask Slave interrupts that were masked when stopping the clock */
cdns_enable_slave_interrupts(cdns, true);
ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
CDNS_MCP_CONTROL_CLK_STOP_CLR); if (ret < 0) {
dev_err(cdns->dev, "Couldn't exit from clock stop\n"); return ret;
}
/*
 * cdns_set_sdw_stream() - attach or release the dai_runtime for a DAI
 * @dai: DAI to configure
 * @stream: stream context, or NULL to release
 * @direction: stream direction (unused here, kept for the DAI ops signature)
 *
 * A non-NULL @stream allocates and registers a dai_runtime for @dai; a
 * NULL @stream frees the previously registered one. Double-set and
 * double-release are rejected with -EINVAL.
 */
int cdns_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_cdns_dai_runtime *dai_runtime = cdns->dai_runtime_array[dai->id];

	if (!stream) {
		/* second paranoia check: cannot release what was never set */
		if (!dai_runtime) {
			dev_err(dai->dev, "dai_runtime not allocated for dai %s\n",
				dai->name);
			return -EINVAL;
		}

		/* for NULL stream we release allocated dai_runtime */
		kfree(dai_runtime);
		cdns->dai_runtime_array[dai->id] = NULL;
		return 0;
	}

	/* first paranoia check: do not clobber an existing runtime */
	if (dai_runtime) {
		dev_err(dai->dev, "dai_runtime already allocated for dai %s\n",
			dai->name);
		return -EINVAL;
	}

	/* allocate and set dai_runtime info */
	dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL);
	if (!dai_runtime)
		return -ENOMEM;

	cdns->dai_runtime_array[dai->id] = dai_runtime;

	return 0;
}
EXPORT_SYMBOL(cdns_set_sdw_stream);
/** * cdns_find_pdi() - Find a free PDI * * @cdns: Cadence instance * @num: Number of PDIs * @pdi: PDI instances * @dai_id: DAI id * * Find a PDI for a given PDI array. The PDI num and dai_id are * expected to match, return NULL otherwise.
*/ staticstruct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns, unsignedint num, struct sdw_cdns_pdi *pdi, int dai_id)
{ int i;
for (i = 0; i < num; i++) if (pdi[i].num == dai_id) return &pdi[i];
return NULL;
}
/** * sdw_cdns_config_stream: Configure a stream * * @cdns: Cadence instance * @ch: Channel count * @dir: Data direction * @pdi: PDI to be used
*/ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
{
u32 offset, val = 0;
if (dir == SDW_DATA_DIR_RX) {
val = CDNS_PORTCTRL_DIRN;
/* The DataPort0 needs to be mapped to both PDI0 and PDI1 ! */ if (pdi->num == 1)
val = 0; else
val = pdi->num;
val |= CDNS_PDI_CONFIG_SOFT_RESET;
val |= FIELD_PREP(CDNS_PDI_CONFIG_CHANNEL, (1 << ch) - 1);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
EXPORT_SYMBOL(sdw_cdns_config_stream);
/** * sdw_cdns_alloc_pdi() - Allocate a PDI * * @cdns: Cadence instance * @stream: Stream to be allocated * @ch: Channel count * @dir: Data direction * @dai_id: DAI id
*/ struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns, struct sdw_cdns_streams *stream,
u32 ch, u32 dir, int dai_id)
{ struct sdw_cdns_pdi *pdi = NULL;
#define SDW_CDNS_BRA_HDR 6 /* defined by MIPI */ #define SDW_CDNS_BRA_HDR_CRC 1 /* defined by MIPI */ #define SDW_CDNS_BRA_HDR_CRC_PAD 1 /* Cadence only */ #define SDW_CDNS_BRA_HDR_RESP 1 /* defined by MIPI */ #define SDW_CDNS_BRA_HDR_RESP_PAD 1 /* Cadence only */
#define SDW_CDNS_BRA_DATA_PAD 1 /* Cadence only */ #define SDW_CDNS_BRA_DATA_CRC 1 /* defined by MIPI */ #define SDW_CDNS_BRA_DATA_CRC_PAD 1 /* Cadence only */
#define SDW_CDNS_BRA_FOOTER_RESP 1 /* defined by MIPI */ #define SDW_CDNS_BRA_FOOTER_RESP_PAD 1 /* Cadence only */
actual_bpt_bytes = sdw_cdns_bra_actual_data_size(bpt_bytes); if (!actual_bpt_bytes) return -EINVAL;
if (data_bytes < actual_bpt_bytes)
actual_bpt_bytes = data_bytes;
/* * the caller may want to set the number of bytes per frame, * allow when possible
*/ if (requested_bytes_per_frame < actual_bpt_bytes)
actual_bpt_bytes = requested_bytes_per_frame;
*data_per_frame = actual_bpt_bytes;
if (command == 0) { /* * for writes we need to send all the data_bytes per frame, * even for the last frame which may only transport fewer bytes
*/
*pdi0_buffer_size = pdi0_tx_size * *num_frames;
*pdi1_buffer_size = pdi1_rx_size * *num_frames;
} else { /* * for reads we need to retrieve only what is requested in the BPT * header, so the last frame needs to be special-cased
*/
*num_frames = data_bytes / actual_bpt_bytes;
/*
 * sdw_cdns_copy_write_data() - pack a write payload into the BPT DMA layout
 * @data: source bytes to transmit
 * @data_size: number of source bytes
 * @dma_buffer: destination DMA buffer
 * @dma_buffer_size: capacity of @dma_buffer in bytes
 *
 * The implementation copies the data one byte at a time. Experiments with
 * two bytes at a time did not seem to improve the performance.
 *
 * Return: number of bytes consumed in @dma_buffer, or -EINVAL when the
 * buffer is too small for the payload.
 */
static int sdw_cdns_copy_write_data(u8 *data, int data_size, u8 *dma_buffer, int dma_buffer_size)
{
	int src, dst;

	/* size check to prevent out-of-bounds access */
	src = data_size - 1;
	dst = (2 * src) - (src & 1);
	if (data_size & 1)
		dst++;
	dst += 2;
	if (dst >= dma_buffer_size)
		return -EINVAL;

	/* copy data: even source indices map to 2*i, odd ones to 2*i - 1 */
	for (src = 0; src < data_size; src++) {
		dst = (2 * src) - (src & 1);
		dma_buffer[dst] = data[src];
	}

	/* add required pad byte for odd-sized payloads */
	if (data_size & 1)
		dma_buffer[++dst] = 0;

	/* skip last two bytes */
	dst += 2;

	/* offset and data are off-by-one */
	return dst + 1;
}
/* tag last byte */
last_byte = dma_buffer - 1;
last_byte[0] = BIT(6);
return 0;
}
#define CDNS_BPT_ROLLING_COUNTER_START 1
int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, u32 start_register, u8 *data, int data_size, int data_per_frame, u8 *dma_buffer, int dma_buffer_size, int *dma_buffer_total_bytes)
{ int total_dma_data_written = 0;
u8 *p_dma_buffer = dma_buffer;
u8 header[SDW_CDNS_BRA_HDR]; int dma_data_written;
u8 *p_data = data;
u8 counter; int ret;
counter = CDNS_BPT_ROLLING_COUNTER_START;
header[0] = BIT(1); /* write command: BIT(1) set */
header[0] |= GENMASK(7, 6); /* header is active */
header[0] |= (dev_num << 2);
int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, u32 start_register, int data_size, int data_per_frame, u8 *dma_buffer, int dma_buffer_size, int *dma_buffer_total_bytes)
{ int total_dma_data_written = 0;
u8 *p_dma_buffer = dma_buffer;
u8 header[SDW_CDNS_BRA_HDR]; int dma_data_written;
u8 counter; int ret;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.