// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) // // This file is provided under a dual BSD/GPLv2 license. When using or // redistributing this file, you may do so under either license. // // Copyright(c) 2018 Intel Corporation // // Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com> // Ranjani Sridharan <ranjani.sridharan@linux.intel.com> // Rander Wang <rander.wang@intel.com> // Keyon Jie <yang.jie@linux.intel.com> //
/*
 * Hardware interface for generic Intel audio DSP HDA IP
 */
/* * set up one of BDL entries for a stream
*/ staticint hda_setup_bdle(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab, struct hdac_stream *hstream, struct sof_intel_dsp_bdl **bdlp, int offset, int size, int ioc)
{ struct hdac_bus *bus = sof_to_bus(sdev); struct sof_intel_dsp_bdl *bdl = *bdlp;
addr = snd_sgbuf_get_addr(dmab, offset); /* program BDL addr */
bdl->addr_l = cpu_to_le32(lower_32_bits(addr));
bdl->addr_h = cpu_to_le32(upper_32_bits(addr)); /* program BDL size */
chunk = snd_sgbuf_get_chunk_size(dmab, offset, size); /* one BDLE should not cross 4K boundary */ if (bus->align_bdle_4k) {
u32 remain = 0x1000 - (offset & 0xfff);
if (chunk > remain)
chunk = remain;
}
bdl->size = cpu_to_le32(chunk); /* only program IOC when the whole segment is processed */
size -= chunk;
bdl->ioc = (size || !ioc) ? 0 : cpu_to_le32(0x01);
bdl++;
hstream->frags++;
offset += chunk;
}
*bdlp = bdl; return offset;
}
/* * set up Buffer Descriptor List (BDL) for host memory transfer * BDL describes the location of the individual buffers and is little endian.
*/ int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab, struct hdac_stream *hstream)
{ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; struct sof_intel_dsp_bdl *bdl; int i, offset, period_bytes, periods; int remain, ioc;
/* * HDA spec demands that the LVI value must be at least one * before the DMA operation can begin. This means that there * must be at least two BDLE present for the transfer. * * If the buffer is not a single continuous area then the * hda_setup_bdle() will create multiple BDLEs for each segment. * If the memory is a single continuous area, force it to be * split into two 'periods', otherwise the transfer will be * split to multiple BDLE for each chunk in hda_setup_bdle() * * Note: period_bytes == 0 can only happen for firmware or * library loading. The data size is 4K aligned, which ensures * that the second chunk's start address will be 128-byte * aligned.
*/ if (chunk_size == hstream->bufsize)
period_bytes /= 2;
}
periods = hstream->bufsize / period_bytes;
dev_dbg(sdev->dev, "periods: %d\n", periods);
remain = hstream->bufsize % period_bytes; if (remain)
periods++;
/* program the initial BDL entries */
bdl = (struct sof_intel_dsp_bdl *)hstream->bdl.area;
offset = 0;
hstream->frags = 0;
/* * set IOC if don't use position IPC * and period_wakeup needed.
*/
ioc = hda->no_ipc_position ?
!hstream->no_period_wakeup : 0;
for (i = 0; i < periods; i++) { if (i == (periods - 1) && remain) /* set the last small entry */
offset = hda_setup_bdle(sdev, dmab,
hstream, &bdl, offset,
remain, 0); else
offset = hda_setup_bdle(sdev, dmab,
hstream, &bdl, offset,
period_bytes, ioc);
}
return offset;
}
/*
 * Enable/disable the Software Position In Buffer (SPIB) capability for a
 * stream and program the SPIB value. Returns 0 on success, -EINVAL when the
 * SPIB capability BAR has not been mapped.
 */
int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
			       struct hdac_ext_stream *hext_stream,
			       int enable, u32 size)
{
	struct hdac_stream *hstream = &hext_stream->hstream;
	u32 mask;

	if (!sdev->bar[HDA_DSP_SPIB_BAR]) {
		dev_err(sdev->dev, "error: address of spib capability is NULL\n");
		return -EINVAL;
	}

	mask = (1 << hstream->index);

	/* enable/disable SPIB for the stream */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_SPIB_BAR,
				SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL, mask,
				enable << hstream->index);

	/* set the SPIB value */
	sof_io_write(sdev, hstream->spib_addr, size);

	return 0;
}

/*
 * Get an unused, unreserved stream of the requested direction and mark it
 * opened. Returns the stream or NULL when none is free.
 *
 * NOTE(review): the original extraction lost this function's header and local
 * declarations; the signature below is reconstructed to match the visible
 * body (uses of direction, flags, chip_info, hda) — confirm against upstream.
 */
struct hdac_ext_stream *
hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction, u32 flags)
{
	const struct sof_intel_dsp_desc *chip_info = get_chip_info(sdev->pdata);
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_ext_stream *hext_stream = NULL;
	struct sof_intel_hda_stream *hda_stream;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct hdac_stream *s;

	spin_lock_irq(&bus->reg_lock);

	/* get an unused stream */
	list_for_each_entry(s, &bus->stream_list, list) {
		if (s->direction == direction && !s->opened) {
			hext_stream = stream_to_hdac_ext_stream(s);
			hda_stream = container_of(hext_stream,
						  struct sof_intel_hda_stream,
						  hext_stream);
			/* check if the host DMA channel is reserved */
			if (hda_stream->host_reserved)
				continue;

			s->opened = true;
			break;
		}
	}

	spin_unlock_irq(&bus->reg_lock);

	/* stream found ? */
	if (!hext_stream) {
		dev_err(sdev->dev, "error: no free %s streams\n",
			snd_pcm_direction_name(direction));
		return hext_stream;
	}

	hda_stream->flags = flags;

	/*
	 * Prevent DMI Link L1 entry for streams that don't support it.
	 * Workaround to address a known issue with host DMA that results
	 * in xruns during pause/release in capture scenarios. This is not
	 * needed for the ACE IP.
	 */
	if (chip_info->hw_ip_version < SOF_INTEL_ACE_1_0 &&
	    !(flags & SOF_HDA_STREAM_DMI_L1_COMPATIBLE)) {
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
					HDA_VS_INTEL_EM2,
					HDA_VS_INTEL_EM2_L1SEN, 0);
		hda->l1_disabled = true;
	}

	return hext_stream;
}
/* free a stream */ int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
{ conststruct sof_intel_dsp_desc *chip_info = get_chip_info(sdev->pdata); struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; struct hdac_bus *bus = sof_to_bus(sdev); struct sof_intel_hda_stream *hda_stream; struct hdac_ext_stream *hext_stream; struct hdac_stream *s; bool dmi_l1_enable = true; bool found = false;
spin_lock_irq(&bus->reg_lock);
/* * close stream matching the stream tag and check if there are any open streams * that are DMI L1 incompatible.
*/
list_for_each_entry(s, &bus->stream_list, list) {
hext_stream = stream_to_hdac_ext_stream(s);
hda_stream = container_of(hext_stream, struct sof_intel_hda_stream, hext_stream);
if (!s->opened) continue;
if (s->direction == direction && s->stream_tag == stream_tag) {
s->opened = false;
found = true;
} elseif (!(hda_stream->flags & SOF_HDA_STREAM_DMI_L1_COMPATIBLE)) {
dmi_l1_enable = false;
}
}
if (!found) {
dev_err(sdev->dev, "%s: stream_tag %d not opened!\n",
__func__, stream_tag); return -ENODEV;
}
return 0;
}
/*
 * Reset an HDA stream descriptor: assert CRST, wait for the hardware to
 * report it set, then deassert and wait for it to clear before any other
 * stream register is touched. Returns 0 or -ETIMEDOUT.
 */
static int hda_dsp_stream_reset(struct snd_sof_dev *sdev, struct hdac_stream *hstream)
{
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
	u32 val;

	/* enter stream reset */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, SOF_STREAM_SD_OFFSET_CRST,
				SOF_STREAM_SD_OFFSET_CRST);
	do {
		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, sd_offset);
		if (val & SOF_STREAM_SD_OFFSET_CRST)
			break;
	} while (--timeout);
	if (timeout == 0) {
		dev_err(sdev->dev, "timeout waiting for stream reset\n");
		return -ETIMEDOUT;
	}

	timeout = HDA_DSP_STREAM_RESET_TIMEOUT;

	/* exit stream reset and wait to read a zero before reading any other register */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, SOF_STREAM_SD_OFFSET_CRST, 0x0);

	/* wait for hardware to report that stream is out of reset */
	udelay(3);
	do {
		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, sd_offset);
		if ((val & SOF_STREAM_SD_OFFSET_CRST) == 0)
			break;
	} while (--timeout);
	if (timeout == 0) {
		dev_err(sdev->dev, "timeout waiting for stream to exit reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * NOTE(review): this span appears to interleave fragments of at least two
 * functions — the trigger handler below plus BDL/stream-tag/format
 * programming that in upstream SOF lives in a separate hw_params routine.
 * Several identifiers ('mask', 'dmab', 'chip', 'bus') are used with no
 * visible declaration, and the START path sets no DMA run bits, so the
 * original extraction is truncated. TODO: restore from upstream before use.
 */
int hda_dsp_stream_trigger(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream, int cmd)
{ struct hdac_stream *hstream = &hext_stream->hstream; int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
u32 dma_start = SOF_HDA_SD_CTL_DMA_START; int ret = 0;
u32 run;
/* cmd must be for audio stream */ switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (!sdev->dspless_mode_selected) break;
fallthrough; case SNDRV_PCM_TRIGGER_START: if (hstream->running) break;
/* NOTE(review): START body missing here — nothing ever sets dma_start/run */
break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (!sdev->dspless_mode_selected) break;
fallthrough; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_STOP:
/* stop the DMA and mask the stream interrupts */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset,
SOF_HDA_SD_CTL_DMA_START |
SOF_HDA_CL_DMA_SD_INT_MASK, 0x0);
/* program last valid index */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_LVI,
0xffff, (hstream->frags - 1));
/* decouple host and link DMA, enable DSP features */
/* NOTE(review): 'mask' is not declared anywhere in this span */
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
mask, mask);
/* Follow HW recommendation to set the guardband value to 95us during FW boot */
snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
HDA_VS_INTEL_LTRP_GB_MASK, HDA_LTRP_GB_VALUE_US);
/* decouple host and link DMA if the DSP is used */ if (!sdev->dspless_mode_selected)
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
mask, mask);
/* NOTE(review): 'dmab' is not declared in this scope — likely a hw_params fragment */
ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream); if (ret < 0) {
dev_err(sdev->dev, "error: set up of BDL failed\n"); return ret;
}
/* program stream tag to set up stream descriptor for DMA */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK,
hstream->stream_tag <<
SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT);
/* * Recommended hardware programming sequence for HDAudio DMA format * on earlier platforms - this is not needed on newer platforms * * 1. Put DMA into coupled mode by clearing PPCTL.PROCEN bit * for corresponding stream index before the time of writing * format to SDxFMT register. * 2. Write SDxFMT * 3. Set PPCTL.PROCEN bit for corresponding stream index to * enable decoupled mode
*/
if (!sdev->dspless_mode_selected && (chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK)) /* couple host and link DMA, disable DSP features */
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
mask, 0);
/* program stream format */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset +
SOF_HDA_ADSP_REG_SD_FORMAT,
0xffff, hstream->format_val);
if (!sdev->dspless_mode_selected && (chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK)) /* decouple host and link DMA, enable DSP features */
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
mask, mask);
/* program last valid index */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_LVI,
0xffff, (hstream->frags - 1));
/* NOTE(review): 'bus' is not declared in this span */
spin_lock_irq(&bus->reg_lock); /* couple host and link DMA if link DMA channel is idle */ if (!hext_stream->link_locked)
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
SOF_HDA_REG_PP_PPCTL, mask, 0);
spin_unlock_irq(&bus->reg_lock);
}
/*
 * NOTE(review): orphaned fragment — this is the interior of a per-stream
 * interrupt-status loop (upstream: a stream-check helper). The enclosing
 * function header, the loop over bus->stream_list, and the declarations of
 * 'active', 's', 'sd_status' and 'sof_hda' are missing from this extraction.
 * The body of the "no substream" branch (which signals a completion for
 * code-loading DMA) is also missing between its declarations and the elapsed
 * notifications below. 'elseif' is not valid C — should be 'else if'.
 */
active = true; if (!s->running) continue; if ((sd_status & SOF_HDA_CL_DMA_SD_INT_COMPLETE) == 0) continue; if (!s->substream && !s->cstream) { /* * when no substream is found, the DMA may used for code loading * or data transfers which can rely on wait_for_completion()
*/ struct sof_intel_hda_stream *hda_stream; struct hdac_ext_stream *hext_stream;
/* Inform ALSA only if the IPC position is not used */ if (s->substream && sof_hda->no_ipc_position) {
snd_sof_pcm_period_elapsed(s->substream);
} elseif (s->cstream) {
hda_dsp_compr_bytes_transferred(s, s->cstream->direction);
snd_compr_fragment_elapsed(s->cstream);
}
}
}
/*
 * NOTE(review): orphaned fragment of a threaded interrupt handler. The
 * enclosing function header and the declarations of 'i', 'active', 'status',
 * 'bus' and 'sdev' are missing from this extraction.
 */
/* * Loop 10 times to handle missed interrupts caused by * unsolicited responses from the codec
*/ for (i = 0, active = true; i < 10 && active; i++) {
spin_lock_irq(&bus->reg_lock);
status = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS);
/* check streams */
active = hda_dsp_stream_check(bus, status);
/* check and clear RIRB interrupt */ if (status & AZX_INT_CTRL_EN) {
active |= hda_codec_check_rirb_status(sdev);
}
spin_unlock_irq(&bus->reg_lock);
}
/*
 * NOTE(review): orphaned fragments of the stream-init path. The enclosing
 * function header and the declarations/derivation of 'num_playback',
 * 'num_capture', 'num_total', 'pci', 'bus', 'sof_hda', 'i', 'ret',
 * 'hstream' are missing, and the per-stream creation loop below is cut off
 * mid-body (its closing braces and most of the stream setup are gone).
 */
dev_dbg(sdev->dev, "detected %d playback and %d capture streams\n",
num_playback, num_capture);
/* sanity-check stream counts read from GCAP against driver limits */
if (num_playback >= SOF_HDA_PLAYBACK_STREAMS) {
dev_err(sdev->dev, "error: too many playback streams %d\n",
num_playback); return -EINVAL;
}
if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
dev_err(sdev->dev, "error: too many capture streams %d\n",
num_capture); return -EINVAL;
}
/* * mem alloc for the position buffer * TODO: check position buffer update
*/
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
SOF_HDA_DPIB_ENTRY_SIZE * num_total,
&bus->posbuf); if (ret < 0) {
dev_err(sdev->dev, "error: posbuffer dma alloc failed\n"); return -ENOMEM;
}
/* * mem alloc for the CORB/RIRB ringbuffers - this will be used only for * HDAudio codecs
*/
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
PAGE_SIZE, &bus->rb); if (ret < 0) {
dev_err(sdev->dev, "error: RB alloc failed\n"); return -ENOMEM;
}
/* create capture and playback streams */ for (i = 0; i < num_total; i++) { struct sof_intel_hda_stream *hda_stream;
hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
GFP_KERNEL); if (!hda_stream) return -ENOMEM;
/* do we support SPIB */ if (sdev->bar[HDA_DSP_SPIB_BAR]) {
/* NOTE(review): 'hstream' is not declared in the visible fragment */
hstream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
SOF_HDA_SPIB_SPIB;
/* store total stream count (playback + capture) from GCAP */
sof_hda->stream_max = num_total;
/* store stream count from GCAP required for CHAIN_DMA */ if (sdev->pdata->ipc_type == SOF_IPC_TYPE_4) { struct sof_ipc4_fw_data *ipc4_data = sdev->private;
/*
 * NOTE(review): orphaned fragment — the interior of a stream-position
 * helper selecting between DPIB-register and position-buffer reads. The
 * enclosing function header and the declarations of 'pos', 'direction',
 * 'can_sleep', 'hstream' and 'sof_hda_position_quirk' are missing from this
 * extraction.
 */
switch (sof_hda_position_quirk) { case SOF_HDA_POSITION_QUIRK_USE_SKYLAKE_LEGACY: /* * This legacy code, inherited from the Skylake driver, * mixes DPIB registers and DPIB DDR updates and * does not seem to follow any known hardware recommendations. * It's not clear e.g. why there is a different flow * for capture and playback, the only information that matters is * what traffic class is used, and on all SOF-enabled platforms * only VC0 is supported so the work-around was likely not necessary * and quite possibly wrong.
*/
/* DPIB/posbuf position mode: * For Playback, Use DPIB register from HDA space which * reflects the actual data transferred. * For Capture, Use the position buffer for pointer, as DPIB * is not accurate enough, its update may be completed * earlier than the data written to DDR.
*/ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
AZX_REG_VS_SDXDPIB_XBASE +
(AZX_REG_VS_SDXDPIB_XINTERVAL *
hstream->index));
} else { /* * For capture stream, we need more workaround to fix the * position incorrect issue: * * 1. Wait at least 20us before reading position buffer after * the interrupt generated(IOC), to make sure position update * happens on frame boundary i.e. 20.833uSec for 48KHz. * 2. Perform a dummy Read to DPIB register to flush DMA * position value. * 3. Read the DMA Position from posbuf. Now the readback * value should be >= period boundary.
*/ if (can_sleep)
usleep_range(20, 21);
/* dummy DPIB read to flush the DMA position value (step 2) */
snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
AZX_REG_VS_SDXDPIB_XBASE +
(AZX_REG_VS_SDXDPIB_XINTERVAL *
hstream->index));
pos = snd_hdac_stream_get_pos_posbuf(hstream);
} break; case SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS: /* * In case VC1 traffic is disabled this is the recommended option
*/
pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
AZX_REG_VS_SDXDPIB_XBASE +
(AZX_REG_VS_SDXDPIB_XINTERVAL *
hstream->index)); break; case SOF_HDA_POSITION_QUIRK_USE_DPIB_DDR_UPDATE: /* * This is the recommended option when VC1 is enabled. * While this isn't needed for SOF platforms it's added for * consistency and debug.
*/
pos = snd_hdac_stream_get_pos_posbuf(hstream); break; default:
dev_err_once(sdev->dev, "hda_position_quirk value %d not supported\n",
sof_hda_position_quirk);
pos = 0; break;
}
/**
 * hda_dsp_get_stream_llp - Retrieve the LLP (Linear Link Position) of the stream
 * @sdev: SOF device
 * @component: ASoC component
 * @substream: PCM substream
 *
 * Returns the raw Linear Link Position value, or 0 when no back-end Link DMA
 * stream can be resolved for the substream.
 */
u64 hda_dsp_get_stream_llp(struct snd_sof_dev *sdev,
			   struct snd_soc_component *component,
			   struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *be_rtd = NULL;
	struct hdac_ext_stream *hext_stream;
	struct snd_soc_dai *cpu_dai;
	struct snd_soc_dpcm *dpcm;
	u32 llp_l, llp_u;

	/*
	 * The LLP needs to be read from the Link DMA used for this FE as it is
	 * allowed to use any combination of Link and Host channels
	 */
	for_each_dpcm_be(rtd, substream->stream, dpcm) {
		if (dpcm->fe != rtd)
			continue;

		be_rtd = dpcm->be;
	}

	if (!be_rtd)
		return 0;

	cpu_dai = snd_soc_rtd_to_cpu(be_rtd, 0);
	if (!cpu_dai)
		return 0;

	hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
	if (!hext_stream)
		return 0;

	/*
	 * The pplc_addr have been calculated during probe in
	 * hda_dsp_stream_init():
	 * pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
	 *	       SOF_HDA_PPLC_BASE +
	 *	       SOF_HDA_PPLC_MULTI * total_stream +
	 *	       SOF_HDA_PPLC_INTERVAL * stream_index
	 *
	 * Use this pre-calculated address to avoid repeated re-calculation.
	 */
	llp_l = readl(hext_stream->pplc_addr + AZX_REG_PPLCLLPL);
	llp_u = readl(hext_stream->pplc_addr + AZX_REG_PPLCLLPU);

	/* Compensate the LLP counter with the saved offset */
	if (hext_stream->pplcllpl || hext_stream->pplcllpu)
		return merge_u64(llp_u, llp_l) -
		       merge_u64(hext_stream->pplcllpu, hext_stream->pplcllpl);

	/* no saved offset: return the raw counter (restored fallback path) */
	return merge_u64(llp_u, llp_l);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.