// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */
val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);
/* TODO: exact time to sleep is uncertain */
mdelay(10);
/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from * continuing warm path and entering dead loop.
*/
ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
mdelay(10);
val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);
/* A read clear register. clear the register to prevent * Q6 from entering wrong code path.
*/
val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause %d\n", val);
}
v = ath11k_pcic_read32(ab, offset); if ((v & mask) == value) return 0;
for (i = 0; i < 10; i++) {
ath11k_pcic_write32(ab, offset, (v & ~mask) | value);
v = ath11k_pcic_read32(ab, offset); if ((v & mask) == value) return 0;
mdelay(2);
}
ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
offset, v & mask, value);
return -ETIMEDOUT;
}
/* Work around PCIe L1SS (L1 substates) issues by programming the
 * QSERDES sysclk selection and the PCS oscillator-detect timing
 * registers over the PCIe link.
 *
 * Returns 0 on success or the negative error code from the first
 * register write that failed.
 */
static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
{
	int ret;

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab),
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Kick the PCIe LTSSM (Link Training and Status State Machine) after a
 * Hot Reset and then assert the GCC hot-reset bit.
 *
 * The LTSSM register is written up to 5 times until it reads back
 * PARM_LTSSM_VALUE; a read of 0xffffffff indicates the link is not yet
 * stable, in which case we back off briefly before retrying.
 */
static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
{
	u32 val;
	int i;

	val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* PCIE link seems very unstable after the Hot Reset */
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		if (val == 0xffffffff)
			mdelay(5);

		ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	/* read-modify-write: set the hot-reset bit without disturbing
	 * the other bits in GCC_GCC_PCIE_HOT_RST
	 */
	val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
}
/* WAR for PCIE Hot Reset: when the target receives a Hot Reset it
 * still latches a pending interrupt.  If SBL is downloaded again it
 * re-enables interrupts, immediately receives the stale one and
 * crashes.  Clear all pending PCIe interrupts to avoid that.
 */
static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
{
	ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
if (power_on) {
ath11k_pci_enable_ltssm(ab);
ath11k_pci_clear_all_intrs(ab);
ath11k_pci_set_wlaon_pwr_ctrl(ab); if (ab->hw_params.fix_l1ss)
ath11k_pci_fix_l1ss(ab);
}
staticint ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
{ struct ath11k_base *ab = ab_pci->ab;
u16 device_id; int ret = 0;
pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); if (device_id != ab_pci->dev_id) {
ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
device_id, ab_pci->dev_id);
ret = -EIO; goto out;
}
ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM); if (ret) {
ath11k_err(ab, "failed to assign pci resource: %d\n", ret); goto out;
}
ret = pci_enable_device(pdev); if (ret) {
ath11k_err(ab, "failed to enable pci device: %d\n", ret); goto out;
}
ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci"); if (ret) {
ath11k_err(ab, "failed to request pci region: %d\n", ret); goto disable_device;
}
ret = dma_set_mask(&pdev->dev,
DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); if (ret) {
ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
ATH11K_PCI_DMA_MASK, ret); goto release_region;
}
ret = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK)); if (ret) {
ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
ATH11K_PCI_COHERENT_DMA_MASK, ret); goto release_region;
}
pci_set_master(pdev);
ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0); if (!ab->mem) {
ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
ret = -EIO; goto release_region;
}
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
if (mem_type == FW_CRASH_DUMP_NONE) continue;
if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
ath11k_dbg(ab, ATH11K_DBG_PCI, "target mem region type %d not supported",
ab->qmi.target_mem[i].type); continue;
}
/* append all segments together as they are all part of a single contiguous * block of memory
*/ for (i = 0; i < fw_img->entries ; i++) { if (!fw_img->mhi_buf[i].buf) continue;
/* append all segments together as they are all part of a single contiguous * block of memory
*/ for (i = 0; i < rddm_img->entries; i++) { if (!rddm_img->mhi_buf[i].buf) continue;
/* TODO: for now don't restore ASPM in case of single MSI * vector as MHI register reading in M2 causes system hang.
*/ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
ath11k_pci_aspm_restore(ab_pci); else
ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
/* Set fixed_mem_region to true for platforms support reserved memory * from DT. If memory is reserved from DT for FW, ath11k driver need not * allocate memory.
*/ if (of_property_present(ab->dev->of_node, "memory-region"))
set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
ret = ath11k_pci_claim(ab_pci, pdev); if (ret) {
ath11k_err(ab, "failed to claim device: %d\n", ret); goto err_free_core;
}
ret = ath11k_pcic_init_msi_config(ab); if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret); goto err_pci_free_region;
}
ret = ath11k_pci_alloc_msi(ab_pci); if (ret) {
ath11k_err(ab, "failed to enable msi: %d\n", ret); goto err_pci_free_region;
}
ret = ath11k_core_pre_init(ab); if (ret) goto err_pci_disable_msi;
ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); if (ret) {
ath11k_err(ab, "failed to set irq affinity %d\n", ret); goto err_pci_disable_msi;
}
ret = ath11k_mhi_register(ab_pci); if (ret) {
ath11k_err(ab, "failed to register mhi: %d\n", ret); goto err_irq_affinity_cleanup;
}
ret = ath11k_hal_srng_init(ab); if (ret) goto err_mhi_unregister;
ret = ath11k_ce_alloc_pipes(ab); if (ret) {
ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret); goto err_hal_srng_deinit;
}
ath11k_pci_init_qmi_ce_config(ab);
ret = ath11k_pcic_config_irq(ab); if (ret) {
ath11k_err(ab, "failed to config irq: %d\n", ret); goto err_ce_free;
}
/* kernel may allocate a dummy vector before request_irq and * then allocate a real vector when request_irq is called. * So get msi_data here again to avoid spurious interrupt * as msi_data will configured to srngs.
*/
ret = ath11k_pci_config_msi_data(ab_pci); if (ret) {
ath11k_err(ab, "failed to config msi_data: %d\n", ret); goto err_free_irq;
}
ret = ath11k_core_init(ab); if (ret) {
ath11k_err(ab, "failed to init core: %d\n", ret); goto err_free_irq;
}
ath11k_qmi_fwreset_from_cold_boot(ab); return 0;
err_free_irq: /* __free_irq() expects the caller to have cleared the affinity hint */
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath11k_pcic_free_irq(ab);
/* System suspend callback.
 *
 * When QMI never initialised (firmware not up) there is nothing to
 * suspend, so bail out early.  A core suspend failure is only logged;
 * we always report success so a WLAN problem cannot block system-wide
 * suspend.
 */
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);
	int err;

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
		return 0;
	}

	err = ath11k_core_suspend(ab);
	if (err)
		ath11k_warn(ab, "failed to suspend core: %d\n", err);

	return 0;
}
/* System resume callback.
 *
 * Skipped when QMI never initialised.  Unlike the suspend path, the
 * core resume error is propagated to the PM core after being logged.
 */
static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);
	int err;

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
		return 0;
	}

	err = ath11k_core_resume(ab);
	if (err)
		ath11k_warn(ab, "failed to resume core: %d\n", err);

	return err;
}
/* Late system suspend callback.
 *
 * As in ath11k_pci_pm_suspend(), a core failure is only logged and
 * success is reported so that system suspend/hibernation can proceed.
 */
static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);
	int err;

	err = ath11k_core_suspend_late(ab);
	if (err)
		ath11k_warn(ab, "failed to late suspend core: %d\n", err);

	return 0;
}
/* Early system resume callback: resume the core before the normal
 * resume callbacks run.  The core error is logged and propagated to
 * the PM core.
 */
static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath11k_core_resume_early(ab);
	if (ret)
		ath11k_warn(ab, "failed to early resume core: %d\n", ret);

	return ret;
}
/* NOTE(review): removed a block of non-code text (a German website
 * disclaimer) that was accidentally appended here; it was not part of
 * the driver source and broke compilation.
 */