// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */
staticconststruct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { /* QCA988X pre 2.0 chips are not supported because they need some nasty * hacks. ath10k doesn't have them and these devices crash horribly * because of that.
*/
{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
/* Poll until the device reports itself awake, or time out.
 *
 * Uses a progressive backoff: the poll interval starts at 5 us and grows
 * by 5 us per iteration, capped at 50 us, so the total wait is bounded by
 * PCIE_WAKE_TIMEOUT microseconds.
 *
 * Returns 0 once the device is awake (warning if the wakeup exceeded
 * PCIE_WAKE_LATE_US), -ETIMEDOUT otherwise.
 */
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}
/* NOTE(review): this block is corrupted by extraction.  "staticint",
 * "unsignedlong", "returntrue" and "returnfalse" are run-together tokens,
 * and the body below fuses fragments of at least two different functions:
 * ath10k_pci_force_wake and a shared-INTX irq-pending check that uses a
 * variable "cause" which is never declared here.  The spin_lock_irqsave()
 * taken below is also never released in the visible code.  Code is left
 * byte-identical; comments only.  TODO: restore from the upstream driver
 * before attempting to build.
 */
staticint ath10k_pci_force_wake(struct ath10k *ar)
{ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsignedlong flags; int ret = 0;
/* With PCI power-save enabled the device manages its own wake state. */
if (ar_pci->pci_ps) return ret;
spin_lock_irqsave(&ar_pci->ps_lock, flags);
/* ps_awake caches the device state to avoid expensive MMIO polling. */
if (!ar_pci->ps_awake) {
/* Assert the SOC wake request register, then poll for wakeup. */
iowrite32(PCIE_SOC_WAKE_V_MASK,
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS);
ret = ath10k_pci_wake_wait(ar); if (ret == 0)
ar_pci->ps_awake = true;
}
/* This function can be called very frequently. To avoid excessive * CPU stalls for MMIO reads use a cache var to hold the device state.
*/ if (!ar_pci->ps_awake) {
__ath10k_pci_wake(ar);
ret = ath10k_pci_wake_wait(ar); if (ret == 0)
ar_pci->ps_awake = true;
}
/* Refcount successful wakes; WARN_ON catches counter wraparound. */
if (ret == 0) {
ar_pci->ps_wake_refcount++;
WARN_ON(ar_pci->ps_wake_refcount == 0);
}
/* NOTE(review): everything from here down belongs to a different
 * (bool-returning) irq-pending helper, not to force_wake.
 */
/* Check if the shared legacy irq is for us */
cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS); if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) returntrue;
returnfalse;
}
/* Mask legacy INTX interrupts and clear any that are already latched. */
void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared.
	 */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
/* IMPORTANT: this extra read transaction is required to * flush the posted write buffer.
*/
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS);
}
/* Convert a QCA988x target CPU address into the matching CE address.
 * The low 20 bits of the address pass through unchanged, bit 20 is
 * always set, and bits [31:21] come from the CORE_CTRL register.
 */
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 region_bits = addr & 0xfffff;
	u32 core_bits;

	core_bits = ath10k_pci_read32(ar,
				      SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) & 0x7ff;

	return (core_bits << 21) | 0x100000 | region_bits;
}
/* Refactor from ath10k_pci_qca988x_targ_cpu_to_ce_addr. * Support to access target space below 1M for qca6174 and qca9377. * If target space is below 1M, the bit[20] of converted CE addr is 0. * Otherwise bit[20] of converted CE addr is 1.
*/ static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
/* NOTE(review): corrupted block -- the two statements below reference an
 * undeclared "ar_pci" and look like they belong to a generic dispatch
 * wrapper (ath10k_pci_targ_cpu_to_ce_addr), while "val" and "region"
 * above are never used.  The actual qca6174 conversion body appears to
 * have been lost during extraction.  Left byte-identical; restore from
 * the upstream driver.
 */
if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr)) return -EOPNOTSUPP;
return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
/* * Diagnostic read/write access is provided for startup/config/debug usage. * Caller must guarantee proper alignment, when applicable, and single user * at any moment.
*/ staticint ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, int nbytes)
{ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0;
u32 *buf; unsignedint completed_nbytes, alloc_nbytes, remaining_bytes; struct ath10k_ce_pipe *ce_diag; /* Host buffer address in CE space */
u32 ce_data;
dma_addr_t ce_data_base = 0; void *data_buf; int i;
/* * Allocate a temporary bounce buffer to hold caller's data * to be DMA'ed from Target. This guarantees * 1) 4-byte alignment * 2) Buffer in DMA-able space
*/
alloc_nbytes = min_t(unsignedint, nbytes, DIAG_TRANSFER_LIMIT);
data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
GFP_ATOMIC); if (!data_buf) {
ret = -ENOMEM; goto done;
}
/* The address supplied by the caller is in the * Target CPU virtual address space. * * In order to use this address with the diagnostic CE, * convert it from Target CPU virtual address space * to CE address space
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data); if (ret != 0) goto done;
/* Request CE to send from Target(!) address to Host buffer */
ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0); if (ret) goto done;
i = 0; while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY; goto done;
}
}
i = 0; while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
&completed_nbytes) != 0) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY; goto done;
}
}
if (nbytes != completed_nbytes) {
ret = -EIO; goto done;
}
ret = ath10k_pci_diag_read32(ar, host_addr, &addr); if (ret != 0) {
ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
src, ret); return ret;
}
ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); if (ret != 0) {
ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
addr, len, ret); return ret;
}
/* * Allocate a temporary bounce buffer to hold caller's data * to be DMA'ed to Target. This guarantees * 1) 4-byte alignment * 2) Buffer in DMA-able space
*/
alloc_nbytes = min_t(unsignedint, nbytes, DIAG_TRANSFER_LIMIT);
data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
GFP_ATOMIC); if (!data_buf) {
ret = -ENOMEM; goto done;
}
/* * The address supplied by the caller is in the * Target CPU virtual address space. * * In order to use this address with the diagnostic CE, * convert it from * Target CPU virtual address space * to * CE address space
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
/* Copy caller's data to allocated DMA buf */
memcpy(data_buf, data, nbytes);
/* Set up to receive directly into Target(!) address */
ret = ath10k_ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) goto done;
/* * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address.
*/
ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0); if (ret != 0) goto done;
i = 0; while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY; goto done;
}
}
i = 0; while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
&completed_nbytes) != 0) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY; goto done;
}
}
if (nbytes != completed_nbytes) {
ret = -EIO; goto done;
}
if (*buf != address) {
ret = -EIO; goto done;
}
remaining_bytes -= nbytes;
address += nbytes;
data += nbytes;
}
done: if (data_buf) {
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
}
if (ret != 0)
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
address, ret);
/* Called by lower (CE) layer when a send to Target completes.
 *
 * Drains all completed send descriptors into a local list first, then
 * hands each skb to the HTC tx completion handler outside the drain loop.
 */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}
if (unlikely(max_nbytes < nbytes)) {
ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb); continue;
}
/* No need to acquire ce_lock for CE5, since this is the only place CE5 * is processed other than init and deinit. Before releasing CE5 * buffers, interrupts are disabled. Thus CE5 access is serialized.
*/
__skb_queue_head_init(&list); while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
&nbytes) == 0) {
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
if (unlikely(max_nbytes < nbytes)) {
ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
nbytes, max_nbytes); continue;
}
/*let device gain the buffer again*/
dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
}
ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	/* NOTE(review): the original body was truncated here by extraction;
	 * the closing brace (and likely a trailing rx-processing call) was
	 * lost.  The function is terminated so the file parses again --
	 * TODO confirm the full body against the upstream driver.
	 */
}
/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}
/* Called by lower (CE) layer when a send to HTT Target completes. */ staticvoid ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{ struct ath10k *ar = ce_state->ar; struct sk_buff *skb;
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { /* no need to call tx completion for NULL pointers */ if (!skb) continue;
/* Called by lower (CE) layer when HTT data is received from the Target. */ staticvoid ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{ /* CE4 polling needs to be done whenever CE pipe which transports * HTT Rx (target->host) is processed.
*/
ath10k_ce_per_engine_service(ce_state->ar, 4);
/* fill the gap between the first register section and register * start address
*/ for (i = 0; i < skip_size; i++) {
*buf = ATH10K_MAGIC_NOT_COPIED;
buf++;
}
count = 0;
for (i = 0; cur_section != NULL; i++) {
section_size = cur_section->end - cur_section->start;
if (section_size <= 0) {
ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
cur_section->start,
cur_section->end); break;
}
if (cur_section->end > next_section->start) {
ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
next_section->start,
cur_section->end); break;
}
if (buf_len < (skip_size + section_size)) {
ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len); break;
}
buf_len -= skip_size + section_size;
/* read section to dest memory */
ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
buf, section_size); if (ret) {
ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
cur_section->start, ret); break;
}
buf += section_size;
count += section_size;
/* fill in the gap between this section and the next */ for (j = 0; j < skip_size; j++) {
*buf = ATH10K_MAGIC_NOT_COPIED;
buf++;
}
count += skip_size;
if (!next_section) /* this was the last section */ break;
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
FW_RAM_CONFIG_ADDRESS); if (val != config) {
ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
val, config); return -EIO;
}
for (i = 0; i < region->len; i += 4) {
iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
}
return region->len;
}
/* Dump a register region via direct MMIO reads.
 *
 * Only valid while the device is in the ON state (checked under
 * conf_mutex); copies region->len bytes, one 32-bit word at a time,
 * from ar_pci->mem + region->start into buf.
 *
 * If an error happened returns < 0, otherwise the length copied.
 */
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
				      const struct ath10k_mem_region *region,
				      u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 i;
	int ret;

	mutex_lock(&ar->conf_mutex);
	if (ar->state != ATH10K_STATE_ON) {
		ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
		ret = -EIO;
		goto done;
	}

	for (i = 0; i < region->len; i += 4)
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);

	ret = region->len;
done:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}
/* Dump one memory region, either section by section or as a single
 * contiguous diagnostic read.
 *
 * If an error happened returns < 0, otherwise the length copied.
 */
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
					  const struct ath10k_mem_region *current_region,
					  u8 *buf)
{
	int ret;

	if (current_region->section_table.size > 0)
		/* Copy each section individually. */
		return ath10k_pci_dump_memory_section(ar,
						      current_region,
						      buf,
						      current_region->len);

	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	ret = ath10k_pci_diag_read_mem(ar,
				       current_region->start,
				       buf,
				       current_region->len);
	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	/* Success path: report the number of bytes copied.  (The visible
	 * original fell off the end of a non-void function here.)
	 */
	return current_region->len;
}
for (i = 0; i < mem_layout->region_table.size; i++) {
count = 0;
if (current_region->len > buf_len) {
ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
current_region->name,
current_region->len,
buf_len); break;
}
/* To get IRAM dump, the host driver needs to switch target * ram config from DRAM to IRAM.
*/ if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
shift = current_region->start >> 20;
ret = ath10k_pci_set_ram_config(ar, shift); if (ret) {
ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
current_region->name, ret); break;
}
}
/* Reserve space for the header. */
hdr = (void *)buf;
buf += sizeof(*hdr);
buf_len -= sizeof(*hdr);
switch (current_region->type) { case ATH10K_MEM_REGION_TYPE_IOSRAM:
count = ath10k_pci_dump_memory_sram(ar, current_region, buf); break; case ATH10K_MEM_REGION_TYPE_IOREG:
ret = ath10k_pci_dump_memory_reg(ar, current_region, buf); if (ret < 0) break;
count = ret; break; default:
ret = ath10k_pci_dump_memory_generic(ar, current_region, buf); if (ret < 0) break;
if (!force) { int resources; /* * Decide whether to actually poll for completions, or just * wait for a later chance. * If there seem to be plenty of resources left, then just wait * since checking involves reading a CE register, which is a * relatively expensive operation.
*/
resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
/* * If at least 50% of the total resources are still available, * don't bother checking again yet.
*/ if (resources > (ar_pci->attr[pipe].src_nentries >> 1)) return;
}
ath10k_ce_per_engine_service(ar, pipe);
}
switch (ar->hw_rev) { case ATH10K_HW_QCA988X: case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS);
val &= ~CORE_CTRL_PCIE_REG_31_MASK;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: case ATH10K_HW_QCA9984: case ATH10K_HW_QCA9888: case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to mask irq/MSI.
*/ break; case ATH10K_HW_WCN3990: break;
}
}
switch (ar->hw_rev) { case ATH10K_HW_QCA988X: case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS);
val |= CORE_CTRL_PCIE_REG_31_MASK;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: case ATH10K_HW_QCA9984: case ATH10K_HW_QCA9888: case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to unmask irq/MSI.
*/ break; case ATH10K_HW_WCN3990: break;
}
}
ar = pci_pipe->hif_ce_state;
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;
if (!ce_ring) return;
if (!pci_pipe->buf_sz) return;
for (i = 0; i < ce_ring->nentries; i++) {
skb = ce_ring->per_transfer_context[i]; if (!skb) continue;
ce_ring->per_transfer_context[i] = NULL;
ath10k_htc_tx_completion_handler(ar, skb);
}
}
/* * Cleanup residual buffers for device shutdown: * buffers that were enqueued for receive * buffers that were to be sent * Note: Buffers that had completed but which were * not yet processed are on a completion queue. They * are handled when the completion thread shuts down.
*/ staticvoid ath10k_pci_buffer_cleanup(struct ath10k *ar)
{ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int pipe_num;
/* Most likely the device has HTT Rx ring configured. The only way to * prevent the device from accessing (and possible corrupting) host * memory is to reset the chip now. * * There's also no known way of masking MSI interrupts on the device. * For ranged MSI the CE-related interrupts can be masked. However * regardless how many MSI interrupts are assigned the first one * is always used for firmware indications (crashes) and cannot be * masked. To prevent the device from asserting the interrupt reset it * before proceeding with cleanup.
*/
ath10k_pci_safe_chip_reset(ar);
while (time_before_eq(jiffies, timeout)) {
ath10k_pci_bmi_send_done(tx_pipe);
ath10k_pci_bmi_recv_data(rx_pipe);
if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
ret = 0; goto out;
}
schedule();
}
ret = -ETIMEDOUT;
out:
dur = jiffies - started; if (dur > HZ)
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi cmd took %lu jiffies hz %d ret %d\n",
dur, HZ, ret); return ret;
}
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	/* The visible original was truncated before returning; restore the
	 * success return so this non-void function terminates properly.
	 */
	return 0;
}
switch (ar_pci->pdev->device) { case QCA988X_2_0_DEVICE_ID_UBNT: case QCA988X_2_0_DEVICE_ID: case QCA99X0_2_0_DEVICE_ID: case QCA9888_2_0_DEVICE_ID: case QCA9984_1_0_DEVICE_ID: case QCA9887_1_0_DEVICE_ID: return 1; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) { case QCA6174_HW_1_0_CHIP_ID_REV: case QCA6174_HW_1_1_CHIP_ID_REV: case QCA6174_HW_2_1_CHIP_ID_REV: case QCA6174_HW_2_2_CHIP_ID_REV: return 3; case QCA6174_HW_1_3_CHIP_ID_REV: return 2; case QCA6174_HW_3_0_CHIP_ID_REV: case QCA6174_HW_3_1_CHIP_ID_REV: case QCA6174_HW_3_2_CHIP_ID_REV: return 9;
} break; case QCA9377_1_0_DEVICE_ID: return 9;
}
ath10k_warn(ar, "unknown number of banks, assuming 1\n"); return 1;
}
/* Download to Target the CE Config and the service-to-CE map */
interconnect_targ_addr =
host_interest_item_address(HI_ITEM(hi_interconnect_state));
/* Supply Target-side CE configuration */
ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
&pcie_state_targ_addr); if (ret != 0) {
ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); return ret;
}
if (pcie_state_targ_addr == 0) {
ret = -EIO;
ath10k_err(ar, "Invalid pcie state addr\n"); return ret;
}
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
offsetof(struct pcie_state,
pipe_cfg_addr)),
&pipe_cfg_targ_addr); if (ret != 0) {
ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); return ret;
}
if (pipe_cfg_targ_addr == 0) {
ret = -EIO;
ath10k_err(ar, "Invalid pipe cfg addr\n"); return ret;
}
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
ar_pci->pipe_config, sizeof(struct ce_pipe_config) *
NUM_TARGET_CE_CONFIG_WLAN);
if (ret != 0) {
ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); return ret;
}
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
offsetof(struct pcie_state,
svc_to_pipe_map)),
&svc_to_pipe_map); if (ret != 0) {
ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); return ret;
}
if (svc_to_pipe_map == 0) {
ret = -EIO;
ath10k_err(ar, "Invalid svc_to_pipe map\n"); return ret;
}
ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
ar_pci->serv_to_pipe, sizeof(pci_target_service_to_ce_map_wlan)); if (ret != 0) {
ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); return ret;
}
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
offsetof(struct pcie_state,
config_flags)),
&pcie_config_flags); if (ret != 0) {
ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret); return ret;
}
pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
offsetof(struct pcie_state,
config_flags)),
pcie_config_flags); if (ret != 0) {
ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); return ret;
}
/* configure early allocation */
ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value); if (ret != 0) {
ath10k_err(ar, "Failed to get early alloc val: %d\n", ret); return ret;
}
/* first bank is switched to IRAM */
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
HI_EARLY_ALLOC_MAGIC_MASK);
ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); if (ret != 0) {
ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); return ret;
}
/* Tell Target to proceed with initialization */
flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value); if (ret != 0) {
ath10k_err(ar, "Failed to get option val: %d\n", ret); return ret;
}
flag2_value |= HI_OPTION_EARLY_CFG_DONE;
ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value); if (ret != 0) {
ath10k_err(ar, "Failed to set option val: %d\n", ret); return ret;
}
/* Release every copy-engine pipe owned by this device. */
void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int pipe;

	for (pipe = 0; pipe < CE_COUNT; pipe++)
		ath10k_ce_free_pipe(ar, pipe);
}
/* Initialise every copy-engine pipe from the per-pipe attribute table.
 *
 * Returns 0 on success, or the first failing pipe's negative errno.
 * (The visible original's success path fell through into an unrelated
 * firmware-crash check using an undeclared variable; restored to
 * return 0.)
 */
int ath10k_pci_init_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}
/* this function effectively clears target memory controller assert line */ staticvoid ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
u32 val;
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
val | SOC_RESET_CONTROL_SI0_RST_MASK);
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
msleep(10);
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.25 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.