// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{ struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer;
u32 reo_dest; int ret = 0, tid;
/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
reo_dest = ar->dp.mac_id + 1;
ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
WMI_PEER_SET_DEFAULT_ROUTING,
DP_RX_HASH_ENABLE | (reo_dest << 1));
if (ret) {
ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
ret, addr, vdev_id); return ret;
}
for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
HAL_PN_TYPE_NONE); if (ret) {
ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
tid, ret); goto peer_clean;
}
}
ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id); if (ret) {
ath11k_warn(ab, "failed to setup rx defrag context\n");
tid--; goto peer_clean;
}
/* TODO: Setup other peer specific resource used in data path */
return 0;
peer_clean:
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, addr); if (!peer) {
ath11k_warn(ab, "failed to find the peer to del rx tid\n");
spin_unlock_bh(&ab->base_lock); return -ENOENT;
}
for (; tid >= 0; tid--)
ath11k_peer_rx_tid_delete(ar, peer, tid);
switch (type) { case HAL_WBM2SW_RELEASE: if (ring_num == DP_RX_RELEASE_RING_NUM) {
grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
grp_mask = &ab->hw_params.ring_mask->tx[0];
} break; case HAL_REO_EXCEPTION:
grp_mask = &ab->hw_params.ring_mask->rx_err[0]; break; case HAL_REO_DST:
grp_mask = &ab->hw_params.ring_mask->rx[0]; break; case HAL_REO_STATUS:
grp_mask = &ab->hw_params.ring_mask->reo_status[0]; break; case HAL_RXDMA_MONITOR_STATUS: case HAL_RXDMA_MONITOR_DST:
grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0]; break; case HAL_RXDMA_DST:
grp_mask = &ab->hw_params.ring_mask->rxdma2host[0]; break; case HAL_RXDMA_BUF:
grp_mask = &ab->hw_params.ring_mask->host2rxdma[0]; break; case HAL_RXDMA_MONITOR_BUF: case HAL_TCL_DATA: case HAL_TCL_CMD: case HAL_REO_CMD: case HAL_SW2WBM_RELEASE: case HAL_WBM_IDLE_LINK: case HAL_TCL_STATUS: case HAL_REO_REINJECT: case HAL_CE_SRC: case HAL_CE_DST: case HAL_CE_DST_STATUS: default: return -ENOENT;
}
switch (type) { case HAL_REO_DST:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_RX;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; break; case HAL_RXDMA_BUF: case HAL_RXDMA_MONITOR_BUF: case HAL_RXDMA_MONITOR_STATUS:
params.low_threshold = num_entries >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 0;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; break; case HAL_WBM2SW_RELEASE: if (ring_num < 3) {
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_TX;
params.intr_timer_thres_us =
HAL_SRNG_INT_TIMER_THRESHOLD_TX; break;
} /* follow through when ring_num >= 3 */
fallthrough; case HAL_REO_EXCEPTION: case HAL_REO_REINJECT: case HAL_REO_CMD: case HAL_REO_STATUS: case HAL_TCL_DATA: case HAL_TCL_CMD: case HAL_TCL_STATUS: case HAL_WBM_IDLE_LINK: case HAL_SW2WBM_RELEASE: case HAL_RXDMA_DST: case HAL_RXDMA_MONITOR_DST: case HAL_RXDMA_MONITOR_DESC:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER; break; case HAL_RXDMA_DIR_BUF: break; default:
ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type); return -EINVAL;
}
if (cached) {
params.flags |= HAL_SRNG_FLAGS_CACHED;
ring->cached = 1;
}
ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, ¶ms); if (ret < 0) {
ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
ret, ring_num); return ret;
}
ring->ring_id = ret;
return 0;
}
/* ath11k_dp_stop_shadow_timers() - stop the per-TX-ring shadow register timers
 *
 * @ab: ath11k base driver context
 *
 * No-op on hardware that does not use shadow register access
 * (ab->hw_params.supports_shadow_regs is false); otherwise stops the
 * HP-update timer of every TCL TX ring.
 *
 * Fix: the function body in this file was never closed (the next definition
 * started immediately after the loop); restore the terminating brace.
 * NOTE(review): upstream may also stop further timers (e.g. a REO command
 * timer) here — confirm against the canonical dp.c before relying on this.
 */
void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
}
staticint ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{ struct ath11k_dp *dp = &ab->dp; struct hal_srng *srng; int i, ret;
u8 tcl_num, wbm_num;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
DP_WBM_RELEASE_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
ret); goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
DP_TCL_CMD_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret); goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
0, 0, DP_TCL_STATUS_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret); goto err;
}
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
HAL_TCL_DATA, tcl_num, 0,
ab->hw_params.tx_ring_size); if (ret) {
ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
i, ret); goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
HAL_WBM2SW_RELEASE, wbm_num, 0,
DP_TX_COMP_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
i, ret); goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
0, 0, DP_REO_REINJECT_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
ret); goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret); goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
0, 0, DP_REO_EXCEPTION_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
ret); goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
0, 0, DP_REO_CMD_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret); goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
0, 0, DP_REO_STATUS_RING_SIZE); if (ret) {
ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret); goto err;
}
/* When hash based routing of rx packet is enabled, 32 entries to map * the hash values to the ring will be configured.
*/
ab->hw_params.hw_ops->reo_setup(ab);
if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX) return -EINVAL;
for (i = 0; i < num_scatter_buf; i++) {
slist[i].vaddr = dma_alloc_coherent(ab->dev,
HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
&slist[i].paddr, GFP_KERNEL); if (!slist[i].vaddr) {
ret = -ENOMEM; goto err;
}
}
/* ath11k_dp_link_desc_bank_free() - release all allocated link descriptor banks
 *
 * @ab: ath11k base driver context (supplies the DMA device)
 * @link_desc_banks: array of DP_LINK_DESC_BANKS_MAX bank descriptors
 *
 * Frees every bank whose unaligned virtual address is non-NULL via
 * dma_free_coherent() and clears the pointer so a repeated call is a no-op.
 *
 * Fix: "staticvoid" (missing space between storage class and return type)
 * was a compile error.
 */
static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (!link_desc_banks[i].vaddr_unaligned)
			continue;

		dma_free_coherent(ab->dev,
				  link_desc_banks[i].size,
				  link_desc_banks[i].vaddr_unaligned,
				  link_desc_banks[i].paddr_unaligned);
		/* clear stale pointer to make double-free a harmless no-op */
		link_desc_banks[i].vaddr_unaligned = NULL;
	}
}
staticint ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab, struct dp_link_desc_bank *desc_bank, int n_link_desc_bank, int last_bank_sz)
{ struct ath11k_dp *dp = &ab->dp; int i; int ret = 0; int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
for (i = 0; i < n_link_desc_bank; i++) { if (i == (n_link_desc_bank - 1) && last_bank_sz)
desc_sz = last_bank_sz;
desc_bank[i].vaddr_unaligned =
dma_alloc_coherent(ab->dev, desc_sz,
&desc_bank[i].paddr_unaligned,
GFP_KERNEL); if (!desc_bank[i].vaddr_unaligned) {
ret = -ENOMEM; goto err;
}
if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX) return -EINVAL;
ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
n_link_desc_bank, last_bank_sz); if (ret) return ret;
/* Setup link desc idle list for HW internal usage */
entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
tot_mem_sz = entry_sz * n_link_desc;
/* Setup scatter desc list when the total memory requirement is more */ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
ring_type != HAL_RXDMA_MONITOR_DESC) {
ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
n_link_desc_bank,
n_link_desc,
last_bank_sz); if (ret) {
ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
ret); goto fail_desc_bank_free;
}
return 0;
}
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
for (i = 0; i < n_link_desc_bank; i++) {
align_bytes = link_desc_banks[i].vaddr -
link_desc_banks[i].vaddr_unaligned;
n_entries = (link_desc_banks[i].size - align_bytes) /
HAL_LINK_DESC_SIZE;
paddr = link_desc_banks[i].paddr; while (n_entries &&
(desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
i, paddr);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
}
int ath11k_dp_service_srng(struct ath11k_base *ab, struct ath11k_ext_irq_grp *irq_grp, int budget)
{ struct napi_struct *napi = &irq_grp->napi; conststruct ath11k_hw_hal_params *hal_params; int grp_id = irq_grp->grp_id; int work_done = 0; int i, j; int tot_work_done = 0;
for (i = 0; i < ab->hw_params.max_tx_ring; i++) { if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
ab->hw_params.ring_mask->tx[grp_id])
ath11k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params.ring_mask->rx[grp_id]) {
i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
work_done = ath11k_dp_process_rx(ab, i, napi,
budget);
budget -= work_done;
tot_work_done += work_done; if (budget <= 0) goto done;
}
if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) { for (i = 0; i < ab->num_radios; i++) { for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) { int id = i * ab->hw_params.num_rxdma_per_pdev + j;
/* ath11k_dp_pdev_free() - tear down per-pdev data path state
 *
 * @ab: ath11k base driver context
 *
 * Stops the monitor reap timer, then for every radio releases the pdev RX
 * resources, unregisters debugfs entries and detaches the RX monitor.
 */
void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int pdev_idx;

	/* make sure the reap timer cannot fire during teardown */
	timer_delete_sync(&ab->mon_reap_timer);

	for (pdev_idx = 0; pdev_idx < ab->num_radios; pdev_idx++) {
		ar = ab->pdevs[pdev_idx].ar;
		ath11k_dp_rx_pdev_free(ab, pdev_idx);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}
/* ath11k_dp_pdev_pre_alloc() - initialize per-pdev data path bookkeeping
 *
 * @ab: ath11k base driver context
 *
 * For every radio: records the mac_id, prepares the RX refill ring IDR and
 * lock, resets the pending-TX counter and wait queue, and initializes the
 * per-rxdma monitor status refill rings and the monitor buffer ring.
 * Performs no allocation that can fail.
 */
void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k_pdev_dp *dp;
	struct ath11k *ar;
	int pdev_idx, ring;

	for (pdev_idx = 0; pdev_idx < ab->num_radios; pdev_idx++) {
		ar = ab->pdevs[pdev_idx].ar;
		dp = &ar->dp;
		dp->mac_id = pdev_idx;

		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);

		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);

		for (ring = 0; ring < ab->hw_params.num_rxdma_per_pdev; ring++) {
			idr_init(&dp->rx_mon_status_refill_ring[ring].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[ring].idr_lock);
		}

		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}
/* ath11k_dp_pdev_alloc() - allocate per-pdev RX data path resources
 *
 * @ab: ath11k base driver context
 *
 * For every radio, allocates the pdev RX resources and attaches the RX
 * monitor. On the first failure, tears down everything allocated so far
 * via ath11k_dp_pdev_free() and returns the error code.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int pdev_idx;
	int ret;

	/* TODO:Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (pdev_idx = 0; pdev_idx < ab->num_radios; pdev_idx++) {
		ar = ab->pdevs[pdev_idx].ar;

		ret = ath11k_dp_rx_pdev_alloc(ab, pdev_idx);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    pdev_idx);
			goto err;
		}

		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    pdev_idx);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}
int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{ struct ath11k_htc_svc_conn_req conn_req; struct ath11k_htc_svc_conn_resp conn_resp; int status;
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
dp->tx_ring[i].tcl_data_ring_id = i;
/* when the timer is fired, the handler checks whether there * are new TX happened. The handler updates HP only when there * are no TX operations during the timeout interval, and stop * the timer. Timer will be started again when TX happens again.
*/ if (update_timer->timer_tx_num != update_timer->tx_num) {
update_timer->timer_tx_num = update_timer->tx_num;
mod_timer(&update_timer->timer, jiffies +
msecs_to_jiffies(update_timer->interval));
} else {
update_timer->started = false;
ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
}
/*
 * NOTE(review): the following disclaimer is extraneous non-code text that was
 * accidentally appended to this file (it breaks compilation as raw prose);
 * translated to English and preserved as a comment pending removal:
 * "The information on this web page was carefully compiled to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */