// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */
int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
        struct ath12k_base *ab = ar->ab;
        struct ath12k_peer *peer;
        u32 reo_dest;
        int ret = 0, tid;

        /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
        reo_dest = ar->dp.mac_id + 1;
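
        /* The routing word below packs the hash-enable flag in bit 0
         * (DP_RX_HASH_ENABLE) and the default REO destination ring in the
         * bits above it, hence the (reo_dest << 1).
         */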
        ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
                                        WMI_PEER_SET_DEFAULT_ROUTING,
                                        DP_RX_HASH_ENABLE | (reo_dest << 1));
        if (ret) {
                ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
                            ret, addr, vdev_id);
                return ret;
        }

        for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
                ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
                                                  HAL_PN_TYPE_NONE);
                if (ret) {
                        ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
                                    tid, ret);
                        goto peer_clean;
                }
        }

        ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
        if (ret) {
                ath12k_warn(ab, "failed to setup rx defrag context\n");
                goto peer_clean;
        }

        /* TODO: Setup other peer specific resource used in data path */

        return 0;

peer_clean:
        spin_lock_bh(&ab->base_lock);

        peer = ath12k_peer_find(ab, vdev_id, addr);
        if (!peer) {
                ath12k_warn(ab, "failed to find the peer to del rx tid\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }
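
        /* tid indexes the TID whose setup failed (or IEEE80211_NUM_TIDS + 1
         * when the defrag setup failed); walk backwards and tear down every
         * rx TID queue that was successfully set up.
         */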
        for (tid--; tid >= 0; tid--)
                ath12k_dp_rx_peer_tid_delete(ar, peer, tid);

        spin_unlock_bh(&ab->base_lock);

        return ret;
}

static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
        int ext_group_num;
        u8 mask = 1 << ring_num;

        for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
             ext_group_num++) {
                if (mask & grp_mask[ext_group_num])
                        return ext_group_num;
        }

        return -ENOENT;
}

static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
                                              enum hal_ring_type type,
                                              int ring_num)
{
        const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
        const u8 *grp_mask;
        int i;

        switch (type) {
        case HAL_WBM2SW_RELEASE:
                if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
                        grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
                        ring_num = 0;
                } else {
                        map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
                        for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
                                if (ring_num == map[i].wbm_ring_num) {
                                        ring_num = i;
                                        break;
                                }
                        }

                        grp_mask = &ab->hw_params->ring_mask->tx[0];
                }
                break;
        case HAL_REO_EXCEPTION:
                grp_mask = &ab->hw_params->ring_mask->rx_err[0];
                break;
        case HAL_REO_DST:
                grp_mask = &ab->hw_params->ring_mask->rx[0];
                break;
        case HAL_REO_STATUS:
                grp_mask = &ab->hw_params->ring_mask->reo_status[0];
                break;
        case HAL_RXDMA_MONITOR_STATUS:
                grp_mask = &ab->hw_params->ring_mask->rx_mon_status[0];
                break;
        case HAL_RXDMA_MONITOR_DST:
                grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
                break;
        case HAL_TX_MONITOR_DST:
                grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
                break;
        case HAL_RXDMA_BUF:
                grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
                break;
        case HAL_RXDMA_MONITOR_BUF:
        case HAL_TCL_DATA:
        case HAL_TCL_CMD:
        case HAL_REO_CMD:
        case HAL_SW2WBM_RELEASE:
        case HAL_WBM_IDLE_LINK:
        case HAL_TCL_STATUS:
        case HAL_REO_REINJECT:
        case HAL_CE_SRC:
        case HAL_CE_DST:
        case HAL_CE_DST_STATUS:
        default:
                return -ENOENT;
        }

        return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
                         enum hal_ring_type type, int ring_num,
                         int mac_id, int num_entries)
{
        struct hal_srng_params params = {};
        int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
        int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
        int ret;

        if (max_entries < 0 || entry_sz < 0)
                return -EINVAL;

        if (num_entries > max_entries)
                num_entries = max_entries;

        /* Only valid for raw frames with HW crypto enabled.
         * With SW crypto, mac80211 sets key per packet
         */
        if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
            test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
                bank_config |=
                        u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
                                        HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);

        /* only valid if idx_lookup_override is not set in tcl_data_cmd */
        if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
                bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
        else
                bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
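
        /* u32_encode_bits() (linux/bitfield.h) shifts a value into the field
         * described by its mask argument, so the two branches above simply
         * set or clear HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN in bank_config.
         */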

        ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
                                   HAL_SW2WBM_RELEASE, 0, 0,
                                   DP_WBM_RELEASE_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
                            ret);
                goto err;
        }

        for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
                map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
                tx_comp_ring_num = map[i].wbm_ring_num;

                ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
                                           HAL_TCL_DATA, i, 0,
                                           DP_TCL_DATA_RING_SIZE);
                if (ret) {
                        ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
                                    i, ret);
                        goto err;
                }

                ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
                                           HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
                                           DP_TX_COMP_RING_SIZE(ab));
                if (ret) {
                        ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
                                    tx_comp_ring_num, ret);
                        goto err;
                }
        }

        ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
                                   0, 0, DP_REO_REINJECT_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
                                   HAL_WBM2SW_REL_ERR_RING_NUM, 0,
                                   DP_RX_RELEASE_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
                goto err;
        }

        ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
                                   0, 0, DP_REO_EXCEPTION_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
                                   0, 0, DP_REO_CMD_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
                goto err;
        }

        ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
                                   0, 0, DP_REO_STATUS_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
                goto err;
        }

        /* When hash based routing of rx packet is enabled, 32 entries to map
         * the hash values to the ring will be configured. Each hash entry uses
         * four bits to map to a particular ring. The ring mapping will be
         * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:SW5
         * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
         */
        ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
                        HAL_HASH_ROUTING_RING_SW2 << 4 |
                        HAL_HASH_ROUTING_RING_SW3 << 8 |
                        HAL_HASH_ROUTING_RING_SW4 << 12 |
                        HAL_HASH_ROUTING_RING_SW1 << 16 |
                        HAL_HASH_ROUTING_RING_SW2 << 20 |
                        HAL_HASH_ROUTING_RING_SW3 << 24 |
                        HAL_HASH_ROUTING_RING_SW4 << 28;
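
        /* Illustration only (not driver code): each 4-bit nibble of
         * ring_hash_map selects the destination ring for one hash value, so
         * this u32 covers eight of the 32 entries, roughly
         *
         *      ring = (ring_hash_map >> (4 * (hash & 0x7))) & 0xf;
         */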

        if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
                return -EINVAL;

        for (i = 0; i < num_scatter_buf; i++) {
                slist[i].vaddr = dma_alloc_coherent(ab->dev,
                                                    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                                                    &slist[i].paddr, GFP_KERNEL);
                if (!slist[i].vaddr) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

static void
ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks)
{
        int i;

        for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
                if (link_desc_banks[i].vaddr_unaligned) {
                        dma_free_coherent(ab->dev,
                                          link_desc_banks[i].size,
                                          link_desc_banks[i].vaddr_unaligned,
                                          link_desc_banks[i].paddr_unaligned);
                        link_desc_banks[i].vaddr_unaligned = NULL;
                }
        }
}

static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
                                          struct dp_link_desc_bank *desc_bank,
                                          int n_link_desc_bank,
                                          int last_bank_sz)
{
        struct ath12k_dp *dp = &ab->dp;
        int i;
        int ret = 0;
        int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

        for (i = 0; i < n_link_desc_bank; i++) {
                if (i == (n_link_desc_bank - 1) && last_bank_sz)
                        desc_sz = last_bank_sz;

                desc_bank[i].vaddr_unaligned =
                        dma_alloc_coherent(ab->dev, desc_sz,
                                           &desc_bank[i].paddr_unaligned,
                                           GFP_KERNEL);
                if (!desc_bank[i].vaddr_unaligned) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
                return -EINVAL;

        ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
                                             n_link_desc_bank, last_bank_sz);
        if (ret)
                return ret;

        /* Setup link desc idle list for HW internal usage */
        entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
        tot_mem_sz = entry_sz * n_link_desc;

        /* Setup scatter desc list when the total memory requirement is more */
        if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
            ring_type != HAL_RXDMA_MONITOR_DESC) {
                ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
                                                             n_link_desc_bank,
                                                             n_link_desc,
                                                             last_bank_sz);
                if (ret) {
                        ath12k_warn(ab, "failed to setup scattering idle list descriptor :%d\n",
                                    ret);
                        goto fail_desc_bank_free;
                }

        if (ab->hw_params->ring_mask->rx[grp_id]) {
                i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
                work_done = ath12k_dp_rx_process(ab, i, napi,
                                                 budget);
                budget -= work_done;
                tot_work_done += work_done;
                if (budget <= 0)
                        goto done;
        }

        if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) {
                ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id];
                for (i = 0; i < ab->num_radios; i++) {
                        for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
                                int id = i * ab->hw_params->num_rxdma_per_pdev + j;

        if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
                monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
                ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
                for (i = 0; i < ab->num_radios; i++) {
                        for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
                                int id = i * ab->hw_params->num_rxdma_per_pdev + j;

        if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
                monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
                ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
                for (i = 0; i < ab->num_radios; i++) {
                        for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
                                int id = i * ab->hw_params->num_rxdma_per_pdev + j;

void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
{
        if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
                /* RX TLVS compaction is supported, hence change the hal_rx_ops
                 * to compact hal_rx_ops.
                 */
                ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
        }
        ab->hal.hal_desc_sz =
                ab->hal_rx_ops->rx_desc_get_desc_size();
}

int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
        struct ath12k *ar;
        int ret;
        int i;

        ret = ath12k_dp_rx_htt_setup(ab);
        if (ret)
                goto out;

        /* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                ret = ath12k_dp_rx_pdev_alloc(ab, i);
                if (ret) {
                        ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
                                    i);
                        goto err;
                }

                ret = ath12k_dp_rx_pdev_mon_attach(ar);
                if (ret) {
                        ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
                        goto err;
                }
        }

        /* TODO: error path for bank id failure */
        if (arvif->bank_id == DP_INVALID_BANK_ID) {
                ath12k_err(ar->ab, "Failed to initialize DP TX Banks\n");
                return;
        }
}

        /* if we are unregistering, hw would've been destroyed and
         * ar is no longer valid.
         */
        if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
                ar = skb_cb->ar;

                if (atomic_dec_and_test(&ar->dp.num_tx_pending))
                        wake_up(&ar->dp.tx_empty_waitq);
        }
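
        /* The wake_up() above is assumed to pair with a wait in the flush
         * path that sleeps until num_tx_pending drains to zero.
         */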

        if (dp->txbaddr) {
                for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
                        spin_lock_bh(&dp->tx_desc_lock[pool_id]);
                        for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
                                tx_spt_page = i + pool_id *
                                              ATH12K_TX_SPT_PAGES_PER_POOL(ab);
                                if (!dp->txbaddr[tx_spt_page])
                                        continue;

        if (!dp->rxbaddr) {
                spin_unlock_bh(&dp->rx_desc_lock);
                return -ENOMEM;
        }

        /* First ATH12K_NUM_RX_SPT_PAGES(ab) of allocated SPT pages are used for
         * RX
         */
        for (i = 0; i < num_rx_spt_pages; i++) {
                rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
                                   GFP_ATOMIC);
                if (!rx_descs) {
                        spin_unlock_bh(&dp->rx_desc_lock);
                        return -ENOMEM;
                }

        switch (type) {
        case ATH12K_DP_TX_DESC:
                start = ATH12K_TX_SPT_PAGE_OFFSET;
                end = start + ATH12K_NUM_TX_SPT_PAGES(ab);
                break;
        case ATH12K_DP_RX_DESC:
                cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
                start = ATH12K_RX_SPT_PAGE_OFFSET(ab);
                end = start + ATH12K_NUM_RX_SPT_PAGES(ab);
                break;
        default:
                ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
                return -EINVAL;
        }
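
        /* Each PPT entry holds the page frame of one 4 KiB-aligned SPT page;
         * the shift by ATH12K_SPT_4K_ALIGN_OFFSET (assumed to be 12, i.e.
         * log2(4096)) strips the always-zero low address bits.
         */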
        /* Write to PPT in CMEM */
        for (i = start; i < end; i++)
                ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
                                   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);

        for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
                INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
                INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
                spin_lock_init(&dp->tx_desc_lock[i]);
        }

        dp->num_spt_pages = ath12k_dp_get_num_spt_pages(ab);
        if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
                dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;

        ret = ath12k_dp_alloc_reoq_lut(ab, &dp->reoq_lut);
        if (ret) {
                ath12k_warn(ab, "failed to allocate memory for reoq table\n");
                return ret;
        }

        ret = ath12k_dp_alloc_reoq_lut(ab, &dp->ml_reoq_lut);
        if (ret) {
                ath12k_warn(ab, "failed to allocate memory for ML reoq table\n");
                dma_free_coherent(ab->dev, dp->reoq_lut.size,
                                  dp->reoq_lut.vaddr_unaligned,
                                  dp->reoq_lut.paddr_unaligned);
                dp->reoq_lut.vaddr_unaligned = NULL;
                return ret;
        }

        /* Bits in the register have address [39:8] LUT base address to be
         * allocated such that LSBs are assumed to be zero. Also, current
         * design supports paddr up to 4 GB max hence it fits in 32 bit
         * register only
         */
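
        /* A sketch of the resulting programming (the register names here are
         * an assumption based on this hardware generation's REO register map):
         *
         *      ath12k_hif_write32(ab,
         *                         HAL_SEQ_WCSS_UMAC_REO_REG +
         *                         HAL_REO1_QDESC_LUT_BASE0(ab),
         *                         dp->reoq_lut.paddr >> 8);
         *
         * i.e. the base address drops its eight zero LSBs so a <=4 GB paddr
         * fits the 32-bit register.
         */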

        ret = ath12k_dp_reoq_lut_setup(ab);
        if (ret) {
                ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
                goto fail_cmn_srng_cleanup;
        }

        for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
                dp->tx_ring[i].tcl_data_ring_id = i;

                dp->tx_ring[i].tx_status_head = 0;
                dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE(ab) - 1;
                dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
                if (!dp->tx_ring[i].tx_status) {
                        ret = -ENOMEM;
                        /* FIXME: The allocated tx status is not freed
                         * properly here
                         */
                        goto fail_cmn_reoq_cleanup;
                }
        }

        for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
                ath12k_hal_tx_set_dscp_tid_map(ab, i);

        ret = ath12k_dp_rx_alloc(ab);
        if (ret)
                goto fail_dp_rx_free;