// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */
/* NOTE(review): tail fragment of a REO destination-ring cleanup routine;
 * the function header (and the declarations of 'ab', 'dp' and 'i') is
 * outside this chunk.  Releases every REO destination SRNG.
 */
for (i = 0; i < DP_REO_DST_RING_MAX; i++)
ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}
/* ath11k_dp_pdev_reo_setup() - allocate and initialize one HAL SRNG per
 * REO destination ring.
 *
 * NOTE(review): this fragment ends before the 'err_reo_cleanup' label
 * and the success return; the remainder of the function is not visible
 * in this chunk.
 */
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{ struct ath11k_dp *dp = &ab->dp; int ret; int i;
for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
HAL_REO_DST, i, 0,
DP_REO_DST_RING_SIZE); if (ret) {
/* Unwind the rings set up so far (label is outside this view) */
ath11k_warn(ab, "failed to setup reo_dst_ring\n"); goto err_reo_cleanup;
}
}
/* NOTE(review): interior fragment of a per-pdev RX SRNG allocation
 * routine — the function header declaring 'ar', 'dp', 'srng', 'ret' and
 * 'i' is not visible in this chunk.  Sets up, in order: the RXDMA
 * refill ring, the optional per-MAC buffer rings, the RXDMA error
 * destination rings, the monitor-status refill rings and, when rxdma1
 * is enabled, the monitor buf/dst/desc rings.
 */
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rx_refill_buf_ring.refill_buf_ring,
HAL_RXDMA_BUF, 0,
dp->mac_id, DP_RXDMA_BUF_RING_SIZE); if (ret) {
ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); return ret;
}
/* Some targets expose one RX buffer ring per MAC */
if (ar->ab->hw_params.rx_mac_buf_ring) { for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
dp->mac_id + i, 1024); if (ret) {
ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
i); return ret;
}
}
}
/* One RXDMA error destination ring per RXDMA engine of this pdev */
for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
HAL_RXDMA_DST, 0, dp->mac_id + i,
DP_RXDMA_ERR_DST_RING_SIZE); if (ret) {
ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i); return ret;
}
}
/* Monitor-status refill ring per RXDMA engine */
for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
ret = ath11k_dp_srng_setup(ar->ab,
srng,
HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
DP_RXDMA_MON_STATUS_RING_SIZE); if (ret) {
ath11k_warn(ar->ab, "failed to setup rx_mon_status_refill_ring %d\n", i); return ret;
}
}
/* If rxdma1_enable is false there is no need to set up
 * rxdma_mon_buf_ring, rxdma_mon_dst_ring or rxdma_mon_desc_ring;
 * only the monitor-status reap timer is initialized (e.g. QCA6390).
 */
if (!ar->ab->hw_params.rxdma1_enable) { /* init mon status buffer reap timer */
timer_setup(&ar->ab->mon_reap_timer,
ath11k_dp_service_mon_ring, 0); return 0;
}
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_buf_ring.refill_buf_ring,
HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
DP_RXDMA_MONITOR_BUF_RING_SIZE); if (ret) {
ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); return ret;
}
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
DP_RXDMA_MONITOR_DST_RING_SIZE); if (ret) {
ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_DST\n"); return ret;
}
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
DP_RXDMA_MONITOR_DESC_RING_SIZE); if (ret) {
ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_DESC\n"); return ret;
}
/* NOTE(review): fragment — the REO command below belongs to a tid-reo
 * update helper whose header is not visible; 'rx_tid' and 'cmd' are
 * declared outside this chunk.
 */
ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
NULL); if (ret) {
ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
rx_tid->tid, ret); return ret;
}
/* --- fragment boundary: peer rx-tid setup path follows; the caller
 * holds ab->base_lock at this point ---
 */
peer = ath11k_peer_find(ab, vdev_id, peer_mac); if (!peer) {
ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
peer_mac);
spin_unlock_bh(&ab->base_lock); return -ENOENT;
}
/* The lock is dropped before the WMI call because WMI may sleep.
 * NOTE(review): the "%d\n: %d" format below looks garbled (newline in
 * the middle of the message) — verify against the intended string.
 */
rx_tid = &peer->rx_tid[tid]; /* Update the tid queue if it is already setup */ if (rx_tid->active) {
paddr = rx_tid->paddr;
ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
ba_win_sz, ssn, true);
spin_unlock_bh(&ab->base_lock); if (ret) {
ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
peer_mac, tid, ret); return ret;
}
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
peer_mac, paddr,
tid, 1, ba_win_sz); if (ret)
ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret); return ret;
}
rx_tid->tid = tid;
rx_tid->ba_win_sz = ba_win_sz;
/* TODO: Optimize the memory allocation for qos tid based on
 * the actual BA window size in the REO tid update path.
 */
if (tid == HAL_DESC_REO_NON_QOS_TID)
hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
/* After dma_alloc_noncoherent(), vaddr is modified for reo qdesc setup.
 * Those CPU writes are not visible to the device until
 * dma_sync_single_for_device() is called explicitly.
 * NOTE(review): the allocation itself is missing from this fragment.
 */
dma_sync_single_for_device(ab->dev, rx_tid->paddr,
rx_tid->size,
DMA_TO_DEVICE);
spin_unlock_bh(&ab->base_lock);
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
tid, 1, ba_win_sz); if (ret) {
ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
/* Undo the qdesc allocation if firmware refused the queue */
ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
}
return ret;
}
/* ath11k_dp_rx_ampdu_start() - set up the RX reorder queue when an RX
 * BA session starts for this station/tid.
 */
int ath11k_dp_rx_ampdu_start(struct ath11k *ar, struct ieee80211_ampdu_params *params)
{ struct ath11k_base *ab = ar->ab; struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; int ret;
ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
params->tid, params->buf_size,
params->ssn, arsta->pn_type); if (ret)
ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
/* --- fragment boundary: the lines below belong to the ampdu-stop /
 * tid-teardown path ('peer', 'paddr' and 'active' are declared outside
 * this chunk); the caller holds ab->base_lock ---
 */
peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); if (!peer) {
ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
spin_unlock_bh(&ab->base_lock); return -ENOENT;
}
paddr = peer->rx_tid[params->tid].paddr;
active = peer->rx_tid[params->tid].active;
/* Nothing to tear down if the tid queue was never set up */
if (!active) {
spin_unlock_bh(&ab->base_lock); return 0;
}
ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
spin_unlock_bh(&ab->base_lock); if (ret) {
ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
params->tid, ret); return ret;
}
/* ba_win_sz = 1 effectively deletes the reorder queue in firmware */
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
params->sta->addr, paddr,
params->tid, 1, 1); if (ret)
ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
ret);
/* NOTE(review): tail fragment of the PN replay configuration path
 * ('key', 'key_cmd', 'cmd', 'arvif', 'peer_addr', 'tid', 'rx_tid' and
 * 'peer' are declared outside this chunk).
 *
 * Enable PN/TSC replay check offload only for unicast frames; mac80211's
 * PN/TSC replay check functionality is used for bcast/mcast for now.
 */
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return 0;
switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (key_cmd == SET_KEY) {
/* 48-bit PN check for TKIP/CCMP/GCMP ciphers */
cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
cmd.pn_size = 48;
} break; default: break;
}
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); if (!peer) {
ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
spin_unlock_bh(&ab->base_lock); return -ENOENT;
}
/* Push the updated REO queue configuration for every active tid */
for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
rx_tid = &peer->rx_tid[tid]; if (!rx_tid->active) continue;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL); if (ret) {
ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
tid, ret); break;
}
}
spin_unlock_bh(&ab->base_lock);
return ret;
}
/* ath11k_get_ppdu_user_index() - find the per-user stats slot for
 * @peer_id inside @ppdu_stats.
 *
 * Scans the valid entries for a matching peer id; the first invalid
 * slot encountered is handed out as a fresh entry for this peer.
 *
 * Returns the slot index on success, -EINVAL when all slots are taken
 * and none matches.
 *
 * Fixes vs. original: fused keywords "staticinlineint" restored to
 * "static inline int", and the missing final return (falling off the
 * end of a non-void function is undefined behavior) added.
 */
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}
/* NOTE(review): fragment of the HTT PPDU stats TLV parser — 'tag',
 * 'len', 'ptr', 'ab' and 'ppdu_info' come from the enclosing function,
 * which is not visible here.  Each case validates the TLV length before
 * copying the payload into the matching ppdu_stats member.
 */
switch (tag) { case HTT_PPDU_STATS_TAG_COMMON: if (len < sizeof(struct htt_ppdu_stats_common)) {
ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag); return -EINVAL;
}
memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, sizeof(struct htt_ppdu_stats_common)); break; case HTT_PPDU_STATS_TAG_USR_RATE: if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag); return -EINVAL;
}
/* Per-user TLV: locate (or allocate) the user_stats slot by peer id */
peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
peer_id); if (cur_user < 0) return -EINVAL;
user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
user_stats->peer_id = peer_id;
user_stats->is_valid_peer_id = true;
memcpy((void *)&user_stats->rate, ptr, sizeof(struct htt_ppdu_stats_user_rate));
user_stats->tlv_flags |= BIT(tag); break; case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag); return -EINVAL;
}
peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
peer_id); if (cur_user < 0) return -EINVAL;
user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
user_stats->peer_id = peer_id;
user_stats->is_valid_peer_id = true;
memcpy((void *)&user_stats->cmpltn_cmn, ptr, sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
user_stats->tlv_flags |= BIT(tag); break; case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag); return -EINVAL;
}
/* Note: If host configured fixed rates and in some other special
 * cases, the broadcast/management frames are sent in different rates.
 * Firmware rate's control to be skipped for this?
 *
 * NOTE(review): fragment — 'flags', 'mcs' and 'ab' are declared in the
 * enclosing (not visible) peer-stats update function.
 */
if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs); return;
}
/* NOTE(review): fragment of the HTT backpressure event handler.  The
 * first event word carries the head/tail pointers; the next word the
 * backpressure time.  The UMAC branch below is truncated — its
 * bp_stats assignment and the 'else if (ring_type == ..LMAC..)' test
 * appear to be missing from this chunk; verify against the full source.
 */
hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
++data;
backpressure_time = *data;
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) return;
if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) return;
bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
} else {
ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
ring_type); return;
}
/* As the msdu is spread across multiple rx buffers,
 * find the offset to the start of msdu for computing
 * the length of the msdu in the first buffer.
 *
 * NOTE(review): fragment of the multi-buffer MSDU coalescing path;
 * 'first', 'msdu_list', 'ldesc', 'rxcb', 'msdu_len', 'l3pad_bytes' and
 * 'hal_rx_desc_sz' are declared in the enclosing function.
 */
buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
/* MSDU spans over multiple buffers because the length of the MSDU
 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
 */
skb_put(first, DP_RX_BUFFER_SIZE);
skb_pull(first, buf_first_hdr_len);
/* When an MSDU spread over multiple buffers attention, MSDU_END and
 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
 */
ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
/* Grow the first skb if the remaining fragments will not fit; on
 * allocation failure drop every buffer of this MSDU.
 */
space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); if (space_extra > 0 &&
(pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { /* Free up all buffers of the MSDU */ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
rxcb = ATH11K_SKB_RXCB(skb); if (!rxcb->is_continuation) {
dev_kfree_skb_any(skb); break;
}
dev_kfree_skb_any(skb);
} return -ENOMEM;
}
int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{ switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return 0; case HAL_ENCRYPT_TYPE_CCMP_128: return IEEE80211_CCMP_MIC_LEN; case HAL_ENCRYPT_TYPE_CCMP_256: return IEEE80211_CCMP_256_MIC_LEN; case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return IEEE80211_GCMP_MIC_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break;
}
ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); return 0;
}
staticint ath11k_dp_rx_crypto_param_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{ switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: return 0; case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return IEEE80211_TKIP_IV_LEN; case HAL_ENCRYPT_TYPE_CCMP_128: return IEEE80211_CCMP_HDR_LEN; case HAL_ENCRYPT_TYPE_CCMP_256: return IEEE80211_CCMP_256_HDR_LEN; case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return IEEE80211_GCMP_HDR_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break;
}
ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); return 0;
}
staticint ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{ switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: case HAL_ENCRYPT_TYPE_CCMP_128: case HAL_ENCRYPT_TYPE_CCMP_256: case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return 0; case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return IEEE80211_TKIP_ICV_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break;
}
ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); return 0;
}
/* NOTE(review): fragment of the native-wifi undecap path — 'msdu',
 * 'da', 'sa', 'decap_hdr', 'first_hdr', 'qos_ctl' and 'rxcb' come from
 * the enclosing function, which is not fully visible here.
 */
/* copy SA & DA and pull decapped header */
hdr = (struct ieee80211_hdr *)msdu->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
/* original 802.11 header is valid for the first msdu
 * hence we can reuse the same header
 */
if (rxcb->is_first_msdu) {
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
/* Each A-MSDU subframe will be reported as a separate MSDU,
 * so strip the A-MSDU bit from QoS Ctl.
 */ if (ieee80211_is_data_qos(hdr->frame_control)) {
qos = ieee80211_get_qos_ctl(hdr);
qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
} else { /* Rebuild qos header if this is a middle/last msdu */
hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
/* Reset the order bit as the HT_Control header is stripped */
hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
qos_ctl = rxcb->tid;
if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
/* TODO Add other QoS ctl fields when required */
/* copy decap header before overwriting for reuse below */
memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
}
/* original 802.11 header has a different DA and in
 * case of 4addr it may also have different SA
 */
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
/* original 802.11 header has a different DA and in
 * case of 4addr it may also have different SA
 *
 * NOTE(review): tail fragment of an ethernet/eth undecap helper; the
 * header rebuild preceding it is not visible in this chunk.
 */
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
/* NOTE(review): tail fragment of the RX undecap dispatcher — 'decap',
 * 'msdu', 'first_hdr', 'enctype', 'status', 'ehdr' and 'decrypted' come
 * from the enclosing function, which is not fully visible here.
 */
switch (decap) { case DP_RX_DECAP_TYPE_NATIVE_WIFI:
ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
enctype, status); break; case DP_RX_DECAP_TYPE_RAW:
ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
decrypted); break; case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
ehdr = (struct ethhdr *)msdu->data;
/* mac80211 allows fast path only for authorized STA */ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
/* EAPOL frames are handed up in 802.11 form */
ATH11K_SKB_RXCB(msdu)->is_eapol = true;
ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
enctype, status); break;
}
/* PN for mcast packets will be validated in mac80211;
 * remove eth header and add 802.11 header.
 */
if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
enctype, status); break; case DP_RX_DECAP_TYPE_8023: /* TODO: Handle undecap for these formats */ break;
}
}
/* NOTE(review): the following trailing text is non-code residue —
 * apparently a German website disclaimer picked up during extraction
 * ("compiled to the best of our knowledge; no guarantee of
 * completeness, correctness or quality; the syntax colouring and
 * measurement are still experimental").  Preserved verbatim but
 * commented out so it cannot be parsed as C:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */