// SPDX-License-Identifier: ISC
/* Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */
staticint __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{ struct ath10k_hw_params *hw = &htt->ar->hw_params; struct htt_rx_desc *rx_desc; struct ath10k_skb_rxcb *rxcb; struct sk_buff *skb;
dma_addr_t paddr; int ret = 0, idx;
/* The Full Rx Reorder firmware has no way of telling the host * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring. * To keep things simple make sure ring is always half empty. This * guarantees there'll be no replenishment overruns possible.
*/
BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
if (idx < 0 || idx >= htt->rx_ring.size) {
ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
idx &= htt->rx_ring.size_mask;
ret = -ENOMEM; goto fail;
}
while (num > 0) {
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); if (!skb) {
ret = -ENOMEM; goto fail;
}
if (!IS_ALIGNED((unsignedlong)skb->data, HTT_RX_DESC_ALIGN))
skb_pull(skb,
PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
skb->data);
/* Clear rx_desc attention word before posting to Rx ring */
rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
if (htt->rx_ring.in_ord_rx) {
hash_add(htt->rx_ring.skb_table,
&ATH10K_SKB_RXCB(skb)->hlist,
paddr);
}
num--;
idx++;
idx &= htt->rx_ring.size_mask;
}
fail: /* * Make sure the rx buffer is updated before available buffer * index to avoid any potential rx ring corruption.
*/
mb();
*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); return ret;
}
staticvoid ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{ int ret, num_deficit, num_to_fill;
/* Refilling the whole RX ring buffer proves to be a bad idea. The * reason is RX may take up significant amount of CPU cycles and starve * other tasks, e.g. TX on an ethernet device while acting as a bridge * with ath10k wlan interface. This ended up with very poor performance * once CPU the host system was overwhelmed with RX on ath10k. * * By limiting the number of refills the replenishing occurs * progressively. This in turns makes use of the fact tasklets are * processed in FIFO order. This means actual RX processing can starve * out refilling. If there's not enough buffers on RX ring FW will not * report RX until it is refilled with enough buffers. This * automatically balances load wrt to CPU power. * * This probably comes at a cost of lower maximum throughput but * improves the average and stability.
*/
spin_lock_bh(&htt->rx_ring.lock);
num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); if (ret == -ENOMEM) { /* * Failed to fill it to the desired level - * we'll start a timer and try again next time. * As long as enough buffers are left in the ring for * another A-MPDU rx, no special recovery is needed.
*/
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
} elseif (num_deficit > 0) {
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
}
spin_unlock_bh(&htt->rx_ring.lock);
}
/* FIXME: we must report msdu payload since this is what caller * expects now
*/
skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
/* * Sanity check - confirm the HW is finished filling in the * rx data. * If the HW and SW are working correctly, then it's guaranteed * that the HW's MAC DMA is done before this point in the SW. * To prevent the case that we handle a stale Rx descriptor, * just assert for now until we have a way to recover.
*/ if (!(__le32_to_cpu(rx_desc_attention->flags)
& RX_ATTENTION_FLAGS_MSDU_DONE)) {
__skb_queue_purge(amsdu); return -EIO;
}
/* FIXME: why are we skipping the first part of the rx_desc? */
trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
hw->rx_desc_ops->rx_desc_size - sizeof(u32));
if (last_msdu) break;
}
if (skb_queue_empty(amsdu))
msdu_chaining = -1;
/* * Don't refill the ring yet. * * First, the elements popped here are still in use - it is not * safe to overwrite them until the matching call to * mpdu_desc_list_next. Second, for efficiency it is preferable to * refill the rx ring with 1 PPDU's worth of rx buffers (something * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers * (something like 3 buffers). Consequently, we'll rely on the txrx * SW to tell us when it is done pulling all the PPDU's rx buffers * out of the rx ring, and then refill it just once.
*/
if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); return -EIO;
}
}
if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); return -EIO;
}
}
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) return 0;
htt->rx_confused = false;
/* XXX: The fill level could be changed during runtime in response to * the host processing latency. Is this really worth it?
*/
htt->rx_ring.size = HTT_RX_RING_SIZE;
htt->rx_ring.size_mask = htt->rx_ring.size - 1;
htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn(ar, "htt rx ring size is not power of 2\n"); return -EINVAL;
}
staticint ath10k_htt_rx_crypto_param_len(struct ath10k *ar, enum htt_rx_mpdu_encrypt_type type)
{ switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: return 0; case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: return IEEE80211_WEP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: return IEEE80211_TKIP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: return IEEE80211_CCMP_HDR_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: return IEEE80211_CCMP_256_HDR_LEN; case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: return IEEE80211_GCMP_HDR_LEN; case HTT_RX_MPDU_ENCRYPT_WEP128: case HTT_RX_MPDU_ENCRYPT_WAPI: break;
}
ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0;
}
#define MICHAEL_MIC_LEN 8
staticint ath10k_htt_rx_crypto_mic_len(struct ath10k *ar, enum htt_rx_mpdu_encrypt_type type)
{ switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: return 0; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: return IEEE80211_CCMP_MIC_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: return IEEE80211_CCMP_256_MIC_LEN; case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: return IEEE80211_GCMP_MIC_LEN; case HTT_RX_MPDU_ENCRYPT_WEP128: case HTT_RX_MPDU_ENCRYPT_WAPI: break;
}
ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0;
}
staticint ath10k_htt_rx_crypto_icv_len(struct ath10k *ar, enum htt_rx_mpdu_encrypt_type type)
{ switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: return 0; case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: return IEEE80211_WEP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: return IEEE80211_TKIP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_WEP128: case HTT_RX_MPDU_ENCRYPT_WAPI: break;
}
ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0;
}
staticinline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
u8 ret = 0;
switch (bw) { case 0:
ret = RATE_INFO_BW_20; break; case 1:
ret = RATE_INFO_BW_40; break; case 2:
ret = RATE_INFO_BW_80; break; case 3:
ret = RATE_INFO_BW_160; break;
}
switch (preamble) { case HTT_RX_LEGACY: /* To get legacy rate index band is required. Since band can't * be undefined check if freq is non-zero.
*/ if (!status->freq) return;
status->rate_idx = mcs;
status->encoding = RX_ENC_HT; if (sgi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (bw)
status->bw = RATE_INFO_BW_40; break; case HTT_RX_VHT: case HTT_RX_VHT_WITH_TXBF: /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 * TODO check this
*/
bw = info2 & 3;
sgi = info3 & 1;
stbc = (info2 >> 3) & 1;
group_id = (info2 >> 4) & 0x3F;
if (GROUP_ID_IS_SU_MIMO(group_id)) {
mcs = (info3 >> 4) & 0x0F;
nsts_su = ((info2 >> 10) & 0x07); if (stbc)
nss = (nsts_su >> 2) + 1; else
nss = (nsts_su + 1);
} else { /* Hardware doesn't decode VHT-SIG-B into Rx descriptor * so it's impossible to decode MCS. Also since * firmware consumes Group Id Management frames host * has no knowledge regarding group/user position * mapping so it's impossible to pick the correct Nsts * from VHT-SIG-A1. * * Bandwidth and SGI are valid so report the rateinfo * on best-effort basis.
*/
mcs = 0;
nss = 1;
}
/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This * means all prior MSDUs in a PPDU are reported to mac80211 without the * TSF. Is it worth holding frames until end of PPDU is known? * * FIXME: Can we get/compute 64bit TSF?
*/
status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
status->flag |= RX_FLAG_MACTIME_END;
}
if (is_first_ppdu) { /* New PPDU starts so clear out the old per-PPDU status. */
status->freq = 0;
status->rate_idx = 0;
status->nss = 0;
status->encoding = RX_ENC_LEGACY;
status->bw = RATE_INFO_BW_20;
/* Delivered decapped frame: * [802.11 header] * [crypto param] <-- can be trimmed if !fcs_err && * !decrypt_err && !peer_idx_invalid * [amsdu header] <-- only if A-MSDU * [rfc1042/llc] * [payload] * [FCS] <-- at end, needs to be trimmed
*/
/* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when * deaggregate, so that unwanted MSDU-deaggregation is avoided for * error packets. If limit exceeds, hw sends all remaining MSDUs as * a single last MSDU with this msdu limit error set.
*/
msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
/* If MSDU limit error happens, then don't warn on, the partial raw MSDU * without first MSDU is expected in that case, and handled later here.
*/ /* This probably shouldn't happen but warn just in case */ if (WARN_ON_ONCE(!is_first && !msdu_limit_err)) return;
/* This probably shouldn't happen but warn just in case */ if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err)) return;
if (ieee80211_is_data_qos(hdr->frame_control)) {
qos = ieee80211_get_qos_ctl(hdr);
qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
if (crypto_len)
memcpy(skb_push(msdu, crypto_len),
(void *)hdr + round_up(hdr_len, bytes_aligned),
crypto_len);
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
/* In most cases this will be true for sniffed frames. It makes sense * to deliver them as-is without stripping the crypto param. This is * necessary for software based decryption. * * If there's no error then the frame is decrypted. At least that is * the case for frames that come in via fragmented rx indication.
*/ if (!is_decrypted) return;
/* The payload is decrypted so strip crypto params. Start from tail * since hdr is used to compute some stuff.
*/
/* Delivered decapped frame: * [nwifi 802.11 header] <-- replaced with 802.11 hdr * [rfc1042/llc] * * Note: The nwifi header doesn't have QoS Control and is * (always?) a 3addr frame. * * Note2: There's no A-MSDU subframe header. Even if it's part * of an A-MSDU.
*/
/* pull decapped header and copy SA & DA */
rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
hw->rx_desc_ops->rx_desc_size);
/* original 802.11 header has a different DA and in * case of 4addr it may also have different SA
*/
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
/* original 802.11 header has a different DA and in * case of 4addr it may also have different SA
*/
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
if (!is_ip4 && !is_ip6) return CHECKSUM_NONE; if (!is_tcp && !is_udp) return CHECKSUM_NONE; if (!ip_csum_ok) return CHECKSUM_NONE; if (!tcpudp_csum_ok) return CHECKSUM_NONE;
peer = ath10k_peer_find_by_id(ar, peer_id); if (!peer) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n"); returnfalse;
}
hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_data_qos(hdr->frame_control))
tid = ieee80211_get_tid(hdr); else
tid = ATH10K_TXRX_NON_QOS_TID;
/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 * decapped header. It'll be used for undecapping of each MSDU.
*/
hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
if (rx_hdr)
memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
/* Each A-MSDU subframe will use the original header as the base and be * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
*/
hdr = (void *)first_hdr;
if (ieee80211_is_data_qos(hdr->frame_control)) {
qos = ieee80211_get_qos_ctl(hdr);
qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)last->data - hw->rx_desc_ops->rx_desc_size);
/* Note: If hardware captures an encrypted frame that it can't decrypt, * e.g. due to fcs error, missing peer or invalid key data it will * report the frame as raw.
*/
is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
!has_fcs_err &&
!has_crypto_err &&
!has_peer_idx_invalid);
/* Firmware reports all necessary management frames via WMI already. * They are not reported to monitor interfaces at all so pass the ones * coming via HTT to monitor interfaces instead. This simplifies * matters a lot.
*/ if (is_mgmt)
status->flag |= RX_FLAG_ONLY_MONITOR;
if (is_decrypted) {
status->flag |= RX_FLAG_DECRYPTED;
if (likely(!is_mgmt))
status->flag |= RX_FLAG_MMIC_STRIPPED;
/* Undecapping involves copying the original 802.11 header back * to sk_buff. If frame is protected and hardware has decrypted * it then remove the protected bit.
*/ if (!is_decrypted) continue; if (is_mgmt) continue;
staticint ath10k_unchain_msdu(struct sk_buff_head *amsdu, unsignedlong *unchain_cnt)
{ struct sk_buff *skb, *first; int space; int total_len = 0; int amsdu_len = skb_queue_len(amsdu);
/* TODO: Might could optimize this by using * skb_try_coalesce or similar method to * decrease copying, or maybe get mac80211 to * provide a way to just receive a list of * skb?
*/
first = __skb_dequeue(amsdu);
/* Allocate total length all at once. */
skb_queue_walk(amsdu, skb)
total_len += skb->len;
space = total_len - skb_tailroom(first); if ((space > 0) &&
(pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { /* TODO: bump some rx-oom error stat */ /* put it back together so we can free the * whole list at once.
*/
__skb_queue_head(amsdu, first); return -1;
}
/* Walk list again, copying contents into * msdu_head
*/ while ((skb = __skb_dequeue(amsdu))) {
skb_copy_from_linear_data(skb, skb_put(first, skb->len),
skb->len);
dev_kfree_skb_any(skb);
}
/* FIXME: Current unchaining logic can only handle simple case of raw * msdu chaining. If decapping is other than raw the chaining may be * more complex and this isn't handled by the current code. Don't even * try re-constructing such frames - it'll be pretty much garbage.
*/ if (decap != RX_MSDU_DECAP_RAW ||
skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
*drop_cnt += skb_queue_len(amsdu);
__skb_queue_purge(amsdu); return;
}
/* Validate if the amsdu has a proper first subframe. * There are chances a single msdu can be received as amsdu when * the unauthenticated amsdu flag of a QoS header * gets flipped in non-SPP AMSDU's, in such cases the first * subframe has llc/snap header in place of a valid da. * return false if the da matches rfc1042 pattern
*/ if (ether_addr_equal(subframe_hdr, rfc1042_header)) returnfalse;
spin_lock_bh(&htt->rx_ring.lock); if (htt->rx_confused) {
spin_unlock_bh(&htt->rx_ring.lock); return -EIO;
}
ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
spin_unlock_bh(&htt->rx_ring.lock);
if (ret < 0) {
ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
__skb_queue_purge(&amsdu); /* FIXME: It's probably a good idea to reboot the * device instead of leaving it inoperable.
*/
htt->rx_confused = true; return ret;
}
/* NOTE(review): the text below is web-scrape residue (a German website
 * disclaimer), not part of the driver source.  Preserved here as a
 * comment so it no longer breaks compilation; it should be removed once
 * confirmed extraneous:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */