// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */
/* Let the default ring selection be based on current processor
 * number, where one of the 3 tcl rings are selected based on
 * the smp_processor_id(). In case that ring
 * is full/busy, we resort to other available rings.
 * If all rings are full, we drop the packet.
 * TODO: Add throttling logic when all rings are full
 */
ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
if (gsn_valid) { /* Reset and Initialize meta_data_flags with Global Sequence * Number (GSN) info.
*/
ti.meta_data_flags =
u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
HTT_TCL_META_DATA_TYPE) |
u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
}
switch (ti.encap_type) { case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
is_null_frame = ieee80211_is_nullfunc(hdr->frame_control); if (ahvif->vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) { if (skb->protocol == cpu_to_be16(ETH_P_PAE) || is_null_frame)
is_diff_encap = true;
/* Firmware expects msdu ext descriptor for nwifi/raw packets * received in ETH mode. Without this, observed tx fail for * Multicast packets in ETH mode.
*/
msdu_ext_desc = true;
} else {
ath12k_dp_tx_encap_nwifi(skb);
} break; case HAL_TCL_ENCAP_TYPE_RAW: if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
ret = -EINVAL; goto fail_remove_tx_buf;
} break; case HAL_TCL_ENCAP_TYPE_ETHERNET: /* no need to encap */ break; case HAL_TCL_ENCAP_TYPE_802_3: default: /* TODO: Take care of other encap modes as well */
ret = -EINVAL;
atomic_inc(&ab->device_stats.tx_err.misc_fail); goto fail_remove_tx_buf;
}
/* The DMA engine requires the payload IOVA to honor iova_mask (when
 * non-zero); try to realign a misaligned payload before mapping.
 */
if (iova_mask &&
    (unsigned long)skb->data & iova_mask) {
	ret = ath12k_dp_tx_align_payload(ab, &skb);
	if (ret) {
		ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
		/* don't bail out, give original buffer
		 * a chance even unaligned.
		 */
		goto map;
	}

	/* hdr is pointing to a wrong place after alignment,
	 * so refresh it for later use.
	 */
	hdr = (void *)skb->data;
}
map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->device_stats.tx_err.misc_fail);
ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM; goto fail_remove_tx_buf;
}
if ((!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
!(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
!(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
ieee80211_has_protected(hdr->frame_control)) ||
is_diff_encap) { /* Firmware is not expecting meta data for qos null * nwifi packet received in ETH encap mode.
*/ if (is_null_frame && msdu_ext_desc) goto skip_htt_meta;
/* Add metadata for sw encrypted vlan group traffic * and EAPOL nwifi packet received in ETH encap mode.
*/
add_htt_metadata = true;
msdu_ext_desc = true;
ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
skip_htt_meta:
ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
}
if (add_htt_metadata) {
ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc); if (ret < 0) {
ath12k_dbg(ab, ATH12K_DBG_DP_TX, "Failed to add HTT meta data, dropping packet\n"); goto fail_free_ext_skb;
}
}
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr); if (ret) goto fail_free_ext_skb;
hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring); if (!hal_tcl_desc) { /* NOTE: It is highly unlikely we'll be running out of tcl_ring * desc because the desc is directly enqueued onto hw queue.
*/
ath12k_hal_srng_access_end(ab, tcl_ring);
ab->device_stats.tx_err.desc_na[ti.ring_id]++;
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
/* Checking for available tcl descriptors in another ring in * case of failure due to full tcl ring now, is better than * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet.
*/ if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
ab->hw_params->tcl_ring_retry) {
tcl_ring_retry = true;
ring_selector++;
}
ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts, peer_id); break; case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params); break; case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY: /* This event is to be handled only when the driver decides to * use WDS offload functionality.
*/ break; default:
ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status); break;
}
}
/* Resolve the peer for this tx completion under base_lock and pick an
 * NSS value for the reported tx rate; bail out quietly if the peer (or
 * its mac80211 station) is already gone.
 */
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, ts->peer_id); if (!peer || !peer->sta) {
ath12k_dbg(ab, ATH12K_DBG_DP_TX, "failed to find the peer by id %u\n", ts->peer_id);
spin_unlock_bh(&ab->base_lock); return;
}
sta = peer->sta;
ahsta = ath12k_sta_to_ahsta(sta);
arsta = &ahsta->deflink;
/* This is to prefer choose the real NSS value arsta->last_txrate.nss, * if it is invalid, then choose the NSS value while assoc.
*/ if (arsta->last_txrate.nss)
txrate.nss = arsta->last_txrate.nss; else
txrate.nss = arsta->peer_nss;
spin_unlock_bh(&ab->base_lock);
switch (ts->pkt_type) { case HAL_TX_RATE_STATS_PKT_TYPE_11A: case HAL_TX_RATE_STATS_PKT_TYPE_11B:
ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
ts->pkt_type,
&rate_idx,
&rate); if (ret < 0) {
ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret); return;
}
txrate.legacy = rate; break; case HAL_TX_RATE_STATS_PKT_TYPE_11N: if (ts->mcs > ATH12K_HT_MCS_MAX) {
ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs); return;
}
if (ts->sgi)
txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; break; case HAL_TX_RATE_STATS_PKT_TYPE_11AX: if (ts->mcs > ATH12K_HE_MCS_MAX) {
ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs); return;
}
txrate.mcs = ts->mcs;
txrate.flags = RATE_INFO_FLAGS_HE_MCS;
txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi); break; case HAL_TX_RATE_STATS_PKT_TYPE_11BE: if (ts->mcs > ATH12K_EHT_MCS_MAX) {
ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs); return;
}
/* The pdev is no longer active (not in pdevs_active); drop the frame. */
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
	ieee80211_free_txskb(ah->hw, msdu);
	goto exit;
}

/* No vif recorded for this skb; nobody to report the tx status to. */
if (!skb_cb->vif) {
	ieee80211_free_txskb(ah->hw, msdu);
	goto exit;
}
vif = skb_cb->vif; if (vif) {
ahvif = ath12k_vif_to_ahvif(vif);
arvif = rcu_dereference(ahvif->link[skb_cb->link_id]); if (arvif) {
spin_lock_bh(&arvif->link_stats_lock);
arvif->link_stats.tx_completed++;
spin_unlock_bh(&arvif->link_stats_lock);
}
}
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
/* skip tx rate update from ieee80211_status*/
info->status.rates[0].idx = -1;
/* Translate the TQM release reason into mac80211 tx status flags. */
switch (ts->status) {
case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ack_rssi;

		if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
			      ab->wmi_ab.svc_map)) {
			/* Firmware reports ack_rssi relative to the noise
			 * floor when it lacks HW dB-to-dBm conversion, so
			 * add the pdev noise floor to get dBm.
			 */
			spin_lock_bh(&ar->data_lock);
			noise_floor = ath12k_pdev_get_noise_floor(ar);
			spin_unlock_bh(&ar->data_lock);

			info->status.ack_signal += noise_floor;
		}

		info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}
	break;
case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
	if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		break;
	}
	fallthrough;
case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
	/* The failure status is due to internal firmware tx failure
	 * hence drop the frame; do not update the status of frame to
	 * the upper layer
	 */
	ieee80211_free_txskb(ah->hw, msdu);
	goto exit;
default:
	ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
		   ts->status);
	break;
}
/* NOTE: Tx rate status reporting. Tx completion status does not have * necessary information (for example nss) to build the tx rate. * Might end up reporting it out-of-band from HTT stats.
*/
ath12k_dp_tx_update_txcompl(ar, ts);
/* Resolve the peer under base_lock for the per-station status update;
 * if it has vanished, free the frame and bail out.
 */
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
	ath12k_err(ab, "dp_tx: failed to find the peer with peer_id %d\n",
		   ts->peer_id);
	spin_unlock_bh(&ab->base_lock);
	ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
	goto exit;
}
ahsta = ath12k_sta_to_ahsta(peer->sta);
arsta = &ahsta->deflink;
/* The HW status ring still has a descriptor pending but the local
 * tx_status buffer (head would collide with tail) is full, so some
 * completions cannot be queued for processing; warn about the loss.
 */
if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
(ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) ==
tx_ring->tx_status_tail)) { /* TODO: Process pending tx_status messages when kfifo_is_full() */
ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
ret = ath12k_htc_send(&ab->htc, dp->eid, skb); if (ret) {
dev_kfree_skb_any(skb); return ret;
}
ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ); if (ret == 0) {
ath12k_warn(ab, "htt target version request timed out\n"); return -ETIMEDOUT;
}
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR); return -EOPNOTSUPP;
}
return 0;
}
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
{ struct ath12k_base *ab = ar->ab; struct ath12k_dp *dp = &ab->dp; struct sk_buff *skb; struct htt_ppdu_stats_cfg_cmd *cmd; int len = sizeof(*cmd);
u8 pdev_mask; int ret; int i;
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
skb = ath12k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM;
if (tlv_filter->rx_mpdu_start_wmask > 0 &&
tlv_filter->rx_msdu_end_wmask > 0) {
cmd->info2 |=
le32_encode_bits(true,
HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
cmd->rx_mpdu_start_end_mask =
le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK); /* mpdu_end is not used for any hardwares so far * please assign it in future if any chip is * using through hal ops
*/
cmd->rx_mpdu_start_end_mask |=
le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
cmd->rx_msdu_end_word_mask =
le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
}
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb); if (ret) goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
int
ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, struct htt_ext_stats_cfg_params *cfg_params,
u64 cookie)
{ struct ath12k_base *ab = ar->ab; struct ath12k_dp *dp = &ab->dp; struct sk_buff *skb; struct htt_ext_stats_cfg_cmd *cmd; int len = sizeof(*cmd); int ret;
u32 pdev_id;
skb = ath12k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM;
if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
}
if (ab->hw_params->rxdma1_enable) { for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
ar->dp.mac_id + i,
HAL_RXDMA_MONITOR_DST,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter); if (ret) {
ath12k_err(ab, "failed to setup filter for monitor buf %d\n",
ret); return ret;
}
} return 0;
}
if (!reset) { for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
i,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter); if (ret) {
ath12k_err(ab, "failed to setup filter for mon rx buf %d\n",
ret); return ret;
}
}
}
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; if (!reset) {
tlv_filter.rx_filter =
HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
}
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
i,
HAL_RXDMA_MONITOR_STATUS,
RX_MON_STATUS_BUF_SIZE,
&tlv_filter); if (ret) {
ath12k_err(ab, "failed to setup filter for mon status buf %d\n",
ret); return ret;
}
}
return 0;
}
int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id, int mac_id, enum hal_ring_type ring_type, int tx_buf_size, struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{ struct htt_tx_ring_selection_cfg_cmd *cmd; struct hal_srng *srng = &ab->hal.srng_list[ring_id]; struct hal_srng_params params; struct sk_buff *skb; int len = sizeof(*cmd); enum htt_srng_ring_type htt_ring_type; enum htt_srng_ring_id htt_ring_id; int ret;
skb = ath12k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM;
/*
 * NOTE(review): the trailing text below is not C code; it appears to be
 * unrelated website-disclaimer residue picked up during extraction.
 * English translation of the original German, preserved for the record:
 * "The information on this website has been carefully compiled to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */