/* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
 * the *index* used for the last TX, and returns the next valid *index* to use.
 * In order to set it in the tx_cmd, must do BIT(idx).
 */
static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
{
	u8 index = last_idx;

	for (int i = 0; i < MAX_ANT_NUM; i++) {
		/* Walk the indices cyclically, starting right after last_idx */
		index = (index + 1) % MAX_ANT_NUM;
		if (valid & BIT(index))
			return index;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);

	/* No valid antenna in the mask (firmware/config bug): keep the
	 * previous index instead of falling off the end of a non-void
	 * function, which is undefined behavior.
	 */
	return last_idx;
}
if (link_sta->he_cap.has_he)
max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
}
return max_size;
}
staticint iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta); /* We can't know when the station is asleep or awake, so we * must disable the queue hang detection.
*/ unsignedint watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
IWL_WATCHDOG_DISABLED :
mld->trans->mac_cfg->base->wd_timeout; int queue, size;
local_bh_disable();
spin_lock(&mld->add_txqs_lock);
list_del_init(&mld_txq->list);
spin_unlock(&mld->add_txqs_lock); /* If the queue allocation failed, we can't transmit. Leave the * frames on the txq, maybe the attempt to allocate the queue * will succeed.
*/ if (!failed)
iwl_mld_tx_from_txq(mld, txq);
local_bh_enable();
}
}
/* Do not compute checksum if already computed */ if (skb->ip_summed != CHECKSUM_PARTIAL) goto out;
/* We do not expect to be requested to csum stuff we do not support */
/* TBD: do we also need to check * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all * the devices we support have this flag?
*/ if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
skb->protocol != htons(ETH_P_IPV6), "No support for requested checksum\n")) {
skb_checksum_help(skb); goto out;
}
hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
protocol = hp->nexthdr;
off += ipv6_optlen(hp);
} /* if we get here - protocol now should be TCP/UDP */ #endif
}
/* Set offset to IP header (snap). * We don't support tunneling so no need to take care of inner header. * Size is in words.
*/
offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
/* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ if (skb->protocol == htons(ETH_P_IP) && amsdu) {
ip_hdr(skb)->check = 0;
offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
}
if (amsdu)
offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); elseif (ieee80211_hdrlen(hdr->frame_control) % 4) /* padding is inserted later in transport */
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
if (link_id == IEEE80211_LINK_UNSPECIFIED &&
ieee80211_vif_is_mld(vif)) { /* shouldn't do this when >1 link is active */
WARN_ON(hweight16(vif->active_links) != 1);
link_id = __ffs(vif->active_links);
}
if (link_id < IEEE80211_LINK_UNSPECIFIED) { struct ieee80211_bss_conf *link_conf;
rcu_read_lock();
link_conf = rcu_dereference(vif->link_conf[link_id]); if (link_conf) {
*basic_rates = link_conf->basic_rates; if (link_conf->chanreq.oper.chan)
*band = link_conf->chanreq.oper.chan->band;
}
rcu_read_unlock();
}
}
/* if the rate isn't a well known legacy rate, take the lowest one */ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
rate_idx = iwl_mld_get_lowest_rate(mld, info,
info->control.vif);
WARN_ON_ONCE(rate_idx < 0);
/* Set CCK or OFDM flag */ if (rate_idx <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_MOD_TYPE_CCK; else
rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
/* Legacy rates are indexed: * 0 - 3 for CCK and 0 - 7 for OFDM
*/
rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
result = RATE_MCS_MOD_TYPE_VHT;
result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
result |= RATE_MCS_CHAN_WIDTH_40; elseif (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
result |= RATE_MCS_CHAN_WIDTH_80; elseif (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
result |= RATE_MCS_CHAN_WIDTH_160;
} elseif (rate->flags & IEEE80211_TX_RC_MCS) { /* only MCS 0-15 are supported */
u8 mcs = rate->idx & 7;
u8 nss = rate->idx > 7;
result = RATE_MCS_MOD_TYPE_HT;
result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
result |= RATE_MCS_SGI_MSK; if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
result |= RATE_MCS_CHAN_WIDTH_40; if (info->flags & IEEE80211_TX_CTL_LDPC)
result |= RATE_MCS_LDPC_MSK; if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
result |= RATE_MCS_STBC_MSK;
} else {
result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
}
if (info->control.antennas)
result |= u32_encode_bits(info->control.antennas,
RATE_MCS_ANT_AB_MSK); else
result |= iwl_mld_get_tx_ant(mld, info, sta, fc);
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
if (!amsdu || !skb_is_gso(skb)) return;
/* As described in IEEE Std 802.11-2020, table 9-30 (Address * field contents), A-MSDU address 3 should contain the BSSID * address. * * In TSO, the skb header address 3 contains the original address 3 to * correctly create all the A-MSDU subframes headers from it. * Override now the address 3 in the command header with the BSSID. * * Note: we fill in the MLD address, but the firmware will do the * necessary translation to link address after encryption.
*/
vif = info->control.vif; switch (vif->type) { case NL80211_IFTYPE_STATION:
ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr); break; case NL80211_IFTYPE_AP:
ether_addr_copy(tx_cmd->hdr->addr3, vif->addr); break; default: break;
}
}
if (!info->control.hw_key)
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/* For data and mgmt packets rate info comes from the fw. * Only set rate/antenna for injected frames with fixed rate, or * when no sta is given.
*/ if (unlikely(!sta ||
info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
hdr->frame_control);
} elseif (!ieee80211_is_data(hdr->frame_control) ||
(mld_sta &&
mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) { /* These are important frames */
flags |= IWL_TX_FLAGS_HIGH_PRI;
}
/* Total # bytes to be transmitted */
tx_cmd->len = cpu_to_le16((u16)skb->len);
tx_cmd->flags = cpu_to_le16(flags);
tx_cmd->rate_n_flags = rate_n_flags;
}
/* Caller of this need to check that info->control.vif is not NULL */ staticstruct iwl_mld_link *
iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
{ struct iwl_mld_vif *mld_vif =
iwl_mld_vif_from_mac80211(info->control.vif);
u32 link_id = u32_get_bits(info->control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
if (link_id == IEEE80211_LINK_UNSPECIFIED) { if (info->control.vif->active_links)
link_id = ffs(info->control.vif->active_links) - 1; else
link_id = 0;
}
if (txq && txq->sta) return iwl_mld_txq_from_mac80211(txq)->fw_id;
if (!info->control.vif) return IWL_MLD_INVALID_QUEUE;
switch (info->control.vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC:
link = iwl_mld_get_link_from_tx_info(info);
if (WARN_ON(!link)) break;
/* ucast disassociate/deauth frames without a station might * happen, especially with reason 7 ("Class 3 frame received * from nonassociated STA").
*/ if (ieee80211_is_mgmt(fc) &&
(!ieee80211_is_bufferable_mmpdu(skb) ||
ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) return link->bcast_sta.queue_id;
if (is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_order(fc)) return link->mcast_sta.queue_id;
WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc)); return link->bcast_sta.queue_id; case NL80211_IFTYPE_P2P_DEVICE:
mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control))) return -1;
dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans); if (unlikely(!dev_tx_cmd)) return -1;
if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { if (IWL_MLD_NON_TRANSMITTING_AP) return -1;
iwl_mld_probe_resp_set_noa(mld, skb);
}
iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);
if (ieee80211_is_data(hdr->frame_control)) { if (ieee80211_is_data_qos(hdr->frame_control))
tid = ieee80211_get_tid(hdr); else
tid = IWL_TID_NON_QOS;
}
IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
tid, queue, skb->len);
/* From now on, we cannot access info->control */
memset(&info->status, 0, sizeof(info->status));
memset(info->driver_data, 0, sizeof(info->driver_data));
info->driver_data[1] = dev_tx_cmd;
if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue)) goto err;
/* Update low-latency counter when a packet is queued instead * of after TX, it makes sense for early low-latency detection
*/ if (sta)
iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);
if (!ieee80211_is_data_qos(hdr->frame_control) ||
!sta->cur->max_rc_amsdu_len) return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
/* Do not build AMSDU for IPv6 with extension headers. * Ask stack to segment and checksum the generated MPDUs for us.
*/ if (skb->protocol == htons(ETH_P_IPV6) &&
((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
IPPROTO_TCP) {
netdev_flags &= ~NETIF_F_CSUM_MASK; return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
}
tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL;
max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid]; if (!max_tid_amsdu_len) return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
/* Sub frame header + SNAP + IP header + TCP header + MSS */
subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
pad = (4 - subf_len) & 0x3;
/* If we have N subframes in the A-MSDU, then the A-MSDU's size is * N * subf_len + (N - 1) * pad.
*/
num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);
if (sta->max_amsdu_subframes &&
num_subframes > sta->max_amsdu_subframes)
num_subframes = sta->max_amsdu_subframes;
/* Make sure we have enough TBs for the A-MSDU: * 2 for each subframe * 1 more for each fragment * 1 more for the potential data in the header
*/ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
mld->trans->info.max_skb_frags)
num_subframes = 1;
if (num_subframes > 1)
*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
/* This skb fits in one single A-MSDU */ if (tcp_payload_len <= num_subframes * mss) {
__skb_queue_tail(mpdus_skbs, skb); return 0;
}
/* Trick the segmentation function to make it create SKBs that can fit * into one A-MSDU.
*/ return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
}
/* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting * large packets when necessary and transmitting each segment as MPDU.
*/ staticint iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb, struct ieee80211_txq *txq)
{ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct sk_buff *orig_skb = skb; struct sk_buff_head mpdus_skbs; unsignedint payload_len; int ret;
if (payload_len <= skb_shinfo(skb)->gso_size) return iwl_mld_tx_mpdu(mld, skb, txq);
if (!info->control.vif) return -1;
__skb_queue_head_init(&mpdus_skbs);
ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs); if (ret) return ret;
WARN_ON(skb_queue_empty(&mpdus_skbs));
while (!skb_queue_empty(&mpdus_skbs)) {
skb = __skb_dequeue(&mpdus_skbs);
ret = iwl_mld_tx_mpdu(mld, skb, txq); if (!ret) continue;
/* Free skbs created as part of TSO logic that have not yet * been dequeued
*/
__skb_queue_purge(&mpdus_skbs);
/* skb here is not necessarily same as skb that entered * this method, so free it explicitly.
*/ if (skb == orig_skb)
ieee80211_free_txskb(mld->hw, skb); else
kfree_skb(skb);
/* there was error, but we consumed skb one way or * another, so return 0
*/ return 0;
}
return 0;
} #else staticint iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb, struct ieee80211_txq *txq)
{ /* Impossible to get TSO without CONFIG_INET */
WARN_ON(1);
/* * No need for threads to be pending here, they can leave the first * taker all the work. * * mld_txq->tx_request logic: * * If 0, no one is currently TXing, set to 1 to indicate current thread * will now start TX and other threads should quit. * * If 1, another thread is currently TXing, set to 2 to indicate to * that thread that there was another request. Since that request may * have raced with the check whether the queue is empty, the TXing * thread should check the queue's status one more time before leaving. * This check is done in order to not leave any TX hanging in the queue * until the next TX invocation (which may not even happen). * * If 2, another thread is currently TXing, and it will already double * check the queue, so do nothing.
*/ if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2)) return;
rcu_read_lock(); do { while (likely(!mld_txq->status.stop_full) &&
(skb = ieee80211_tx_dequeue(mld->hw, txq)))
iwl_mld_tx_skb(mld, skb, txq);
} while (atomic_dec_return(&mld_txq->tx_request));
IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
txq->sta ? txq->sta->addr : zero_addr, txq->tid);
/* validate the size of the variable part of the notif */ if (IWL_FW_CHECK(mld, notif_size != pkt_len, "Invalid tx_resp notif size (expected=%zu got=%u)\n",
notif_size, pkt_len)) return;
/* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: case TX_STATUS_DIRECT_DONE:
info->flags |= IEEE80211_TX_STAT_ACK; break; default: break;
}
/* If we are freeing multiple frames, mark all the frames * but the first one as acked, since they were acknowledged * before
*/ if (skb_freed > 1)
info->flags |= IEEE80211_TX_STAT_ACK;
if (tx_failure) { enum iwl_fw_ini_time_point tp =
IWL_FW_INI_TIME_POINT_TX_FAILED;
if (ieee80211_is_action(hdr->frame_control))
tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED; elseif (ieee80211_is_mgmt(hdr->frame_control))
mgmt = true;
link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]); if (!link_sta) { /* This can happen if the TX cmd was sent before pre_rcu_remove * but the TX response was received after
*/
IWL_DEBUG_TX_REPLY(mld, "Got valid sta_id (%d) but sta is NULL\n",
sta_id); goto out;
}
/* Packet was transmitted successfully, failures come as single * frames because before failing a frame the firmware transmits * it without aggregation at least once.
*/ if (!in_flush)
info->flags |= IEEE80211_TX_STAT_ACK; else
info->flags &= ~IEEE80211_TX_STAT_ACK;
if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id, "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
le16_to_cpu(rsp->sta_id))) {
ret = -EIO; goto free_rsp;
}
num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues); if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP, "num_flushed_queues %d\n", num_flushed_queues)) {
ret = -EIO; goto free_rsp;
}
for (int i = 0; i < num_flushed_queues; i++) { struct iwl_flush_queue_info *queue_info = &rsp->queues[i]; int read_after = le16_to_cpu(queue_info->read_after_flush); int txq_id = le16_to_cpu(queue_info->queue_num);
if (IWL_FW_CHECK(mld,
txq_id >= ARRAY_SIZE(mld->fw_id_to_txq), "Invalid txq id %d\n", txq_id)) continue;
if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len, "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
tfd_cnt, pkt_len)) return;
IWL_DEBUG_TX_REPLY(mld, "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
le16_to_cpu(ba_res->txed),
le16_to_cpu(ba_res->done));
for (int i = 0; i < tfd_cnt; i++) { struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; int txq_id = le16_to_cpu(ba_tfd->q_num); int index = le16_to_cpu(ba_tfd->tfd_index);
if (IWL_FW_CHECK(mld,
txq_id >= ARRAY_SIZE(mld->fw_id_to_txq), "Invalid txq id %d\n", txq_id)) continue;
/* NOTE(review): the lines below are web-page boilerplate (a German site
 * disclaimer about completeness/correctness of information and experimental
 * syntax highlighting) that was accidentally pasted into this source file.
 * They are not code and should be removed; kept here commented out pending
 * confirmation:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */