/*
 * This function checks if a particular RA list has packets while the
 * bridged-packet count is above the low threshold, and deletes bridged
 * packets from that RA list.
 *
 * Returns true if at least one packet was deleted from a list,
 * false if no such list was found.
 */
static bool
mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
				  struct list_head *ra_list_head,
				  int tid)
{
	struct mwifiex_ra_list_tbl *ra_list;
	struct sk_buff *skb, *tmp;
	bool pkt_deleted = false;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_adapter *adapter = priv->adapter;

	list_for_each_entry(ra_list, ra_list_head, list) {
		if (skb_queue_empty(&ra_list->skb_head))
			continue;

		/* Walk the queue with the _safe variant since entries are
		 * unlinked while iterating.
		 */
		skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
			tx_info = MWIFIEX_SKB_TXCB(skb);
			/* Only bridged (AP-forwarded) packets are dropped;
			 * normal TX packets are left queued.
			 */
			if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
				__skb_unlink(skb, &ra_list->skb_head);
				mwifiex_write_data_complete(adapter, skb, 0,
							    -1);
				if (ra_list->tx_paused)
					priv->wmm.pkts_paused[tid]--;
				else
					atomic_dec(&priv->wmm.tx_pkts_queued);
				pkt_deleted = true;
			}
			/* Stop once the pending bridged-packet count has
			 * dropped back to the low watermark.
			 */
			if ((atomic_read(&adapter->pending_bridged_pkts) <=
			     MWIFIEX_BRIDGED_PKTS_THR_LOW))
				break;
		}
	}

	return pkt_deleted;
}
/*
 * This function deletes packets from a particular RA list.
 *
 * The RA list index from which packets are deleted is preserved so that
 * packets from the next RA list are deleted upon a subsequent call, thus
 * maintaining fairness across TIDs.
 */
static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
{
	struct list_head *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
		/* del_list_idx rotates round-robin across the TID table */
		if (priv->del_list_idx == MAX_NUM_TID)
			priv->del_list_idx = 0;

		ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
		if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list, i)) {
			/* Advance so the next call starts from the
			 * following RA list (fairness).
			 */
			priv->del_list_idx++;
			break;
		}
	}

	/* Lock taken above was never released in the original span --
	 * release it on all paths.
	 */
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/* NOTE(review): this span is the interior of the AP bridged-packet
 * queuing path. The enclosing function header and the declarations /
 * initializations of rx_pkt_hdr, uap_rx_pd, skb, tx_info, p_ethhdr,
 * hdr_chop and index are NOT visible here -- they appear to have been
 * lost in extraction. Confirm against the complete file before building.
 */
/* Frame is 802.2/LLC/SNAP-encapsulated (RFC 1042 or bridge-tunnel)
 * unless its ethertype is AARP or IPX, which keep the raw 802.3 form.
 */
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, sizeof(rfc1042_header)) &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) { /* Replace the 803 header and rfc1042 header (llc/snap) with * an Ethernet II header, keep the src/dst and snap_type * (ethertype). * * The firmware only passes up SNAP frames converting all RX * data from 802.11 to 802.2/LLC/SNAP frames. * * To create the Ethernet II, just move the src, dst address * right before the snap_type.
*/
/* Position of the reconstructed Ethernet II header: immediately
 * before the snap_type field, leaving room for dst + src MACs.
 */
p_ethhdr = (struct ethhdr *)
((u8 *)(&rx_pkt_hdr->eth803_hdr)
+ sizeof(rx_pkt_hdr->eth803_hdr)
+ sizeof(rx_pkt_hdr->rfc1042_hdr)
- sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
- sizeof(rx_pkt_hdr->eth803_hdr.h_source)
- sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
/* Copy src first, then dst, into the new header location */
memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source, sizeof(p_ethhdr->h_source));
memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest, sizeof(p_ethhdr->h_dest)); /* Chop off the rxpd + the excess memory from * 802.2/llc/snap header that was removed.
*/
hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
} else { /* Chop off the rxpd */
hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
}
/* Chop off the leading header bytes so that it points * to the start of either the reconstructed EthII frame * or the 802.2/llc/snap frame.
*/
skb_pull(skb, hdr_chop);
if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) { /* Update bridge packet statistics as the * packet is not going to kernel/upper layer.
*/
priv->stats.rx_bytes += skb->len;
priv->stats.rx_packets++;
/* Sending bridge packet to TX queue, so save the packet * length in TXCB to update statistics in TX complete.
*/
/* NOTE(review): tx_info is written here without visible
 * initialization -- presumably set from MWIFIEX_SKB_TXCB(skb) in the
 * missing portion above; verify against the full file.
 */
tx_info->pkt_len = skb->len;
}
/* Timestamp and requeue the frame on the AP TX path */
__net_timestamp(skb);
/* Map the 802.1d skb priority to the corresponding WMM queue */
index = mwifiex_1d_to_wmm_queue[skb->priority];
atomic_inc(&priv->wmm_tx_pending[index]);
mwifiex_wmm_add_buf_txqueue(priv, skb);
atomic_inc(&adapter->tx_pending);
atomic_inc(&adapter->pending_bridged_pkts);
/* Kick the main work queue so the queued frame gets transmitted */
mwifiex_queue_main_work(priv->adapter);
return;
}
/* * This function contains logic for AP packet forwarding. * * If a packet is multicast/broadcast, it is sent to kernel/upper layer * as well as queued back to AP TX queue so that it can be sent to other * associated stations. * If a packet is unicast and RA is present in associated station list, * it is again requeued into AP TX queue. * If a packet is unicast and RA is not in associated station list, * packet is forwarded to kernel to handle routing logic.
*/ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, struct sk_buff *skb)
{ struct mwifiex_adapter *adapter = priv->adapter; struct uap_rxpd *uap_rx_pd; struct rx_packet_hdr *rx_pkt_hdr;
u8 ra[ETH_ALEN]; struct sk_buff *skb_uap;
/* NOTE(review): uap_rx_pd, rx_pkt_hdr, ra and skb_uap are declared but
 * never initialized or used in this visible span -- the RxPD parsing and
 * the multicast/unicast forwarding logic described in the header comment
 * appear to have been lost in extraction; verify against the full file.
 */
/* don't do packet forwarding in disconnected state */ if (!priv->media_connected) {
mwifiex_dbg(adapter, ERROR, "drop packet in disconnected state.\n");
dev_kfree_skb_any(skb); return 0;
}
/* This is required only in case of 11n and USB/PCIE as we alloc * a buffer of 4K only if its 11N (to be able to receive 4K * AMSDU packets). In case of SD we allocate buffers based * on the size of packet and hence this is not needed. * * Modifying the truesize here as our allocation for each * skb is 4K but we only receive 2K packets and this cause * the kernel to start dropping packets in case where * application has allocated buffer based on 2K size i.e. * if there a 64K packet received (in IP fragments and * application allocates 64K to receive this packet but * this packet would almost double up because we allocate * each 1.5K fragment in 4K and pass it up. As soon as the * 64K limit hits kernel will start to drop rest of the * fragments. Currently we fail the Filesndl-ht.scr script * for UDP, hence this fix
*/ if ((adapter->iface_type == MWIFIEX_USB ||
adapter->iface_type == MWIFIEX_PCIE) &&
skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
/* NOTE(review): no return statement or closing brace is visible for
 * this int-returning function -- its tail (the actual forward to the
 * kernel) is missing from this span.
 */
/* * This function processes the packet received on AP interface. * * The function looks into the RxPD and performs sanity tests on the * received buffer to ensure its a valid packet before processing it * further. If the packet is determined to be aggregated, it is * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic. * * The completion callback is called after processing is complete.
*/ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, struct sk_buff *skb)
{ struct mwifiex_adapter *adapter = priv->adapter; int ret; struct uap_rxpd *uap_rx_pd; struct rx_packet_hdr *rx_pkt_hdr;
u16 rx_pkt_type;
u8 ta[ETH_ALEN], pkt_type; struct mwifiex_sta_node *node;
if (!priv->ap_11n_enabled ||
(!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
(le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
ret = mwifiex_handle_uap_rx_forward(priv, skb); return ret;
}
/* Reorder and send to kernel */
pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
uap_rx_pd->priority, ta, pkt_type,
skb);
if (ret || (rx_pkt_type == PKT_TYPE_BAR))
dev_kfree_skb_any(skb);
if (ret)
priv->stats.rx_dropped++;
return ret;
}
/* * This function fills the TxPD for AP tx packets. * * The Tx buffer received by this function should already have the * header space allocated for TxPD. * * This function inserts the TxPD in between interface header and actual * data and adjusts the buffer pointers accordingly. * * The following TxPD fields are set by this function, as required - * - BSS number * - Tx packet length and offset * - Priority * - Packet delay * - Priority specific Tx control * - Flags
*/ void mwifiex_process_uap_txpd(struct mwifiex_private *priv, struct sk_buff *skb)
{ struct mwifiex_adapter *adapter = priv->adapter; struct uap_txpd *txpd; struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); int pad;
u16 pkt_type, pkt_offset; int hroom = adapter->intf_hdr_len;
/* NOTE(review): txpd, pad and pkt_type are read below without any
 * visible initialization -- the middle of this function (padding
 * computation, skb_push for the TxPD, zeroing and filling of the TxPD
 * fields listed in the header comment) appears to have been lost in
 * extraction; verify against the full file before building.
 */
if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl)) /* * Set the priority specific tx_control field, setting of 0 will * cause the default value to be used later in this function.
*/
txpd->tx_control =
cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);
/* Offset of actual data */
pkt_offset = sizeof(*txpd) + pad; if (pkt_type == PKT_TYPE_MGMT) { /* Set the packet type and add header for management frame */
txpd->tx_pkt_type = cpu_to_le16(pkt_type);
pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
}
txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);
/* make space for adapter->intf_hdr_len */
skb_push(skb, hroom);
if (!txpd->tx_control) /* TxCtrl set by user or default */
txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.37 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.