/* Max number of entries (packets to complete) to update the hwtail of tx
 * status ring. Should be power of 2
 */
#define WIL_EDMA_TX_SRING_UPDATE_HW_TAIL 128

/* Max HW-reported offset of the packet data inside the RX buffer */
#define WIL_EDMA_MAX_DATA_OFFSET (2)

/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)

/* How many times to retry reaping a status message with an invalid buff_id */
#define MAX_INVALID_BUFF_ID_RETRY (3)
if (sz == 0) {
wil_err(wil, "Cannot allocate a zero size status ring\n"); return -EINVAL;
}
sring->swhead = 0;
/* Status messages are allocated and initialized to 0. This is necessary * since DR bit should be initialized to 0.
*/
sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); if (!sring->va) return -ENOMEM;
if (unlikely(list_empty(free))) {
wil->rx_buff_mgmt.free_list_empty_cnt++; return -EAGAIN;
}
skb = dev_alloc_skb(sz); if (unlikely(!skb)) return -ENOMEM;
skb_put(skb, sz);
/** * Make sure that the network stack calculates checksum for packets * which failed the HW checksum calculation
*/
skb->ip_summed = CHECKSUM_NONE;
pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) {
kfree_skb(skb); return -ENOMEM;
}
/* Get the buffer ID - the index of the rx buffer in the buff_arr */
rx_buff = list_first_entry(free, struct wil_rx_buff, list);
buff_id = rx_buff->id;
/* Move a buffer from the free list to the active list */
list_move(&rx_buff->list, active);
/* Move all the buffers to the free list in case active list is * not empty in order to release all SKBs before deleting the array
*/
wil_move_all_rx_buff_to_free_list(wil, ring);
/* Set list heads */
INIT_LIST_HEAD(active);
INIT_LIST_HEAD(free);
/* Linkify the list. * buffer id 0 should not be used (marks invalid id).
*/
buff_arr = wil->rx_buff_mgmt.buff_arr; for (i = 1; i <= size; i++) {
list_add(&buff_arr[i].list, free);
buff_arr[i].id = i;
}
staticvoid wil_get_reorder_params_edma(struct wil6210_priv *wil, struct sk_buff *skb, int *tid, int *cid, int *mid, u16 *seq, int *mcast, int *retry)
{ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
/* In HW reorder, HW is responsible for crypto check */ if (wil->use_rx_hw_reordering) return 0;
st = wil_skb_rxstatus(skb);
cid = wil_rx_status_get_cid(st);
tid = wil_rx_status_get_tid(st);
key_id = wil_rx_status_get_key_id(st);
mc = wil_rx_status_get_mcast(st);
s = &wil->sta[cid];
c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
cc = &c->key_id[key_id];
pn = (u8 *)&st->ext.pn;
/* In SW reorder one must use extended status messages */ if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
wil_err(wil, "compressed RX status cannot be used with SW reorder\n"); return -EINVAL;
} if (wil->rx_status_ring_order <= desc_ring_order) /* make sure sring is larger than desc ring */
wil->rx_status_ring_order = desc_ring_order + 1; if (wil->rx_buff_id_count <= desc_ring_size) /* make sure we will not run out of buff_ids */
wil->rx_buff_id_count = desc_ring_size + 512; if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */ if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
wil->num_rx_status_rings);
rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len); if (rc) return rc;
/* Allocate status ring */ for (i = 0; i < wil->num_rx_status_rings; i++) { int sring_id = wil_find_free_sring(wil);
/* Ring-modify callback for the enhanced-DMA (EDMA) datapath.
 *
 * Modifying an existing TX ring (re-pointing it at a new cid/tid) is a
 * legacy-DMA operation with no EDMA equivalent, so this handler only logs
 * the attempt and rejects it.
 *
 * Returns -EOPNOTSUPP unconditionally.
 */
static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
				   int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);

	wil_err(wil, "ring modify is not supported for EDMA\n");

	return -EOPNOTSUPP;
}
/* This function is used only for RX SW reorder */ staticint wil_check_bar(struct wil6210_priv *wil, void *msg, int cid, struct sk_buff *skb, struct wil_net_stats *stats)
{
u8 ftype;
u8 fc1; int mid; int tid;
u16 seq; struct wil6210_vif *vif;
ftype = wil_rx_status_get_frame_type(wil, msg); if (ftype == IEEE80211_FTYPE_DATA) return 0;
/* Extract the SKB from the rx_buff management array */
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; if (!skb) {
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
wil_rx_status_reset_buff_id(sring); /* Move the buffer from the active list to the free list */
list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
&wil->rx_buff_mgmt.free);
wil_sring_advance_swhead(sring);
sring->invalid_buff_id_cnt++; goto again;
}
skipping: /* skipping indicates if a certain SKB should be dropped. * It is set in case there is an error on the current SKB or in case * of RX chaining: as long as we manage to merge the SKBs it will * be false. once we have a bad SKB or we don't manage to merge SKBs * it will be set to the !EOP value of the current SKB. * This guarantees that all the following SKBs until EOP will also * get dropped.
*/ if (unlikely(rxdata->skipping)) {
kfree_skb(skb); if (rxdata->skb) {
kfree_skb(rxdata->skb);
rxdata->skb = NULL;
}
rxdata->skipping = !eop; goto again;
}
/* Compensate for the HW data alignment according to the status * message
*/
data_offset = wil_rx_status_get_data_offset(msg); if (data_offset == 0xFF ||
data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
wil_err(wil, "Unexpected data offset %d\n", data_offset);
kfree_skb(skb); goto again;
}
if (unlikely(!ring->va)) {
wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); return;
}
wil_dbg_txrx(wil, "rx_handle\n");
for (i = 0; i < wil->num_rx_status_rings; i++) {
sring = &wil->srings[i]; if (unlikely(!sring->va)) {
wil_err(wil, "Rx IRQ while Rx status ring %d not yet initialized\n",
i); continue;
}
while ((*quota > 0) &&
(NULL != (skb =
wil_sring_reap_rx_edma(wil, sring)))) {
(*quota)--; if (wil->use_rx_hw_reordering) { void *msg = wil_skb_rxstatus(skb); int mid = wil_rx_status_get_mid(msg); struct wil6210_vif *vif = wil->vifs[mid];
*dr_bit = _msg->desc_ready >> TX_STATUS_DESC_READY_POS; /* make sure dr_bit is read before the rest of status msg */
rmb();
*msg = *_msg;
}
/* Clean up transmitted skb's from the Tx descriptor RING. * Return number of descriptors cleared.
*/ int wil_tx_sring_handler(struct wil6210_priv *wil, struct wil_status_ring *sring)
{ struct net_device *ndev; struct device *dev = wil_to_dev(wil); struct wil_ring *ring = NULL; struct wil_ring_tx_data *txdata; /* Total number of completed descriptors in all descriptor rings */ int desc_cnt = 0; int cid; struct wil_net_stats *stats; struct wil_tx_enhanced_desc *_d; unsignedint ring_id; unsignedint num_descs, num_statuses = 0; int i;
u8 dr_bit; /* Descriptor Ready bit */ struct wil_ring_tx_status msg; struct wil6210_vif *vif; int used_before_complete; int used_new;
wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
/* Process completion messages while DR bit has the expected polarity */ while (dr_bit == sring->desc_rdy_pol) {
num_descs = msg.num_descriptors; if (!num_descs) {
wil_err(wil, "invalid num_descs 0\n"); goto again;
}
/* Find the corresponding descriptor ring */
ring_id = msg.ring_id;
if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
wil_err(wil, "invalid ring id %d\n", ring_id); goto again;
}
ring = &wil->ring_tx[ring_id]; if (unlikely(!ring->va)) {
wil_err(wil, "Tx irq[%d]: ring not initialized\n",
ring_id); goto again;
}
txdata = &wil->ring_tx_data[ring_id]; if (unlikely(!txdata->enabled)) {
wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id); goto again;
}
vif = wil->vifs[txdata->mid]; if (unlikely(!vif)) {
wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
txdata->mid, ring_id); goto again;
}
if (skb->protocol == cpu_to_be16(ETH_P_PAE))
wil_tx_complete_handle_eapol(vif, skb);
wil_consume_skb(skb, msg.status == 0);
}
memset(ctx, 0, sizeof(*ctx)); /* Make sure the ctx is zeroed before updating the tail * to prevent a case where wil_tx_ring will see * this descriptor as used and handle it before ctx zero * is completed.
*/
wmb();
/* hold reference to skb * to prevent skb release before accounting * in case of immediate "tx done"
*/ if (tso_desc_type == wil_tso_type_lst)
ring->ctx[i].skb = skb_get(skb);
staticint __wil_tx_ring_tso_edma(struct wil6210_priv *wil, struct wil6210_vif *vif, struct wil_ring *ring, struct sk_buff *skb)
{ int ring_index = ring - wil->ring_tx; struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index]; int nr_frags = skb_shinfo(skb)->nr_frags; int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */ int used, avail = wil_ring_avail_tx(ring); int f, hdrlen, headlen; int gso_type; bool is_ipv4;
u32 swhead = ring->swhead; int descs_used = 0; /* total number of used descriptors */ int rc = -EINVAL; int tcp_hdr_len; int skb_net_hdr_len; int mss = skb_shinfo(skb)->gso_size;
wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
ring_index);
if (unlikely(!txdata->enabled)) return -EINVAL;
if (unlikely(avail < min_desc_required)) {
wil_err_ratelimited(wil, "TSO: Tx ring[%2d] full. No space for %d fragments\n",
ring_index, min_desc_required); return -ENOMEM;
}
if (skb->ip_summed != CHECKSUM_PARTIAL) return -EINVAL;
/* tcp header length and skb network header length are fixed for all * packet's descriptors - read them once here
*/
tcp_hdr_len = tcp_hdrlen(skb);
skb_net_hdr_len = skb_network_header_len(skb);
/* First descriptor must contain the header only * Header Length = MAC header len + IP header len + TCP header len
*/
hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
hdrlen);
rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
wil_tso_type_hdr, NULL, ring, skb,
is_ipv4, tcp_hdr_len, skb_net_hdr_len,
mss, &descs_used); if (rc) return -EINVAL;
/* Rest of the descriptors are from the SKB fragments */ for (f = 0; f < nr_frags; f++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; int len = skb_frag_size(frag);
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
len, descs_used);
/* NOTE(review): the following text is extraction residue — a German website
 * disclaimer, not part of the driver source. Kept here (translated) until the
 * original end of this file can be recovered: "The information on this web
 * page was carefully compiled to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax display and the measurement are still
 * experimental."
 */