/* Initialize the per-AC transmit queues of a virtual interface.
 *
 * The device is in charge of respecting the details of the QoS parameters.
 * The driver just ensures that it roughly respects the priorities to avoid
 * any shortage.
 */
void wfx_tx_queues_init(struct wfx_vif *wvif)
{
	/* Relative scheduling weights, one per access category.
	 * NOTE(review): whether a lower or a higher value is favored depends
	 * on the weight computation in the dequeue path -- confirm there.
	 */
	const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		skb_queue_head_init(&wvif->tx_queue[i].normal);
		skb_queue_head_init(&wvif->tx_queue[i].cab);
		skb_queue_head_init(&wvif->tx_queue[i].offchan);
		wvif->tx_queue[i].priority = priorities[i];
	}
}
/* Sanity check: warn if any transmit queue of @wvif still holds frames
 * (either queued in the driver or pending on the device).
 */
void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
{
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		WARN_ON(atomic_read(&wvif->tx_queue[ac].pending_frames));
		WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[ac]));
	}
}
/* NOTE(review): fragment -- the head of the enclosing function (its name and
 * the declarations of 'wdev', 'wvif', 'queue', 'skb' and 'dropped') is not
 * visible in this chunk; confirm against the full file. From the body: it
 * drains every frame still sitting in wdev->tx_pending into the
 * caller-supplied 'dropped' list, fixing up the per-queue pending_frames
 * counters on the way. Only intended for recovering a frozen device, as the
 * WARN below states.
 */
WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__); while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
/* Map the skb back to its owning vif/queue so the pending counter
 * can be decremented; skbs with no matching vif are just dropped. */
wvif = wfx_skb_wvif(wdev, skb); if (wvif) {
queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
/* Queue mapping must be a valid AC index (0..3). */
WARN_ON(skb_get_queue_mapping(skb) > 3);
WARN_ON(!atomic_read(&queue->pending_frames));
atomic_dec(&queue->pending_frames);
}
/* Hand the frame back to the caller for disposal. */
skb_queue_head(dropped, skb);
}
}
if (vif->type != NL80211_IFTYPE_AP) returnfalse; for (i = 0; i < IEEE80211_NUM_ACS; ++i) /* Note: since only AP can have mcast frames in queue and only one vif can be AP, * all queued frames has same interface id
*/ if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab)) returntrue; returnfalse;
}
/* NOTE(review): fragment -- the head of the enclosing function is not
 * visible in this chunk; 'wdev', 'wvif', 'queues', 'num_queues', 'skb',
 * 'hif', 'i' and 'j' are declared there. From the body: it selects the next
 * skb to transmit, in three passes over all vifs' queues: off-channel frames
 * first, then post-DTIM multicast (CAB), then normal traffic.
 */
/* sort the queues */
/* Collect every AC queue of every vif into 'queues', insertion-sorting by
 * wfx_tx_queue_get_weight() (lowest weight first). */
wvif = NULL; while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { for (i = 0; i < IEEE80211_NUM_ACS; i++) {
WARN_ON(num_queues >= ARRAY_SIZE(queues));
queues[num_queues] = &wvif->tx_queue[i]; for (j = num_queues; j > 0; j--) if (wfx_tx_queue_get_weight(queues[j]) <
wfx_tx_queue_get_weight(queues[j - 1]))
swap(queues[j - 1], queues[j]);
num_queues++;
}
}
/* First pass: off-channel frames always win. */
wvif = NULL; while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { for (i = 0; i < num_queues; i++) {
skb = skb_dequeue(&queues[i]->offchan); if (!skb) continue;
hif = (struct wfx_hif_msg *)skb->data; /* Offchan frames are assigned to a special interface. * The only interface allowed to send data during scan.
*/
WARN_ON(hif->interface != 2);
atomic_inc(&queues[i]->pending_frames);
trace_queues_stats(wdev, queues[i]); return skb;
}
}
/* During a scan, only off-channel traffic may go out. */
if (mutex_is_locked(&wdev->scan_lock)) return NULL;
/* Second pass: content-after-beacon (multicast) frames, only while the
 * post-DTIM transmit window of the vif is open. */
wvif = NULL; while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { if (!wvif->after_dtim_tx_allowed) continue; for (i = 0; i < num_queues; i++) {
skb = skb_dequeue(&queues[i]->cab); if (!skb) continue; /* Note: since only AP can have mcast frames in queue and only one vif can * be AP, all queued frames have same interface id
*/
hif = (struct wfx_hif_msg *)skb->data;
WARN_ON(hif->interface != wvif->id);
WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]);
atomic_inc(&queues[i]->pending_frames);
trace_queues_stats(wdev, queues[i]); return skb;
} /* No more multicast to send */
wvif->after_dtim_tx_allowed = false;
schedule_work(&wvif->update_tim_work);
}
/* Last pass: normal traffic, in sorted (weight) order. */
for (i = 0; i < num_queues; i++) {
skb = skb_dequeue(&queues[i]->normal); if (skb) {
atomic_inc(&queues[i]->pending_frames);
trace_queues_stats(wdev, queues[i]); return skb;
}
} return NULL;
}
/* NOTE(review): trailing non-code text (a German website disclaimer), almost
 * certainly extraction residue appended after the last function. Kept
 * verbatim below but commented out so the file remains compilable:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */