spin_lock_bh(&cache->lock); if (list_empty(&cache->free)) {
WARN(1, "unable to get a valid Tx policy");
spin_unlock_bh(&cache->lock); return HIF_TX_RETRY_POLICY_INVALID;
}
idx = wfx_tx_policy_find(cache, &wanted); if (idx >= 0) {
*renew = false;
} else { /* If policy is not found create a new one using the oldest entry in "free" list */
*renew = true;
entry = list_entry(cache->free.prev, struct wfx_tx_policy, link);
memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
entry->uploaded = false;
entry->usage_count = 0;
idx = entry - cache->cache;
}
wfx_tx_policy_use(cache, &cache->cache[idx]); if (list_empty(&cache->free))
ieee80211_stop_queues(wvif->wdev->hw);
spin_unlock_bh(&cache->lock); return idx;
}
/* Drop a reference on the Tx retry policy cache entry @idx.
 *
 * If the free list was empty (wfx_tx_policy_get() stops the mac80211 queues in
 * that case) and this release brings the entry's usage count to zero, the
 * queues are woken again.
 */
static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
{
	int usage, locked;
	struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;

	if (idx == HIF_TX_RETRY_POLICY_INVALID)
		return;
	spin_lock_bh(&cache->lock);
	/* An empty free list means the queues were stopped on the get() side */
	locked = list_empty(&cache->free);
	usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
	if (locked && !usage)
		ieee80211_wake_queues(wvif->wdev->hw);
	spin_unlock_bh(&cache->lock);
}
/* Upload to the device every cached Tx retry policy that is in use but not yet
 * uploaded.
 *
 * The rates are copied to a local buffer so the cache lock can be dropped
 * around the call into the HIF layer; the scan then restarts from the
 * beginning of the cache until no candidate entry remains.
 *
 * Return: always 0.
 */
static int wfx_tx_policy_upload(struct wfx_vif *wvif)
{
	struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache;
	u8 tmp_rates[12];
	int i, is_used;

	do {
		spin_lock_bh(&wvif->tx_policy_cache.lock);
		for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) {
			/* An all-zero rate list marks an unused entry */
			is_used = memzcmp(policies[i].rates, sizeof(policies[i].rates));
			if (!policies[i].uploaded && is_used)
				break;
		}
		if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) {
			policies[i].uploaded = true;
			memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
			/* Drop the lock before calling into the HIF layer; the
			 * rates were copied to tmp_rates above.
			 */
			spin_unlock_bh(&wvif->tx_policy_cache.lock);
			wfx_hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
		} else {
			spin_unlock_bh(&wvif->tx_policy_cache.lock);
		}
	} while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache));
	return 0;
}
if (sta_priv && sta_priv->link_id) return sta_priv->link_id; if (vif->type != NL80211_IFTYPE_AP) return 0; if (is_multicast_ether_addr(da)) return 0; return HIF_LINK_ID_NOT_ASSOCIATED;
}
staticvoid wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
{ bool has_rate0 = false; int i, j;
for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) { if (rates[j].idx == -1) break; /* The device use the rates in descending order, whatever the request from minstrel. * We have to trade off here. Most important is to respect the primary rate * requested by minstrel. So, we drops the entries with rate higher than the * previous.
*/ if (rates[j].idx >= rates[i - 1].idx) {
rates[i - 1].count += rates[j].count;
rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
} else {
memcpy(rates + i, rates + j, sizeof(rates[i])); if (rates[i].idx == 0)
has_rate0 = true; /* The device apply Short GI only on the first rate */
rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
i++;
}
} /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */ if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) {
rates[i].idx = 0;
rates[i].count = 8; /* == hw->max_rate_tries */
rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS;
i++;
} for (; i < IEEE80211_TX_MAX_RATES; i++) {
memset(rates + i, 0, sizeof(rates[i]));
rates[i].idx = -1;
}
}
ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew); if (ret == HIF_TX_RETRY_POLICY_INVALID)
dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");
if (tx_policy_renew) {
wfx_tx_lock(wvif->wdev); if (!schedule_work(&wvif->tx_policy_upload_work))
wfx_tx_unlock(wvif->wdev);
} return ret;
}
/* From now tx_info->control is unusable */
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv)); /* Fill tx_priv */
tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);
tx_priv->vif_id = wvif->id;
/* Fill hif_msg */
WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
WARN(offset & 1, "attempt to transmit an unaligned frame");
skb_put(skb, tx_priv->icv_size);
skb_push(skb, wmsg_len);
memset(skb->data, 0, wmsg_len);
hif_msg = (struct wfx_hif_msg *)skb->data;
hif_msg->len = cpu_to_le16(skb->len);
hif_msg->id = HIF_REQ_ID_TX; if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
hif_msg->interface = 2; else
hif_msg->interface = wvif->id; if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) {
dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n",
skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf));
skb_pull(skb, wmsg_len); return -EIO;
}
/* Fill tx request */
req = (struct wfx_hif_req_tx *)hif_msg->body; /* packet_id just need to be unique on device. 32bits are more than necessary for that task, * so we take advantage of it to add some extra data for debug.
*/
req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
req->packet_id |= queue_id << 28;
req->fc_offset = offset; /* Queue index are inverted between firmware and Linux */
req->queue_id = 3 - queue_id; if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
req->peer_sta_id = HIF_LINK_ID_NOT_ASSOCIATED;
req->retry_policy_index = HIF_TX_RETRY_POLICY_INVALID;
req->frame_format = HIF_FRAME_FORMAT_NON_HT;
} else {
req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info);
req->frame_format = wfx_tx_get_frame_format(tx_info);
} if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
req->short_gi = 1; if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
req->after_dtim = 1;
BUILD_BUG_ON_MSG(sizeof(struct wfx_tx_priv) > driver_data_room, "struct tx_priv is too large");
WARN(skb->next || skb->prev, "skb is already member of a list"); /* control.vif can be NULL for injected frames */ if (tx_info->control.vif)
wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv; else
wvif = wvif_iterate(wdev, NULL); if (WARN_ON(!wvif)) goto drop; /* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any BlockAck session * management frame. The check below exist just in case.
*/ if (wfx_is_action_back(hdr)) {
dev_info(wdev->dev, "drop BA action\n"); goto drop;
} if (wfx_tx_inner(wvif, sta, skb)) goto drop;
if (!wvif) {
pr_warn("vif associated with the skb does not exist anymore\n"); return;
}
wfx_tx_policy_put(wvif, req->retry_policy_index);
skb_pull(skb, offset);
ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
}
staticvoid wfx_tx_fill_rates(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info, conststruct wfx_hif_cnf_tx *arg)
{ struct ieee80211_tx_rate *rate; int tx_count; int i;
tx_count = arg->ack_failures; if (!arg->status || arg->ack_failures)
tx_count += 1; /* Also report success */ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
rate = &tx_info->status.rates[i]; if (rate->idx < 0) break; if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES &&
arg->ack_failures)
dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n",
rate->count, tx_count); if (tx_count <= rate->count && tx_count &&
arg->txed_rate != wfx_get_hw_rate(wdev, rate))
dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n",
arg->txed_rate, wfx_get_hw_rate(wdev, rate)); if (tx_count > rate->count) {
tx_count -= rate->count;
} elseif (!tx_count) {
rate->count = 0;
rate->idx = -1;
} else {
rate->count = tx_count;
tx_count = 0;
}
} if (tx_count)
dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count);
}
/* Note that wfx_pending_get_pkt_us_delay() get data from tx_info */
_trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
wfx_tx_fill_rates(wdev, tx_info, arg);
skb_trim(skb, skb->len - tx_priv->icv_size);
/* From now, you can touch to tx_info->status, but do not touch to tx_priv anymore */ /* FIXME: use ieee80211_tx_info_clear_status() */
memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
memset(tx_info->pad, 0, sizeof(tx_info->pad));
if (!arg->status) {
tx_info->status.tx_time = le32_to_cpu(arg->media_delay) -
le32_to_cpu(arg->tx_queue_delay); if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; else
tx_info->flags |= IEEE80211_TX_STAT_ACK;
} elseif (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
WARN(!arg->requeue, "incoherent status and result_flags"); if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */
schedule_work(&wvif->update_tim_work);
}
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
}
wfx_skb_dtor(wvif, skb);
}
for (i = 0; i < IEEE80211_NUM_ACS; i++) { if (!(BIT(i) & queues)) continue;
queue = &wvif->tx_queue[i]; if (dropped)
wfx_tx_queue_drop(wvif, queue, dropped);
} if (wvif->wdev->chip_frozen) return; for (i = 0; i < IEEE80211_NUM_ACS; i++) { if (!(BIT(i) & queues)) continue;
queue = &wvif->tx_queue[i]; if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue),
msecs_to_jiffies(1000)) <= 0)
dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?");
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.