/* NOTE(review): fragment of tx_policy_build() — 'rates', 'count', 'i',
 * 'j', 'total' and 'limit' are declared in the enclosing function,
 * outside this chunk.  The fused token "elseif" (an extraction
 * artifact) is restored to "else if" below.
 */
/* Sort rates in descending order. */
for (i = 1; i < count; ++i) {
	/* A negative idx terminates the valid portion of the array. */
	if (rates[i].idx < 0) {
		count = i;
		break;
	}
	if (rates[i].idx > rates[i - 1].idx) {
		struct ieee80211_tx_rate tmp = rates[i - 1];
		rates[i - 1] = rates[i];
		rates[i] = tmp;
	}
}
/* Eliminate duplicates: merge retry counts of equal-idx entries and
 * compact the array in place; 'total' accumulates all retries.
 */
total = rates[0].count;
for (i = 0, j = 1; j < count; ++j) {
	if (rates[j].idx == rates[i].idx) {
		rates[i].count += rates[j].count;
	} else if (rates[j].idx > rates[i].idx) {
		break;
	} else {
		++i;
		if (i != j)
			rates[i] = rates[j];
	}
	total += rates[j].count;
}
count = i + 1;

/* Re-fill policy trying to keep every requested rate and with
 * respect to the global max tx retransmission count.
 */
if (limit < count)
	limit = count;
if (total > limit) {
	for (i = 0; i < count; ++i) {
		/* Reserve one retry for each remaining rate ('left')
		 * so every rate keeps at least one attempt.
		 */
		int left = count - i - 1;
		if (rates[i].count > limit - left)
			rates[i].count = limit - left;
		limit -= rates[i].count;
	}
}
/* NOTE(review): fragment of tx_policy_build() — the if/else below is
 * visibly truncated; its closing braces lie beyond this chunk.  When
 * the policy is exactly <high g-rate, low b-rate>, intermediate g-rates
 * are injected (rates[2]/rates[3] written elsewhere in the missing part)
 * so the device avoids the very slow 54Mbps -> 1Mbps switch.
 */
/* HACK!!! Device has problems (at least) switching from * 54Mbps CTS to 1Mbps. This switch takes enormous amount * of time (100-200 ms), leading to valuable throughput drop. * As a workaround, additional g-rates are injected to the * policy.
 */ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
rates[0].idx > 4 && rates[0].count > 2 &&
rates[1].idx < 2) { int mid_rate = (rates[0].idx + 4) >> 1;
/* Decrease number of retries for the initial rate */
rates[0].count -= 2;
if (mid_rate != 4) { /* Keep fallback rate at 1Mbps. */
rates[3] = rates[1];
/* Fallback to 1 Mbps is a really bad thing, * so let's try to increase probability of * successful transmission on the lowest g rate * even more
 */ if (rates[0].count >= 3) {
--rates[0].count;
++rates[2].count;
}
/* Adjust amount of rates defined */
count += 2;
} else { /* Keep fallback rate at 1Mbps. */
rates[2] = rates[1];
staticinlinebool tx_policy_is_equal(conststruct tx_policy *wanted, conststruct tx_policy *cached)
{
size_t count = wanted->defined >> 1; if (wanted->defined > cached->defined) returnfalse; if (count) { if (memcmp(wanted->raw, cached->raw, count)) returnfalse;
} if (wanted->defined & 1) { if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F)) returnfalse;
} returntrue;
}
staticint tx_policy_find(struct tx_policy_cache *cache, conststruct tx_policy *wanted)
{ /* O(n) complexity. Not so good, but there's only 8 entries in * the cache. * Also lru helps to reduce search time.
*/ struct tx_policy_cache_entry *it; /* First search for policy in "used" list */
list_for_each_entry(it, &cache->used, link) { if (tx_policy_is_equal(wanted, &it->policy)) return it - cache->cache;
} /* Then - in "free list" */
list_for_each_entry(it, &cache->free, link) { if (tx_policy_is_equal(wanted, &it->policy)) return it - cache->cache;
} return -1;
}
/* NOTE(review): fragment of a policy-cache reset routine — 'idx',
 * 'entry', 'cache', 'priv' and 'locked' are declared in the enclosing
 * function, outside this chunk.  Every cache entry is zeroed; entries
 * with an unexpected non-zero usage count are recovered onto the free
 * list first.
 */
for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) {
entry = &cache->cache[idx]; /* Policy usage count should be 0 at this time as all queues should be empty
 */ if (WARN_ON(entry->policy.usage_count)) {
/* Recover: clear the stale count and return the entry to "free". */
entry->policy.usage_count = 0;
list_move(&entry->link, &cache->free);
}
memset(&entry->policy, 0, sizeof(entry->policy));
} if (locked)
cw1200_tx_queues_unlock(priv);
/* Translate a mac80211 rate bitmask into the firmware's (WSM) rate
 * bitmask: for every bit set in 'rates', set the bit corresponding to
 * that rate's hardware value in priv->rates[].
 */
u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates)
{
	u32 wsm_mask = 0;
	int bit;

	for (bit = 0; bit < 32; ++bit)
		if (rates & BIT(bit))
			wsm_mask |= BIT(priv->rates[bit].hw_value);

	return wsm_mask;
}
/* NOTE(review): fragment of a TX-path helper — 't' and the enclosing
 * function (which returns a pointer) are declared outside this chunk.
 * Sanity check: the skb must have headroom for a struct wsm_tx header
 * before it can be prepended; failure indicates a driver bug.
 */
if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
wiphy_err(priv->hw->wiphy, "Bug: no space allocated for WSM header. headroom: %d\n",
skb_headroom(t->skb)); return NULL;
}
/* NOTE(review): fragment — clamps the listen interval of an outgoing
 * (re)association request up to the SDD-derived minimum.  Caution: the
 * pr_debug prints mgt_frame->u.assoc_req.listen_interval without
 * le16_to_cpu(), so the "from" value logged on big-endian hosts would
 * be byte-swapped — TODO confirm against upstream.
 */
if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) <
priv->listen_interval) {
pr_debug("Modified Listen Interval to %d from %d\n",
priv->listen_interval,
mgt_frame->u.assoc_req.listen_interval); /* Replace listen interval derieved from * the one read from SDD
 */
mgt_frame->u.assoc_req.listen_interval = cpu_to_le16(priv->listen_interval);
}
}
/* NOTE(review): fragment — tail of a TX handler.  When this frame's
 * rate policy was newly created ("renew"), TX is locked and a policy
 * upload to firmware is scheduled; the locks are released immediately
 * if the work item was already pending (queue_work() <= 0).
 */
if (tx_policy_renew) {
pr_debug("[TX] TX policy renew.\n"); /* It's not so optimal to stop TX queues every now and then. * Better to reimplement task scheduling with * a counter. TODO.
 */
wsm_lock_tx_async(priv);
cw1200_tx_queues_lock(priv); if (queue_work(priv->workqueue,
&priv->tx_policy_upload_work) <= 0) {
/* Work already queued: undo both locks taken above. */
cw1200_tx_queues_unlock(priv);
wsm_unlock_tx(priv);
}
} return 0;
}
/* NOTE(review): fragment — tail of an action-frame filter.  Block-ACK
 * category frames are consumed here (return 1) because BA negotiation
 * is handled entirely by firmware; all other frames pass (return 0).
 */
/* Filter block ACK negotiation: fully controlled by firmware */ if (mgmt->u.action.category == WLAN_CATEGORY_BACK) return 1;
return 0;
}
/* Handle an incoming PS-Poll frame while operating as an AP.
 *
 * Looks up the polling station by its transmitter address, records its
 * link in priv->pspoll_mask, and decides whether the frame should be
 * forwarded to mac80211.
 *
 * Returns 1 when the frame is handled locally (dropped) — i.e. we are
 * not in AP mode, the BSSID does not match, the station is unknown, or
 * data for that link is already queued (in which case the BH is woken
 * to flush it).  Returns 0 when the PS-Poll should be forwarded.
 *
 * (Fused token "staticint" from the mangled source is restored here.)
 */
static int cw1200_handle_pspoll(struct cw1200_common *priv,
				struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
	int link_id = 0;
	u32 pspoll_mask = 0;
	int drop = 1;
	int i;

	if (priv->join_status != CW1200_JOIN_STATUS_AP)
		goto done;
	if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN))
		goto done;

	/* RCU protects the station table during lookup. */
	rcu_read_lock();
	sta = ieee80211_find_sta(priv->vif, pspoll->ta);
	if (sta) {
		struct cw1200_sta_priv *sta_priv;
		sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
		link_id = sta_priv->link_id;
		pspoll_mask = BIT(sta_priv->link_id);
	}
	rcu_read_unlock();
	if (!link_id)
		goto done;

	priv->pspoll_mask |= pspoll_mask;
	drop = 0;

	/* Do not report pspols if data for given link id is queued already. */
	for (i = 0; i < 4; ++i) {
		if (cw1200_queue_get_num_queued(&priv->tx_queue[i],
						pspoll_mask)) {
			cw1200_bh_wakeup(priv);
			drop = 1;
			break;
		}
	}
	pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd");
done:
	return drop;
}
/* NOTE(review): fragment of a WSM TX-confirmation callback — 'arg',
 * 'tx', 'tx_count', 'ht_flags', 'skb', 'queue' and 'i' are declared in
 * the enclosing function, outside this chunk.  It translates the
 * firmware confirmation into mac80211 tx status, strips crypto
 * trailers, and releases the queue slot.
 */
if (!arg->status) {
tx->flags |= IEEE80211_TX_STAT_ACK;
++tx_count;
cw1200_debug_txed(priv); if (arg->flags & WSM_TX_STATUS_AGGREGATION) { /* Do not report aggregation to mac80211: * it confuses minstrel a lot.
 */ /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */
cw1200_debug_txed_agg(priv);
}
} else { if (tx_count)
++tx_count;
}
/* Distribute the total attempt count over the per-rate status entries;
 * the first entry that can absorb the remainder gets it and the loop
 * stops there.
 */
for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) { if (tx->status.rates[i].count >= tx_count) {
tx->status.rates[i].count = tx_count; break;
}
tx_count -= tx->status.rates[i].count; if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS)
tx->status.rates[i].flags |= ht_flags;
}
/* Invalidate the unused trailing rate slots. */
for (++i; i < IEEE80211_TX_MAX_RATES; ++i) {
tx->status.rates[i].count = 0;
tx->status.rates[i].idx = -1;
}
/* Pull off any crypto trailers that we added on */ if (tx->control.hw_key) {
skb_trim(skb, skb->len - tx->control.hw_key->icv_len); if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
skb_trim(skb, skb->len - 8); /* MIC space */
}
cw1200_queue_remove(queue, arg->packet_id);
} /* XXX TODO: Only wake if there are pending transmits.. */
cw1200_bh_wakeup(priv);
}
/* Push every installed crypto key (bits set in priv->key_map) to
 * firmware via wsm_add_key().
 *
 * Returns 0 on success, or the first negative error code from
 * wsm_add_key(), at which point the upload stops.
 */
int cw1200_upload_keys(struct cw1200_common *priv)
{
	int i;
	int ret = 0;

	for (i = 0; i <= WSM_KEY_MAX_INDEX; ++i) {
		if (!(priv->key_map & BIT(i)))
			continue;
		ret = wsm_add_key(priv, &priv->keys[i]);
		if (ret < 0)
			break;
	}
	return ret;
}
/* Workaround for WFD test case 6.1.10.
 *
 * Forces a link-ID reset for the station that sent the last ACTION
 * frame.  When the frame arrived over a known link, that link is marked
 * for a remapping reset; otherwise (GO mode, no link ID yet) a
 * temporary link ID is allocated, flushed through the workqueue, and
 * then marked for release.  In both cases the link-id worker is kicked
 * with TX locked; the lock is dropped if the work was already queued.
 */
void cw1200_link_id_reset(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, linkid_reset_work);
	int tmp_id;

	if (priv->action_linkid) {
		/* Frame came in over an existing link: schedule a
		 * remapping reset for it.
		 */
		spin_lock_bh(&priv->ps_state_lock);
		priv->link_id_db[priv->action_linkid - 1].prev_status =
			priv->link_id_db[priv->action_linkid - 1].status;
		priv->link_id_db[priv->action_linkid - 1].status =
			CW1200_LINK_RESET_REMAP;
		spin_unlock_bh(&priv->ps_state_lock);
		wsm_lock_tx_async(priv);
		if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
			wsm_unlock_tx(priv);
		flush_workqueue(priv->workqueue);
		return;
	}

	/* In GO mode we can receive ACTION frames without a linkID */
	tmp_id = cw1200_alloc_link_id(priv, &priv->action_frame_sa[0]);
	WARN_ON(!tmp_id);
	if (!tmp_id)
		return;

	/* Make sure we execute the WQ */
	flush_workqueue(priv->workqueue);
	/* Release the link ID */
	spin_lock_bh(&priv->ps_state_lock);
	priv->link_id_db[tmp_id - 1].prev_status =
		priv->link_id_db[tmp_id - 1].status;
	priv->link_id_db[tmp_id - 1].status = CW1200_LINK_RESET;
	spin_unlock_bh(&priv->ps_state_lock);
	wsm_lock_tx_async(priv);
	if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
		wsm_unlock_tx(priv);
}
/* Find the active link ID associated with a station MAC address.
 *
 * On a hit the entry's timestamp is refreshed (for inactivity-based
 * eviction elsewhere).  Returns the 1-based link ID, or 0 when the MAC
 * is not in the table.  Runs under the PS state lock.
 */
int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac)
{
	int idx;
	int found = 0;

	spin_lock_bh(&priv->ps_state_lock);
	for (idx = 0; idx < CW1200_MAX_STA_IN_AP_MODE; ++idx) {
		if (memcmp(mac, priv->link_id_db[idx].mac, ETH_ALEN) ||
		    !priv->link_id_db[idx].status)
			continue;
		priv->link_id_db[idx].timestamp = jiffies;
		found = idx + 1;
		break;
	}
	spin_unlock_bh(&priv->ps_state_lock);
	return found;
}
/* Allocate a link ID for a new station in AP mode.
 *
 * Prefers a completely free slot; otherwise evicts the soft (non-HARD)
 * entry with no queued traffic that has been inactive the longest.  On
 * success the entry is put into RESERVE state, its per-TID buffer state
 * is cleared, and the link-id worker is kicked with TX locked (the lock
 * is dropped if the work was already queued).
 *
 * Returns the 1-based link ID, or 0 when no slot is available.
 *
 * (Fused tokens "unsignedlong"/"elseif" from the mangled source are
 * restored, and the truncated tail — the spin_unlock_bh() matching the
 * lock taken below, plus the return — is reinstated; without it the
 * ps_state_lock would leak on every call.)
 */
int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac)
{
	int i, ret = 0;
	unsigned long max_inactivity = 0;
	unsigned long now = jiffies;

	spin_lock_bh(&priv->ps_state_lock);
	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		if (!priv->link_id_db[i].status) {
			ret = i + 1;
			break;
		} else if (priv->link_id_db[i].status != CW1200_LINK_HARD &&
			   !priv->tx_queue_stats.link_map_cache[i + 1]) {
			unsigned long inactivity =
				now - priv->link_id_db[i].timestamp;
			if (inactivity < max_inactivity)
				continue;
			max_inactivity = inactivity;
			ret = i + 1;
		}
	}
	if (ret) {
		struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1];
		pr_debug("[AP] STA added, link_id: %d\n", ret);
		entry->status = CW1200_LINK_RESERVE;
		memcpy(&entry->mac, mac, ETH_ALEN);
		memset(&entry->buffered, 0, CW1200_MAX_TID);
		skb_queue_head_init(&entry->rx_queue);
		wsm_lock_tx_async(priv);
		if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
			wsm_unlock_tx(priv);
	} else {
		wiphy_info(priv->hw->wiphy,
			   "[AP] Early: no more link IDs available.\n");
	}
	spin_unlock_bh(&priv->ps_state_lock);
	return ret;
}
/* NOTE(review): the following German text is extraneous extraction
 * residue (website boilerplate disclaimer), not part of the driver
 * source.  Preserved verbatim, wrapped as a comment, pending removal:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */