/* Number of bits for an average sized packet */
#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)

/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))

/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
#define MCS_SYMBOL_TIME(sgi, syms)					\
	(sgi ?								\
	  ((syms) * 18000 + 4000) / 5 :	/* syms * 3.6 us */		\
	  ((syms) * 1000) << 2		/* syms * 4 us */		\
	)

/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) \
	(MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE)
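/*
 * Worked example (illustrative only, assuming AVG_PKT_SIZE = 1200 and
 * AVG_AMPDU_SIZE = 16): for one stream, long GI and bps = 26 (MCS0 at
 * 20 MHz), MCS_NBITS = (1200 * 16) * 8 = 153600 bits, MCS_NSYMS(26) =
 * 5908 symbols, MCS_SYMBOL_TIME(0, 5908) = 23632000 ns for the whole
 * A-MPDU, and MCS_DURATION(1, 0, 26) = 23632000 / 16 = 1477000 ns per
 * average MPDU - which matches 9600 bits sent at 6.5 Mbit/s.
 */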
static bool minstrel_vht_only = true;
module_param(minstrel_vht_only, bool, 0644);
MODULE_PARM_DESC(minstrel_vht_only, "Use only VHT rates when VHT is supported by sta.");
/*
 * To enable sufficiently targeted rate sampling, MCS rates are divided into
 * groups, based on the number of streams and flags (HT40, SGI) that they
 * use.
 *
 * Sort order has to be fixed for the GROUP_IDX macro to be applicable:
 * BW -> SGI -> #streams
 */
const struct mcs_group minstrel_mcs_groups[] = {
MCS_GROUP(1, 0, BW_20),
MCS_GROUP(2, 0, BW_20),
MCS_GROUP(3, 0, BW_20),
MCS_GROUP(4, 0, BW_20),
/*
 * Some VHT MCSes are invalid (when Ndbps / Nes is not an integer)
 * e.g for MCS9@20MHzx1Nss: Ndbps=8x52*(5/6) Nes=1
 *
 * Returns the valid mcs map for struct minstrel_mcs_group_data.supported
 */
static u16
minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map)
{
u16 mask = 0;
static bool
minstrel_ht_is_legacy_group(int group)
{
	return group == MINSTREL_CCK_GROUP ||
	       group == MINSTREL_OFDM_GROUP;
}
/*
 * Look up an MCS group index based on mac80211 rate information
 */
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
	return GROUP_IDX((rate->idx / 8) + 1,
			 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
			 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
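/*
 * Example: HT MCS indexes 0-7 use one spatial stream, 8-15 use two, and so
 * on, so a rate with idx 12 (MCS 4 on two streams) yields (12 / 8) + 1 == 2
 * streams for the group lookup above.
 */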
/*
 * Look up an MCS group index based on new cfg80211 rate_info.
 */
static int
minstrel_ht_ri_get_group_idx(struct rate_info *rate)
{
	return GROUP_IDX((rate->mcs / 8) + 1,
			 !!(rate->flags & RATE_INFO_FLAGS_SHORT_GI),
			 !!(rate->bw & RATE_INFO_BW_40));
}
/*
 * Look up a VHT MCS group index based on new cfg80211 rate_info.
 */
static int
minstrel_vht_ri_get_group_idx(struct rate_info *rate)
{
	return VHT_GROUP_IDX(rate->nss,
			     !!(rate->flags & RATE_INFO_FLAGS_SHORT_GI),
			     !!(rate->bw & RATE_INFO_BW_40) +
			     2*!!(rate->bw & RATE_INFO_BW_80));
}
/*
 * Return current throughput based on the average A-MPDU length, taking into
 * account the expected number of retransmissions and their expected length
 */
int
minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
		       int prob_avg)
{
	unsigned int nsecs = 0, overhead = mi->overhead;
	unsigned int ampdu_len = 1;

	/* do not account throughput if success prob is below 10% */
	if (prob_avg < MINSTREL_FRAC(10, 100))
		return 0;

	if (minstrel_ht_is_legacy_group(group))
		overhead = mi->overhead_legacy;
	else
		ampdu_len = minstrel_ht_avg_ampdu_len(mi);

	/*
	 * For the throughput calculation, limit the probability value to 90% to
	 * account for collision related packet error rate fluctuation
	 * (prob is scaled - see MINSTREL_FRAC above)
	 */
	if (prob_avg > MINSTREL_FRAC(90, 100))
		prob_avg = MINSTREL_FRAC(90, 100);
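	/*
	 * Note (illustrative): prob_avg is a fixed-point fraction produced by
	 * MINSTREL_FRAC(), so MINSTREL_FRAC(90, 100) is simply 90% in that
	 * scaled representation; the clamp keeps a temporarily perfect-looking
	 * rate from inflating the throughput estimate beyond what collisions
	 * allow in practice.
	 */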
/* * Find & sort topmost throughput rates * * If multiple rates provide equal throughput the sorting is based on their * current success probability. Higher success probability is preferred among * MCS groups, CCK rates do not provide aggregation and are therefore at last.
*/ staticvoid
minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
u16 *tp_list)
{ int cur_group, cur_idx, cur_tp_avg, cur_prob; int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob; int j = MAX_THR_RATES;
/*
 * Find and set the topmost probability rate per sta and per group
 */
static void
minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 *dest, u16 index)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mrs;
	int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
	int max_tp_group, max_tp_idx, max_tp_prob;
	int cur_tp_avg, cur_group, cur_idx;
	int max_gpr_group, max_gpr_idx;
	int max_gpr_tp_avg, max_gpr_prob;

	cur_group = MI_RATE_GROUP(index);
	cur_idx = MI_RATE_IDX(index);
	mg = &mi->groups[cur_group];
	mrs = &mg->rates[cur_idx];

	/* If max_tp_rate[0] is from an MCS group, max_prob_rate must be selected
	 * from an MCS group as well, since CCK_GROUP rates do not allow
	 * aggregation */
	max_tp_group = MI_RATE_GROUP(mi->max_tp_rate[0]);
	max_tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]);
	max_tp_prob = mi->groups[max_tp_group].rates[max_tp_idx].prob_avg;

	if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index)) &&
	    !minstrel_ht_is_legacy_group(max_tp_group))
		return;

	/* skip rates faster than max tp rate with lower prob */
	if (minstrel_get_duration(mi->max_tp_rate[0]) > minstrel_get_duration(index) &&
	    mrs->prob_avg < max_tp_prob)
		return;
/*
 * Assign new rate set per sta and use CCK rates only if the fastest
 * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted
 * rate sets where MCS and CCK rates are mixed, because CCK rates can
 * not use aggregation.
 */
static void
minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
				 u16 tmp_mcs_tp_rate[MAX_THR_RATES],
				 u16 tmp_legacy_tp_rate[MAX_THR_RATES])
{
	unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob;
	int i;

	if (tmp_cck_tp > tmp_mcs_tp) {
		for (i = 0; i < MAX_THR_RATES; i++) {
			minstrel_ht_sort_best_tp_rates(mi, tmp_legacy_tp_rate[i],
						       tmp_mcs_tp_rate);
		}
	}
}
/*
 * Try to increase robustness of the max_prob rate by decreasing the number of
 * streams if possible.
 */
static inline void
minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	int tmp_max_streams, group, tmp_idx, tmp_prob;
	int tmp_tp = 0;

	if (!mi->sta->deflink.ht_cap.ht_supported)
		return;

	group = MI_RATE_GROUP(mi->max_tp_rate[0]);
	tmp_max_streams = minstrel_mcs_groups[group].streams;

	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		mg = &mi->groups[group];
		if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
			continue;
static int
minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group,
				  u32 max_duration)
{
	u16 supported = mi->supported[group];
	int i;
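	/*
	 * mi->supported[group] is a bitmask of usable rate indexes within the
	 * group; the loop below walks it bit by bit and returns the first rate
	 * whose duration is below max_duration.
	 */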
	for (i = 0; i < MCS_GROUP_RATES && supported; i++, supported >>= 1) {
		if (!(supported & BIT(0)))
			continue;

		if (minstrel_get_duration(MI_RATE(group, i)) >= max_duration)
			continue;

		return i;
	}

	return -1;
}
/*
 * Incremental update rates:
 * Flip through groups and pick the first group rate that is faster than the
 * highest currently selected rate
 */
static u16
minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
{
	u8 type = MINSTREL_SAMPLE_TYPE_INC;
	int i, index = 0;
	u8 group;
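	/*
	 * Resume the walk at the group after the one sampled from last time,
	 * so successive calls cycle through all MCS groups instead of always
	 * probing the same one first.
	 */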
	group = mi->sample[type].sample_group;
	for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
		group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);

		index = minstrel_ht_group_min_rate_offset(mi, group,
							  fast_rate_dur);
		if (index < 0)
			continue;

		index = MI_RATE(group, index & 0xf);
		if (!minstrel_ht_find_sample_rate(mi, type, index))
			goto out;
	}
	index = 0;

out:
	mi->sample[type].sample_group = group;

	return index;
}
static int
minstrel_ht_next_group_sample_rate(struct minstrel_ht_sta *mi, int group,
				   u16 supported, int offset)
{
	struct minstrel_mcs_group_data *mg = &mi->groups[group];
	u16 idx;
	int i;
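	/*
	 * sample_table holds pre-shuffled rate offsets; mg->column selects one
	 * shuffled sequence and mg->index the position within it, so repeated
	 * calls walk the group's rates in a pseudo-random order.
	 */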
	for (i = 0; i < MCS_GROUP_RATES; i++) {
		idx = sample_table[mg->column][mg->index];
		if (++mg->index >= MCS_GROUP_RATES) {
			mg->index = 0;
			if (++mg->column >= ARRAY_SIZE(sample_table))
				mg->column = 0;
		}

		if (idx < offset)
			continue;

		if (!(supported & BIT(idx)))
			continue;

		return MI_RATE(group, idx);
	}

	return -1;
}
/*
 * Jump rates:
 * Sample random rates, use those that are faster than the highest
 * currently selected rate. Rates between the fastest and the slowest
 * get sorted into the slow sample bucket, but only if it has room
 */
static u16
minstrel_ht_next_jump_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur,
			   u32 slow_rate_dur, int *slow_rate_ofs)
{
	struct minstrel_rate_stats *mrs;
	u32 max_duration = slow_rate_dur;
	int i, index, offset;
	u16 *slow_rates;
	u16 supported;
	u32 duration;
	u8 group;

	if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
		max_duration = fast_rate_dur;

	slow_rates = mi->sample[MINSTREL_SAMPLE_TYPE_SLOW].sample_rates;
	group = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group;
	for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
		u8 type;

		group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);

		supported = mi->supported[group];
		if (!supported)
			continue;

		offset = minstrel_ht_group_min_rate_offset(mi, group,
							   max_duration);
		if (offset < 0)
			continue;

		index = minstrel_ht_next_group_sample_rate(mi, group, supported,
							   offset);
		if (index < 0)
			continue;

		duration = minstrel_get_duration(index);
		if (duration < fast_rate_dur)
			type = MINSTREL_SAMPLE_TYPE_JUMP;
		else
			type = MINSTREL_SAMPLE_TYPE_SLOW;

		if (minstrel_ht_find_sample_rate(mi, type, index))
			continue;

		if (type == MINSTREL_SAMPLE_TYPE_JUMP)
			goto found;

		if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
			continue;

		if (duration >= slow_rate_dur)
			continue;

		/* skip slow rates with high success probability */
		mrs = minstrel_get_ratestats(mi, index);
		if (mrs->prob_avg > MINSTREL_FRAC(95, 100))
			continue;

		slow_rates[(*slow_rate_ofs)++] = index;
		if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
			max_duration = fast_rate_dur;
	}
index = 0;
	rates = mi->sample[MINSTREL_SAMPLE_TYPE_INC].sample_rates;
	i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_INC,
					  fast_rate_dur, slow_rate_dur);
	while (i < MINSTREL_SAMPLE_RATES) {
		rates[i] = minstrel_ht_next_inc_rate(mi, tp_dur);
		if (!rates[i])
			break;

		i++;
	}
	rates = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_rates;
	i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_JUMP,
					  fast_rate_dur, slow_rate_dur);
	j = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_SLOW,
					  fast_rate_dur, slow_rate_dur);
	while (i < MINSTREL_SAMPLE_RATES) {
		rates[i] = minstrel_ht_next_jump_rate(mi, fast_rate_dur,
						      slow_rate_dur, &j);
		if (!rates[i])
			break;

		i++;
	}
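	/*
	 * Publish the refreshed INC/JUMP/SLOW buckets: cur_sample_rates is the
	 * snapshot consulted by the sampling logic until the next refill.
	 */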
	for (i = 0; i < ARRAY_SIZE(mi->sample); i++)
		memcpy(mi->sample[i].cur_sample_rates, mi->sample[i].sample_rates,
		       sizeof(mi->sample[i].cur_sample_rates));
}
/*
 * Update rate statistics and select new primary rates
 *
 * Rules for rate selection:
 * - max_prob_rate must use only one stream, as a tradeoff between delivery
 *   probability and throughput during strong fluctuations
 * - as long as the max prob rate has a probability of more than 75%, pick
 *   higher throughput rates, even if the probability is a bit lower
 */
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mrs;
	int group, i, j, cur_prob;
	u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
	u16 tmp_legacy_tp_rate[MAX_THR_RATES], tmp_max_prob_rate;
	u16 index;
	bool ht_supported = mi->sta->deflink.ht_cap.ht_supported;

	if (mi->supported[MINSTREL_CCK_GROUP])
		group = MINSTREL_CCK_GROUP;
	else if (mi->supported[MINSTREL_OFDM_GROUP])
		group = MINSTREL_OFDM_GROUP;
	else
		group = 0;

	index = MI_RATE(group, 0);
	for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++)
		tmp_legacy_tp_rate[j] = index;

	if (mi->supported[MINSTREL_VHT_GROUP_0])
		group = MINSTREL_VHT_GROUP_0;
	else if (ht_supported)
		group = MINSTREL_HT_GROUP_0;
	else if (mi->supported[MINSTREL_CCK_GROUP])
		group = MINSTREL_CCK_GROUP;
	else
		group = MINSTREL_OFDM_GROUP;

	index = MI_RATE(group, 0);
	tmp_max_prob_rate = index;
	for (j = 0; j < ARRAY_SIZE(tmp_mcs_tp_rate); j++)
		tmp_mcs_tp_rate[j] = index;
	/* Find best rate sets within all MCS groups */
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		u16 *tp_rate = tmp_mcs_tp_rate;
		u16 last_prob = 0;

		mg = &mi->groups[group];
		if (!mi->supported[group])
			continue;

	/* Assign new rate set per sta */
	minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate,
					 tmp_legacy_tp_rate);
	memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		if (!mi->supported[group])
			continue;

		for (i = 0; i < MCS_GROUP_RATES; i++) {
			if (!(mi->supported[group] & BIT(i)))
				continue;

			index = MI_RATE(group, i);

			/* Find max probability rate per group and global */
			minstrel_ht_set_best_prob_rate(mi, &tmp_max_prob_rate,
						       index);
		}
	}
	mi->max_prob_rate = tmp_max_prob_rate;

	/* Try to increase robustness of max_prob_rate */
	minstrel_ht_prob_rate_reduce_streams(mi);
	minstrel_ht_refill_sample_rates(mi);
#ifdef CONFIG_MAC80211_DEBUGFS
	/* use fixed index if set */
	if (mp->fixed_rate_idx != -1) {
		for (i = 0; i < 4; i++)
			mi->max_tp_rate[i] = mp->fixed_rate_idx;
		mi->max_prob_rate = mp->fixed_rate_idx;
	}
#endif
	if (!rate_status)
		return false;
	if (!rate_status->try_count)
		return false;

	if (rate_status->rate_idx.flags & RATE_INFO_FLAGS_MCS ||
	    rate_status->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS)
		return true;

	for (i = 0; i < ARRAY_SIZE(mp->cck_rates); i++) {
		if (rate_status->rate_idx.legacy ==
		    minstrel_cck_bitrates[ mp->cck_rates[i] ])
			return true;
	}

	for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates); i++) {
		if (rate_status->rate_idx.legacy ==
		    minstrel_ofdm_bitrates[ mp->ofdm_rates[mi->band][i] ])
			return true;
	}
	/* Ignore packet that was sent with noAck flag */
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return;

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	if (st->rates && st->n_rates) {
		last = !minstrel_ht_ri_txstat_valid(mp, mi, &(st->rates[0]));
		for (i = 0; !last; i++) {
			last = (i == st->n_rates - 1) ||
			       !minstrel_ht_ri_txstat_valid(mp, mi,
							    &(st->rates[i + 1]));
	if (mp->hw->max_rates > 1) {
		/*
		 * check for sudden death of spatial multiplexing,
		 * downgrade to a lower number of streams if necessary.
		 */
		rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
		if (rate->attempts > 30 &&
		    rate->success < rate->attempts / 4) {
			minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
			update = true;
		}
	/* Total TX time for data and Contention after first 2 tries */
	tx_time = ctime + 2 * (overhead + tx_time_data);
	tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);

	/* See how many more tries we can fit inside segment size */
	do {
		/* Contention time for this try */
		ctime = (t_slot * cw) >> 1;
		cw = min((cw << 1) | 1, mp->cw_max);

		/* Total TX time after this try */
		tx_time += ctime + overhead + tx_time_data;
		tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;

		if (tx_time_rtscts < mp->segment_size)
			mrs->retry_count_rtscts++;
	} while ((tx_time < mp->segment_size) &&
		 (++mrs->retry_count < mp->max_retry));
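	/*
	 * Note (illustrative): each pass of the loop above adds the average
	 * backoff for the current contention window (t_slot * cw / 2), doubles
	 * cw up to cw_max, and accounts one more full transmission. Retries
	 * are granted only while the cumulative airtime stays below
	 * segment_size and retry_count has not reached max_retry, so slower
	 * rates end up with fewer permitted retransmissions.
	 */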
}
	/* enable RTS/CTS if needed:
	 *  - if station is in dynamic SMPS (and streams > 1)
	 *  - for fallback rates, to increase chances of getting through
	 */
	if (offset > 0 ||
	    (mi->sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC &&
	     group->streams > 1)) {
		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
		flags |= IEEE80211_TX_RC_USE_RTS_CTS;
	}
	/* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
	if (duration > MCS_DURATION(1, 0, 52))
		return 500;

	/*
	 * If the rate is slower than single-stream MCS4, limit A-MSDU to usual
	 * data packet size
	 */
	if (duration > MCS_DURATION(1, 0, 104))
		return 1600;

	/*
	 * If the rate is slower than single-stream MCS7, or if the max throughput
	 * rate success probability is less than 75%, limit A-MSDU to twice the usual
	 * data packet size
	 */
	if (duration > MCS_DURATION(1, 0, 260) ||
	    (minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
	     MINSTREL_FRAC(75, 100)))
		return 3200;

	/*
	 * HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes.
	 * Since aggregation sessions are started/stopped without txq flush, use
	 * the limit here to avoid the complexity of having to de-aggregate
	 * packets in the queue.
	 */
	if (!mi->sta->deflink.vht_cap.vht_supported)
		return IEEE80211_MAX_MPDU_LEN_HT_BA;

	/* unlimited */
	return 0;
}
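/*
 * Summary of the A-MSDU size tiers selected above: slower than single-stream
 * MCS1 -> 500 bytes, slower than MCS4 -> 1600 bytes, slower than MCS7 or
 * max_tp success probability below 75% -> 3200 bytes, no VHT support ->
 * IEEE80211_MAX_MPDU_LEN_HT_BA (4095 bytes under an HT BA agreement),
 * otherwise unlimited.
 */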
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct ieee80211_sta_rates *rates;
	int i = 0;
	int max_rates = min_t(int, mp->hw->max_rates, IEEE80211_TX_RATE_TABLE_SIZE);

	rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
	if (!rates)
		return;

	/* Start with max_tp_rate[0] */
	minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);

	/* Fill up remaining, keep one entry for max_prob_rate */
	for (; i < (max_rates - 1); i++)
		minstrel_ht_set_rate(mp, mi, rates, i, mi->max_tp_rate[i]);

	if (i < max_rates)
		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);

	if (i < IEEE80211_TX_RATE_TABLE_SIZE)
		rates->rate[i].idx = -1;
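	/* Resulting table layout: max_tp_rate[0 .. max_rates - 2], then
	 * max_prob_rate, then a -1 terminator if table space remains. */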
#ifdef CONFIG_MAC80211_DEBUGFS
	if (mp->fixed_rate_idx != -1)
		return;
#endif
	/* Don't use EAPOL frames for sampling on non-mrr hw */
	if (mp->hw->max_rates == 1 &&
	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
		return;

	if (time_is_after_jiffies(mi->sample_time))
		return;
	mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
	if (!mp)
		return NULL;

	/* contention window settings
	 * Just an approximation. Using the per-queue values would complicate
	 * the calculations and is probably unnecessary */
	mp->cw_min = 15;
	mp->cw_max = 1023;

	/* maximum time that the hw is allowed to stay in one MRR segment */
	mp->segment_size = 6000;

	if (hw->max_rate_tries > 0)
		mp->max_retry = hw->max_rate_tries;
	else
		/* safe default, does not necessarily have to match hw properties */
		mp->max_retry = 7;

	mp->hw = hw;
	mp->update_interval = HZ / 20;
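	/* HZ / 20 corresponds to a statistics update roughly every 50 ms */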
	minstrel_ht_init_cck_rates(mp);
	for (i = 0; i < ARRAY_SIZE(mp->hw->wiphy->bands); i++)
		minstrel_ht_init_ofdm_rates(mp, i);