/*- * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * Copyright (c) 2004-2005 Atheros Communications, Inc. * Copyright (c) 2006 Devicescape Software, Inc. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. *
*/
/* Module info: kernel module metadata (authors, description, license).
 * The "Dual BSD/GPL" tag matches the dual-licensing terms stated in the
 * copyright header at the top of this file. */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
/* NOTE(review): fragment — the enclosing function headers are missing from
 * this chunk.  The switch below selects band/size for channel setup; the
 * loops afterwards build a reverse lookup table mapping hardware rate codes
 * to mac80211 bitrate indices.  Confirm against the full file. */
switch (mode) { case AR5K_MODE_11A: /* 1..220, but 2GHz frequencies are filtered by check_channel */
size = 220;
band = NL80211_BAND_5GHZ; break; case AR5K_MODE_11B: case AR5K_MODE_11G:
size = 26;
band = NL80211_BAND_2GHZ; break; default:
ATH5K_WARN(ah, "bad mode, not copying channels\n"); return 0;
}
/* Pre-fill every slot with -1 = "no mac80211 rate for this hw code". */
for (i = 0; i < AR5K_MAX_RATES; i++)
ah->rate_idx[b->band][i] = -1;
/* Record the index for both the normal and (if present) the
 * short-preamble hardware value of each bitrate. */
for (i = 0; i < b->n_bitrates; i++) {
ah->rate_idx[b->band][b->bitrates[i].hw_value] = i; if (b->bitrates[i].hw_value_short)
ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
}
}
/* NOTE(review): truncated — "staticint" is a mangling artifact, and sband
 * is used below without any visible assignment; the code that populates
 * sband (and this function's return path) is missing from this chunk. */
staticint
ath5k_setup_bands(struct ieee80211_hw *hw)
{ struct ath5k_hw *ah = hw->priv; struct ieee80211_supported_band *sband; int max_c, count_c = 0; int i;
/* 5211 only supports B rates and uses 4bit rate codes * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B) * fix them up here:
*/ if (ah->ah_version == AR5K_AR5211) { for (i = 0; i < 4; i++) {
sband->bitrates[i].hw_value =
sband->bitrates[i].hw_value & 0xF;
sband->bitrates[i].hw_value_short =
sband->bitrates[i].hw_value_short & 0xF;
}
}
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart stuff after a la ath5k_init.
 *
 * Called with ah->lock.
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chandef->chan->center_freq);

	/* Translate the requested channel width into the hardware
	 * bandwidth mode; anything else is unsupported. */
	if (chandef->width == NL80211_CHAN_WIDTH_20 ||
	    chandef->width == NL80211_CHAN_WIDTH_20_NOHT) {
		ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
	} else if (chandef->width == NL80211_CHAN_WIDTH_5) {
		ah->ah_bwmode = AR5K_BWMODE_5MHZ;
	} else if (chandef->width == NL80211_CHAN_WIDTH_10) {
		ah->ah_bwmode = AR5K_BWMODE_10MHZ;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chandef->chan, true);
}
/* NOTE(review): fragment — interior of the interface-iterator callback
 * (passed as ath5k_vif_iter to ieee80211_iterate_active_interfaces_atomic
 * later in this file); its signature is not visible here. */
if (iter_data->hw_macaddr) for (i = 0; i < ETH_ALEN; i++)
iter_data->mask[i] &=
~(iter_data->hw_macaddr[i] ^ mac[i]);
/* Remember the first active interface's MAC as the candidate lladdr. */
if (!iter_data->found_active) {
iter_data->found_active = true;
memcpy(iter_data->active_mac, mac, ETH_ALEN);
}
if (iter_data->need_set_hw_addr && iter_data->hw_macaddr) if (ether_addr_equal(iter_data->hw_macaddr, mac))
iter_data->need_set_hw_addr = false;
if (!iter_data->any_assoc) { if (avf->assoc)
iter_data->any_assoc = true;
}
/* Calculate combined mode - when APs are active, operate in AP mode. * Otherwise use the mode of the new interface. This can currently * only deal with combinations of APs and STAs. Only one ad-hoc * interfaces is allowed.
*/ if (avf->opmode == NL80211_IFTYPE_AP)
iter_data->opmode = NL80211_IFTYPE_AP; else { if (avf->opmode == NL80211_IFTYPE_STATION)
iter_data->n_stas++; if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
iter_data->opmode = avf->opmode;
}
}
/* NOTE(review): fragment — body of a function that recomputes the BSSID
 * mask, operating mode and RX filter from all active interfaces; the
 * function header and iter_data declaration are not visible here. */
/* * Use the hardware MAC address as reference, the hardware uses it * together with the BSSID mask when matching addresses.
*/
iter_data.hw_macaddr = common->macaddr;
eth_broadcast_addr(iter_data.mask);
iter_data.found_active = false;
iter_data.need_set_hw_addr = true;
iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
iter_data.n_stas = 0;
if (vif)
ath5k_vif_iter(&iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
ieee80211_iterate_active_interfaces_atomic(
ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath5k_vif_iter, &iter_data);
memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
ah->opmode = iter_data.opmode; if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED) /* Nothing active, default to station mode */
ah->opmode = NL80211_IFTYPE_STATION;
if (iter_data.need_set_hw_addr && iter_data.found_active)
ath5k_hw_set_lladdr(ah, iter_data.active_mac);
if (ath5k_hw_hasbssidmask(ah))
ath5k_hw_set_bssid_mask(ah, ah->bssidmask);
/* Set up RX Filter */ if (iter_data.n_stas > 1) { /* If you have multiple STA interfaces connected to * different APs, ARPs are not received (most of the time?) * Enabling PROMISC appears to fix that problem.
*/
ah->filter_flags |= AR5K_RX_FILTER_PROM;
}
/* NOTE(review): fragment — pieces of the RX skb allocation helper and the
 * RX buffer/descriptor setup routine, fused without their headers. */
/* * Allocate buffer with headroom_needed space for the * fake physical layer header at the start.
*/
skb = ath_rxbuf_alloc(common,
common->rx_bufsize,
GFP_ATOMIC);
if (!skb) {
ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
common->rx_bufsize); return NULL;
}
/* Lazily (re)allocate the skb for this rx buffer if needed. */
if (!skb) {
skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr); if (!skb) return -ENOMEM;
bf->skb = skb;
}
/* * Setup descriptors. For receive we always terminate * the descriptor list with a self-linked entry so we'll * not get overrun under high load (as can happen with a * 5212 when ANI processing enables PHY error frames). * * To ensure the last descriptor is self-linked we create * each descriptor as self-linked and add it to the end. As * each additional descriptor is added the previous self-linked * entry is "fixed" naturally. This should be safe even * if DMA is happening. When processing RX interrupts we * never remove/process the last, self-linked, entry on the * descriptor list. This ensures the hardware always has * someplace to write a new frame.
*/
ds = bf->desc;
ds->ds_link = bf->daddr; /* link to self */
ds->ds_data = bf->skbaddr;
ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); if (ret) {
ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__); return ret;
}
/* NOTE(review): fragment — interior of the TX buffer setup path: crypto
 * overhead accounting, RTS/CTS protection setup, multi-rate-retry (MRR)
 * descriptor fill, and finally queueing the buffer on the hardware queue.
 * The MRR for-loop is cut short here (its body continues past this span). */
/* FIXME: If we are in g mode and rate is a CCK rate * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
* from tx power (value is in dB units already) */ if (info->control.hw_key) {
keyidx = info->control.hw_key->hw_key_idx;
pktlen += info->control.hw_key->icv_len;
} if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
flags |= AR5K_TXDESC_RTSENA;
cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
info->control.vif, pktlen, info));
} if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
flags |= AR5K_TXDESC_CTSENA;
cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
info->control.vif, pktlen, info));
}
/* Set up MRR descriptor */ if (ah->ah_capabilities.cap_has_mrr_support) {
memset(mrr_rate, 0, sizeof(mrr_rate));
memset(mrr_tries, 0, sizeof(mrr_tries));
for (i = 0; i < 3; i++) {
rate = ath5k_get_rate(ah->hw, info, bf, i); if (!rate) break;
/* Append the buffer to the software queue and point the hardware
 * (or the previous descriptor's link field) at it. */
spin_lock_bh(&txq->lock);
list_add_tail(&bf->list, &txq->q);
txq->txq_len++; if (txq->link == NULL) /* is this first packet? */
ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); else/* no, so only link it */
*txq->link = bf->daddr;
/* NOTE(review): fragment — descriptor allocation tail: carves the shared
 * descriptor array (ds/da advance in lockstep) into the rx, tx and beacon
 * buffer free lists.  The allocation of ds/da/bf is not visible here. */
INIT_LIST_HEAD(&ah->rxbuf); for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
list_add_tail(&bf->list, &ah->rxbuf);
}
INIT_LIST_HEAD(&ah->txbuf);
ah->txbuf_len = ATH_TXBUF; for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
list_add_tail(&bf->list, &ah->txbuf);
}
/* beacon buffers */
INIT_LIST_HEAD(&ah->bcbuf); for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
list_add_tail(&bf->list, &ah->bcbuf);
}
staticstruct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah, int qtype, int subtype)
{ struct ath5k_txq *txq; struct ath5k_txq_info qi = {
.tqi_subtype = subtype, /* XXX: default values not correct for B and XR channels,
* but who cares? */
.tqi_aifs = AR5K_TUNE_AIFS,
.tqi_cw_min = AR5K_TUNE_CWMIN,
.tqi_cw_max = AR5K_TUNE_CWMAX
}; int qnum;
/* * Enable interrupts only for EOL and DESC conditions. * We mark tx descriptors to receive a DESC interrupt * when a tx queue gets deep; otherwise we wait for the * EOL to reap descriptors. Note that this is done to * reduce interrupt load and this only defers reaping * descriptors, never transmitting frames. Aside from * reducing interrupts this also permits more concurrency. * The only potential downside is if the tx queue backs * up in which case the top half of the kernel may backup * due to a lack of tx descriptors.
*/
qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi); if (qnum < 0) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues
*/ return ERR_PTR(qnum);
}
txq = &ah->txqs[qnum]; if (!txq->setup) {
txq->qnum = qnum;
txq->link = NULL;
INIT_LIST_HEAD(&txq->q);
spin_lock_init(&txq->lock);
txq->setup = true;
txq->txq_len = 0;
txq->txq_max = ATH5K_TXQ_LEN_MAX;
txq->txq_poll_mark = false;
txq->txq_stuck = 0;
} return &ah->txqs[qnum];
}
/* NOTE(review): garbled merge — the header looks like the beacon queue
 * setup function, but the statements below configure an already-created
 * beacon queue and the CAB queue.  `ret` is used without any visible
 * declaration, so part of this function's body is missing from this chunk;
 * confirm against the full file before editing. */
staticint
ath5k_beaconq_setup(struct ath5k_hw *ah)
{ struct ath5k_txq_info qi = { /* XXX: default values not correct for B and XR channels,
* but who cares? */
.tqi_aifs = AR5K_TUNE_AIFS,
.tqi_cw_min = AR5K_TUNE_CWMIN,
.tqi_cw_max = AR5K_TUNE_CWMAX, /* NB: for dynamic turbo, don't enable any other interrupts */
.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
};
ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi); if (ret) {
ATH5K_ERR(ah, "%s: unable to update parameters for beacon " "hardware queue!\n", __func__); goto err;
}
ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */ if (ret) goto err;
/* reconfigure cabq with ready time to 80% of beacon_interval */
ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi); if (ret) goto err;
qi.tqi_ready_time = (ah->bintval * 80) / 100;
ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi); if (ret) goto err;
ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err: return ret;
}
/** * ath5k_drain_tx_buffs - Empty tx buffers * * @ah: The &struct ath5k_hw * * Empty tx buffers from all queues in preparation * of a reset or during shutdown. * * NB: this assumes output has been stopped and * we do not need to block ath5k_tx_tasklet
*/ staticvoid
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{ struct ath5k_txq *txq; struct ath5k_buf *bf, *bf0; int i;
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { if (ah->txqs[i].setup) {
txq = &ah->txqs[i];
spin_lock_bh(&txq->lock);
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
/* NOTE(review): truncated — the per-buffer cleanup body and the
 * loop/lock closing braces are missing from this chunk. */
ath5k_debug_printtxbuf(ah, bf);
/* NOTE(review): fragment — tail of a helper that decides whether the
 * hardware already decrypted a received frame; its header is not visible. */
if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
rs->rs_keyix != AR5K_RXKEYIX_INVALID) return RX_FLAG_DECRYPTED;
/* Apparently when a default key is used to decrypt the packet the hw does not set the index used to decrypt. In such cases
get the index from the packet. */
hlen = ieee80211_hdrlen(hdr->frame_control); if (ieee80211_has_protected(hdr->frame_control) &&
!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
skb->len >= hlen + 4) {
/* Key index lives in the top two bits of the 4th IV byte. */
keyix = skb->data[hlen + 3] >> 6;
if (test_bit(keyix, common->keymap)) return RX_FLAG_DECRYPTED;
}
/* NOTE(review): fragment — interior of the IBSS beacon/TSF merge check;
 * the enclosing function header is not visible in this chunk. */
if (le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS) { /* * Received an IBSS beacon with the same BSSID. Hardware *must* * have updated the local TSF. We have to work around various * hardware bugs, though...
*/
tsf = ath5k_hw_get_tsf64(ah);
bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
hw_tu = TSF_TO_TU(tsf);
/* * Sometimes the HW will give us a wrong tstamp in the rx * status, causing the timestamp extension to go wrong. * (This seems to happen especially with beacon frames bigger * than 78 byte (incl. FCS)) * But we know that the receive timestamp must be later than the * timestamp of the beacon since HW must have synced to that. * * NOTE: here we assume mactime to be after the frame was * received, not like mac80211 which defines it at the start.
*/ if (bc_tstamp > rxs->mactime) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "fixing mactime from %llx to %llx\n",
(unsignedlonglong)rxs->mactime,
(unsignedlonglong)tsf);
rxs->mactime = tsf;
}
/* * Local TSF might have moved higher than our beacon timers, * in that case we have to update them to continue sending * beacons. This also takes care of synchronizing beacon sending * times with other stations.
*/ if (hw_tu >= ah->nexttbtt)
ath5k_beacon_update_timers(ah, bc_tstamp);
/* Check if the beacon timers are still correct, because a TSF * update might have created a window between them - for a
* longer description see the comment of this function: */ if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
ath5k_beacon_update_timers(ah, bc_tstamp);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "fixed beacon timers after beacon receive\n");
}
}
}
/* NOTE(review): fragment — tail of the header-pad-position helper followed
 * by the truncated heads of the add/remove padding helpers (their bodies
 * continue past this span). */
if (ieee80211_has_a4(frame_control))
padpos += ETH_ALEN;
if (ieee80211_is_data_qos(frame_control))
padpos += IEEE80211_QOS_CTL_LEN;
return padpos;
}
/* * This function expects an 802.11 frame and returns the number of * bytes added, or -1 if we don't have enough header room.
*/ staticint ath5k_add_padding(struct sk_buff *skb)
{ int padpos = ath5k_common_padpos(skb); int padsize = padpos & 3;
/* * The MAC header is padded to have 32-bit boundary if the * packet payload is non-zero. The general calculation for * padsize would take into account odd header lengths: * padsize = 4 - (hdrlen & 3); however, since only * even-length headers are used, padding can only be 0 or 2 * bytes and we can optimize this a bit. We must not try to * remove padding from short control frames that do not have a * payload. * * This function expects an 802.11 frame and returns the number of * bytes removed.
*/ staticint ath5k_remove_padding(struct sk_buff *skb)
{ int padpos = ath5k_common_padpos(skb); int padsize = padpos & 3;
/* NOTE(review): fragment — interior of the RX frame handler: fills the
 * mac80211 rx status (error flags, extended timestamp, short preamble),
 * tracks beacon RSSI and hands the frame to mac80211. */
rxs->flag = 0; if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
rxs->flag |= RX_FLAG_MMIC_ERROR; if (unlikely(rs->rs_status & AR5K_RXERR_CRC))
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
/* * always extend the mac timestamp, since this information is * also needed for proper IBSS merging. * * XXX: it might be too late to do it here, since rs_tstamp is * 15bit only. that means TSF extension has to be done within * 32768usec (about 32ms). it might be necessary to move this to * the interrupt handler, like it is done in madwifi.
*/
rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
rxs->flag |= RX_FLAG_MACTIME_END;
if (rs->rs_rate ==
ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE;
trace_ath5k_rx(ah, skb);
if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
/* check beacons in IBSS mode */ if (ah->opmode == NL80211_IFTYPE_ADHOC)
ath5k_check_ibss_tsf(ah, skb, rxs);
}
ieee80211_rx(ah->hw, skb);
}
/** ath5k_frame_receive_ok() - Do we want to receive this frame or not? * * Check if we want to further process this frame or not. Also update * statistics. Return true if we want this frame, false if not.
*/ staticbool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
ah->stats.rx_all_count++;
ah->stats.rx_bytes_count += rs->rs_datalen;
if (unlikely(rs->rs_status)) { unsignedint filters;
if (rs->rs_status & AR5K_RXERR_CRC)
ah->stats.rxerr_crc++; if (rs->rs_status & AR5K_RXERR_FIFO)
ah->stats.rxerr_fifo++; if (rs->rs_status & AR5K_RXERR_PHY) {
ah->stats.rxerr_phy++; if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
/* * Treat packets that underwent a CCK or OFDM reset as having a bad CRC. * These restarts happen when the radio resynchronizes to a stronger frame * while receiving a weaker frame. Here we receive the prefix of the weak * frame. Since these are incomplete packets, mark their CRC as invalid.
*/ if (rs->rs_phyerr == AR5K_RX_PHY_ERROR_OFDM_RESTART ||
rs->rs_phyerr == AR5K_RX_PHY_ERROR_CCK_RESTART) {
rs->rs_status |= AR5K_RXERR_CRC;
rs->rs_status &= ~AR5K_RXERR_PHY;
} else { returnfalse;
}
} if (rs->rs_status & AR5K_RXERR_DECRYPT) { /* * Decrypt error. If the error occurred * because there was no hardware key, then * let the frame through so the upper layers * can process it. This is necessary for 5210 * parts which have no way to setup a ``clear'' * key cache entry. * * XXX do key cache faulting
*/
ah->stats.rxerr_decrypt++; if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
!(rs->rs_status & AR5K_RXERR_CRC)) returntrue;
} if (rs->rs_status & AR5K_RXERR_MIC) {
ah->stats.rxerr_mic++; returntrue;
}
/* * Reject any frames with non-crypto errors, and take into account the * current FIF_* filters.
*/
filters = AR5K_RXERR_DECRYPT; if (ah->fif_filter_flags & FIF_FCSFAIL)
filters |= AR5K_RXERR_CRC;
if (rs->rs_status & ~filters) returnfalse;
}
if (unlikely(rs->rs_more)) {
ah->stats.rxerr_jumbo++; returnfalse;
} returntrue;
}
/* NOTE(review): fragment — pieces of the TX submit path (padding, buffer
 * reservation under txbuflock) and of the TX completion path (descriptor
 * race check, queue wake); the enclosing function headers are missing. */
/* * The hardware expects the header padded to 4 byte boundaries. * If this is not the case, we add the padding after the header.
*/
padsize = ath5k_add_padding(skb); if (padsize < 0) {
ATH5K_ERR(ah, "tx hdrlen not %%4: not enough" " headroom to pad"); goto drop_packet;
}
if (txq->txq_len >= txq->txq_max &&
txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
ieee80211_stop_queue(hw, txq->qnum);
spin_lock_irqsave(&ah->txbuflock, flags); if (list_empty(&ah->txbuf)) {
ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
spin_unlock_irqrestore(&ah->txbuflock, flags);
ieee80211_stop_queues(hw); goto drop_packet;
}
bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
list_del(&bf->list);
ah->txbuf_len--; if (list_empty(&ah->txbuf))
ieee80211_stop_queues(hw);
spin_unlock_irqrestore(&ah->txbuflock, flags);
/* * It's possible that the hardware can say the buffer is * completed when it hasn't yet loaded the ds_link from * host memory and moved on. * Always keep the last descriptor to avoid HW races...
*/ if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
spin_lock(&ah->txbuflock);
list_move_tail(&bf->list, &ah->txbuf);
ah->txbuf_len++;
txq->txq_len--;
spin_unlock(&ah->txbuflock);
}
}
spin_unlock(&txq->lock); if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
ieee80211_wake_queue(ah->hw, txq->qnum);
}
/* NOTE(review): fragment — interior of the beacon descriptor setup:
 * antenna selection for the sectored-AP scenario and the tx descriptor
 * setup call; the function header and error path are not visible here. */
/* * If we use multiple antennas on AP and use * the Sectored AP scenario, switch antenna every * 4 beacons to make sure everybody hears our AP. * When a client tries to associate, hw will keep * track of the tx antenna to be used for this client * automatically, based on ACKed packets. * * Note: AP still listens and transmits RTS on the * default antenna which is supposed to be an omni. * * Note2: On sectored scenarios it's possible to have * multiple antennas (1 omni -- the default -- and 14 * sectors), so if we choose to actually support this * mode, we need to allow the user to set how many antennas * we have and tweak the code below to send beacons * on all of them.
*/ if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
antenna = ah->bsent & 4 ? 2 : 1;
/* FIXME: If we are in g mode and rate is a CCK rate * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
* from tx power (value is in dB units already) */
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
ieee80211_get_hdrlen_from_skb(skb), padsize,
AR5K_PKT_TYPE_BEACON,
(ah->ah_txpower.txp_requested * 2),
ieee80211_get_tx_rate(ah->hw, info)->hw_value,
1, AR5K_TXKEYIX_INVALID,
antenna, flags, 0, 0); if (ret) goto err_unmap;
/* * Updates the beacon that is sent by ath5k_beacon_send. For adhoc, * this is called only once at config_bss time, for AP we do it every * SWBA interrupt so that the TIM will reflect buffered frames. * * Called with the beacon lock.
*/ int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{ int ret; struct ath5k_hw *ah = hw->priv; struct ath5k_vif *avf; struct sk_buff *skb;
/* NOTE(review): truncated — the function body and return statement are
 * missing from this chunk. */
/* NOTE(review): gappy — braces balance, but `vif` is used below without a
 * visible assignment and a bare `break` appears outside any visible loop,
 * so parts of this function's body are missing from this chunk. */
/* * Transmit a beacon frame at SWBA. Dynamic updates to the * frame contents are done as needed and the slot time is * also adjusted based on current state. * * This is called from software irq context (beacontq tasklets) * or user context from ath5k_beacon_config.
*/ staticvoid
ath5k_beacon_send(struct ath5k_hw *ah)
{ struct ieee80211_vif *vif; struct ath5k_vif *avf; struct ath5k_buf *bf; struct sk_buff *skb; int err;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
/* * Check if the previous beacon has gone out. If * not, don't try to post another: skip this * period and wait for the next. Missed beacons * indicate a problem and should not occur. If we * miss too many consecutive beacons reset the device.
*/ if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
ah->bmisscount++;
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "missed %u consecutive beacons\n", ah->bmisscount); if (ah->bmisscount > 10) { /* NB: 10 is a guess */
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "stuck beacon time (%u missed)\n",
ah->bmisscount);
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "stuck beacon, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
} return;
} if (unlikely(ah->bmisscount != 0)) {
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "resume beacon xmit after %u misses\n",
ah->bmisscount);
ah->bmisscount = 0;
}
/* * Stop any current dma and put the new frame on the queue. * This should never fail since we check above that no frames * are still pending on the queue.
*/ if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq); /* NB: hw still stops DMA, so proceed */
}
/* refresh the beacon for AP or MESH mode */ if (ah->opmode == NL80211_IFTYPE_AP ||
ah->opmode == NL80211_IFTYPE_MESH_POINT) {
err = ath5k_beacon_update(ah->hw, vif); if (err) return;
}
if (ah->cabq->txq_len >= ah->cabq->txq_max) break;
skb = ieee80211_get_buffered_bc(ah->hw, vif);
}
ah->bsent++;
}
/** * ath5k_beacon_update_timers - update beacon timers * * @ah: struct ath5k_hw pointer we are operating on * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a * beacon timer update based on the current HW TSF. * * Calculate the next target beacon transmit time (TBTT) based on the timestamp * of a received beacon or the current local hardware TSF and write it to the * beacon timer registers. * * This is called in a variety of situations, e.g. when a beacon is received, * when a TSF update has been detected, but also when an new IBSS is created or * when we otherwise know we have to update the timers, but we keep it in this * function to have it all together in one place.
*/ void
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
u32 nexttbtt, intval, hw_tu, bc_tu;
u64 hw_tsf;
intval = ah->bintval & AR5K_BEACON_PERIOD; if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
+ ah->num_mesh_vifs > 1) {
intval /= ATH_BCBUF; /* staggered multi-bss beacons */ if (intval < 15)
ATH5K_WARN(ah, "intval %u is too low, min 15\n",
intval);
} if (WARN_ON(!intval)) return;
/* beacon TSF converted to TU */
bc_tu = TSF_TO_TU(bc_tsf);
/* current TSF converted to TU */
hw_tsf = ath5k_hw_get_tsf64(ah);
hw_tu = TSF_TO_TU(hw_tsf);
#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3) /* We use FUDGE to make sure the next TBTT is ahead of the current TU. * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
* configuration we need to make sure it is bigger than that. */
if (bc_tsf == -1) { /* * no beacons received, called internally. * just need to refresh timers based on HW TSF.
*/
nexttbtt = roundup(hw_tu + FUDGE, intval);
} elseif (bc_tsf == 0) { /* * no beacon received, probably called by ath5k_reset_tsf(). * reset TSF to start with 0.
*/
nexttbtt = intval;
intval |= AR5K_BEACON_RESET_TSF;
} elseif (bc_tsf > hw_tsf) { /* * beacon received, SW merge happened but HW TSF not yet updated. * not possible to reconfigure timers yet, but next time we * receive a beacon with the same BSSID, the hardware will * automatically update the TSF and then we need to reconfigure * the timers.
*/
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "need to wait for HW TSF sync\n"); return;
} else { /* * most important case for beacon synchronization between STA. * * beacon received and HW TSF has been already updated by HW. * update next TBTT based on the TSF of the beacon, but make * sure it is ahead of our local TSF timer.
*/
nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
} #undef FUDGE
/* NOTE(review): truncated — the code that writes nexttbtt/intval to the
 * hardware timer registers and this function's closing brace are missing
 * from this chunk. */
/* * debugging output last in order to preserve the time critical aspect * of this function
*/ if (bc_tsf == -1)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "reconfigured timers based on HW TSF\n"); elseif (bc_tsf == 0)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "reset HW TSF and timers\n"); else
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "updated timers based on beacon TSF\n");
/* NOTE(review): garbled merge — the beacon_config body below runs straight
 * into what looks like the SWBA tasklet handler (the ADHOC/else branch at
 * the end); the boundary between the two functions is missing here. */
/** * ath5k_beacon_config - Configure the beacon queues and interrupts * * @ah: struct ath5k_hw pointer we are operating on * * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA * interrupts to detect TSF updates only.
*/ void
ath5k_beacon_config(struct ath5k_hw *ah)
{
spin_lock_bh(&ah->block);
ah->bmisscount = 0;
ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
if (ah->enable_beacon) { /* * In IBSS mode we use a self-linked tx descriptor and let the * hardware send the beacons automatically. We have to load it * only once here. * We use the SWBA interrupt only to keep track of the beacon * timers in order to detect automatic TSF updates.
*/
ath5k_beaconq_config(ah);
ah->imask |= AR5K_INT_SWBA;
if (ah->opmode == NL80211_IFTYPE_ADHOC) { if (ath5k_hw_hasveol(ah))
ath5k_beacon_send(ah);
} else
ath5k_beacon_update_timers(ah, -1);
} else {
ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
}
/* * Software beacon alert--time to send a beacon. * * In IBSS mode we use this interrupt just to * keep track of the next TBTT (target beacon * transmission time) in order to detect whether * automatic TSF updates happened.
*/ if (ah->opmode == NL80211_IFTYPE_ADHOC) { /* XXX: only if VEOL supported */
u64 tsf = ath5k_hw_get_tsf64(ah);
ah->nexttbtt += ah->bintval;
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "SWBA nexttbtt: %x hw_tu: %x " "TSF: %llx\n",
ah->nexttbtt,
TSF_TO_TU(tsf),
(unsignedlonglong) tsf);
} else {
spin_lock(&ah->block);
ath5k_beacon_send(ah);
spin_unlock(&ah->block);
}
}
/* NOTE(review): fragment — tail of the short-calibration scheduling logic;
 * the enclosing function header and the condition guarding this code are
 * not visible in this chunk. */
/* Run calibration only when another calibration * is not running. * * Note: This is for both full/short calibration, * if it's time for a full one, ath5k_calibrate_work will deal
* with it. */
ah->ah_cal_next_short = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
ieee80211_queue_work(ah->hw, &ah->calib_work);
} /* we could use SWI to generate enough interrupts to meet our * calibration interval requirements, if necessary:
* AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}
/* NOTE(review): fragment — interior of the interrupt handler: spurious-IRQ
 * check, then pieces of the ISR status dispatch (the FATAL branch body and
 * the loop's closing code are missing from this chunk). */
/* * If hw is not ready (or detached) and we get an * interrupt, or if we have no interrupts pending * (that means it's not for us) skip it. * * NOTE: Group 0/1 PCI interface registers are not * supported on WiSOCs, so we can't check for pending * interrupts (ISR belongs to another register group * so we are ok).
*/ if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
((ath5k_get_bus_type(ah) != ATH_AHB) &&
!ath5k_hw_is_intr_pending(ah)))) return IRQ_NONE;
/** Main loop **/ do {
ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
/* * Fatal hw error -> Log and reset * * Fatal errors are unrecoverable so we have to * reset the card. These errors include bus and * dma errors.
*/ if (unlikely(status & AR5K_INT_FATAL)) {
/* * RX Overrun -> Count and reset if needed * * Receive buffers are full. Either the bus is busy or * the CPU is not fast enough to process all received * frames.
*/
} elseif (unlikely(status & AR5K_INT_RXORN)) {
/* * Older chipsets need a reset to come out of this * condition, but we treat it as RX for newer chips. * We don't know exactly which versions need a reset * this guess is copied from the HAL.
*/
ah->stats.rxorn_intr++;
/* * No more RX descriptors -> Just count * * NB: the hardware should re-read the link when * RXE bit is written, but it doesn't work at * least on older hardware revs.
*/ if (status & AR5K_INT_RXEOL)
ah->stats.rxeol_intr++;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.