/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
			    bool flush)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink)
		*sc->rx.rxlink = bf->bf_daddr;
	else if (!flush)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
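	/*
	 * Chaining note (added for clarity): in the full driver,
	 * sc->rx.rxlink holds the address of the previously linked
	 * descriptor's ds_link word. While such a chain exists, a new
	 * buffer is appended by writing its DMA address into that word;
	 * only when no chain exists (and we are not flushing) is the
	 * buffer handed straight to the hardware with ath9k_hw_putrxbuf().
	 */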
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	spin_lock_bh(&sc->chan_lock);

	if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if ((sc->cur_chan->rxfilter & FIF_CONTROL) ||
	    sc->sc_ah->dynack.enabled)
		rfilt |= ATH9K_RX_FILTER_CONTROL;
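	/*
	 * A minimal sketch of how the conditional bits above are normally
	 * consumed, assuming this fragment sits inside the driver's rx
	 * filter calculation routine (the surrounding function body is not
	 * part of this excerpt): rfilt starts from the always-accepted
	 * traffic classes and the result is eventually written to the MAC
	 * with ath9k_hw_setrxfilter(), roughly:
	 *
	 *	u32 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
	 *		    ATH9K_RX_FILTER_MCAST;
	 *	...			(* conditional bits OR'ed in as above *)
	 *	ath9k_hw_setrxfilter(ah, rfilt);
	 */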
	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
		if (ath9k_is_chanctx_enabled()) {
			if (sc->cur_chan == &sc->offchannel.chan)
				skip_beacon = true;
		}
#endif

		if (!skip_beacon &&
		    !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
			ath9k_set_beacon(sc);

		ath9k_p2p_beacon_sync(sc);
	}
	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}
	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
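/*
 * Summary of the power-save handshake driven above (derived from the code
 * in this excerpt): PS_WAIT_FOR_BEACON roughly keeps the chip awake until
 * our own beacon has been processed, PS_WAIT_FOR_CAB until the last buffered
 * broadcast/multicast (CAB) frame following a DTIM beacon has arrived, and
 * PS_WAIT_FOR_PSPOLL_DATA until the unicast frame answering a PS-Poll has
 * been received. Once none of these bits remain set, the driver is free to
 * drop back into network sleep.
 */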
	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);
	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_rxbuf *tbf;
		struct ath_desc *tds;

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */
		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * Re-check previous descriptor, in case it has been filled
		 * in the mean time.
		 */
		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
		if (ret == -EINPROGRESS) {
			/*
			 * mark descriptor as zero-length and set the 'more'
			 * flag to ensure that both buffers get discarded
			 */
			rs->rs_datalen = 0;
			rs->rs_more = true;
		}
	}
	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	bool discard_current = sc->rx.discard_next;
	bool is_phyerr;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;
	/*
	 * Discard zero-length packets and packets smaller than an ACK
	 * which are not PHY_ERROR (short radar pulses have a length of 3)
	 */
	is_phyerr = rx_stats->rs_status & ATH9K_RXERR_PHY;
	if (!rx_stats->rs_datalen ||
	    (rx_stats->rs_datalen < 10 && !is_phyerr)) {
		RX_STAT_INC(sc, rx_len_err);
		goto corrupt;
	}
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(sc, rx_len_err);
		goto corrupt;
	}
	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;
	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		/*
		 * DFS and spectral are mutually exclusive
		 *
		 * Since some chips use PHYERR_RADAR as indication for both, we
		 * need to double check which feature is enabled to prevent
		 * feeding spectral or dfs-detector with wrong frames.
		 */
		if (hw->conf.radar_enabled) {
			ath9k_dfs_process_phyerr(sc, hdr, rx_stats,
						 rx_status->mactime);
		} else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
			   ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
					       rx_status->mactime)) {
			RX_STAT_INC(sc, rx_spectral);
		}

		return -EINVAL;
	}
	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	spin_lock_bh(&sc->chan_lock);
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error,
				 sc->cur_chan->rxfilter)) {
		spin_unlock_bh(&sc->chan_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&sc->chan_lock);
	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(sc, rx_beacons);
		rx_stats->is_mybeacon = true;
	}
	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/*
		 * No valid hardware bitrate found -- we should not get here
		 * because hardware has already validated this frame as OK.
		 */
		ath_dbg(common, ANY,
			"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(sc, rx_rate_err);
		return -EINVAL;
	}
	if (ath9k_is_chanctx_enabled()) {
		if (rx_stats->is_mybeacon)
			ath_chanctx_beacon_recv_ev(sc,
					ATH_CHANCTX_EVENT_BEACON_RECEIVED);
	}
/*
 * Run the LNA combining algorithm only in these cases:
 *
 * Standalone WLAN cards with both LNA/Antenna diversity
 * enabled in the EEPROM.
 *
 * WLAN+BT cards which are in the supported card list
 * in ath_pci_id_table and the user has loaded the
 * driver with "bt_ant_diversity" set to true.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
				struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
		return;

	/*
	 * Change the default rx antenna if rx diversity
	 * chooses the other antenna 3 times in a row.
	 */
	if (sc->rx.defant != rs->rs_antenna) {
		if (++sc->rx.rxotherant >= 3)
			ath_setdefantenna(sc, rs->rs_antenna);
	} else {
		sc->rx.rxotherant = 0;
	}

	if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
		if (common->bt_ant_diversity)
			ath_ant_comb_scan(sc, rs);
	} else {
		ath_ant_comb_scan(sc, rs);
	}
}
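/*
 * In short: the default rx antenna is only switched after three consecutive
 * frames arrive on the other antenna, which avoids flapping on a single
 * noisy reception, and on WLAN+BT combo cards the LNA combining scan is
 * additionally gated by "bt_ant_diversity" so the shared antenna setup is
 * only touched when the user explicitly asked for it.
 */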
	/* Ensure we always have an skb to requeue once we are done
	 * processing the current buffer's skb */
	requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

	/* If there is no memory we ignore the current RX'd frame,
	 * tell hardware it can give us a new frame using the old
	 * skb and put it at the tail of the sc->rx.rxbuf list for
	 * processing. */
	if (!requeue_skb) {
		RX_STAT_INC(sc, rx_oom_err);
		goto requeue_drop_frag;
	}
	/* We will now give hardware our shiny new allocated skb */
	new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
				      common->rx_bufsize, dma_type);
	if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
		dev_kfree_skb_any(requeue_skb);
		goto requeue_drop_frag;
	}

	/* Unmap the frame */
	dma_unmap_single(sc->dev, bf->bf_buf_addr,
			 common->rx_bufsize, dma_type);

	skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
	if (ah->caps.rx_status_len)
		skb_pull(skb, ah->caps.rx_status_len);
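	/*
	 * Why the skb_put()/skb_pull() pair above: on chips where
	 * rx_status_len is non-zero the hardware DMAs an rx status block of
	 * that many bytes in front of the actual frame, so the full DMA'ed
	 * length is first made visible with skb_put() and the status header
	 * is then stripped with skb_pull() so that skb->data points at the
	 * 802.11 frame. On chips with rx_status_len == 0 the pull is skipped.
	 */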
	if (!rs.rs_more)
		ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
					     rxs, decrypt_error);
	if (rs.rs_more) {
		RX_STAT_INC(sc, rx_frags);
		/*
		 * rs_more indicates chained descriptors which can be
		 * used to link buffers together for a sort of
		 * scatter-gather operation.
		 */
		if (sc->rx.frag) {
			/* too many fragments - cannot handle frame */
			dev_kfree_skb_any(sc->rx.frag);
			dev_kfree_skb_any(skb);
			RX_STAT_INC(sc, rx_too_many_frags_err);
			skb = NULL;
		}
		sc->rx.frag = skb;
		goto requeue;
	}

	if (sc->rx.frag) {
		int space = skb->len - skb_tailroom(hdr_skb);