// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of wlcore
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2011-2013 Texas Instruments Inc.
 */
/*
 * Called when the rx_streaming interval has been changed or when
 * rx_streaming should be disabled.
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int period = wl->conf.rx_streaming.interval;
	int enable;
	int ret = 0;

	/* don't reconfigure if rx_streaming is disabled */
	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
		goto out;

	/* decide whether streaming stays enabled under the new period */
	enable = period &&
		 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		 (wl->conf.rx_streaming.always ||
		  test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags));

	ret = wl1271_set_rx_streaming(wl, wlvif, enable);
	if (!enable) {
		/* don't cancel_work_sync since we might deadlock */
		timer_delete_sync(&wlvif->rx_streaming_timer);
	}

out:
	return ret;
}
/* wl->mutex must be taken */ void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
{ /* if the watchdog is not armed, don't do anything */ if (wl->tx_allocated_blocks == 0) return;
if (unlikely(wl->state != WLCORE_STATE_ON)) goto out;
/* Tx went out in the meantime - everything is ok */ if (unlikely(wl->tx_allocated_blocks == 0)) goto out;
/* * if a ROC is in progress, we might not have any Tx for a long * time (e.g. pending Tx on the non-ROC channels)
*/ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_rearm_tx_watchdog_locked(wl); goto out;
}
/* * if a scan is in progress, we might not have any Tx for a long * time
*/ if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_rearm_tx_watchdog_locked(wl); goto out;
}
/* * AP might cache a frame for a long time for a sleeping station, * so rearm the timer if there's an AP interface with stations. If * Tx is genuinely stuck we will most hopefully discover it when all * stations are removed due to inactivity.
*/ if (wl->active_sta_count) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has " " %d stations",
wl->conf.tx.tx_watchdog_timeout,
wl->active_sta_count);
wl12xx_rearm_tx_watchdog_locked(wl); goto out;
}
wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_queue_recovery_work(wl);
/* * Wake up from high level PS if the STA is asleep with too little * packets in FW or if the STA is awake.
*/ if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_end(wl, wlvif, hlid);
/* * Start high-level PS if the STA is asleep with enough blocks in FW. * Make an exception if this is the only connected link. In this * case FW-memory congestion is less of a problem. * Note that a single connected STA means 2*ap_count + 1 active links, * since we must account for the global and broadcast AP links * for each AP. The "fw_ps" check assures us the other link is a STA * connected to the AP. Otherwise the FW would not set the PSM bit.
*/ elseif (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
if (diff) {
lnk->allocated_pkts -= diff;
lnk->prev_freed_pkts = tx_lnk_free_pkts;
}
/* Get the current sec_pn16 value if present */ if (status->counters.tx_lnk_sec_pn16)
sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]); else
sec_pn16 = 0; /* prevent wrap-around in pn16 counter */
diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff;
/* FIXME: since free_pkts is a 8-bit counter of packets that * rolls over, it can become zero. If it is zero, then we * omit processing below. Is that really correct?
*/ if (tx_lnk_free_pkts <= 0) continue;
/* For a station that has an authorized link: */ if (wlvifsta && wlvifsta->sta.hlid == i) { if (wlvifsta->encryption_type == KEY_TKIP ||
wlvifsta->encryption_type == KEY_AES) { if (diff16) {
lnk->prev_sec_pn16 = sec_pn16; /* accumulate the prev_freed_pkts * counter according to the PN from * firmware
*/
lnk->total_freed_pkts += diff16;
}
} else { if (diff) /* accumulate the prev_freed_pkts * counter according to the free packets * count from firmware
*/
lnk->total_freed_pkts += diff;
}
}
/* For an AP that has been started */ if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) { if (wlvifap->encryption_type == KEY_TKIP ||
wlvifap->encryption_type == KEY_AES) { if (diff16) {
lnk->prev_sec_pn16 = sec_pn16; /* accumulate the prev_freed_pkts * counter according to the PN from * firmware
*/
lnk->total_freed_pkts += diff16;
}
} else { if (diff) /* accumulate the prev_freed_pkts * counter according to the free packets * count from firmware
*/
lnk->total_freed_pkts += diff;
}
}
}
/* prevent wrap-around in total blocks counter */ if (likely(wl->tx_blocks_freed <= status->total_released_blks))
freed_blocks = status->total_released_blks -
wl->tx_blocks_freed; else
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
status->total_released_blks;
/* * If the FW freed some blocks: * If we still have allocated blocks - re-arm the timer, Tx is * not stuck. Otherwise, cancel the timer (no Tx currently).
*/ if (freed_blocks) { if (wl->tx_allocated_blocks)
wl12xx_rearm_tx_watchdog_locked(wl); else
cancel_delayed_work(&wl->tx_watchdog_work);
}
/* * The FW might change the total number of TX memblocks before * we get a notification about blocks being released. Thus, the * available blocks calculation might yield a temporary result * which is lower than the actual available blocks. Keeping in * mind that only blocks that were allocated can be moved from * TX to RX, tx_blocks_available should never decrease here.
*/
wl->tx_blocks_available = max((int)wl->tx_blocks_available,
avail);
/* if more blocks are available now, tx work can be scheduled */ if (wl->tx_blocks_available > old_tx_blk_count)
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wl12xx_irq_update_links_status(wl, wlvif, status);
}
/* update the host-chipset time offset */
wl->time_offset = (ktime_get_boottime_ns() >> 10) -
(s64)(status->fw_localtime);
do {
wl1271_flush_deferred_work(wl);
} while (skb_queue_len(&wl->deferred_rx_queue));
}
#define WL1271_IRQ_MAX_LOOPS 256
staticint wlcore_irq_locked(struct wl1271 *wl)
{ int ret = 0;
u32 intr; int loopcount = WL1271_IRQ_MAX_LOOPS; bool run_tx_queue = true; bool done = false; unsignedint defer_count; unsignedlong flags;
/* * In case edge triggered interrupt must be used, we cannot iterate * more than once without introducing race conditions with the hardirq.
*/ if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
loopcount = 1;
wl1271_debug(DEBUG_IRQ, "IRQ work");
if (unlikely(wl->state != WLCORE_STATE_ON)) goto out;
ret = pm_runtime_resume_and_get(wl->dev); if (ret < 0) goto out;
while (!done && loopcount--) {
smp_mb__after_atomic();
ret = wlcore_fw_status(wl, wl->fw_status); if (ret < 0) goto err_ret;
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("HW watchdog interrupt received! starting recovery.");
wl->watchdog_recovery = true;
ret = -EIO;
/* restarting the chip. ignore any other interrupt. */ goto err_ret;
}
if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
wl1271_error("SW watchdog interrupt received! " "starting recovery.");
wl->watchdog_recovery = true;
ret = -EIO;
/* restarting the chip. ignore any other interrupt. */ goto err_ret;
}
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
ret = wlcore_rx(wl, wl->fw_status); if (ret < 0) goto err_ret;
/* Check if any tx blocks were freed */ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) { if (spin_trylock_irqsave(&wl->wl_lock, flags)) { if (!wl1271_tx_total_queue_count(wl))
run_tx_queue = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/* * In order to avoid starvation of the TX path, * call the work function directly.
*/ if (run_tx_queue) {
ret = wlcore_tx_work_locked(wl); if (ret < 0) goto err_ret;
}
}
/* check for tx results */
ret = wlcore_hw_tx_delayed_compl(wl); if (ret < 0) goto err_ret;
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
skb_queue_len(&wl->deferred_rx_queue); if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
ret = wl1271_event_handle(wl, 0); if (ret < 0) goto err_ret;
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
ret = wl1271_event_handle(wl, 1); if (ret < 0) goto err_ret;
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_INIT_COMPLETE");
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
}
/* complete the ELP completion */ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
spin_lock_irqsave(&wl->wl_lock, flags); if (wl->elp_compl)
complete(wl->elp_compl);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { /* don't enqueue a work right now. mark it as pending */
set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
wl1271_debug(DEBUG_IRQ, "should not enqueue work");
spin_lock_irqsave(&wl->wl_lock, flags);
disable_irq_nosync(wl->irq);
pm_wakeup_event(wl->dev, 0);
spin_unlock_irqrestore(&wl->wl_lock, flags); goto out_handled;
}
/* TX might be handled here, avoid redundant work */
set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
ret = wlcore_irq_locked(wl); if (ret)
wl12xx_queue_recovery_work(wl);
/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) { if (spin_trylock_irqsave(&wl->wl_lock, flags)) { if (!wl1271_tx_total_queue_count(wl))
queue_tx_work = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
} if (queue_tx_work)
ieee80211_queue_work(wl->hw, &wl->tx_work);
}
if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) return;
wl1271_info("Reading FW panic log");
/* * Make sure the chip is awake and the logger isn't active. * Do not send a stop fwlog command if the fw is hanged or if * dbgpins are used (due to some fw bug).
*/
error = pm_runtime_resume_and_get(wl->dev); if (error < 0) return; if (!wl->watchdog_recovery &&
wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
/* Traverse the memory blocks linked list */ do {
end_of_log = wlcore_event_fw_logger(wl); if (end_of_log == 0) {
msleep(100);
end_of_log = wlcore_event_fw_logger(wl);
}
} while (end_of_log != 0);
}
/* * increment the initial seq number on recovery to account for * transmitted packets that we haven't yet got in the FW status
*/ if (wlvif->encryption_type == KEY_GEM)
sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
wl_sta->total_freed_pkts += sqn_recovery_padding;
}
/* * Its safe to enable TX now - the queues are stopped after a request * to restart the HW.
*/
wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
/*
 * Power the chip on and bring it to a state where the firmware can be
 * woken up: power on, reset/init the I/O layer, select the boot
 * partition and wake the ELP module.
 *
 * Returns 0 on success or a negative error code; on a post-power-on
 * failure the chip is powered off again before returning.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	/* undo the power-on so the caller sees a consistent off state */
	wl1271_power_off(wl);
	return ret;
}
/*
 * Power the chip on and fetch the firmware needed for the requested
 * mode (@plt selects the production-line-test firmware).
 *
 * Returns 0 on success or a negative error code.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0) {
		/* release the status/result buffers allocated by setup */
		kfree(wl->fw_status);
		kfree(wl->raw_fw_status);
		kfree(wl->tx_res_if);
	}

out:
	return ret;
}
int wl1271_plt_start(struct wl1271 *wl, constenum plt_mode plt_mode)
{ int retries = WL1271_BOOT_RETRIES; struct wiphy *wiphy = wl->hw->wiphy;
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip.id;
strscpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version));
int wl1271_plt_stop(struct wl1271 *wl)
{ int ret = 0;
wl1271_notice("power down");
/* * Interrupts must be disabled before setting the state to OFF. * Otherwise, the interrupt handler might be called and exit without * reading the interrupt status.
*/
wlcore_disable_interrupts(wl);
mutex_lock(&wl->mutex); if (!wl->plt) {
mutex_unlock(&wl->mutex);
/* * This will not necessarily enable interrupts as interrupts * may have been disabled when op_stop was called. It will, * however, balance the above call to disable_interrupts().
*/
wlcore_enable_interrupts(wl);
wl1271_error("cannot power down because not in PLT " "state: %d", wl->state);
ret = -EBUSY; goto out;
}
/* * drop the packet if the link is invalid or the queue is stopped * for any reason but watermark. Watermark is a "soft"-stop so we * allow these packets through.
*/ if (hlid == WL12XX_INVALID_LINK_ID ||
(!test_bit(hlid, wlvif->links_map)) ||
(wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
!wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
ieee80211_free_txskb(hw, skb); goto out;
}
wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
hlid, q, skb->len);
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
/* * The workqueue is slow to process the tx_queue and we need stop * the queue here, otherwise the queue will get too long.
*/ if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
!wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
wlcore_stop_queue_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
/* * The chip specific setup must run before the first TX packet - * before that, the tx_work will not be initialized!
*/
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
!test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
/* The FW is low on RX memory blocks, so send the dummy packet asap */ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) return wlcore_tx_work_locked(wl);
/* * If the FW TX is busy, TX work will be scheduled by the threaded * interrupt handler function
*/ return 0;
}
/* * The size of the dummy packet should be at least 1400 bytes. However, in * order to minimize the number of bus transactions, aligning it to 512 bytes * boundaries could be beneficial, performance wise
*/ #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/* Dummy packets require the TID to be management */
skb->priority = WL1271_TID_MGMT;
/* Initialize all fields that might be used */
skb_set_queue_mapping(skb, 0);
memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
return skb;
}
/*
 * Validate that a WoWLAN packet pattern can be expressed within the FW
 * RX-filter limits (segment count and total fields-buffer size).
 *
 * Returns 0 if the pattern is acceptable, -EINVAL if it has no mask or
 * needs too many segments, -E2BIG if the fields buffer would overflow.
 */
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				in_field = 1;
				pattern_len = 1;
			} else {
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					/* segment crosses the Eth/IP header
					 * boundary - close it and open a new
					 * one */
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			if (in_field) {
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	/* close a segment still open at the end of the pattern */
	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
/*
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free().
 *
 * Walks the pattern mask and emits one filter field per contiguous
 * masked segment, splitting segments at the Ethernet/IP header boundary
 * since the FW addresses the two headers with different offset bases.
 *
 * Returns 0 on success (*f owns the filter), negative error otherwise
 * (*f is set to NULL and nothing is leaked).
 */
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* skip unmasked bytes */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/* find the end of this masked segment, stopping at the
		 * Ethernet/IP header boundary */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* the FW offset is relative to the header the segment is in */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
/*
 * Program the FW RX filters according to the WoWLAN configuration.
 *
 * With no configuration (or "any" wakeup, or no patterns) all filters
 * are cleared and the default action is set to signal the host.
 * Otherwise every pattern is validated first, then translated into an
 * FW filter, and the default action becomes drop (only matching frames
 * wake the host).
 *
 * Returns 0 on success or a negative error code.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl, struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		/* the FW keeps its own copy; ours can go either way */
		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
/*
 * Prepare an associated STA interface for suspend: program the WoWLAN
 * filters and, if the suspend wake-up conditions differ from the normal
 * ones, switch the FW to the suspend wake-up event/listen interval.
 *
 * A no-op (returns 0) for non-associated interfaces.
 * Returns 0 on success or a negative error code.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	ret = wl1271_configure_wowlan(wl, wow);
	if (ret < 0)
		goto out;

	/* nothing more to do if suspend conditions equal the active ones */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))
		goto out;

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

	if (ret < 0)
		wl1271_error("suspend: set wake up conditions failed: %d", ret);
out:
	return ret;

}
staticint wl1271_configure_suspend_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_wowlan *wow)
{ int ret = 0;
if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) goto out;
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); if (ret < 0) goto out;
ret = wl1271_configure_wowlan(wl, wow); if (ret < 0) goto out;
/* we want to perform the recovery before suspending */ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
wl1271_warning("postponing suspend to perform recovery"); return -EBUSY;
}
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
ret = pm_runtime_resume_and_get(wl->dev); if (ret < 0) {
mutex_unlock(&wl->mutex); return ret;
}
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) { if (wlcore_is_p2p_mgmt(wlvif)) continue;
ret = wl1271_configure_suspend(wl, wlvif, wow); if (ret < 0) { goto out_sleep;
}
}
/* disable fast link flow control notifications from FW */
ret = wlcore_hw_interrupt_notify(wl, false); if (ret < 0) goto out_sleep;
/* if filtering is enabled, configure the FW to drop all RX BA frames */
ret = wlcore_hw_rx_ba_filter(wl,
!!wl->conf.conn.suspend_rx_ba_activity); if (ret < 0) goto out_sleep;
if (ret < 0) {
wl1271_warning("couldn't prepare device to suspend"); return ret;
}
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
flush_work(&wl->tx_work);
/* * Cancel the watchdog even if above tx_flush failed. We will detect * it on resume anyway.
*/
cancel_delayed_work(&wl->tx_watchdog_work);
/* * set suspended flag to avoid triggering a new threaded_irq * work.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
ret = pm_runtime_force_resume(wl->dev); if (ret < 0) {
wl1271_error("ELP wakeup failure!"); goto out_sleep;
}
/* * re-enable irq_work enqueuing, and call irq_work directly if * there is a pending work.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags); if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
run_irq_work = true;
spin_unlock_irqrestore(&wl->wl_lock, flags);
mutex_lock(&wl->mutex);
/* test the recovery flag before calling any SDIO functions */
pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
&wl->flags);
if (run_irq_work) {
wl1271_debug(DEBUG_MAC80211, "run postponed irq_work directly");
/* don't talk to the HW if recovery is pending */ if (!pending_recovery) {
ret = wlcore_irq_locked(wl); if (ret)
wl12xx_queue_recovery_work(wl);
}
wlcore_enable_interrupts(wl);
}
if (pending_recovery) {
wl1271_warning("queuing forgotten recovery on resume");
ieee80211_queue_work(wl->hw, &wl->recovery_work); goto out_sleep;
}
ret = pm_runtime_resume_and_get(wl->dev); if (ret < 0) goto out;
wl12xx_for_each_wlvif(wl, wlvif) { if (wlcore_is_p2p_mgmt(wlvif)) continue;
wl1271_configure_resume(wl, wlvif);
}
ret = wlcore_hw_interrupt_notify(wl, true); if (ret < 0) goto out_sleep;
/* if filtering is enabled, configure the FW to drop all RX BA frames */
ret = wlcore_hw_rx_ba_filter(wl, false); if (ret < 0) goto out_sleep;
/* * Set a flag to re-init the watchdog on the first Tx after resume. * That way we avoid possible conditions where Tx-complete interrupts * fail to arrive and we perform a spurious recovery.
*/
set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
mutex_unlock(&wl->mutex);
/* * We have to delay the booting of the hardware because * we need to know the local MAC address before downloading and * initializing the firmware. The MAC address cannot be changed * after boot, and without the proper MAC address, the firmware * will not function properly. * * The MAC address is first known when the corresponding interface * is added. That is where we will initialize the hardware.
*/
return 0;
}
staticvoid wlcore_op_stop_locked(struct wl1271 *wl)
{ int i;
if (wl->state == WLCORE_STATE_OFF) { if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
&wl->flags))
wlcore_enable_interrupts(wl);
return;
}
/* * this must be before the cancel_work calls below, so that the work * functions don't perform further work.
*/
wl->state = WLCORE_STATE_OFF;
/* * Use the nosync variant to disable interrupts, so the mutex could be * held while doing so without deadlocking.
*/
wlcore_disable_interrupts_nosync(wl);
mutex_unlock(&wl->mutex);
wlcore_synchronize_interrupts(wl); if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
cancel_work_sync(&wl->recovery_work);
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
/* let's notify MAC80211 about the remaining pending TX frames */
mutex_lock(&wl->mutex);
wl12xx_tx_reset(wl);
wl1271_power_off(wl); /* * In case a recovery was scheduled, interrupts were disabled to avoid * an interrupt storm. Now that the power is down, it is safe to * re-enable interrupts to balance the disable depth
*/ if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
wlcore_enable_interrupts(wl);
/* The system link is always allocated */
wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
/* * this is performed after the cancel_work calls and the associated * mutex_lock, so that wl1271_op_add_interface does not accidentally * get executed before all these vars have been reset.
*/
wl->flags = 0;
wl->tx_blocks_freed = 0;
for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_pkts_freed[i] = 0;
wl->tx_allocated_pkts[i] = 0;
}
/* * FW channels must be re-calibrated after recovery, * save current Reg-Domain channel configuration and clear it.
*/
memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last, sizeof(wl->reg_ch_conf_pending));
memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
if (unlikely(wl->state != WLCORE_STATE_ON)) goto out;
/* * Make sure a second really passed since the last auth reply. Maybe * a second auth reply arrived while we were stuck on the mutex. * Check for a little less than the timeout to protect from scheduler * irregularities.
*/
time_spare = jiffies +
msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50); if (!time_after(time_spare, wlvif->pending_auth_reply_time)) goto out;
ret = pm_runtime_resume_and_get(wl->dev); if (ret < 0) goto out;
/* cancel the ROC if active */
wlcore_update_inconn_sta(wl, wlvif, NULL, false);
/* * mac80211 configures some values globally, while we treat them * per-interface. thus, on init, we have to copy them from wl
*/
wlvif->band = wl->band;
wlvif->channel = wl->channel;
wlvif->power_level = wl->power_level;
wlvif->channel_type = wl->channel_type;
staticint wl12xx_init_fw(struct wl1271 *wl)
{ int retries = WL1271_BOOT_RETRIES; bool booted = false; struct wiphy *wiphy = wl->hw->wiphy; int ret;
while (retries) {
retries--;
ret = wl12xx_chip_wakeup(wl, false); if (ret < 0) goto power_off;
ret = wl->ops->boot(wl); if (ret < 0) goto power_off;
ret = wl1271_hw_init(wl); if (ret < 0) goto irq_disable;
booted = true; break;
irq_disable:
mutex_unlock(&wl->mutex); /* Unlocking the mutex in the middle of handling is inherently unsafe. In this case we deem it safe to do, because we need to let any possibly pending IRQ out of the system (and while we are WLCORE_STATE_OFF the IRQ work function will not do anything.) Also, any other possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
wlcore_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
}
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip.id;
strscpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version));
/* * Now we know if 11a is supported (info from the NVS), so disable * 11a channels if not supported
*/ if (!wl->enable_11a)
wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
/*
 * Check whether a fw switch (i.e. moving from one loaded
 * fw to another) is needed. This function is also responsible
 * for updating wl->last_vif_count, so it must be called before
 * loading a non-plt fw (so the correct fw (single-role/multi-role)
 * will be used).
 *
 * Returns true when the currently loaded fw type (single-role vs
 * multi-role) no longer matches the number of interfaces.
 */
static bool wl12xx_need_fw_change(struct wl1271 *wl,
				  struct vif_counter_data vif_counter_data,
				  bool add)
{
	enum wl12xx_fw_type current_fw = wl->fw_type;
	u8 vif_count = vif_counter_data.counter;

	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
		return false;

	/* increase the vif count if this is a new vif */
	if (add && !vif_counter_data.cur_vif_running)
		vif_count++;

	wl->last_vif_count = vif_count;

	/* no need for fw change if the device is OFF */
	if (wl->state == WLCORE_STATE_OFF)
		return false;

	/* no need for fw change if a single fw is used */
	if (!wl->mr_fw_name)
		return false;

	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
		return true;
	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
		return true;

	return false;
}
/* * Enter "forced psm". Make sure the sta is in psm against the ap, * to make the fw switch a bit more disconnection-persistent.
*/ staticvoid wl12xx_force_active_psm(struct wl1271 *wl)
{ struct wl12xx_vif *wlvif;
/* Iteration state for collecting the HW queue bases taken by active vifs. */
struct wlcore_hw_queue_iter_data {
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
/* mark all bits taken by active interfaces */
ieee80211_iterate_active_interfaces_atomic(wl->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
wlcore_hw_queue_iter, &iter_data);
/* the current vif is already running in mac80211 (resume/recovery) */ if (iter_data.cur_running) {
wlvif->hw_queue_base = vif->hw_queue[0];
wl1271_debug(DEBUG_MAC80211, "using pre-allocated hw queue base %d",
wlvif->hw_queue_base);
/* interface type might have changed type */ goto adjust_cab_queue;
}
q_base = find_first_zero_bit(iter_data.hw_queue_map,
WLCORE_NUM_MAC_ADDRESSES); if (q_base >= WLCORE_NUM_MAC_ADDRESSES) return -EBUSY;
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.