/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* * Macro to check if DMA buffer is WORD-aligned and DMA-able. * Most host controllers assume the buffer is DMA'able and will * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid * check fails on stack memory.
*/ staticinlinebool buf_needs_bounce(u8 *buf)
{ return ((unsignedlong) buf & 0x3) || !virt_addr_valid(buf);
}
/*
 * NOTE(review): this span is a fragment.  It splices the tail of the
 * scatter/gather list setup (SG entry fill + mailbox EOM address fixup)
 * onto the tail of the scatter-request allocation path (error unwind and
 * insertion into the free pool).  The enclosing function header(s) are
 * not visible here — verify against the complete file before modifying.
 */
/* fill SG entries */
sg = scat_req->sgentries;
sg_init_table(sg, scat_req->scat_entries);
/* assemble SG list */ for (i = 0; i < scat_req->scat_entries; i++, sg++) {
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
i, scat_req->scat_list[i].buf,
scat_req->scat_list[i].len);
/* Fixup the address so that the last byte will fall on MBOX EOM */ if (scat_req->req & HIF_WRITE) { if (scat_req->addr == HIF_MBOX_BASE_ADDR)
scat_req->addr += HIF_MBOX_WIDTH - scat_req->len; else /* Uses extended address range */
scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
}
/* --- from here on: tail of the scatter-request allocation routine --- */
if (!s_req->sgentries) {
/* SG table allocation failed: free the request itself. */
kfree(s_req); return -ENOMEM;
}
}
/* allocate a bus request for this scatter request */
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); if (!bus_req) {
/* Unwind everything allocated for this scatter request. */
kfree(s_req->sgentries);
kfree(s_req->virt_dma_buf);
kfree(s_req); return -ENOMEM;
}
/* assign the scatter request to this bus request */
bus_req->scat_req = s_req;
s_req->busrequest = bus_req;
s_req->virt_scat = virt_scat;
/* add it to the scatter pool */
hif_scatter_req_add(ar_sdio->ar, s_req);
}
/*
 * NOTE(review): fragment — the head of the SDIO interrupt handler is
 * spliced onto the tail of the scatter read/write dispatch routine (the
 * scat_req/HIF_SYNCHRONOUS code below belongs to a different function
 * whose header is not visible here).  The collapsed "staticvoid" token
 * is a whitespace-loss artifact; verify against the complete file.
 */
staticvoid ath6kl_sdio_irq_handler(struct sdio_func *func)
{ int status; struct ath6kl_sdio *ar_sdio;
ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
ar_sdio = sdio_get_drvdata(func);
/* Flag irq-in-progress before dropping the host lock. */
atomic_set(&ar_sdio->irq_handling, 1); /* * Release the host during interrupts so we can pick it back up when * we process commands.
*/
sdio_release_host(ar_sdio->func);
status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
sdio_claim_host(ar_sdio->func);
/* --- from here on: tail of the scatter dispatch routine --- */
ath6kl_dbg(ATH6KL_DBG_SCATTER, "hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
if (request & HIF_SYNCHRONOUS) {
/* Synchronous path: perform the transfer inline. */
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
} else {
/* Async path: queue the bus request; the worker drains wr_asyncq. */
spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
}
return status;
}
/*
 * NOTE(review): this function is truncated in the visible chunk — the
 * list_for_each_entry_safe() loop body below never closes and the lock
 * is not visibly re-acquired before the next iteration; the remainder
 * lies outside this view.  Verify against the complete file.
 */
/* clean up scatter support */ staticvoid ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); struct hif_scatter_req *s_req, *tmp_req;
/* empty the free list */
spin_lock_bh(&ar_sdio->scat_lock);
list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
list_del(&s_req->list);
/* Drop the lock while freeing; the entry is already unlinked. */
spin_unlock_bh(&ar_sdio->scat_lock);
/* * FIXME: should we also call completion handler with * ath6kl_hif_rw_comp_handler() with status -ECANCELED so * that the packet is properly freed?
*/ if (s_req->busrequest) {
/* Detach the bus request before returning it to its pool. */
s_req->busrequest->scat_req = NULL;
ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
}
kfree(s_req->virt_dma_buf);
kfree(s_req->sgentries);
kfree(s_req);
/*
 * NOTE(review): fragment — this span splices pieces of at least three
 * routines: SDIO configuration (enabling 4-bit async IRQ mode), the
 * WOW-suspend pm-flag setup, and the deep-sleep suspend path ending in
 * the cut_pwr label.  None of the enclosing function headers are visible
 * here; verify against the complete file before modifying.
 */
if (ar_sdio->id->device >= SDIO_DEVICE_ID_ATHEROS_AR6003_00) { /* enable 4-bit ASYNC interrupt on AR6003 or later */
ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
CCCR_SDIO_IRQ_MODE_REG,
SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); if (ret) {
ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
ret); goto out;
}
/* --- WOW suspend: both wake-irq and keep-power caps are required --- */
if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
!(flags & MMC_PM_KEEP_POWER)) return -EINVAL;
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) {
ath6kl_err("set sdio keep pwr flag failed: %d\n", ret); return ret;
}
/* sdio irq wakes up host */
ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); if (ret)
ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
/* --- deep-sleep suspend path --- */
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
!ar->suspend_mode || try_deepsleep) {
flags = sdio_get_host_pm_caps(func); if (!(flags & MMC_PM_KEEP_POWER)) goto cut_pwr;
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) goto cut_pwr;
/* * Workaround to support Deep Sleep with MSM, set the host pm * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC deiver to disable * the sdc2_clock and internally allows MSM to enter * TCXO shutdown properly.
*/ if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
ret = sdio_set_host_pm_flags(func,
MMC_PM_WAKE_SDIO_IRQ); if (ret) goto cut_pwr;
}
ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
NULL); if (ret) goto cut_pwr;
return 0;
}
/* Fall back to cutting power: clear keep-power so the host powers down. */
cut_pwr: if (func->card && func->card->host)
func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;
/*
 * HIF resume callback.
 *
 * When resuming from a state in which SDIO power was cut (OFF/CUTPOWER),
 * the SDIO function settings must be reprogrammed before use; all other
 * states need no SDIO-level work here.  Always notifies cfg80211 that
 * the device has resumed.
 *
 * Returns 0 (the cfg80211 resume result is intentionally not propagated,
 * matching the original behavior).
 */
static int ath6kl_sdio_resume(struct ath6kl *ar)
{
	switch (ar->state) {
	case ATH6KL_STATE_OFF:
	case ATH6KL_STATE_CUTPOWER:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath6kl_sdio_config(ar);
		break;

	case ATH6KL_STATE_ON:
	case ATH6KL_STATE_DEEPSLEEP:
	case ATH6KL_STATE_WOW:
	case ATH6KL_STATE_SUSPENDING:
	case ATH6KL_STATE_RESUMING:
	case ATH6KL_STATE_RECOVERY:
		/* no SDIO reconfiguration needed */
		break;
	}

	ath6kl_cfg80211_resume(ar);

	return 0;
}
/*
 * NOTE(review): fragment — the head of ath6kl_set_addrwin_reg() is
 * spliced onto the tail of the diag read routine (the code from the
 * "set window register to start read cycle" comment onward references
 * `address` and `data`, which are not declared here, and calls
 * ath6kl_set_addrwin_reg() itself).  The collapsed "staticint" token is
 * a whitespace-loss artifact.  Verify against the complete file.
 */
/* set the window address register (using 4-byte register access ). */ staticint ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{ int status;
u8 addr_val[4];
s32 i;
/* * Write bytes 1,2,3 of the register to set the upper address bytes, * the LSB is written last to initiate the access cycle
*/
for (i = 1; i <= 3; i++) { /* * Fill the buffer with the address byte value we want to * hit 4 times.
*/
memset(addr_val, ((u8 *)&addr)[i], 4);
/* * Hit each byte of the register address with a 4-byte * write operation to the same address, this is a harmless * operation.
*/
status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
4, HIF_WR_SYNC_BYTE_FIX); if (status) break;
}
if (status) {
ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
__func__, addr, reg_addr); return status;
}
/* * Write the address register again, this time write the whole * 4-byte value. The effect here is that the LSB write causes the * cycle to start, the extra 3 byte write to bytes 1,2,3 has no * effect since we are writing the same values again
*/
status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
4, HIF_WR_SYNC_BYTE_INC);
if (status) {
ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
__func__, addr, reg_addr); return status;
}
/* --- from here on: tail of the diag read routine --- */
/* set window register to start read cycle */
status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
address);
if (status) return status;
/* read the data */
status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
(u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC); if (status) {
ath6kl_err("%s: failed to read from window data addr\n",
__func__); return status;
}
return status;
}
/*
 * Write a 32-bit value to a target address through the diagnostic window.
 *
 * Stages the little-endian @data into the window data register first,
 * then programs the window write-address register, which triggers the
 * actual write cycle on the target.
 *
 * Returns 0 on success or the error code from the failing SDIO transfer.
 */
static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
				    __le32 data)
{
	int status;
	/* __force: deliberately treat the raw LE bytes as a u32 buffer. */
	u32 val = (__force u32) data;

	/* set write data */
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
					     (u8 *) &val, sizeof(u32),
					     HIF_WR_SYNC_BYTE_INC);
	if (status) {
		/* Log the plain u32 value; passing __le32 to %x is a
		 * printf type mismatch under sparse. */
		ath6kl_err("%s: failed to write 0x%x to window data addr\n",
			   __func__, val);
		return status;
	}

	/* set window register, which starts the write cycle */
	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
				      address);
}
/*
 * NOTE(review): fragment — tail of the BMI command-credit poll loop.
 * The enclosing function header and the declarations of `addr`,
 * `timeout`, `ret` and `ar` are not visible here; verify against the
 * complete file before modifying.
 */
/* Read the counter register to get the command credits */
addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
/* Poll until a credit appears or the BMI timeout elapses. */
timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { /* * Hit the credit counter with a 4-byte access, the first byte * read will hit the counter and cause a decrement, while the * remaining 3 bytes has no effect. The rationale behind this * is to make all HIF accesses 4-byte aligned.
*/
ret = ath6kl_sdio_read_write_sync(ar, addr,
(u8 *)&ar->bmi.cmd_credits, 4,
HIF_RD_SYNC_BYTE_INC); if (ret) {
ath6kl_err("Unable to decrement the command credit count register: %d\n",
ret); return ret;
}
/* The counter is only 8 bits. * Ignore anything in the upper 3 bytes
*/
ar->bmi.cmd_credits &= 0xFF;
}
if (!ar->bmi.cmd_credits) {
/* Timed out with no credit — target is unresponsive. */
ath6kl_err("bmi communication timeout\n"); return -ETIMEDOUT;
}
return 0;
}
/*
 * NOTE(review): fragment — the head of ath6kl_bmi_get_rx_lkahd() is
 * spliced onto the tails of the BMI write and BMI read routines (the
 * code from the ath6kl_sdio_bmi_credits() call onward references `addr`,
 * `buf` and `len`, which are not declared here), and the function never
 * closes within this view.  The collapsed "staticint"/"unsignedlong"
 * tokens are whitespace-loss artifacts.  Verify against the complete
 * file before modifying.
 */
staticint ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{ unsignedlong timeout;
u32 rx_word = 0; int ret = 0;
/* Poll the RX lookahead-valid register until data is signalled. */
timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); while ((time_before(jiffies, timeout)) && !rx_word) {
ret = ath6kl_sdio_read_write_sync(ar,
RX_LOOKAHEAD_VALID_ADDRESS,
(u8 *)&rx_word, sizeof(rx_word),
HIF_RD_SYNC_BYTE_INC); if (ret) {
ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n"); return ret;
}
/* all we really want is one bit */
rx_word &= (1 << ENDPOINT1);
}
if (!rx_word) {
ath6kl_err("bmi_recv_buf FIFO empty\n"); return -EINVAL;
}
/* --- from here on: tail of the BMI write routine --- */
ret = ath6kl_sdio_bmi_credits(ar); if (ret) return ret;
addr = ar->mbox_info.htc_addr;
ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
HIF_WR_SYNC_BYTE_INC); if (ret) {
ath6kl_err("unable to send the bmi data to the device\n"); return ret;
}
/* --- from here on: tail of the BMI read routine --- */
/* * During normal bootup, small reads may be required. * Rather than issue an HIF Read and then wait as the Target * adds successive bytes to the FIFO, we wait here until * we know that response data is available. * * This allows us to cleanly timeout on an unexpected * Target failure rather than risk problems at the HIF level. * In particular, this avoids SDIO timeouts and possibly garbage * data on some host controllers. And on an interconnect * such as Compact Flash (as well as some SDIO masters) which * does not provide any indication on data timeout, it avoids * a potential hang or garbage response. * * Synchronization is more difficult for reads larger than the * size of the MBOX FIFO (128B), because the Target is unable * to push the 129th byte of data until AFTER the Host posts an * HIF Read and removes some FIFO data. So for large reads the * Host proceeds to post an HIF Read BEFORE all the data is * actually available to read. Fortunately, large BMI reads do * not occur in practice -- they're supported for debug/development. * * So Host/Target BMI synchronization is divided into these cases: * CASE 1: length < 4 * Should not happen * * CASE 2: 4 <= length <= 128 * Wait for first 4 bytes to be in FIFO * If CONSERVATIVE_BMI_READ is enabled, also wait for * a BMI command credit, which indicates that the ENTIRE * response is available in the FIFO * * CASE 3: length > 128 * Wait for the first 4 bytes to be in FIFO * * For most uses, a small timeout should be sufficient and we will * usually see a response quickly; but there may be some unusual * (debug) cases of BMI_EXECUTE where we want an larger timeout. * For now, we use an unbounded busy loop while waiting for * BMI_EXECUTE. * * If BMI_EXECUTE ever needs to support longer-latency execution, * especially in production, this code needs to be enhanced to sleep * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently * a function of Host processor speed.
*/ if (len >= 4) { /* NB: Currently, always true */
ret = ath6kl_bmi_get_rx_lkahd(ar); if (ret) return ret;
}
addr = ar->mbox_info.htc_addr;
ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
HIF_RD_SYNC_BYTE_INC); if (ret) {
ath6kl_err("Unable to read the bmi data from the device: %d\n",
ret); return ret;
}
/*
 * NOTE(review): the following text is a stray German website disclaimer
 * that was appended to this source file by the extraction tool and does
 * not belong in the driver; it should be removed.  English translation:
 * "The information on this web page was compiled carefully to the best
 * of our knowledge.  However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.  Note: the
 * colored syntax display and the measurement are still experimental."
 *
 * Original text:
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */