// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
*/
#include <linux/pci.h>
#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"
static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
u32 *phy_offset)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
u32 val;
int ret;
ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
if (ret)
return ret;
val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
if (val == RTW89_PCIE_GEN1_SPEED) {
*phy_offset = R_RAC_DIRECT_OFFSET_G1;
} else if (val == RTW89_PCIE_GEN2_SPEED) {
*phy_offset = R_RAC_DIRECT_OFFSET_G2;
} else {
rtw89_warn(rtwdev, "Unknown PCI link speed %d\n" , val);
return -EFAULT;
}
return 0;
}
static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
u32 val;
int ret;
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);
ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
rtwdev, R_AX_PCIE_INIT_CFG1);
return ret;
}
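/* Compute how many descriptors the device has consumed since the last
 * poll. cur_idx holds the raw HW index register value; only the HW index
 * field is used here. The ring is circular, so the difference wraps with
 * the ring length. For RX rings operated full (rx_ring_eq_is_full), the
 * host keeps its write pointer one slot behind, hence the wp + 1
 * adjustment.
 */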
static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_dma_ring *bd_ring,
u32 cur_idx, bool tx)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 cnt, cur_rp, wp, rp, len;
rp = bd_ring->rp;
wp = bd_ring->wp;
len = bd_ring->len;
cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
if (tx) {
cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
} else {
if (info->rx_ring_eq_is_full)
wp += 1;
cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
}
bd_ring->rp = cur_rp;
return cnt;
}
static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring)
{
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 addr_idx = bd_ring->addr.idx;
u32 cnt, idx;
idx = rtw89_read32(rtwdev, addr_idx);
cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
return cnt;
}
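/* Move completed H2C skbs from the pending queue to the release queue,
 * then unmap and free them. Unless release_all is set, the newest
 * RTW89_PCI_MULTITAG entries stay queued (presumably because the device
 * may still be reading them).
 */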
static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
u32 cnt, bool release_all)
{
struct rtw89_pci_tx_data *tx_data;
struct sk_buff *skb;
u32 qlen;
while (cnt--) {
skb = skb_dequeue(&rtwpci->h2c_queue);
if (!skb) {
rtw89_err(rtwdev, "failed to pre-release fwcmd\n" );
return ;
}
skb_queue_tail(&rtwpci->h2c_release_queue, skb);
}
qlen = skb_queue_len(&rtwpci->h2c_release_queue);
if (!release_all)
qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
while (qlen--) {
skb = skb_dequeue(&rtwpci->h2c_release_queue);
if (!skb) {
rtw89_err(rtwdev, "failed to release fwcmd\n" );
return ;
}
tx_data = RTW89_PCI_TX_SKB_CB(skb);
dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci)
{
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
u32 cnt;
cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
if (!cnt)
return;
rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}
static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
u32 addr_idx = bd_ring->addr.idx;
u32 cnt, idx;
idx = rtw89_read32(rtwdev, addr_idx);
cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
return cnt;
}
static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
struct rtw89_pci_rx_info *rx_info;
dma_addr_t dma;
rx_info = RTW89_PCI_RX_SKB_CB(skb);
dma = rx_info->dma;
dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
DMA_FROM_DEVICE);
}
static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
struct rtw89_pci_rx_info *rx_info;
dma_addr_t dma;
rx_info = RTW89_PCI_RX_SKB_CB(skb);
dma = rx_info->dma;
dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
DMA_FROM_DEVICE);
}
static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
struct rtw89_pci_rxbd_info *rxbd_info;
__le32 info;
rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
info = rxbd_info->dword;
rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
}
static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
struct sk_buff *skb)
{
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 target_rx_tag;
if (!info->check_rx_tag)
return 0;
/* valid range is 1 ~ 0x1FFF */
if (rx_ring->target_rx_tag == 0)
target_rx_tag = 1;
else
target_rx_tag = rx_ring->target_rx_tag;
if (rx_info->tag != target_rx_tag) {
rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n" ,
rx_info->tag, target_rx_tag);
return -EAGAIN;
}
return 0;
}
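/* Sync the RXBD for CPU access and parse its info. If the RX tag does not
 * yet match the expected value, the descriptor DMA may not have landed,
 * so re-sync and re-read up to rx_tag_retry times before giving up.
 */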
static
int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
struct sk_buff *skb)
{
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
int rx_tag_retry = 1000;
int ret;
do {
rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
rtw89_pci_rxbd_info_update(rtwdev, skb);
ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
if (ret != -EAGAIN)
break;
} while (rx_tag_retry--);
/* update target rx_tag for next RX */
rx_ring->target_rx_tag = rx_info->tag + 1;
return ret;
}
static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
if (enable) {
rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
if (dma_stop2->addr)
rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
} else {
rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
if (dma_stop2->addr)
rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
}
}
static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
if (enable)
rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
else
rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}
static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
struct sk_buff *new,
const struct sk_buff *skb, u32 offset,
const struct rtw89_pci_rx_info *rx_info,
const struct rtw89_rx_desc_info *desc_info)
{
u32 copy_len = rx_info->len - offset;
if (unlikely(skb_tailroom(new) < copy_len)) {
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
rx_info->len, desc_info->pkt_size, offset, fs, ls);
rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
skb->data, rx_info->len);
/* length of a single segment skb is desc_info->pkt_size */
if (fs && ls) {
copy_len = desc_info->pkt_size;
} else {
rtw89_info(rtwdev, "drop rx data due to invalid length\n");
return false;
}
}
skb_put_data(new, skb->data + offset, copy_len);
return true;
}
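/* Pick the buffer index to process next. When the RX ring is operated
 * full, the slot at wp is still owned by the host, so start from the slot
 * after it (with wraparound).
 */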
static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
struct rtw89_pci_dma_ring *bd_ring)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 wp = bd_ring->wp;
if (!info->rx_ring_eq_is_full)
return wp;
if (++wp >= bd_ring->len)
wp = 0;
return wp;
}
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring)
{
struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct sk_buff *new = rx_ring->diliver_skb;
struct rtw89_pci_rx_info *rx_info;
struct sk_buff *skb;
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
u32 skb_idx;
u32 offset;
u32 cnt = 1;
bool fs, ls;
int ret;
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n" ,
bd_ring->wp, ret);
goto err_sync_device;
}
rx_info = RTW89_PCI_RX_SKB_CB(skb);
fs = info->no_rxbd_fs ? !new : rx_info->fs;
ls = rx_info->ls;
if (unlikely(!fs || !ls))
rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
"unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n" ,
fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0);
if (fs) {
if (new) {
rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
"skb should not be ready before first segment start\n" );
goto err_sync_device;
}
if (desc_info->ready) {
rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n" );
goto err_sync_device;
}
rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
if (!new)
goto err_sync_device;
rx_ring->diliver_skb = new;
/* first segment has RX desc */
offset = desc_info->offset + desc_info->rxd_len;
} else {
offset = sizeof(struct rtw89_pci_rxbd_info);
if (!new) {
rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n" );
goto err_sync_device;
}
}
if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
goto err_sync_device;
rtw89_pci_sync_skb_for_device(rtwdev, skb);
rtw89_pci_rxbd_increase(rx_ring, 1);
if (!desc_info->ready) {
rtw89_warn(rtwdev, "no rx desc information\n" );
goto err_free_resource;
}
if (ls) {
rtw89_core_rx(rtwdev, desc_info, new);
rx_ring->diliver_skb = NULL;
desc_info->ready = false;
}
return cnt;
err_sync_device:
rtw89_pci_sync_skb_for_device(rtwdev, skb);
rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
if (new)
dev_kfree_skb_any(new);
rx_ring->diliver_skb = NULL;
desc_info->ready = false;
return cnt;
}
static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
u32 cnt)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
u32 rx_cnt;
while (cnt && rtwdev->napi_budget_countdown > 0) {
rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
if (!rx_cnt) {
rtw89_err(rtwdev, "failed to deliver RXBD skb\n" );
/* skip the rest RXBD bufs */
rtw89_pci_rxbd_increase(rx_ring, cnt);
break ;
}
cnt -= rx_cnt;
}
rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci, int budget)
{
struct rtw89_pci_rx_ring *rx_ring;
int countdown = rtwdev->napi_budget_countdown;
u32 cnt;
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
if (!cnt)
return 0;
cnt = min_t(u32, budget, cnt);
rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
/* In case of flushing pending SKBs, the countdown may drop to zero or below. */
if (rtwdev->napi_budget_countdown <= 0)
return budget;
return budget - countdown;
}
static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring,
struct sk_buff *skb, u8 tx_status)
{
struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct ieee80211_tx_info *info;
if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE))
return;
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
if (tx_status == RTW89_TX_DONE) {
info->flags |= IEEE80211_TX_STAT_ACK;
tx_ring->tx_acked++;
} else {
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
rtw89_debug(rtwdev, RTW89_DBG_FW,
"failed to TX of status %x\n" , tx_status);
switch (tx_status) {
case RTW89_TX_RETRY_LIMIT:
tx_ring->tx_retry_lmt++;
break;
case RTW89_TX_LIFE_TIME:
tx_ring->tx_life_time++;
break;
case RTW89_TX_MACID_DROP:
tx_ring->tx_mac_id_drop++;
break;
default:
rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
break;
}
}
ieee80211_tx_status_ni(rtwdev->hw, skb);
}
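/* Walk TX BDs the device has consumed and retire their TXWD pages. A page
 * whose skb queue is already empty was freed by an earlier RPP, so it can
 * go back to the free list right away.
 */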
static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
struct rtw89_pci_tx_wd *txwd;
u32 cnt;
cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
while (cnt--) {
txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
if (!txwd) {
rtw89_warn(rtwdev, "No busy txwd pages available\n" );
break ;
}
list_del_init(&txwd->list);
/* this skb has been freed by RPP */
if (skb_queue_len(&txwd->queue) == 0)
rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
}
static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring)
{
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
struct rtw89_pci_tx_wd *txwd;
int i;
for (i = 0; i < wd_ring->page_num; i++) {
txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
if (!txwd)
break ;
list_del_init(&txwd->list);
}
}
static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring,
struct rtw89_pci_tx_wd *txwd, u16 seq,
u8 tx_status)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_data *tx_data;
struct sk_buff *skb, *tmp;
u8 txch = tx_ring->txch;
if (!list_empty(&txwd->list)) {
rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
/* In low power mode, an RPP can arrive before the TX BD is
 * updated. In normal mode this should not happen, so warn
 * about it.
 */
if (!rtwpci->low_power && !list_empty(&txwd->list))
rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
txch, seq);
}
skb_queue_walk_safe(&txwd->queue, skb, tmp) {
skb_unlink(skb, &txwd->queue);
tx_data = RTW89_PCI_TX_SKB_CB(skb);
dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
DMA_TO_DEVICE);
rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
}
if (list_empty(&txwd->list))
rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
struct rtw89_pci_rpp_fmt *rpp)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_tx_wd_ring *wd_ring;
struct rtw89_pci_tx_wd *txwd;
u16 seq;
u8 qsel, tx_status, txch;
seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
txch = rtw89_core_get_ch_dma(rtwdev, qsel);
if (txch == RTW89_TXCH_CH12) {
rtw89_warn(rtwdev, "should no fwcmd release report\n" );
return ;
}
tx_ring = &rtwpci->tx_rings[txch];
wd_ring = &tx_ring->wd_ring;
txwd = &wd_ring->pages[seq];
rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}
static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring)
{
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
struct rtw89_pci_tx_wd *txwd;
int i;
for (i = 0; i < wd_ring->page_num; i++) {
txwd = &wd_ring->pages[i];
if (!list_empty(&txwd->list))
continue;
rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
}
}
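/* Process one RX buffer from the release-point queue (RPQ). Each buffer
 * carries an RX desc followed by an array of RPP entries, and every RPP
 * identifies a TXWD whose skbs can now be completed.
 */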
static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
u32 max_cnt)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
struct rtw89_pci_rx_info *rx_info;
struct rtw89_pci_rpp_fmt *rpp;
struct rtw89_rx_desc_info desc_info = {};
struct sk_buff *skb;
u32 cnt = 0;
u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
u32 skb_idx;
u32 offset;
int ret;
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n" ,
bd_ring->wp, ret);
goto err_sync_device;
}
rx_info = RTW89_PCI_RX_SKB_CB(skb);
if (!rx_info->fs || !rx_info->ls) {
rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n" );
return cnt;
}
rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
/* first segment has RX desc */
offset = desc_info.offset + desc_info.rxd_len;
for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
rtw89_pci_release_rpp(rtwdev, rpp);
}
rtw89_pci_sync_skb_for_device(rtwdev, skb);
rtw89_pci_rxbd_increase(rx_ring, 1);
cnt++;
return cnt;
err_sync_device:
rtw89_pci_sync_skb_for_device(rtwdev, skb);
return 0;
}
static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
u32 cnt)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
u32 release_cnt;
while (cnt) {
release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
if (!release_cnt) {
rtw89_err(rtwdev, "failed to release TX skbs\n" );
/* skip the rest RXBD bufs */
rtw89_pci_rxbd_increase(rx_ring, cnt);
break ;
}
cnt -= release_cnt;
}
rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci, int budget)
{
struct rtw89_pci_rx_ring *rx_ring;
u32 cnt;
int work_done;
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
spin_lock_bh(&rtwpci->trx_lock);
cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
if (cnt == 0)
goto out_unlock;
rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
out_unlock:
spin_unlock_bh(&rtwpci->trx_lock);
/* always release all RPQ */
work_done = min_t(int, cnt, budget);
rtwdev->napi_budget_countdown -= work_done;
return work_done;
}
static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci)
{
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
u32 reg_idx;
u16 hw_idx, hw_idx_next, host_idx;
int i;
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
hw_idx_next = (hw_idx + 1) % bd_ring->len;
if (hw_idx_next == host_idx)
rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n" , i);
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"%d RXD unavailable, idx=0x%08x, len=%d\n" ,
i, reg_idx, bd_ring->len);
}
}
void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs)
{
isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs)
{
isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
if (isrs->halt_c2h_isrs)
rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
if (isrs->isrs[0])
rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
if (isrs->isrs[1])
rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs)
{
isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
if (isrs->halt_c2h_isrs)
rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
if (isrs->isrs[0])
rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
if (isrs->isrs[1])
rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);
void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, 0);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);
void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);
void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtw89_chip_disable_intr(rtwdev, rtwpci);
rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtw89_chip_disable_intr(rtwdev, rtwpci);
rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
int budget = NAPI_POLL_WEIGHT;
/* Prevent the RXQ from getting stuck when the budget runs out. */
rtwdev->napi_budget_countdown = budget;
rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}
static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
struct rtw89_dev *rtwdev = dev;
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_gen_def *gen_def = info->gen_def;
struct rtw89_pci_isrs isrs;
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
if (unlikely(rtwpci->under_recovery))
goto enable_intr;
if (unlikely(rtwpci->low_power)) {
rtw89_pci_low_power_interrupt_handler(rtwdev);
goto enable_intr;
}
if (likely(rtwpci->running)) {
local_bh_disable();
napi_schedule(&rtwdev->napi);
local_bh_enable();
}
return IRQ_HANDLED;
enable_intr:
spin_lock_irqsave(&rtwpci->irq_lock, flags);
if (likely(rtwpci->running))
rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
struct rtw89_dev *rtwdev = dev;
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
unsigned long flags;
irqreturn_t irqret = IRQ_WAKE_THREAD;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
/* An interrupt event already in flight can still trigger an
 * interrupt even after pci_stop() has turned off the IMR.
 */
if (unlikely(!rtwpci->running)) {
irqret = IRQ_HANDLED;
goto exit;
}
rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return irqret;
}
#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
[RTW89_TXCH_##ch_idx] = { \
.num = R_##gen##_##txch##_TXBD_NUM ##v, \
.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
.bdram = 0, \
.desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
.desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
}
#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
[RTW89_TXCH_##txch] = { \
.num = R_AX_##txch##_TXBD_NUM ##v, \
.idx = R_AX_##txch##_TXBD_IDX ##v, \
.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
}
#define DEF_TXCHADDRS(info, txch, v...) \
[RTW89_TXCH_##txch] = { \
.num = R_AX_##txch##_TXBD_NUM, \
.idx = R_AX_##txch##_TXBD_IDX, \
.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
}
#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
[RTW89_RXCH_##ch_idx] = { \
.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
.idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
.desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
.desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
}
const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
.tx = {
DEF_TXCHADDRS(info, ACH0),
DEF_TXCHADDRS(info, ACH1),
DEF_TXCHADDRS(info, ACH2),
DEF_TXCHADDRS(info, ACH3),
DEF_TXCHADDRS(info, ACH4),
DEF_TXCHADDRS(info, ACH5),
DEF_TXCHADDRS(info, ACH6),
DEF_TXCHADDRS(info, ACH7),
DEF_TXCHADDRS(info, CH8),
DEF_TXCHADDRS(info, CH9),
DEF_TXCHADDRS_TYPE1(info, CH10),
DEF_TXCHADDRS_TYPE1(info, CH11),
DEF_TXCHADDRS(info, CH12),
},
.rx = {
DEF_RXCHADDRS(AX, RXQ, RXQ),
DEF_RXCHADDRS(AX, RPQ, RPQ),
},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
.tx = {
DEF_TXCHADDRS(info, ACH0, _V1),
DEF_TXCHADDRS(info, ACH1, _V1),
DEF_TXCHADDRS(info, ACH2, _V1),
DEF_TXCHADDRS(info, ACH3, _V1),
DEF_TXCHADDRS(info, ACH4, _V1),
DEF_TXCHADDRS(info, ACH5, _V1),
DEF_TXCHADDRS(info, ACH6, _V1),
DEF_TXCHADDRS(info, ACH7, _V1),
DEF_TXCHADDRS(info, CH8, _V1),
DEF_TXCHADDRS(info, CH9, _V1),
DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
DEF_TXCHADDRS(info, CH12, _V1),
},
.rx = {
DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
.tx = {
DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
},
.rx = {
DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
#undef DEF_TXCHADDRS_TYPE2
#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS
static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
enum rtw89_tx_channel txch,
const struct rtw89_pci_ch_dma_addr **addr)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
if (txch >= RTW89_TXCH_NUM)
return -EINVAL;
*addr = &info->dma_addr_set->tx[txch];
return 0;
}
static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
enum rtw89_rx_channel rxch,
const struct rtw89_pci_ch_dma_addr **addr)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
if (rxch >= RTW89_RXCH_NUM)
return -EINVAL;
*addr = &info->dma_addr_set->rx[rxch];
return 0;
}
static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
/* reserve one desc to distinguish a full ring from an empty one */
if (bd_ring->rp > bd_ring->wp)
return bd_ring->rp - bd_ring->wp - 1;
return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}
static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
u32 cnt;
spin_lock_bh(&rtwpci->trx_lock);
rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
spin_unlock_bh(&rtwpci->trx_lock);
return cnt;
}
static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
u32 cnt;
spin_lock_bh(&rtwpci->trx_lock);
cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
if (txch != RTW89_TXCH_CH12)
cnt = min(cnt, wd_ring->curr_num);
spin_unlock_bh(&rtwpci->trx_lock);
return cnt;
}
static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 bd_cnt, wd_cnt, min_cnt = 0;
struct rtw89_pci_rx_ring *rx_ring;
enum rtw89_debug_mask debug_mask;
u32 cnt;
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
spin_lock_bh(&rtwpci->trx_lock);
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
wd_cnt = wd_ring->curr_num;
if (wd_cnt == 0 || bd_cnt == 0) {
cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
if (cnt)
rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
else if (wd_cnt == 0)
goto out_unlock;
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
if (bd_cnt == 0)
rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
}
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
wd_cnt = wd_ring->curr_num;
min_cnt = min(bd_cnt, wd_cnt);
if (min_cnt == 0) {
/* This message can show up frequently in low power mode or
 * under high traffic on chips with a small FIFO, and we have
 * recognized it as normal behavior, so print it with mask
 * RTW89_DBG_TXRX in these situations.
 */
if (rtwpci->low_power || chip->small_fifo_size)
debug_mask = RTW89_DBG_TXRX;
else
debug_mask = RTW89_DBG_UNEXP;
rtw89_debug(rtwdev, debug_mask,
"still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n" ,
wd_cnt, bd_cnt);
}
out_unlock:
spin_unlock_bh(&rtwpci->trx_lock);
return min_cnt;
}
static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
if (rtwdev->hci.paused)
return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
if (txch == RTW89_TXCH_CH12)
return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}
static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 host_idx, addr;
spin_lock_bh(&rtwpci->trx_lock);
addr = bd_ring->addr.idx;
host_idx = bd_ring->wp;
rtw89_write16(rtwdev, addr, host_idx);
spin_unlock_bh(&rtwpci->trx_lock);
}
static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
int n_txbd)
{
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 host_idx, len;
len = bd_ring->len;
host_idx = bd_ring->wp + n_txbd;
host_idx = host_idx < len ? host_idx : host_idx - len;
bd_ring->wp = host_idx;
}
static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
if (rtwdev->hci.paused) {
set_bit(txch, rtwpci->kick_map);
return;
}
__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}
static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring;
int txch;
for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
if (!test_and_clear_bit(txch, rtwpci->kick_map))
continue;
tx_ring = &rtwpci->tx_rings[txch];
__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}
}
static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 cur_idx, cur_rp;
u8 i;
/* Because the time taken by the I/O is somewhat dynamic, it's
 * hard to define a reasonable fixed total timeout for the
 * read_poll_timeout* helpers. Instead, ensure a reasonable
 * number of polls by using a plain for loop with udelay.
 */
for (i = 0; i < 60; i++) {
cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
if (cur_rp == bd_ring->wp)
return;
udelay(1);
}
if (!drop)
rtw89_info(rtwdev, "timed out to flush pci txch: %d\n" , txch);
}
static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
bool drop)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u8 i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
/* It may be unnecessary to flush FWCMD queue. */
if (i == RTW89_TXCH_CH12)
continue;
if (info->tx_dma_ch_mask & BIT(i))
continue;
if (txchs & BIT(i))
__pci_flush_txch(rtwdev, i, drop);
}
}
static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
bool drop)
{
__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}
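/* Fill the TX address info that follows the WD body: a single entry
 * describing the whole mapped buffer, flagged as the last MSDU segment,
 * with the upper DMA address bits encoded in the option field. The _v1
 * variant below instead splits the buffer into multiple length-limited
 * entries.
 */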
u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr)
{
struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
__le16 option;
txaddr_info->length = cpu_to_le16(total_len);
option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
txaddr_info->option = option;
txaddr_info->dma = cpu_to_le32(dma);
*add_info_nr = 1;
return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr)
{
struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
u32 remain = total_len;
u32 len;
u16 length_option;
int n;
for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
TXADDR_INFO_LENTHG_V1_MAX : remain;
remain -= len;
length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
length_option |= u16_encode_bits(upper_32_bits(dma),
B_PCIADDR_HIGH_SEL_V1_MASK);
txaddr_info->length_opt = cpu_to_le16(length_option);
txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
dma += len;
txaddr_info++;
}
WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
remain, total_len);
*add_info_nr = n;
return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
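/* Lay out one TXWD page: WD body (plus WD info if enabled), then the WP
 * info carrying this page's sequence number, then the TX address info
 * that points at the mapped skb. The skb stays queued on the TXWD until
 * its release report arrives.
 */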
static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring,
struct rtw89_pci_tx_wd *txwd,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct rtw89_pci_tx_wp_info *txwp_info;
void *txaddr_info_addr;
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
bool en_wd_info = desc_info->en_wd_info;
u32 txwd_len;
u32 txwp_len;
u32 txaddr_info_len;
dma_addr_t dma;
int ret;
dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, dma)) {
rtw89_err(rtwdev, "failed to map skb dma data\n" );
ret = -EBUSY;
goto err;
}
tx_data->dma = dma;
txwp_len = sizeof(*txwp_info);
txwd_len = chip->txwd_body_size;
txwd_len += en_wd_info ? chip->txwd_info_size : 0;
txwp_info = txwd->vaddr + txwd_len;
txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
txwp_info->seq1 = 0;
txwp_info->seq2 = 0;
txwp_info->seq3 = 0;
tx_ring->tx_cnt++;
txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
txaddr_info_len =
rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
dma, &desc_info->addr_info_nr);
txwd->len = txwd_len + txwp_len + txaddr_info_len;
rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
skb_queue_tail(&txwd->queue, skb);
return 0;
err:
return ret;
}
static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring,
struct rtw89_pci_tx_bd_32 *txbd,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
void *txdesc;
int txdesc_size = chip->h2c_desc_size;
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
dma_addr_t dma;
__le16 opt;
txdesc = skb_push(skb, txdesc_size);
memset(txdesc, 0, txdesc_size);
rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, dma)) {
rtw89_err(rtwdev, "failed to map fwcmd dma data\n" );
return -EBUSY;
}
tx_data->dma = dma;
opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
txbd->opt = opt;
txbd->length = cpu_to_le16(skb->len);
txbd->dma = cpu_to_le32(tx_data->dma);
skb_queue_tail(&rtwpci->h2c_queue, skb);
rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
return 0;
}
static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring,
struct rtw89_pci_tx_bd_32 *txbd,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci_tx_wd *txwd;
__le16 opt;
int ret;
/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
* buffer with WD BODY only. So here we don't need to check the free
* pages of the wd ring.
*/
if (tx_ring->txch == RTW89_TXCH_CH12)
return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
txwd = rtw89_pci_dequeue_txwd(tx_ring);
if (!txwd) {
rtw89_err(rtwdev, "no available TXWD\n" );
ret = -ENOSPC;
goto err;
}
ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to submit TXWD %d\n" , txwd->seq);
goto err_enqueue_wd;
}
list_add_tail(&txwd->list, &tx_ring->busy_pages);
opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
txbd->opt = opt;
txbd->length = cpu_to_le16(txwd->len);
txbd->dma = cpu_to_le32(txwd->paddr);
rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
return 0;
err_enqueue_wd:
rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
return ret;
}
static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_tx_bd_32 *txbd;
u32 n_avail_txbd;
int ret = 0;
/* Check the tx type and dma channel for the fw cmd queue: fw cmds
 * must go through channel 12 and channel 12 must carry only fw
 * cmds, so reject any mismatch between the two.
 */
if ((txch == RTW89_TXCH_CH12 ||
tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
(txch != RTW89_TXCH_CH12 ||
tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
return -EINVAL;
}
tx_ring = &rtwpci->tx_rings[txch];
spin_lock_bh(&rtwpci->trx_lock);
n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
if (n_avail_txbd == 0) {
rtw89_err(rtwdev, "no available TXBD\n" );
ret = -ENOSPC;
goto err_unlock;
}
txbd = rtw89_pci_get_next_txbd(tx_ring);
ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to submit TXBD\n" );
goto err_unlock;
}
spin_unlock_bh(&rtwpci->trx_lock);
return 0;
err_unlock:
spin_unlock_bh(&rtwpci->trx_lock);
return ret;
}
static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
int ret;
ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
if (ret) {
rtw89_err(rtwdev, "failed to TX Queue %d\n" , desc_info->ch_dma);
return ret;
}
return 0;
}
const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
[RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
[RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1},
[RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1},
[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
[RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
[RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1},
[RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1},
[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);
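/* Program the 16 write-pointer selector entries with identity values
 * 0..15, packed four bytes per 32-bit register, on chips that provide a
 * wp_sel_addr.
 */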
static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 addr = info->wp_sel_addr;
u32 val;
int i;
if (!info->wp_sel_addr)
return;
for (i = 0; i < 16; i += 4) {
val = u32_encode_bits(i + 0, MASKBYTE0) |
u32_encode_bits(i + 1, MASKBYTE1) |
u32_encode_bits(i + 2, MASKBYTE2) |
u32_encode_bits(i + 3, MASKBYTE3);
rtw89_write32(rtwdev, addr + i, val);
}
}
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
const struct rtw89_pci_bd_ram *bd_ram;
u32 addr_num;
u32 addr_idx;
u32 addr_bdram;
u32 addr_desa_l;
u32 val32;
int i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue ;
tx_ring = &rtwpci->tx_rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
addr_num = bd_ring->addr.num;
addr_bdram = bd_ring->addr.bdram;
addr_desa_l = bd_ring->addr.desa_l;
bd_ring->wp = 0;
bd_ring->rp = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
if (addr_bdram && bd_ram) {
val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
rtw89_write32(rtwdev, addr_bdram, val32);
}
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
addr_num = bd_ring->addr.num;
addr_idx = bd_ring->addr.idx;
addr_desa_l = bd_ring->addr.desa_l;
if (info->rx_ring_eq_is_full)
bd_ring->wp = bd_ring->len - 1;
else
bd_ring->wp = 0;
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false ;
rx_ring->target_rx_tag = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
if (info->rx_ring_eq_is_full)
rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
}
rtw89_pci_init_wp_16sel(rtwdev);
}
static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring)
{
rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}
void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
int txch;
rtw89_pci_reset_trx_rings(rtwdev);
spin_lock_bh(&rtwpci->trx_lock);
for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
if (info->tx_dma_ch_mask & BIT(txch))
continue ;
if (txch == RTW89_TXCH_CH12) {
rtw89_pci_release_fwcmd(rtwdev, rtwpci,
skb_queue_len(&rtwpci->h2c_queue), true );
continue ;
}
rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
}
spin_unlock_bh(&rtwpci->trx_lock);
}
static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtwpci->running = true ;
rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtwpci->running = false ;
rtw89_chip_disable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
rtw89_core_napi_start(rtwdev);
rtw89_pci_enable_intr_lock(rtwdev);
return 0;
}
static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
rtw89_pci_disable_intr_lock(rtwdev);
synchronize_irq(pdev->irq);
rtw89_core_napi_stop(rtwdev);
}
static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
if (pause) {
rtw89_pci_disable_intr_lock(rtwdev);
synchronize_irq(pdev->irq);
if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
napi_synchronize(&rtwdev->napi);
} else {
rtw89_pci_enable_intr_lock(rtwdev);
rtw89_pci_tx_kick_off_pending(rtwdev);
}
}
static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_rx_ring *rx_ring;
int i;
if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
return;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
tx_ring = &rtwpci->tx_rings[i];
tx_ring->bd_ring.addr.idx = low_power ?
bd_idx_addr->tx_bd_addrs[i] :
dma_addr_set->tx[i].idx;
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
rx_ring->bd_ring.addr.idx = low_power ?
bd_idx_addr->rx_bd_addrs[i] :
dma_addr_set->rx[i].idx;
}
}
static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
enum rtw89_pci_intr_mask_cfg cfg;
WARN(!rtwdev->hci.paused, "HCI isn't paused\n" );
cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
rtw89_chip_config_intr_mask(rtwdev, cfg);
rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}
static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
u32 val = readl(rtwpci->mmap + addr);
int count;
for (count = 0; ; count++) {
if (val != RTW89_R32_DEAD)
return val;
if (count >= MAC_REG_POOL_COUNT) {
rtw89_warn(rtwdev, "addr %#x = %#x\n" , addr, val);
return RTW89_R32_DEAD;
}
rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
val = readl(rtwpci->mmap + addr);
}
return val;
}
static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
u32 addr32, val32, shift;
if (!ACCESS_CMAC(addr))
return readb(rtwpci->mmap + addr);
addr32 = addr & ~0x3;
shift = (addr & 0x3) * 8;
val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
return val32 >> shift;
}
static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
u32 addr32, val32, shift;
if (!ACCESS_CMAC(addr))
return readw(rtwpci->mmap + addr);
addr32 = addr & ~0x3;
shift = (addr & 0x3) * 8;
val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
return val32 >> shift;
}
static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
if (!ACCESS_CMAC(addr))
return readl(rtwpci->mmap + addr);
return rtw89_pci_ops_read32_cmac(rtwdev, addr);
}
static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
writeb(data, rtwpci->mmap + addr);
}
static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
writew(data, rtwpci->mmap + addr);
}
static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
writel(data, rtwpci->mmap + addr);
}
static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
if (enable)
rtw89_write32_set(rtwdev, info->init_cfg_reg,
info->rxhci_en_bit | info->txhci_en_bit);
else
rtw89_write32_clr(rtwdev, info->init_cfg_reg,
info->rxhci_en_bit | info->txhci_en_bit);
}
static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *reg = &info->dma_io_stop;
if (enable)
rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
else
rtw89_write32_set(rtwdev, reg->addr, reg->mask);
}
void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
rtw89_pci_ctrl_dma_io(rtwdev, enable);
rtw89_pci_ctrl_dma_trx(rtwdev, enable);
}
static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
{
u16 val;
rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
switch (speed) {
case PCIE_PHY_GEN1:
if (addr < 0x20)
val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
else
val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
break;
case PCIE_PHY_GEN2:
if (addr < 0x20)
val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
else
val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
break;
default:
rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
return -EINVAL;
}
rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
false, rtwdev, R_AX_MDIO_CFG);
}
static int
rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
{
int ret;
ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
if (ret) {
rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n" , addr, ret);
return ret;
}
*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
return 0;
}
static int
rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
{
int ret;
rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
if (ret) {
rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n" , addr, data, ret);
return ret;
}
return 0;
}
static int
rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
{
u32 shift;
int ret;
u16 val;
ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
if (ret)
return ret;
shift = __ffs(mask);
val &= ~mask;
val |= ((data << shift) & mask);
ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
if (ret)
return ret;
return 0;
}
static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
int ret;
u16 val;
ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
if (ret)
return ret;
ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
if (ret)
return ret;
return 0;
}
static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
int ret;
u16 val;
ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
if (ret)
return ret;
ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
if (ret)
return ret;
return 0;
}
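/* DBI access: a backdoor into the PCI configuration space through MMIO
 * registers, used further below as a fallback when the regular
 * pci_*_config_byte helpers fail.
 */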
static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
{
u16 addr_2lsb = addr & B_AX_DBI_2LSB;
u16 write_addr;
u8 flag;
int ret;
write_addr = addr & B_AX_DBI_ADDR_MSK;
write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
10 * RTW89_PCI_WR_RETRY_CNT, false,
rtwdev, R_AX_DBI_FLAG + 2);
if (ret)
rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n" ,
addr);
return ret;
}
static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
{
u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
u8 flag;
int ret;
rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
10 * RTW89_PCI_WR_RETRY_CNT, false,
rtwdev, R_AX_DBI_FLAG + 2);
if (ret) {
rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n" ,
addr);
return ret;
}
read_addr = R_AX_DBI_RDATA + (addr & 3);
*value = rtw89_read8(rtwdev, read_addr);
return 0;
}
static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 data)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
int ret;
ret = pci_write_config_byte(pdev, addr, data);
if (!ret)
return 0;
if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
ret = rtw89_dbi_write8(rtwdev, addr, data);
return ret;
}
static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 *value)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
int ret;
ret = pci_read_config_byte(pdev, addr, value);
if (!ret)
return 0;
if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
ret = rtw89_dbi_read8(rtwdev, addr, value);
return ret;
}
static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
u8 bit)
{
u8 value;
int ret;
ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value |= bit;
ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
u8 bit)
{
u8 value;
int ret;
ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value &= ~bit;
ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
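/* Sample the reference-clock calibration target: pulse B_AX_CLK_CALIB_EN
 * to restart the counter, wait 300us, then read back the 12-bit result.
 * All zeros or all ones means the measurement failed.
 */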
static int
__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
{
u16 val, tar;
int ret;
/* Enable counter */
ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
if (ret)
return ret;
ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
phy_rate);
if (ret)
return ret;
ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
phy_rate);
if (ret)
return ret;
fsleep(300);
ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
if (ret)
return ret;
ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
phy_rate);
if (ret)
return ret;
tar = tar & 0x0FFF;
if (tar == 0 || tar == 0x0FFF) {
rtw89_err(rtwdev, "[ERR]Get target failed.\n" );
return -EINVAL;
}
*target = tar;
return 0;
}
static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
{
int ret;
if (!rtw89_is_rtl885xb(rtwdev))
return 0;
ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
PCIE_AUTOK_4, PCIE_PHY_GEN1);
return ret;
}
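/* Auto reference-clock calibration (RTL885xB chips only): read the PCIe
 * PHY rate, temporarily disable L1 entry, measure the calibration target,
 * and derive a divider and margin to program into RAC_SET_PPR_V1 before
 * re-enabling calibration.
 */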
static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
enum rtw89_pcie_phy phy_rate;
u16 val16, mgn_set, div_set, tar;
u8 val8, bdr_ori;
bool l1_flag = false;
int ret = 0;
if (!rtw89_is_rtl885xb(rtwdev))
return 0;
ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
if (ret) {
rtw89_err(rtwdev, "[ERR]pci config read %X\n" ,
RTW89_PCIE_PHY_RATE);
return ret;
}
if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
phy_rate = PCIE_PHY_GEN1;
} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
phy_rate = PCIE_PHY_GEN2;
} else {
rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n" , val8);
return -EOPNOTSUPP;
}
/* Disable L1BD */
ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
if (ret) {
rtw89_err(rtwdev, "[ERR]pci config read %X\n" , RTW89_PCIE_L1_CTRL);
return ret;
}
if (bdr_ori & RTW89_PCIE_BIT_L1) {
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
bdr_ori & ~RTW89_PCIE_BIT_L1);
if (ret) {
rtw89_err(rtwdev, "[ERR]pci config write %X\n" ,
RTW89_PCIE_L1_CTRL);
return ret;
}
l1_flag = true;
}
ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
if (ret) {
rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n" , RAC_CTRL_PPR_V1);
goto end;
}
if (val16 & B_AX_CALIB_EN) {
ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
val16 & ~B_AX_CALIB_EN, phy_rate);
if (ret) {
rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n" , RAC_CTRL_PPR_V1);
goto end;
}
}
if (!autook_en)
goto end;
/* Set div */
ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
if (ret) {
rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n" , RAC_CTRL_PPR_V1);
goto end;
}
/* Obtain div and margin */
ret = __get_target(rtwdev, &tar, phy_rate);
if (ret) {
rtw89_err(rtwdev, "[ERR]1st get target fail %d\n" , ret);
goto end;
}
mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
if (mgn_set >= 128) {
div_set = 0x0003;
mgn_set = 0x000F;
} else if (mgn_set >= 64) {
div_set = 0x0003;
mgn_set >>= 3;
} else if (mgn_set >= 32) {
div_set = 0x0002;
mgn_set >>= 2;
} else if (mgn_set >= 16) {
div_set = 0x0001;
mgn_set >>= 1;
} else if (mgn_set == 0) {
rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n" , tar);
goto end;
} else {
div_set = 0x0000;
}
ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
if (ret) {
rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n" , RAC_CTRL_PPR_V1);
goto end;
}
val16 |= u16_encode_bits(div_set, B_AX_DIV);
ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
if (ret) {
rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n" , RAC_CTRL_PPR_V1);
goto end;
}
ret = __get_target(rtwdev, &tar, phy_rate);
if (ret) {
rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n" , ret);
goto end;
}
rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
tar, div_set, mgn_set);
ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
(tar & 0x0FFF) | (mgn_set << 12), phy_rate);
if (ret) {
rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n" , RAC_SET_PPR_V1);
goto end;
}
/* Enable function */
ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
if (ret) {
rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n" , RAC_CTRL_PPR_V1);
goto end;
}
/* CLK delay = 0 */
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
PCIE_CLKDLY_HW_0);
end:
/* Set L1BD to ori */
if (l1_flag) {
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
bdr_ori);
if (ret) {
rtw89_err(rtwdev, "[ERR]pci config write %X\n" ,
RTW89_PCIE_L1_CTRL);
return ret;
}
}
return ret;
}
static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
if (chip_id == RTL8852A) {
ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
PCIE_PHY_GEN1);
if (ret)
return ret;
ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
PCIE_PHY_GEN2);
if (ret)
return ret;
} else if (chip_id == RTL8852C) {
rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
B_AX_DEGLITCH);
rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
B_AX_DEGLITCH);
}
return 0;
}
static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev)
{
u16 g1_oobs, g2_oobs;
u32 backup_aspm;
u32 phy_offset;
u16 offset_cal;
u16 oobs_val;
int ret;
u8 gen;
if (rtwdev->chip->chip_id != RTL8852C)
return;
g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=97 H=93 G=94
¤ Dauer der Verarbeitung: 0.12 Sekunden
(vorverarbeitet)
¤
*© Formatika GbR, Deutschland