// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
*/
#include "acpi.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
#include "txrx.h"
#include "util.h"
/* Translate a PHY0 register address into the offset to add for the PHY1
 * copy, using the chip-generation specific callback.
 */
static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	return rtwdev->chip->phy_def->phy0_phy1_offset(rtwdev, addr);
}
/* Choose an A-MSDU length limit matching the current RA bit rate.
 * Slow or fallback-prone links get (effectively) no aggregation; the
 * limit grows with the rate up to the chip maximum.
 */
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
			     const struct rtw89_ra_report *report)
{
	u32 bit_rate = report->bit_rate;
	u16 len;

	/* below OFDM rates, or at risk of legacy fallback: no aggregation */
	if (bit_rate < 550 || report->might_fallback_legacy)
		return 1;

	if (bit_rate < 1800)		/* below 20M VHT 2SS MCS8: small */
		len = 1200;
	else if (bit_rate < 4000)	/* below 40M VHT 2SS MCS9: medium */
		len = 2600;
	else if (bit_rate < 7000)	/* below 80M VHT 2SS MCS8/9: ~2x packet */
		len = 3500;
	else
		len = rtwdev->chip->max_amsdu_limit;

	return len;
}
/* Build an RA bitmask from a VHT/HE style 2-bit-per-stream MCS map.
 * Per stream: 2 = up to @highest_mcs, 1 = one @gap lower, 0 = two gaps
 * lower, 3 = stream unsupported.  Stream i's rate bits start at
 * bit 12 * (i + 1).
 */
static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
{
	u64 ra_mask = 0;
	int nss;

	for (nss = 12; nss <= 48; nss += 12, mcs_map >>= 2) {
		u8 cap = mcs_map & 0x3;

		if (cap == 3)
			continue;
		ra_mask |= GENMASK_ULL(highest_mcs - gap * (2 - cap), 0) << nss;
	}

	return ra_mask;
}
static u64 get_he_ra_mask(struct ieee80211_link_sta *link_sta)
{
struct ieee80211_sta_he_cap cap = link_sta->he_cap;
u16 mcs_map;
switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
else
mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
break ;
default :
mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
}
/* MCS11, MCS9, MCS7 */
return get_mcs_ra_mask(mcs_map, 11, 2);
}
/* Build an RA bitmask from an EHT per-MCS-group NSS array.  Group i
 * covers MCS 0..(@start_mcs + 2 * i); its rates are set for every RX
 * spatial stream the group supports.  Stream j's 16 rate bits start at
 * bit 12 + 16 * j.
 */
static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
{
	u64 mask = 0;
	int i;

	for (i = 0; i < n_nss; i++) {
		u64 grp_mask = GENMASK_ULL(start_mcs + i * 2, 0);
		u8 nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX);
		u64 shift;
		int j;

		for (j = 0, shift = 12; j < nss; j++, shift += 16)
			mask |= grp_mask << shift;
	}

	return mask;
}
/* Derive the EHT RA bitmask for a station link, picking the MCS/NSS set
 * that corresponds to its operating bandwidth.
 */
static u64 get_eht_ra_mask(struct rtw89_vif_link *rtwvif_link,
			   struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
	u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;
	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;

	/* A 20MHz-only client (AP mode, all HE channel-width bits clear)
	 * advertises a dedicated set covering MCS 7, 9, 11, 13.
	 */
	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20 &&
	    vif->type == NL80211_IFTYPE_AP &&
	    !(he_phy_cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
		return get_eht_mcs_ra_mask(eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss,
					   7, 4);

	/* Everything else uses a bandwidth-specific set: MCS 9, 11, 13.
	 * 20/80MHz (and any unknown bandwidth) share the _80 entry.
	 */
	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
	else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
	else
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;

	return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
}
#define RA_FLOOR_TABLE_SIZE 7
#define RA_FLOOR_UP_GAP 3
/* Build a rate "floor" mask from the averaged RSSI: the weaker the
 * signal, the more of the lowest rates remain allowed (and high ones
 * are kept); stronger signals progressively mask out the slowest rates.
 * @ratr_state adds hysteresis to thresholds at or above the current level.
 */
static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
				  u8 ratr_state)
{
	static const u64 floor_masks[] = {
		0xffffffffffffffffULL,	/* level 0: nothing masked */
		0xfffffffffffffff0ULL,
		0xffffffffffffefe0ULL,
		0xffffffffffffcfc0ULL,
		0xffffffffffff8f80ULL,
		0xffffffffffff0f00ULL,	/* level 5 and above */
	};
	u8 thresholds[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
	u8 rssi_lv = 0;
	u8 i;

	rssi >>= 1;	/* halve the stored value before comparing */

	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (i >= ratr_state)
			thresholds[i] += RA_FLOOR_UP_GAP;
		if (rssi < thresholds[i]) {
			rssi_lv = i;
			break;
		}
	}

	return floor_masks[rssi_lv <= 5 ? rssi_lv : 5];
}
/* Ensure the RA mask never ends up empty: restore non-legacy rates from
 * the backup mask when they were all filtered out, and fall back to the
 * backup's legacy rates if the mask is still empty after that.
 */
static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	const u64 legacy = RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES;

	if (!(ra_mask & ~legacy))
		ra_mask |= ra_mask_bak & ~legacy;

	if (!ra_mask)
		ra_mask |= ra_mask_bak & legacy;

	return ra_mask;
}
/* Translate the user-configured cfg80211 bitrate mask into an RA
 * bitmask for the current band.  Returns all-ones (so the caller's AND
 * is a no-op) when no mask is configured or the band is unknown.
 */
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link,
				 struct ieee80211_link_sta *link_sta,
				 const struct rtw89_chan *chan)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta_link->use_cfg_mask)
		return -1;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		cfg_mask = u64_encode_bits(mask->control[band].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[band].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[band].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1;
	}

	/* fold in the 1SS/2SS MCS masks for the highest mode the peer has */
	if (link_sta->he_cap.has_he) {
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES) |
			    u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
	} else if (link_sta->vht_cap.vht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES) |
			    u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
	} else if (link_sta->ht_cap.ht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES) |
			    u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}
/* Per-mode rate-mask lookup tables, indexed by (spatial stream count - 1) */
static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
/* EHT variant excluding MCS 12/13, for HALs flagged no_mcs_12_13 */
static const u64
rtw89_ra_mask_eht_mcs0_11[4] = {RA_MASK_EHT_1SS_MCS0_11, RA_MASK_EHT_2SS_MCS0_11,
RA_MASK_EHT_3SS_MCS0_11, RA_MASK_EHT_4SS_MCS0_11};
/* Select the fixed GI/LTF combination for RA: a chip/bandwidth specific
 * default, optionally overridden by a user-configured he_gi/he_ltf pair
 * (only honoured for HE-capable peers with a cfg mask in use).
 */
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta_link *rtwsta_link,
				struct ieee80211_link_sta *link_sta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(chan->band_type);
	u8 he_ltf = mask->control[nl_band].he_ltf;
	u8 he_gi = mask->control[nl_band].he_gi;

	*fix_giltf_en = true;

	if (rtwdev->chip->chip_id == RTL8852C &&
	    chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
	    rtw89_sta_link_has_su_mu_4xhe08(link_sta))
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	else
		*fix_giltf = RTW89_GILTF_2XHE08;

	if (!rtwsta_link->use_cfg_mask || !link_sta->he_cap.has_he)
		return;

	/* map the configured (ltf, gi) pair; unknown pairs keep the default */
	switch (he_ltf << 4 | he_gi) {
	case 0x22:
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
		break;
	case 0x20:
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
		break;
	case 0x11:
		*fix_giltf = RTW89_GILTF_2XHE16;
		break;
	case 0x10:
		*fix_giltf = RTW89_GILTF_2XHE08;
		break;
	case 0x01:
		*fix_giltf = RTW89_GILTF_1XHE16;
		break;
	case 0x00:
		*fix_giltf = RTW89_GILTF_1XHE08;
		break;
	default:
		break;
	}
}
/* Build the full rate-adaptive (RA) configuration for one station link
 * from its advertised capabilities (EHT/HE/VHT/HT + legacy), the current
 * channel, averaged RSSI, and any user-configured bitrate mask.  The
 * result is written into rtwsta_link->ra; the caller sends it to firmware.
 * @p2p: interface operates as P2P (disables the rate-pattern override)
 * @csi: also fill the CSI-report rate fields at the end
 */
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
struct ieee80211_link_sta *link_sta,
bool p2p, bool csi)
{
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
struct rtw89_ra_info *ra = &rtwsta_link->ra;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
u64 ra_mask = 0;
u64 ra_mask_bak;
u8 mode = 0;
u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
u8 bw_mode = 0;
u8 stbc_en = 0;
u8 ldpc_en = 0;
u8 fix_giltf = 0;
u8 i;
bool sgi = false ;
bool fix_giltf_en = false ;
/* start from a clean RA configuration */
memset(ra, 0, sizeof (*ra));
/* Set the ra mask from sta's capability: pick the highest PHY mode the
 * peer supports and derive its MCS mask plus STBC/LDPC RX capability.
 */
if (link_sta->eht_cap.has_eht) {
mode |= RTW89_RA_MODE_EHT;
ra_mask |= get_eht_ra_mask(rtwvif_link, link_sta);
if (rtwdev->hal.no_mcs_12_13)
high_rate_masks = rtw89_ra_mask_eht_mcs0_11;
else
high_rate_masks = rtw89_ra_mask_eht_rates;
rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->he_cap.has_he) {
mode |= RTW89_RA_MODE_HE;
csi_mode = RTW89_RA_RPT_MODE_HE;
ra_mask |= get_he_ra_mask(link_sta);
high_rate_masks = rtw89_ra_mask_he_rates;
if (link_sta->he_cap.he_cap_elem.phy_cap_info[2] &
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
stbc_en = 1;
if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->vht_cap.vht_supported) {
u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);
mode |= RTW89_RA_MODE_VHT;
csi_mode = RTW89_RA_RPT_MODE_VHT;
/* MCS9 (non-20MHz), MCS8, MCS7 */
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
else
ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
high_rate_masks = rtw89_ra_mask_vht_rates;
if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
stbc_en = 1;
if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = 1;
} else if (link_sta->ht_cap.ht_supported) {
mode |= RTW89_RA_MODE_HT;
csi_mode = RTW89_RA_RPT_MODE_HT;
/* HT rx_mask bytes map per-stream; stream i starts at bit 12*(i+1) */
ra_mask |= ((u64)link_sta->ht_cap.mcs.rx_mask[3] << 48) |
((u64)link_sta->ht_cap.mcs.rx_mask[2] << 36) |
((u64)link_sta->ht_cap.mcs.rx_mask[1] << 24) |
((u64)link_sta->ht_cap.mcs.rx_mask[0] << 12);
high_rate_masks = rtw89_ra_mask_ht_rates;
if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = 1;
if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
ldpc_en = 1;
}
/* add the peer's legacy supported rates for the operating band */
switch (chan->band_type) {
case RTW89_BAND_2G:
ra_mask |= link_sta->supp_rates[NL80211_BAND_2GHZ];
/* low nibble = CCK rates, next byte-and-a-half = OFDM rates */
if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
mode |= RTW89_RA_MODE_CCK;
if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
mode |= RTW89_RA_MODE_OFDM;
break ;
case RTW89_BAND_5G:
ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_5GHZ] << 4;
mode |= RTW89_RA_MODE_OFDM;
break ;
case RTW89_BAND_6G:
ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_6GHZ] << 4;
mode |= RTW89_RA_MODE_OFDM;
break ;
default :
rtw89_err(rtwdev, "Unknown band type\n" );
break ;
}
/* keep a backup so rtw89_phy_ra_mask_recover() can refill an empty mask */
ra_mask_bak = ra_mask;
if (mode >= RTW89_RA_MODE_HT) {
u64 mask = 0;
/* limit high-rate groups to the number of local TX streams */
for (i = 0; i < rtwdev->hal.tx_nss; i++)
mask |= high_rate_masks[i];
if (mode & RTW89_RA_MODE_OFDM)
mask |= RA_MASK_SUBOFDM_RATES;
if (mode & RTW89_RA_MODE_CCK)
mask |= RA_MASK_SUBCCK_RATES;
ra_mask &= mask;
} else if (mode & RTW89_RA_MODE_OFDM) {
ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
}
/* apply the RSSI floor (CCK-only peers are exempt), then recover if
 * the filtering emptied the mask, then apply the user cfg mask
 */
if (mode != RTW89_RA_MODE_CCK)
ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);
ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
/* derive channel-width mode and whether short GI applies */
switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
bw_mode = RTW89_CHANNEL_WIDTH_160;
sgi = link_sta->vht_cap.vht_supported &&
(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
break ;
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW89_CHANNEL_WIDTH_80;
sgi = link_sta->vht_cap.vht_supported &&
(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
break ;
case IEEE80211_STA_RX_BW_40:
bw_mode = RTW89_CHANNEL_WIDTH_40;
sgi = link_sta->ht_cap.ht_supported &&
(link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
break ;
default :
bw_mode = RTW89_CHANNEL_WIDTH_20;
sgi = link_sta->ht_cap.ht_supported &&
(link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
break ;
}
if (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
ra->dcm_cap = 1;
/* a user-forced rate pattern overrides the derived mask (not for P2P) */
if (rate_pattern->enable && !p2p) {
ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
ra_mask &= rate_pattern->ra_mask;
mode = rate_pattern->ra_mode;
}
/* commit the derived parameters into the RA info block */
ra->bw_cap = bw_mode;
ra->er_cap = rtwsta_link->er_cap;
ra->mode_ctrl = mode;
ra->macid = rtwsta_link->mac_id;
ra->stbc_cap = stbc_en;
ra->ldpc_cap = ldpc_en;
ra->ss_num = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
ra->en_sgi = sgi;
ra->ra_mask = ra_mask;
ra->fix_giltf_en = fix_giltf_en;
ra->fix_giltf = fix_giltf;
if (!csi)
return ;
/* CSI report rate configuration */
ra->fixed_csi_rate_en = false ;
ra->ra_csi_rate_en = true ;
ra->cr_tbl_sel = false ;
ra->band_num = rtwvif_link->phy_idx;
ra->csi_bw = bw_mode;
ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
ra->csi_mcs_ss_idx = 5;
ra->csi_mode = csi_mode;
}
/* Recompute one station link's RA parameters after a rate-control
 * change notification and push the update to firmware.
 */
void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
				  struct rtw89_sta_link *rtwsta_link,
				  u32 changed)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	struct ieee80211_link_sta *link_sta;

	rcu_read_lock();
	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, false);
	rcu_read_unlock();

	/* flag to firmware which parts of the RA state changed */
	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d",
		    ra->macid, ra->bw_cap, ra->ss_num, ra->en_sgi, ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}
/* Propagate a rate-control change to every active link of a station. */
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *link;
	unsigned int link_id;

	rtw89_sta_for_each_link(rtwsta, link, link_id)
		rtw89_phy_ra_update_sta_link(rtwdev, link, changed);
}
/* Evaluate one candidate rate selection for the fixed-rate pattern.
 * Returns true when processing may continue (either the selection was
 * absorbed into @next or was a skip value); false aborts the search
 * (ambiguous forced selection, or a pattern was already chosen).
 */
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 nbits;

	/* "skip" sentinel or no bits set: nothing to record */
	if (rate_ctrl == ctrl_skip || rate_ctrl == 0)
		return true;

	nbits = hweight32(rate_ctrl);
	/* a forced selection must name exactly one rate */
	if (force && nbits != 1)
		return false;
	/* only the first matching selection wins */
	if (next->enable)
		return false;

	next->rate = rate_base + __fls(rate_ctrl);
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;
	return true;
}
/* Expand to a per-chip-generation HW rate table entry: AX chips use the
 * RTW89_HW_RATE_* encoding, BE chips the RTW89_HW_RATE_V1_* encoding.
 */
#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
{ \
[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
}
/* Translate a cfg80211 bitrate mask into a fixed rate pattern for one
 * vif link.  HE, VHT, then HT MCS selections are tried per spatial
 * stream (forced: exactly one rate), then the legacy rates.  On any
 * invalid/ambiguous selection the pattern is disabled entirely.
 */
static
void __rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
const struct cfg80211_bitrate_mask *mask)
{
struct ieee80211_supported_band *sband;
struct rtw89_phy_rate_pattern next_pattern = {0};
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
/* base HW rate per spatial stream, per chip generation */
static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
};
static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
};
static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
};
u8 band = chan->band_type;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
u8 tx_nss = rtwdev->hal.tx_nss;
u8 i;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
mask->control[nl_band].he_mcs[i],
0, true ))
goto out;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
mask->control[nl_band].vht_mcs[i],
0, true ))
goto out;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
mask->control[nl_band].ht_mcs[i],
0, true ))
goto out;
/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
 * require at least one basic rate for ieee80211_set_bitrate_mask,
 * so the decision just depends on if all bitrates are set or not.
 */
sband = rtwdev->hw->wiphy->bands[nl_band];
if (band == RTW89_BAND_2G) {
if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
mask->control[nl_band].legacy,
BIT(sband->n_bitrates) - 1, false ))
goto out;
} else {
if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
mask->control[nl_band].legacy,
BIT(sband->n_bitrates) - 1, false ))
goto out;
}
/* no selection at all: leave the pattern disabled */
if (!next_pattern.enable)
goto out;
rtwvif_link->rate_pattern = next_pattern;
rtw89_debug(rtwdev, RTW89_DBG_RA,
"configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n" ,
next_pattern.rate,
next_pattern.ra_mask,
next_pattern.ra_mode);
return ;
out:
rtwvif_link->rate_pattern.enable = false ;
rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n" );
}
/* Apply the user bitrate mask as a fixed rate pattern on every link of
 * the given vif.
 */
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
	struct rtw89_vif_link *link;
	unsigned int link_id;

	rtw89_vif_for_each_link(rtwvif, link, link_id)
		__rtw89_phy_rate_pattern_vif(rtwdev, link, mask);
}
/* Station-iterator callback: refresh RA as if supported rates changed. */
static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
{
	rtw89_phy_ra_update_sta((struct rtw89_dev *)data, sta,
				IEEE80211_RC_SUPP_RATES_CHANGED);
}
/* Refresh RA parameters for every known station on this device. */
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_update_sta_iter,
					  rtwdev);
}
/* Program the initial RA configuration for a station link at association
 * time: derive everything from the peer's capabilities, pick an initial
 * rate level from the averaged RSSI, then hand the result to firmware.
 */
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link)
{
struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct rtw89_ra_info *ra = &rtwsta_link->ra;
u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
struct ieee80211_link_sta *link_sta;
bool csi;
rcu_read_lock();
link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true );
/* enable CSI handling only when the peer can act as a beamformer */
csi = rtw89_sta_has_beamformer_cap(link_sta);
rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
link_sta, vif->p2p, csi);
rcu_read_unlock();
/* map averaged RSSI to an initial rate level (0 = firmware default) */
if (rssi > 40)
ra->init_rate_lv = 1;
else if (rssi > 20)
ra->init_rate_lv = 2;
else if (rssi > 1)
ra->init_rate_lv = 3;
else
ra->init_rate_lv = 0;
ra->upd_all = 1;
rtw89_debug(rtwdev, RTW89_DBG_RA,
"ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d" ,
ra->macid,
ra->mode_ctrl,
ra->bw_cap,
ra->ss_num,
ra->init_rate_lv);
rtw89_debug(rtwdev, RTW89_DBG_RA,
"ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d" ,
ra->dcm_cap,
ra->er_cap,
ra->ldpc_cap,
ra->stbc_cap,
ra->en_sgi,
ra->giltf);
rtw89_fw_h2c_ra(rtwdev, ra, csi);
}
/* Compute the TX sub-channel (TXSC) index that locates a data bandwidth
 * @dbw inside the operating channel @chan, based on the offset between
 * the primary and central channel.  Returns 0 when the data bandwidth
 * equals the channel bandwidth (or the channel is 20MHz), and 0xff for
 * an impossible 40MHz placement inside a 160MHz channel.
 */
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw)
{
enum rtw89_bandwidth cbw = chan->band_width;
u8 pri_ch = chan->primary_channel;
u8 central_ch = chan->channel;
u8 txsc_idx = 0;
u8 tmp = 0;
if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
return txsc_idx;
switch (cbw) {
case RTW89_CHANNEL_WIDTH_40:
/* 20MHz inside 40MHz: 1 = upper, 2 = lower */
txsc_idx = pri_ch > central_ch ? 1 : 2;
break ;
case RTW89_CHANNEL_WIDTH_80:
if (dbw == RTW89_CHANNEL_WIDTH_20) {
/* 20MHz sub-channels: 1..4 derived from the channel offset */
if (pri_ch > central_ch)
txsc_idx = (pri_ch - central_ch) >> 1;
else
txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
} else {
/* 40MHz inside 80MHz: 9 = upper, 10 = lower */
txsc_idx = pri_ch > central_ch ? 9 : 10;
}
break ;
case RTW89_CHANNEL_WIDTH_160:
/* tmp = 20MHz sub-channel position within the 160MHz channel */
if (pri_ch > central_ch)
tmp = (pri_ch - central_ch) >> 1;
else
tmp = ((central_ch - pri_ch) >> 1) + 1;
if (dbw == RTW89_CHANNEL_WIDTH_20) {
txsc_idx = tmp;
} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
/* map the 20MHz position to its enclosing 40MHz index */
if (tmp == 1 || tmp == 3)
txsc_idx = 9;
else if (tmp == 5 || tmp == 7)
txsc_idx = 11;
else if (tmp == 2 || tmp == 4)
txsc_idx = 10;
else if (tmp == 6 || tmp == 8)
txsc_idx = 12;
else
return 0xff;
} else {
/* 80MHz inside 160MHz: 13 = upper, 14 = lower */
txsc_idx = pri_ch > central_ch ? 13 : 14;
}
break ;
case RTW89_CHANNEL_WIDTH_80_80:
if (dbw == RTW89_CHANNEL_WIDTH_20) {
if (pri_ch > central_ch)
txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
else
txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
txsc_idx = pri_ch > central_ch ? 10 : 12;
} else {
txsc_idx = 14;
}
break ;
default :
break ;
}
return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
/* Compute the TX sub-band (TXSB) index for data bandwidth @dbw inside
 * channel @chan.  Unlike TXSC, the index counts sub-bands linearly from
 * the lowest; the "+ offset) / divisor" forms convert the signed
 * primary-vs-central channel distance into that linear index.
 * Returns 0 when the data bandwidth equals the channel bandwidth.
 */
u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw)
{
enum rtw89_bandwidth cbw = chan->band_width;
u8 pri_ch = chan->primary_channel;
u8 central_ch = chan->channel;
u8 txsb_idx = 0;
if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
return txsb_idx;
switch (cbw) {
case RTW89_CHANNEL_WIDTH_40:
txsb_idx = pri_ch > central_ch ? 1 : 0;
break ;
case RTW89_CHANNEL_WIDTH_80:
if (dbw == RTW89_CHANNEL_WIDTH_20)
txsb_idx = (pri_ch - central_ch + 6) / 4;
else
txsb_idx = pri_ch > central_ch ? 1 : 0;
break ;
case RTW89_CHANNEL_WIDTH_160:
if (dbw == RTW89_CHANNEL_WIDTH_20)
txsb_idx = (pri_ch - central_ch + 14) / 4;
else if (dbw == RTW89_CHANNEL_WIDTH_40)
txsb_idx = (pri_ch - central_ch + 12) / 8;
else
txsb_idx = pri_ch > central_ch ? 1 : 0;
break ;
case RTW89_CHANNEL_WIDTH_320:
if (dbw == RTW89_CHANNEL_WIDTH_20)
txsb_idx = (pri_ch - central_ch + 30) / 4;
else if (dbw == RTW89_CHANNEL_WIDTH_40)
txsb_idx = (pri_ch - central_ch + 28) / 8;
else if (dbw == RTW89_CHANNEL_WIDTH_80)
txsb_idx = (pri_ch - central_ch + 24) / 16;
else
txsb_idx = pri_ch > central_ch ? 1 : 0;
break ;
default :
break ;
}
return txsb_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsb);
static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
!!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
}
/* Read an RF register through its direct mapping in BB address space.
 * Returns the value masked by @mask, or INV_RF_DATA for a bad RF path.
 */
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	u32 direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	/* RF registers appear at a per-path base, 4 bytes per register */
	direct_addr = rtwdev->chip->rf_base_addr[rf_path] + ((addr & 0xff) << 2);

	return rtw89_phy_read32_mask(rtwdev, direct_addr, mask & RFREG_MASK);
}
EXPORT_SYMBOL(rtw89_phy_read_rf);
/* Read an RF register through the SWSI (serial interface) engine:
 * wait until SWSI is idle, issue the read address, poll for completion,
 * then return the masked data.  Returns INV_RF_DATA on timeout.
 */
static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
bool busy;
bool done;
u32 val;
int ret;
/* wait (up to 30us) for any in-flight SWSI transaction to finish */
ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
1, 30, false , rtwdev);
if (ret) {
rtw89_err(rtwdev, "read rf busy swsi\n" );
return INV_RF_DATA;
}
mask &= RFREG_MASK;
/* program the path and register address to read */
val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
udelay(2);
/* poll the read-done flag before fetching the data word */
ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
30, false , rtwdev, R_SWSI_V1,
B_SWSI_R_DATA_DONE_V1);
if (ret) {
/* stay quiet if the device has already been unplugged */
if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
rtw89_err(rtwdev, "read swsi busy\n" );
return INV_RF_DATA;
}
return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}
/* v1 RF read: the ADSEL bit in @addr selects direct (BB-mapped) access,
 * otherwise the SWSI engine is used.
 */
u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr))
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);

	return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
/* Read the full value of an RF register through the HWSI engine.  Uses
 * per-path control and status register addresses; returns INV_RF_DATA
 * when the engine stays busy.
 */
static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path, u32 addr)
{
/* per-path HWSI status (read) and control (write) register addresses */
static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
bool busy, done;
int ret;
u32 val;
/* enable address-based HWSI control for this path */
rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
/* wait for the engine to become idle */
ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
1, 3800, false ,
rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
if (ret) {
rtw89_warn(rtwdev, "poll HWSI is busy\n" );
return INV_RF_DATA;
}
/* program the register address and trigger the read */
rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
udelay(2);
/* poll for read completion, then fetch the data */
ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
1, 3800, false ,
rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
if (ret) {
rtw89_warn(rtwdev, "read HWSI is busy\n" );
val = INV_RF_DATA;
goto out;
}
val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
out:
/* always clear the poll/control bits before returning */
rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);
return val;
}
/* HWSI read of one field: fetch the full register, then extract @mask. */
static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	u32 full = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);

	return (full & mask) >> __ffs(mask);
}
/* v2 RF read: the ADSEL bit in @addr selects direct (BB-mapped) access,
 * otherwise the HWSI engine is used.
 */
u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK))
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);

	return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
/* Write an RF register through its direct mapping in BB address space.
 * Returns false for a bad RF path, true otherwise.
 */
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			u32 addr, u32 mask, u32 data)
{
	u32 direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	/* RF registers appear at a per-path base, 4 bytes per register */
	direct_addr = rtwdev->chip->rf_base_addr[rf_path] + ((addr & 0xff) << 2);
	rtw89_phy_write32_mask(rtwdev, direct_addr, mask & RFREG_MASK, data);

	/* delay to ensure writing properly */
	udelay(1);

	return true;
}
EXPORT_SYMBOL(rtw89_phy_write_rf);
/* Write an RF register through the SWSI engine: wait until SWSI is idle,
 * optionally program a bit mask for a partial-field write, then compose
 * and fire the path/address/data word.  Returns false on timeout.
 */
static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path, u32 addr, u32 mask,
u32 data)
{
u8 bit_shift;
u32 val;
bool busy, b_msk_en = false ;
int ret;
/* wait (up to 30us) for any in-flight SWSI transaction to finish */
ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
1, 30, false , rtwdev);
if (ret) {
rtw89_err(rtwdev, "write rf busy swsi\n" );
return false ;
}
data &= RFREG_MASK;
mask &= RFREG_MASK;
/* partial-field write: enable masked mode and align data to the field */
if (mask != RFREG_MASK) {
b_msk_en = true ;
rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
mask);
bit_shift = __ffs(mask);
data = (data << bit_shift) & RFREG_MASK;
}
val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
FIELD_PREP(B_SWSI_DATA_VAL_V1, data);
/* writing the data register triggers the SWSI transaction */
rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);
return true ;
}
/* v1 RF write: the ADSEL bit in @addr selects direct (BB-mapped) access,
 * otherwise the SWSI engine is used.
 */
bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr))
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);

	return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
/* Write the full value of an RF register through the HWSI engine: wait
 * for the per-path idle bit, then program address + data in one word.
 * Returns false when the engine stays busy.
 */
static
bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 data)
{
/* per-path HWSI idle-status and data register addresses */
static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
bool busy;
u32 val;
int ret;
ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
1, 3800, false ,
rtwdev, addr_is_idle[rf_path], BIT(29));
if (ret) {
rtw89_warn(rtwdev, "[%s] HWSI is busy\n" , __func__);
return false ;
}
val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
u32_encode_bits(data, B_HWSI_DATA_VAL);
/* writing the combined word starts the HWSI transaction */
rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);
return true ;
}
/* HWSI write of one field: full-register writes go straight through;
 * partial-field writes read-modify-write the register first.
 */
static
bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			     u32 addr, u32 mask, u32 data)
{
	u32 new_val = data;

	if (mask != RFREG_MASK) {
		new_val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
		new_val = (new_val & ~mask) | ((data << __ffs(mask)) & mask);
	}

	return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, new_val);
}
/* v2 RF write: the ADSEL bit in @addr selects direct (BB-mapped) access,
 * otherwise the HWSI engine is used.  Returns false on failure.
 */
bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		/* was "return INV_RF_DATA": a non-zero u32 converts to true,
		 * wrongly reporting success on the error path.  The other
		 * write variants return false here; do the same.
		 */
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
/* A chip uses the v1 RF access scheme iff its write_rf op is the v1 one. */
static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
	bool is_v1 = rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;

	return is_v1;
}
/* Reset one baseband PHY via the chip-specific op. */
static void __rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx)
{
	rtwdev->chip->ops->bb_reset(rtwdev, phy_idx);
}
/* Reset PHY0 always; PHY1 too when dual-band concurrency is enabled. */
static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev)
{
	__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);

	if (!rtwdev->dbcc_en)
		return;

	__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_1);
}
/* Apply one entry of a BB register table.  Pseudo-addresses 0xf9..0xfe
 * encode delays instead of register writes; a BYPASS_CR_DATA value skips
 * the write; otherwise the register is written (re-targeted to PHY1 via
 * the PHY0->PHY1 offset when requested through @extra_data).
 */
static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	u32 addr = reg->addr;

	switch (addr) {
	case 0xfe:
		mdelay(50);
		return;
	case 0xfd:
		mdelay(5);
		return;
	case 0xfc:
		mdelay(1);
		return;
	case 0xfb:
		udelay(50);
		return;
	case 0xfa:
		udelay(5);
		return;
	case 0xf9:
		udelay(1);
		return;
	}

	if (reg->data == BYPASS_CR_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
		return;
	}

	if ((uintptr_t)extra_data == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);

	rtw89_phy_write32(rtwdev, addr, reg->data);
}
/* Decoder overlay for BB gain table entry "addresses": the 32-bit word
 * packs cfg_type, gain_band, RF path, and either a plain type id or a
 * bandwidth/rxsc-start pair, depending on the entry kind.
 */
union rtw89_phy_bb_gain_arg {
u32 addr;
struct {
union {
u8 type;
struct {
u8 rxsc_start:4;
u8 bw:4;
};
};
u8 path;
u8 gain_band;
u8 cfg_type;
};
} __packed;
/* Store one BB gain-error table word.  Each 32-bit @data packs up to
 * four byte-sized entries, consumed LSB first into the LNA or TIA gain
 * tables depending on the entry type.
 */
static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 gband = arg.gain_band;
	u8 path = arg.path;
	int i;

	switch (arg.type) {
	case 0:	/* LNA gain, indexes 0..3 */
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 1:	/* LNA gain, indexes 4..6 */
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 2:	/* TIA gain, indexes 0..1 */
		for (i = 0; i < 2; i++, data >>= 8)
			gain->tia_gain[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, arg.type);
		break;
	}
}
/* Starting indexes into the rpl_ofst_* tables for each RX sub-channel
 * granularity; FULL is the whole-bandwidth entry at index 0.
 */
enum rtw89_phy_bb_rxsc_start_idx {
RTW89_BB_RXSC_START_IDX_FULL = 0,
RTW89_BB_RXSC_START_IDX_20 = 1,
RTW89_BB_RXSC_START_IDX_20_1 = 5,
RTW89_BB_RXSC_START_IDX_40 = 9,
RTW89_BB_RXSC_START_IDX_80 = 13,
};
static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 rxsc_start = arg.rxsc_start;
u8 bw = arg.bw;
u8 path = arg.path;
u8 gband = arg.gain_band;
u8 rxsc;
s8 ofst;
int i;
switch (bw) {
case RTW89_CHANNEL_WIDTH_20:
gain->rpl_ofst_20[gband][path] = (s8)data;
break ;
case RTW89_CHANNEL_WIDTH_40:
if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
gain->rpl_ofst_40[gband][path][0] = (s8)data;
} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
for (i = 0; i < 2; i++, data >>= 8) {
rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
ofst = (s8)(data & 0xff);
gain->rpl_ofst_40[gband][path][rxsc] = ofst;
}
}
break ;
case RTW89_CHANNEL_WIDTH_80:
if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
gain->rpl_ofst_80[gband][path][0] = (s8)data;
} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
for (i = 0; i < 4; i++, data >>= 8) {
rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
ofst = (s8)(data & 0xff);
gain->rpl_ofst_80[gband][path][rxsc] = ofst;
}
} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
for (i = 0; i < 2; i++, data >>= 8) {
rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
ofst = (s8)(data & 0xff);
gain->rpl_ofst_80[gband][path][rxsc] = ofst;
}
}
break ;
case RTW89_CHANNEL_WIDTH_160:
if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
gain->rpl_ofst_160[gband][path][0] = (s8)data;
} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
for (i = 0; i < 4; i++, data >>= 8) {
rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
ofst = (s8)(data & 0xff);
gain->rpl_ofst_160[gband][path][rxsc] = ofst;
}
} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
for (i = 0; i < 4; i++, data >>= 8) {
rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
ofst = (s8)(data & 0xff);
gain->rpl_ofst_160[gband][path][rxsc] = ofst;
}
} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
for (i = 0; i < 4; i++, data >>= 8) {
rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
ofst = (s8)(data & 0xff);
gain->rpl_ofst_160[gband][path][rxsc] = ofst;
}
} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
for (i = 0; i < 2; i++, data >>= 8) {
rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
ofst = (s8)(data & 0xff);
gain->rpl_ofst_160[gband][path][rxsc] = ofst;
}
}
break ;
default :
rtw89_warn(rtwdev,
"bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n" ,
arg.addr, data, bw);
break ;
}
}
/* Store one "gain bypass" table entry: each byte of @data is one LNA
 * bypass gain value for the band/path encoded in @arg.
 */
static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 gband = arg.gain_band;
	u8 type = arg.type;
	u8 path = arg.path;
	int i;

	if (type == 0) {
		/* bypass entries 0..3 */
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
	} else if (type == 1) {
		/* bypass entries 4..6 */
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
	} else {
		rtw89_warn(rtwdev,
			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
	}
}
/* Store one "OP1dB" table entry: each byte of @data is one LNA or
 * TIA+LNA 1-dB compression point for the band/path encoded in @arg.
 */
static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 gband = arg.gain_band;
	u8 path = arg.path;
	int i, first, last;
	bool tia_lna;

	/* arg.type selects which array and which slot range is filled */
	switch (arg.type) {
	case 0:
		first = 0;
		last = 4;
		tia_lna = false;
		break;
	case 1:
		first = 4;
		last = 7;
		tia_lna = false;
		break;
	case 2:
		first = 0;
		last = 4;
		tia_lna = true;
		break;
	case 3:
		first = 4;
		last = 8;
		tia_lna = true;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, arg.type);
		return;
	}

	for (i = first; i < last; i++, data >>= 8) {
		if (tia_lna)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		else
			gain->lna_op1db[gband][path][i] = data & 0xff;
	}
}
/* Dispatch one BB-gain table entry to the parser for its cfg_type.
 *
 * The entry's 32-bit address is reinterpreted as table coordinates
 * (union rtw89_phy_bb_gain_arg).  Out-of-range band/path coordinates
 * are ignored, and flow-control pseudo-addresses (0xf9..0xfe) are
 * rejected with a warning since gain tables must not contain delays.
 */
static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;
	if (arg.path >= chip->rf_path_num)
		return;
	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM;
		 * for other RFE types it is simply skipped, otherwise fall
		 * through and flag the entry as unhandled.
		 */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
/* Append one RF register write to the firmware H2C staging buffer.
 *
 * Entries are packed as (addr << 20) | data into fixed-size pages;
 * overflow beyond RTW89_H2C_RF_PAGE_NUM pages is warned and dropped.
 * (Note: the "cofig" typo is part of the established symbol name.)
 */
static void
rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
			     const struct rtw89_reg2_def *reg,
			     enum rtw89_rf_path rf_path,
			     struct rtw89_fw_h2c_rf_reg_info *info)
{
	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;

	if (page >= RTW89_H2C_RF_PAGE_NUM) {
		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
			   rf_path, info->curr_idx);
		return;
	}

	info->rtw89_phy_config_rf_h2c[page][idx] =
		cpu_to_le32((reg->addr << 20) | reg->data);
	info->curr_idx++;
}
/* Flush the staged RF register writes to firmware, one H2C per page.
 *
 * Resets info->curr_idx on all paths so the buffer can be reused.
 * Returns 0 on success or a negative error code.
 */
static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
				      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 remain = info->curr_idx;
	int ret = 0;
	u16 len;
	u8 i;

	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
		rtw89_warn(rtwdev,
			   "rf reg h2c total len %d larger than %d\n",
			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
		/* each H2C carries at most one full page */
		len = remain;
		if (len > RTW89_H2C_RF_PAGE_SIZE)
			len = RTW89_H2C_RF_PAGE_SIZE;

		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
		if (ret)
			goto out;
	}
out:
	info->curr_idx = 0;
	return ret;
}
/* Stage an RF table entry for firmware download without touching the
 * hardware.  Delay pseudo-addresses are meaningless here and dropped;
 * for RF v1 chips, addresses below 0x100 are not forwarded either.
 */
static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
					 const struct rtw89_reg2_def *reg,
					 enum rtw89_rf_path rf_path,
					 void *extra_data)
{
	u32 addr = reg->addr;

	switch (addr) {
	case 0xf9 ... 0xfe:
		return;
	default:
		break;
	}

	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
		return;

	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}
/* Apply one RF table entry: pseudo-addresses 0xf9..0xfe encode delays,
 * anything else is written to the RF register (20-bit data mask) and
 * staged for firmware download as well.
 */
static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	switch (reg->addr) {
	case 0xfe:
		mdelay(50);
		break;
	case 0xfd:
		mdelay(5);
		break;
	case 0xfc:
		mdelay(1);
		break;
	case 0xfb:
		udelay(50);
		break;
	case 0xfa:
		udelay(5);
		break;
	case 0xf9:
		udelay(1);
		break;
	default:
		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
		rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
					     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
		break;
	}
}
/* RF v1 variant: always write the register, but only stage addresses
 * >= 0x100 for firmware download.
 */
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
				const struct rtw89_reg2_def *reg,
				enum rtw89_rf_path rf_path,
				void *extra_data)
{
	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);

	if (reg->addr >= 0x100)
		rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
					     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}
EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
/* Select which headline (conditional parameter set) of a PHY table
 * applies to this device, matching on RFE type and chip version (CV).
 *
 * Headline entries sit at the start of the table (PHY_HEADLINE_VALID).
 * Selection tries, in priority order: exact RFE+CV match, RFE match
 * with don't-care CV, RFE match with the highest CV present, then
 * don't-care RFE with the highest CV present.
 *
 * Returns 0 with *headline_size and *headline_idx set (idx is only
 * meaningful when size > 0), or -EINVAL when nothing matches.
 */
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	/* count leading headline entries */
	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table
	 * (cv_max is still 0 here: case 3 returns whenever it matched)
	 */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}
/* Walk a conditional PHY parameter table and apply matching entries
 * through @config.
 *
 * The table starts with a headline (selected by rtw89_phy_sel_headline()
 * for this device's RFE type / chip version).  The remaining entries are
 * grouped by IF/ELIF/ELSE/END pseudo-conditions: CHECK entries compare a
 * pending branch target against the selected headline target, and only
 * entries inside the first matched branch are applied.
 */
static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;		/* outside any branch: apply */
	bool target_found = false;	/* a branch already matched in this group */
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			/* remember the branch target; the following CHECK
			 * decides whether this branch applies
			 */
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				/* no branch matched before ELSE: table is
				 * inconsistent with the selected headline
				 */
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			if (target_found) {
				/* only the first matching branch applies */
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			/* plain register entry */
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
/* Load the baseband parameter and gain tables, then reset the BB.
 * Tables downloaded as firmware elements take precedence over the
 * chip's built-in ones; PHY1 is programmed as well under DBCC.
 */
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *gain_tbl;
	const struct rtw89_phy_table *tbl;

	tbl = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
	rtw89_phy_init_reg(rtwdev, tbl, rtw89_phy_config_bb_reg, NULL);
	if (rtwdev->dbcc_en)
		rtw89_phy_init_reg(rtwdev, tbl, rtw89_phy_config_bb_reg,
				   (void *)RTW89_PHY_1);

	rtw89_chip_init_txpwr_unit(rtwdev);

	gain_tbl = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
	if (gain_tbl)
		rtw89_phy_init_reg(rtwdev, gain_tbl,
				   chip->phy_def->config_bb_gain, NULL);

	rtw89_phy_bb_reset(rtwdev);
}
/* Kick the NCTL block by writing 0x4 to register 0x8080 and read the
 * status back; used as the poll routine for read_poll_timeout() in
 * rtw89_phy_preinit_rf_nctl_ax().
 */
static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}
/* Load the RF radio parameter tables for every RF path and push the
 * staged register writes to firmware.  With @noio the hardware is not
 * touched; entries are only staged for download.
 */
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		       enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_h2c_rf_reg_info *reg_info;
	const struct rtw89_phy_table *tbl;
	u8 path;

	reg_info = kzalloc(sizeof(*reg_info), GFP_KERNEL);
	if (!reg_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		/* firmware-element table takes precedence over built-in */
		tbl = elm_info->rf_radio[path] ? elm_info->rf_radio[path] :
						 chip->rf_table[path];
		reg_info->rf_path = tbl->rf_path;

		if (noio)
			config = rtw89_phy_config_rf_reg_noio;
		else if (tbl->config)
			config = tbl->config;
		else
			config = rtw89_phy_config_rf_reg;

		rtw89_phy_init_reg(rtwdev, tbl, config, (void *)reg_info);

		if (rtw89_phy_config_rf_reg_fw(rtwdev, reg_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   reg_info->rf_path);
	}

	kfree(reg_info);
}
/* Prepare the RF NCTL (IQK/DPK) block before its parameter table is
 * loaded: enable clocks, release per-path resets (with chip-specific
 * quirks), then poll register 0x8080 until it reports ready (0x4).
 */
static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	/* 8851B has no second path */
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);
	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
}
/* Pre-initialize the NCTL block, load its parameter table (firmware
 * element preferred over built-in), then run any chip-specific
 * post-table RFK parser sequence.
 */
static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *tbl;

	rtw89_phy_preinit_rf_nctl(rtwdev);

	tbl = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
	rtw89_phy_init_reg(rtwdev, tbl, rtw89_phy_config_bb_reg, NULL);

	if (chip->nctl_post_table)
		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}
/* AX-gen offset between a PHY0 register and its PHY1 counterpart.
 *
 * Register pages 0x6-0xd, 0x19-0x1b and 0x40-0x4f are mirrored for
 * PHY1 at +0x2000; any other page has no PHY1 twin and maps to
 * offset 0.
 */
static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 phy_page = addr >> 8;

	if ((phy_page >= 0x6 && phy_page <= 0xd) ||
	    (phy_page >= 0x19 && phy_page <= 0x1b) ||
	    (phy_page >= 0x40 && phy_page <= 0x4f))
		return 0x2000;

	return 0;
}
/* Masked BB register write addressed by PHY index; under DBCC, PHY1
 * registers live at a fixed offset from their PHY0 counterparts.
 */
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			   u32 data, enum rtw89_phy_idx phy_idx)
{
	u32 ofst = 0;

	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		ofst = rtw89_phy0_phy1_offset(rtwdev, addr);

	rtw89_phy_write32_mask(rtwdev, addr + ofst, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);
/* Set @bits in a BB register addressed by PHY index (see
 * rtw89_phy0_phy1_offset() for the DBCC PHY1 mapping).
 */
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	u32 ofst = 0;

	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		ofst = rtw89_phy0_phy1_offset(rtwdev, addr);

	rtw89_phy_write32_set(rtwdev, addr + ofst, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_set);
/* Clear @bits in a BB register addressed by PHY index (see
 * rtw89_phy0_phy1_offset() for the DBCC PHY1 mapping).
 */
void rtw89_phy_write32_idx_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	u32 ofst = 0;

	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		ofst = rtw89_phy0_phy1_offset(rtwdev, addr);

	rtw89_phy_write32_clr(rtwdev, addr + ofst, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_clr);
/* Masked BB register read addressed by PHY index (see
 * rtw89_phy0_phy1_offset() for the DBCC PHY1 mapping).
 */
u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			 enum rtw89_phy_idx phy_idx)
{
	u32 ofst = 0;

	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		ofst = rtw89_phy0_phy1_offset(rtwdev, addr);

	return rtw89_phy_read32_mask(rtwdev, addr + ofst, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);
/* Write a masked value to PHY0 and, when dual-band concurrent mode is
 * active, mirror the same write onto PHY1.
 */
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			    u32 val)
{
	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

	if (rtwdev->dbcc_en)
		rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl)
{
const struct rtw89_reg3_def *reg3;
int i;
for (i = 0; i < tbl->size; i++) {
reg3 = &tbl->reg3[i];
rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
}
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
/* Translate an ACPI antenna-gain domain code into the driver's
 * regulatory-domain enum; unknown domains are logged and map to
 * RTW89_REGD_NUM (invalid).
 */
static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
{
	if (ant_gain_regd == RTW89_ANT_GAIN_ETSI)
		return RTW89_ETSI;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "unknown antenna gain domain: %d\n",
		    ant_gain_regd);
	return RTW89_REGD_NUM;
}
/* antenna gain in unit of 0.25 dbm */
/* per-band clamp window applied to raw ACPI RTAG gain values */
#define RTW89_ANT_GAIN_2GHZ_MIN -8
#define RTW89_ANT_GAIN_2GHZ_MAX 14
#define RTW89_ANT_GAIN_5GHZ_MIN -8
#define RTW89_ANT_GAIN_5GHZ_MAX 20
#define RTW89_ANT_GAIN_6GHZ_MIN -8
#define RTW89_ANT_GAIN_6GHZ_MAX 20
/* reference level each clamped gain is subtracted from (see
 * rtw89_phy_ant_gain_init())
 */
#define RTW89_ANT_GAIN_REF_2GHZ 14
#define RTW89_ANT_GAIN_REF_5GHZ 20
#define RTW89_ANT_GAIN_REF_6GHZ 20
/* Initialize per-chain antenna-gain offsets from the ACPI RTAG object.
 *
 * RTAG revision 0 provides an enabled-domain bitmap and a per-chain,
 * per-subband gain table.  Each raw gain is clamped to its band's
 * [MIN, MAX] window and stored as (REF - clamped gain), i.e. as an
 * offset relative to the reference level, in 0.25 dBm units.
 */
void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_acpi_rtag_result res = {};
	u32 domain;
	int ret;
	u8 i, j;
	u8 regd;
	u8 val;

	if (!chip->support_ant_gain)
		return;

	ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "acpi: cannot eval rtag: %d\n", ret);
		return;
	}

	/* only revision 0 of the RTAG layout is understood */
	if (res.revision != 0) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "unknown rtag revision: %d\n", res.revision);
		return;
	}

	/* translate the domain bitmap into enabled regulatory domains */
	domain = get_unaligned_le32(&res.domain);

	for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
		if (!(domain & BIT(i)))
			continue;

		regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
		if (regd >= RTW89_REGD_NUM)
			continue;
		ant_gain->regd_enabled |= BIT(regd);
	}

	for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
		for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
			val = res.ant_gain_table[i][j];
			switch (j) {
			/* unknown subband indexes are treated as 2 GHz */
			default:
			case RTW89_ANT_GAIN_2GHZ_SUBBAND:
				val = RTW89_ANT_GAIN_REF_2GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_2GHZ_MIN,
					      RTW89_ANT_GAIN_2GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
				val = RTW89_ANT_GAIN_REF_5GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_5GHZ_MIN,
					      RTW89_ANT_GAIN_5GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
				val = RTW89_ANT_GAIN_REF_6GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_6GHZ_MIN,
					      RTW89_ANT_GAIN_6GHZ_MAX);
			}
			ant_gain->offset[i][j] = val;
		}
	}
}
/* Map a center frequency (MHz) to its antenna-gain subband.  Unhandled
 * frequencies are logged and fall through to the 2 GHz subband.
 */
static
enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
							   u32 center_freq)
{
	switch (center_freq) {
	default:
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "center freq: %u to antenna gain subband is unhandled\n",
			    center_freq);
		fallthrough;
	case 2412 ... 2484:
		return RTW89_ANT_GAIN_2GHZ_SUBBAND;
	case 5180 ... 5240:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
	case 5250 ... 5320:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
	case 5500 ... 5720:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
	case 5745 ... 5885:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
	case 5955 ... 6155:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
	case 6175 ... 6415:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
	case 6435 ... 6515:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
	case 6535 ... 6695:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
	case 6715 ... 6855:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;

	/* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
	 * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with
	 * struct rtw89_6ghz_span.
	 */

	case 6895 ... 7115:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
	}
}
/* Look up the antenna-gain offset for one RF path at @center_freq.
 * A 6 GHz span entry may straddle two subbands; the stricter (smaller)
 * of the two offsets is returned.
 */
static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
				   enum rtw89_rf_path path, u32 center_freq)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	enum rtw89_ant_gain_subband sub_lo, sub_hi;
	const struct rtw89_6ghz_span *span;

	span = rtw89_get_6ghz_span(rtwdev, center_freq);
	if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
		sub_lo = span->ant_gain_subband_low;
		sub_hi = span->ant_gain_subband_high;
	} else {
		sub_lo = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
		sub_hi = sub_lo;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "center_freq %u: antenna gain subband {%u, %u}\n",
		    center_freq, sub_lo, sub_hi);

	return min(ant_gain->offset[path][sub_lo],
		   ant_gain->offset[path][sub_hi]);
}
/* Combine path A/B antenna-gain offsets into a single value: the
 * smaller one when firmware cannot apply per-path power differences,
 * otherwise the larger one.
 */
static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u32 center_freq)
{
	s8 ofst_a = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
	s8 ofst_b = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);

	if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
		return min(ofst_a, ofst_b);

	return max(ofst_a, ofst_b);
}
/* Whether dynamic antenna gain may be applied on @band: the chip must
 * support it, the current country must not block it, the regulatory
 * domain must be enabled by RTAG, and DAG limit tables must exist.
 */
static bool rtw89_can_apply_ant_gain(struct rtw89_dev *rtwdev, u8 band)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 regd = rtw89_regd_get(rtwdev, band);

	return chip->support_ant_gain &&
	       !ant_gain->block_country &&
	       (ant_gain->regd_enabled & BIT(regd)) &&
	       rfe_parms->has_da;
}
/* Per-path power difference (path A minus path B) for the current
 * channel in BB power units; zero when DAG does not apply or firmware
 * lacks per-path power difference support.
 */
s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
				  const struct rtw89_chan *chan)
{
	s8 ofst_a, ofst_b;

	if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type) ||
	    RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
		return 0;

	ofst_a = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
	ofst_b = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);

	return rtw89_phy_txpwr_rf_to_bb(rtwdev, ofst_a - ofst_b);
}
EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
/* Format the per-chain antenna-gain offsets (or a "not applied" note)
 * into @buf for debugfs; returns the number of characters written.
 */
int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
			 const struct rtw89_chan *chan)
{
	char *end = buf + bufsz;
	char *p = buf;
	s8 ofst_a, ofst_b;

	if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type)) {
		p += scnprintf(p, end - p, "no DAG is applied\n");
		return p - buf;
	}

	ofst_a = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
	ofst_b = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
	p += scnprintf(p, end - p, "ChainA offset: %d dBm\n", ofst_a);
	p += scnprintf(p, end - p, "ChainB offset: %d dBm\n", ofst_b);

	return p - buf;
}
/* Number of rate indexes per rate section (AX generation) */
static const u8 rtw89_rs_idx_num_ax[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};

/* Number of spatial streams per rate section (AX generation) */
static const u8 rtw89_rs_nss_num_ax[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};
/* Return a pointer to the by-rate TX power entry described by @desc
 * inside @head.  Unrecognized rate sections are warned and fall back to
 * the scratch field &head->trap so callers always get a writable slot.
 */
s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
			   struct rtw89_txpwr_byrate *head,
			   const struct rtw89_rate_desc *desc)
{
	switch (desc->rs) {
	case RTW89_RS_CCK:
		return &head->cck[desc->idx];
	case RTW89_RS_OFDM:
		return &head->ofdm[desc->idx];
	case RTW89_RS_MCS:
		return &head->mcs[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_HEDCM:
		return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_OFFSET:
		return &head->offset[desc->idx];
	default:
		rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
		return &head->trap;
	}
}
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl)
{
const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
struct rtw89_txpwr_byrate *byr_head;
struct rtw89_rate_desc desc = {};
s8 *byr;
u32 data;
u8 i;
for (; cfg < end; cfg++) {
byr_head = &rtwdev->byr[cfg->band][0];
desc.rs = cfg->rs;
desc.nss = cfg->nss;
data = cfg->data;
for (i = 0; i < cfg->len; i++, data >>= 8) {
desc.idx = cfg->shf + i;
byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
*byr = data & 0xff;
}
}
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
/* Back off low power levels by the worst-case TSSI deviation so the
 * reported constraint is safe against measurement tolerance.
 */
static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
	const u8 tssi_deviation_point = 0;
	const u8 tssi_max_deviation = 2;

	return dbm <= tssi_deviation_point ? dbm - tssi_max_deviation : dbm;
}
/* TX power constraint from the 6 GHz TPE (transmit power envelope)
 * advertised by the AP, converted to MAC units.  Bands other than
 * 6 GHz (or an invalid TPE) are unconstrained (S8_MAX).
 *
 * Fix: the address-of expression "&regulatory" had been corrupted into
 * the mojibake "®ulatory" ("&reg" -> "®"), which does not compile.
 */
static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
{
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
	s8 cstr = S8_MAX;

	if (band == RTW89_BAND_6G && tpe->valid)
		cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);

	return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
}
/* Read one by-rate TX power value in MAC units.  CCK rates only exist
 * on 2 GHz, so that band's table is forced for them.
 */
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
			       const struct rtw89_rate_desc *rate_desc)
{
	struct rtw89_txpwr_byrate *head;
	s8 *ent;

	if (rate_desc->rs == RTW89_RS_CCK)
		band = RTW89_BAND_2G;

	head = &rtwdev->byr[band][bw];
	ent = rtw89_phy_raw_byr_seek(rtwdev, head, rate_desc);

	return rtw89_phy_txpwr_rf_to_mac(rtwdev, *ent);
}
/* Map a 6 GHz channel number to its table index.  Channels come in
 * groups of 15 (stride 32 in channel numbers); each group subtracts the
 * next odd offset before halving.  Unknown channels warn and map to 0.
 */
static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
	if (channel_6g >= 1 && channel_6g <= 29)
		return (channel_6g - 1) / 2;
	if (channel_6g >= 33 && channel_6g <= 61)
		return (channel_6g - 3) / 2;
	if (channel_6g >= 65 && channel_6g <= 93)
		return (channel_6g - 5) / 2;
	if (channel_6g >= 97 && channel_6g <= 125)
		return (channel_6g - 7) / 2;
	if (channel_6g >= 129 && channel_6g <= 157)
		return (channel_6g - 9) / 2;
	if (channel_6g >= 161 && channel_6g <= 189)
		return (channel_6g - 11) / 2;
	if (channel_6g >= 193 && channel_6g <= 221)
		return (channel_6g - 13) / 2;
	if (channel_6g >= 225 && channel_6g <= 253)
		return (channel_6g - 15) / 2;

	rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
	return 0;
}
/* Map a 2/5 GHz channel number to its table index; 6 GHz is delegated
 * to rtw89_channel_6g_to_idx().  Unknown channels warn and map to 0.
 */
static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
	if (band == RTW89_BAND_6G)
		return rtw89_channel_6g_to_idx(rtwdev, channel);

	if (channel >= 1 && channel <= 14)
		return channel - 1;
	if (channel >= 36 && channel <= 64)
		return (channel - 36) / 2;
	if (channel >= 100 && channel <= 144)
		return (channel - 100) / 2 + 15;
	if (channel >= 149 && channel <= 177)
		return (channel - 149) / 2 + 38;

	rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
	return 0;
}
/* Resolve the TX power limit for one (band, bw, ntx, rate section,
 * beamforming, channel) tuple in MAC units.
 *
 * The regulatory limit is looked up per band (falling back to the
 * worldwide RTW89_WW entry when the domain-specific one is zero).  When
 * dynamic antenna gain applies, the DAG table limit caps the regular
 * limit after the antenna-gain offset is added.  The result is further
 * capped by SAR and the 6 GHz TPE constraint.
 */
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	s8 lmt = 0, da_lmt = S8_MAX, sar, offset = 0;
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	struct rtw89_sar_parm sar_parm = {
		.center_freq = freq,
		.ntx = ntx,
	};
	s8 cstr;

	switch (band) {
	case RTW89_BAND_2G:
		if (has_ant_gain)
			da_lmt = (*rule_da_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		/* zero means "no entry": fall back to worldwide */
		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		if (has_ant_gain)
			da_lmt = (*rule_da_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		if (has_ant_gain)
			da_lmt = (*rule_da_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	/* a zero DAG limit also means "no entry" */
	da_lmt = da_lmt ?: S8_MAX;
	if (da_lmt != S8_MAX)
		offset = rtw89_phy_ant_gain_offset(rtwdev, freq);

	lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt + offset, da_lmt));

	sar = rtw89_query_sar(rtwdev, &sar_parm);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt, sar, cstr);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
/* Fill one limit array with both the non-beamforming (bf index 0) and
 * beamforming (bf index 1) TX power limits for the given rate section
 * and channel; expects "rtwdev" in the caller's scope.
 */
#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)
/* Fill the AX TX power limit struct for a 20 MHz channel: all rate
 * sections are evaluated at the channel's own center @ch.
 */
static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch);
}
/* Fill the AX TX power limit struct for a 40 MHz channel.
 *
 * ch +/- 2 address the centers of the two 20 MHz subchannels; OFDM is
 * evaluated at the primary channel @pri_ch, and CCK at the lower
 * subchannel (ch - 2).
 */
static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch);
}
static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ax *lmt,
u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
s8 val_0p5_n[RTW89_BF_NUM];
s8 val_0p5_p[RTW89_BF_NUM];
u8 i;
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=100 H=95 G=97
¤ Dauer der Verarbeitung: 0.11 Sekunden
(vorverarbeitet)
¤
*© Formatika GbR, Deutschland