// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */
/* Push the current RF-kill configuration down to every radio (pdev)
 * of this device.
 *
 * Returns 0 when firmware does not advertise rfkill support or ACPI
 * disables rfkill handling; a negative errno on the first hard per-radio
 * failure; otherwise the last per-radio result (which may legitimately
 * be -EOPNOTSUPP and is propagated as-is).
 */
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int i, ret = 0;

	/* Firmware must advertise rfkill in its system capabilities. */
	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
		return 0;

	/* The platform ACPI tables may ask us to leave rfkill alone. */
	if (ath12k_acpi_get_disable_rfkill(ab))
		return 0;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;

		ret = ath12k_mac_rfkill_config(ar);
		/* A radio that simply lacks rfkill support is not an error. */
		if (ret && ret != -EOPNOTSUPP) {
			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
			return ret;
		}
	}

	return ret;
}
/* Decide whether a suspend/resume operation should proceed.
 *
 * Return:
 *  * a negative value: error happened, do not continue.
 *  * 0: no error but do not continue.
 *  * positive value: no error, do continue.
 */
static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
{
	struct ath12k *ar;

	if (!ab->hw_params->supports_suspend)
		return -EOPNOTSUPP;

	/* So far single_pdev_only chips have supports_suspend as true,
	 * so pdev 0 is used as the representative radio here.
	 */
	ar = ab->pdevs[0].ar;
	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
		return 0;

	return 1;
}
/* System suspend entry point: drain pending tx on every radio, then
 * pre-complete the restart completion so a later resume cannot block
 * on it (see comment below).
 */
int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	/* Wait for all in-flight tx to finish on each radio before the
	 * device is allowed to power down.
	 */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
		ret = ath12k_mac_wait_tx_complete(ar);
		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
		if (ret) {
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}
	}

	/* PM framework skips suspend_late/resume_early callbacks
	 * if other devices report errors in their suspend callbacks.
	 * However ath12k_core_resume() would still be called because
	 * here we return success thus kernel put us on dpm_suspended_list.
	 * Since we won't go through a power down/up cycle, there is
	 * no chance to call complete(&ab->restart_completed) in
	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
	 * So call it here to avoid this issue. This also works in case
	 * no error happens thus suspend_late/resume_early get called,
	 * because it will be reinitialized in ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);
/* NOTE(review): this region is corrupted/truncated. It opens as
 * ath12k_core_suspend_late() but immediately after the
 * continue-suspend check the body switches to what looks like the
 * tail of a board-data IE matcher (ath12k_core_parse_bd_ie_board
 * style: match a name IE, then capture the following data IE).
 * Variables such as board_ie_data, board_ie_len, boardname,
 * name_match_found, buf and buf_len are never declared here, and a
 * stray "} elseif" confirms the fusion. Recover the missing code
 * from the upstream driver before building.
 */
int ath12k_core_suspend_late(struct ath12k_base *ab)
{ int ret;
ret = ath12k_core_continue_suspend_resume(ab); if (ret <= 0) return ret;
/* --- unrelated IE-matcher fragment begins here (foreign function body) --- */
ret = memcmp(board_ie_data, boardname, strlen(boardname)); if (ret) goto next;
name_match_found = true;
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot found match %s for name '%s'",
ath12k_bd_ie_type_str(ie_id),
boardname);
} elseif (board_ie_id == data_id) { if (!name_match_found) /* no match found */ goto next;
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot found %s for '%s'",
ath12k_bd_ie_type_str(ie_id),
boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0; goto out;
} else {
ath12k_warn(ab, "unknown %s id found: %d\n",
ath12k_bd_ie_type_str(ie_id),
board_ie_id);
}
next: /* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
buf_len -= board_ie_len;
buf += board_ie_len;
}
/* no match found */
ret = -ENOENT;
out: return ret;
}
/* Fetch board data by walking the API-2 container file: verify the
 * board magic, then iterate fw IEs until one matching ie_id_match is
 * parsed successfully.
 *
 * NOTE(review): this function body is incomplete as extracted. After
 * requesting the firmware there is no IS_ERR() check, `data` and `len`
 * are used without ever being initialized from bd->fw, and the IE
 * iteration loop header (reading `hdr`, `ie_id`, `ie_len`) is missing —
 * the closing brace before the `out:` label has no matching opening
 * loop. Restore the missing lines from upstream before building.
 */
staticint ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab, struct ath12k_board_data *bd, constchar *boardname, int ie_id_match, int name_id, int data_id)
{
size_t len, magic_len; const u8 *data; char *filename, filepath[100];
size_t ie_len; struct ath12k_fw_ie *hdr; int ret, ie_id;
filename = ATH12K_BOARD_API2_FILE;
if (!bd->fw)
bd->fw = ath12k_core_firmware_request(ab, filename);
/* magic has extra null byte padded */
magic_len = strlen(ATH12K_BOARD_MAGIC) + 1; if (len < magic_len) {
ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
filepath, len);
ret = -EINVAL; goto err;
}
if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
ath12k_err(ab, "found invalid board magic\n");
ret = -EINVAL; goto err;
}
/* magic is padded to 4 bytes */
magic_len = ALIGN(magic_len, 4); if (len < magic_len) {
ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
filepath, len);
ret = -EINVAL; goto err;
}
/* NOTE(review): IE-walk loop header missing here — ie_id/ie_len/hdr unset. */
if (len < ALIGN(ie_len, 4)) {
ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
ret = -EINVAL; goto err;
}
if (ie_id == ie_id_match) {
ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
ie_id_match,
name_id,
data_id); if (ret == -ENOENT) /* no match found, continue */ goto next; elseif (ret) /* there was an error, bail out */ goto err; /* either found or error, so stop searching */ goto out;
}
next: /* jump over the padding */
ie_len = ALIGN(ie_len, 4);
len -= ie_len;
data += ie_len;
}
out: if (!bd->data || !bd->len) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s for %s from %s\n",
ath12k_bd_ie_type_str(ie_id_match),
boardname, filepath);
ret = -ENODATA; goto err;
}
return 0;
err:
ath12k_core_free_bdf(ab, bd); return ret;
}
/* Fetch a board file directly by filename (board data API version 1).
 *
 * On success bd->fw holds the firmware object and bd->data/bd->len
 * point into its payload. On failure bd->fw holds the ERR_PTR that
 * ath12k_core_firmware_request() returned.
 * NOTE(review): presumably bd->fw is released elsewhere (e.g. a
 * free-bdf helper) — confirm against the callers.
 */
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
				       struct ath12k_board_data *bd,
				       char *filename)
{
	bd->fw = ath12k_core_firmware_request(ab, filename);
	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	bd->data = bd->fw->data;
	bd->len = bd->fw->size;

	return 0;
}
/* Fetch the board data file, trying in order: API-2 container with the
 * full board name, API-2 with the fallback board name, then the plain
 * API-1 board.bin.
 *
 * NOTE(review): truncated as extracted — both "goto success" statements
 * target a `success:` label that is not present, and the function's tail
 * (success path logging / final return) is missing after the API-1 error
 * block. Restore from upstream before building.
 */
#define BOARD_NAME_SIZE 200 int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{ char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE]; char *filename, filepath[100]; int bd_api; int ret;
filename = ATH12K_BOARD_API2_FILE;
ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname)); if (ret) {
ath12k_err(ab, "failed to create board name: %d", ret); return ret;
}
bd_api = 2;
ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
ATH12K_BD_IE_BOARD,
ATH12K_BD_IE_BOARD_NAME,
ATH12K_BD_IE_BOARD_DATA); if (!ret) goto success;
ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname, sizeof(fallback_boardname)); if (ret) {
ath12k_err(ab, "failed to create fallback board name: %d", ret); return ret;
}
ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
ATH12K_BD_IE_BOARD,
ATH12K_BD_IE_BOARD_NAME,
ATH12K_BD_IE_BOARD_DATA); if (!ret) goto success;
bd_api = 1;
ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE); if (ret) {
ath12k_core_create_firmware_path(ab, filename,
filepath, sizeof(filepath));
ath12k_err(ab, "failed to fetch board data for %s from %s\n",
boardname, filepath); if (memcmp(boardname, fallback_boardname, strlen(boardname)))
ath12k_err(ab, "failed to fetch board data for %s from %s\n",
fallback_boardname, filepath);
ath12k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params->fw.dir); return ret;
}
/* Fetch the regulatory database, trying the API-2 container with the
 * board name, then a bus-type default board name, then the standalone
 * regdb file.
 *
 * NOTE(review): this region is corrupted. ath12k_core_fetch_regdb()
 * never returns/closes: after the `exit:` label the text jumps into
 * fragments of two other functions — a reserved-memory lookup
 * (of_parse_phandle/of_reserved_mem_lookup; `node`, `rmem`, `dev`,
 * `index` undeclared here) and an SMBIOS bdf-variant parser
 * (`smbios`, `magic`, `copied` undeclared, `return;` in a non-void
 * context). Also note the fused `gotoexit` tokens. Restore the three
 * functions from upstream before building.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{ char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE]; int ret;
ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); if (ret) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to create board name for regdb: %d", ret); gotoexit;
}
ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
ATH12K_BD_IE_REGDB,
ATH12K_BD_IE_REGDB_NAME,
ATH12K_BD_IE_REGDB_DATA); if (!ret) gotoexit;
ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
BOARD_NAME_SIZE); if (ret) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to create default board name for regdb: %d", ret); gotoexit;
}
ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
ATH12K_BD_IE_REGDB,
ATH12K_BD_IE_REGDB_NAME,
ATH12K_BD_IE_REGDB_DATA); if (!ret) gotoexit;
ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME); if (ret)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
exit: if (!ret)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
/* --- reserved-memory lookup fragment begins here (foreign function body) --- */
node = of_parse_phandle(dev->of_node, "memory-region", index); if (!node) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to parse memory-region for index %d\n", index); return NULL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node); if (!rmem) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get memory-region for index %d\n", index); return NULL;
}
/* --- SMBIOS bdf-variant parser fragment begins here (foreign function body) --- */
if (!smbios->bdf_enabled) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n"); return;
}
/* Only one string exists (per spec) */ if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant magic does not match.\n"); return;
}
len = min_t(size_t,
strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext)); for (i = 0; i < len; i++) { if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name contains non ascii chars.\n"); return;
}
}
/* Copy extension name without magic prefix */
copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic), sizeof(ab->qmi.target.bdf_ext)); if (copied < 0) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant string is longer than the buffer can accommodate\n"); return;
}
/* Bring the core up: attach WMI, init HTC, start HIF, connect HTT/WMI,
 * wait for service ready, set up REO, send WMI init, and (for
 * single_pdev_only chips) put the hardware into DBS mode.
 *
 * NOTE(review): corrupted region. ath12k_core_start() references the
 * labels err_wmi_detach/err_hif_stop/err_reo_cleanup but none of them
 * (nor the success `return 0;`) appear below — the function's unwind
 * tail is missing. After the group-ref line the text fuses in fragments
 * of at least three other functions: a pre-restart queue-stop loop over
 * hw instances (`ag`, `ah`, `ar`, `i`, `j` undeclared here), a reset
 * worker's fail-count/ratelimit checks (`fail_cont_count`,
 * `reset_count` undeclared, `return;` in a non-void context), and a WSI
 * device-tree walk (`next_wsi_dev`, `next_rx_endpoint`, `device_count`,
 * `wsi_dev` undeclared, stray `} while (...)`). Restore from upstream
 * before building.
 */
staticint ath12k_core_start(struct ath12k_base *ab)
{ int ret;
lockdep_assert_held(&ab->core_lock);
ret = ath12k_wmi_attach(ab); if (ret) {
ath12k_err(ab, "failed to attach wmi: %d\n", ret); return ret;
}
ret = ath12k_htc_init(ab); if (ret) {
ath12k_err(ab, "failed to init htc: %d\n", ret); goto err_wmi_detach;
}
ret = ath12k_hif_start(ab); if (ret) {
ath12k_err(ab, "failed to start HIF: %d\n", ret); goto err_wmi_detach;
}
ret = ath12k_htc_wait_target(&ab->htc); if (ret) {
ath12k_err(ab, "failed to connect to HTC: %d\n", ret); goto err_hif_stop;
}
ret = ath12k_dp_htt_connect(&ab->dp); if (ret) {
ath12k_err(ab, "failed to connect to HTT: %d\n", ret); goto err_hif_stop;
}
ret = ath12k_wmi_connect(ab); if (ret) {
ath12k_err(ab, "failed to connect wmi: %d\n", ret); goto err_hif_stop;
}
ret = ath12k_htc_start(&ab->htc); if (ret) {
ath12k_err(ab, "failed to start HTC: %d\n", ret); goto err_hif_stop;
}
ret = ath12k_wmi_wait_for_service_ready(ab); if (ret) {
ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
ret); goto err_hif_stop;
}
ath12k_dp_cc_config(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab); if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret); goto err_hif_stop;
}
ath12k_dp_hal_rx_desc_init(ab);
ret = ath12k_wmi_cmd_init(ab); if (ret) {
ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret); goto err_reo_cleanup;
}
ret = ath12k_wmi_wait_for_unified_ready(ab); if (ret) {
ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
ret); goto err_reo_cleanup;
}
/* put hardware to DBS mode */ if (ab->hw_params->single_pdev_only) {
ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS); if (ret) {
ath12k_err(ab, "failed to send dbs mode: %d\n", ret); goto err_reo_cleanup;
}
}
ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab); if (ret) {
ath12k_err(ab, "failed to send htt version request message: %d\n",
ret); goto err_reo_cleanup;
}
ath12k_acpi_set_dsm_func(ab);
/* Indicate the core start in the appropriate group */
ath12k_core_to_group_ref_get(ab);
/* --- ath12k_core_start tail missing; pre-restart fragment begins here --- */
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
for (i = 0; i < ag->num_hw; i++) {
ah = ath12k_ag_to_ah(ag, i); if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
ah->state == ATH12K_HW_STATE_TM) continue;
wiphy_lock(ah->hw->wiphy);
/* If queue 0 is stopped, it is safe to assume that all * other queues are stopped by driver via * ieee80211_stop_queues() below. This means, there is * no need to stop it again and hence continue
*/ if (ieee80211_queue_stopped(ah->hw, 0)) {
wiphy_unlock(ah->hw->wiphy); continue;
}
ieee80211_stop_queues(ah->hw);
for (j = 0; j < ah->num_radio; j++) {
ar = &ah->radio[j];
switch (ah->state) { case ATH12K_HW_STATE_ON:
ah->state = ATH12K_HW_STATE_RESTARTING;
for (j = 0; j < ah->num_radio; j++) {
ar = &ah->radio[j];
ath12k_core_halt(ar);
}
break; case ATH12K_HW_STATE_OFF:
ath12k_warn(ab, "cannot restart hw %d that hasn't been started\n",
i); break; case ATH12K_HW_STATE_RESTARTING: break; case ATH12K_HW_STATE_RESTARTED:
ah->state = ATH12K_HW_STATE_WEDGED;
fallthrough; case ATH12K_HW_STATE_WEDGED:
ath12k_warn(ab, "device is wedged, will not restart hw %d\n", i); break; case ATH12K_HW_STATE_TM:
ath12k_warn(ab, "fw mode reset done radio %d\n", i); break;
}
/* --- reset-worker fragment begins here (foreign function body) --- */
if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags); return;
}
/* Sometimes the recovery will fail and then the next all recovery fail, * this is to avoid infinite recovery since it can not recovery success
*/
fail_cont_count = atomic_read(&ab->fail_cont_count);
if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL) return;
if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
time_before(jiffies, ab->reset_fail_timeout)) return;
if (reset_count > 1) { /* Sometimes it happened another reset worker before the previous one * completed, then the second reset worker will destroy the previous one, * thus below is to avoid that.
*/
ath12k_warn(ab, "already resetting count %d\n", reset_count);
/* --- WSI device-tree walk fragment begins here (foreign function body) --- */
next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint); if (!next_wsi_dev) {
of_node_put(next_rx_endpoint); return -ENODEV;
}
of_node_put(next_rx_endpoint);
device_count++; if (device_count > ATH12K_MAX_DEVICES) {
ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
device_count, ATH12K_MAX_DEVICES);
of_node_put(next_wsi_dev); return -EINVAL;
}
} while (wsi_dev != next_wsi_dev);
/* Locate this device and the WSI controller within the group's WSI
 * device-tree node list.
 *
 * NOTE(review): corrupted region. ath12k_core_get_wsi_index() has no
 * closing return/brace — after the node_index check the text fuses in
 * the body of what appears to be a hw-group-assign function (find by
 * DT, else allocate; `return NULL` in an int-returning context, `wsi`
 * undeclared, fused `gotoexit`/`elseif` tokens, and an `invalid_group:`
 * label whose surrounding function is not visible). Restore both
 * functions from upstream before building.
 */
staticint ath12k_core_get_wsi_index(struct ath12k_hw_group *ag, struct ath12k_base *ab)
{ int i, wsi_controller_index = -1, node_index = -1; bool control;
for (i = 0; i < ag->num_devices; i++) {
control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller"); if (control)
wsi_controller_index = i;
if (ag->wsi_node[i] == ab->dev->of_node)
node_index = i;
}
if (wsi_controller_index == -1) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt"); return -EINVAL;
}
if (node_index == -1) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index"); return -EINVAL;
}
/* --- get_wsi_index tail missing; hw-group-assign fragment begins here --- */
/* The grouping of multiple devices will be done based on device tree file. * The platforms that do not have any valid group information would have * each device to be part of its own invalid group. * * We use group id ATH12K_INVALID_GROUP_ID for single device group * which didn't have dt entry or wrong dt entry, there could be many * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So * default group id of ATH12K_INVALID_GROUP_ID combined with * num devices in ath12k_hw_group determines if the group is * multi device or single device group
*/
ag = ath12k_core_hw_group_find_by_dt(ab); if (!ag) {
ag = ath12k_core_hw_group_alloc(ab); if (!ag) {
ath12k_warn(ab, "unable to create new hw group\n"); return NULL;
}
if (ath12k_core_get_wsi_info(ag, ab) ||
ath12k_core_get_wsi_index(ag, ab)) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get wsi info from dt, grouping single device");
ag->id = ATH12K_INVALID_GROUP_ID;
ag->num_devices = 1;
memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
wsi->index = 0;
}
gotoexit;
} elseif (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
ag->id); goto invalid_group;
} else { if (ath12k_core_get_wsi_index(ag, ab)) goto invalid_group; gotoexit;
}
invalid_group:
ag = ath12k_core_hw_group_alloc(ab); if (!ag) {
ath12k_warn(ab, "unable to create new hw group\n"); return NULL;
}
/* Create the SoC core for every device that is a member of the hw
 * group. Stops and returns the error of the first device whose SoC
 * creation fails; devices are handled under their own core_lock.
 *
 * Caller must hold ag->mutex.
 */
static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i, ret;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ret = ath12k_core_soc_create(ab);
		mutex_unlock(&ab->core_lock);

		if (ret) {
			ath12k_err(ab, "failed to create soc core: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
/* Determine whether the whole hw group is MLO capable.
 *
 * Single-device groups take the answer from that device alone; for
 * multi-device groups, every member's firmware must advertise MLO
 * support or the whole group is marked incapable. Does nothing in
 * factory-test (FTM) mode.
 *
 * Caller must hold ag->mutex.
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (ath12k_ftm_mode)
		return;

	lockdep_assert_held(&ag->mutex);

	if (ag->num_devices == 1) {
		ab = ag->ab[0];

		/* QCN9274 firmware uses firmware IE for MLO advertisement */
		if (ab->fw.fw_features_valid) {
			ag->mlo_capable =
				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
			return;
		}

		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
		ag->mlo_capable = ab->single_chip_mlo_support;
		return;
	}

	/* Multi-device group: assume capable, then veto on the first
	 * member whose firmware lacks the MLO feature.
	 */
	ag->mlo_capable = true;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
			ag->mlo_capable = false;
			return;
		}
	}
}
/* Driver-core init for one device: register the panic notifier, assign
 * the device to a hw group, and create the group once all expected
 * members have probed.
 *
 * NOTE(review): truncated as extracted. The labels
 * err_unregister_notifier and err_destroy_hw_group are referenced but
 * never defined below, and the function ends mid-body after
 * `ab->device_id = 0;` (no group unlock, no return, no closing brace —
 * the trailing lines of the file are unrelated website text). Restore
 * the tail from upstream before building.
 */
int ath12k_core_init(struct ath12k_base *ab)
{ struct ath12k_hw_group *ag; int ret;
ret = ath12k_core_panic_notifier_register(ab); if (ret)
ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
mutex_lock(&ath12k_hw_group_mutex);
ag = ath12k_core_hw_group_assign(ab); if (!ag) {
mutex_unlock(&ath12k_hw_group_mutex);
ath12k_warn(ab, "unable to get hw group\n");
ret = -ENODEV; goto err_unregister_notifier;
}
mutex_unlock(&ath12k_hw_group_mutex);
mutex_lock(&ag->mutex);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
ag->num_devices, ag->num_probed);
if (ath12k_core_hw_group_create_ready(ag)) {
ret = ath12k_core_hw_group_create(ag); if (ret) {
mutex_unlock(&ag->mutex);
ath12k_warn(ab, "unable to create hw group\n"); goto err_destroy_hw_group;
}
}
/* Device index used to identify the devices in a group. * * In Intra-device MLO, only one device present in a group, * so it is always zero. * * In Inter-device MLO, Multiple device present in a group, * expect non-zero value.
*/
ab->device_id = 0;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.